Dataset schema (one row per repository):

| column | dtype | stats |
| --- | --- | --- |
| repo_name | stringlengths | min 5, max 114 |
| repo_url | stringlengths | min 24, max 133 |
| snapshot_id | stringlengths | min 40, max 40 |
| revision_id | stringlengths | min 40, max 40 |
| directory_id | stringlengths | min 40, max 40 |
| branch_name | stringclasses | 209 values |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | min 9.83k, max 683M |
| star_events_count | int64 | min 0, max 22.6k |
| fork_events_count | int64 | min 0, max 4.15k |
| gha_license_id | stringclasses | 17 values |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_language | stringclasses | 115 values |
| files | listlengths | min 1, max 13.2k |
| num_files | int64 | min 1, max 13.2k |
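Rows matching this schema can be inspected with the Hugging Face `datasets` library. Below is a minimal sketch, assuming the dump is published as a loadable dataset; the id `org/source-code-dump` is a hypothetical placeholder, and the column names are taken from the schema above.

```python
from itertools import islice

from datasets import load_dataset

# "org/source-code-dump" is a hypothetical placeholder id, not a real dataset name.
ds = load_dataset("org/source-code-dump", split="train", streaming=True)

for row in islice(ds, 2):
    # Scalar metadata columns, one value per repository.
    print(row["repo_name"], row["branch_name"], row["num_files"])
    # "files" is a list of per-file records carrying metadata plus the raw text.
    for f in row["files"]:
        print(f"  {f['path']} ({f['language']}, {f['length_bytes']} bytes)")
```

Streaming avoids downloading the full dataset before iteration, which matters here because the `files` list can hold up to 13.2k entries in a single row.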

repo_name: ldct/raymond-backend
repo_url: https://github.com/ldct/raymond-backend
snapshot_id: be9620c92a14e2a598feac99c6942ad9ef846d8e
revision_id: acc7d1fbbdd8c4fa436dd3f05209f26f8ba96b68
directory_id: 7663db4330e0fc2911d3022736b6624c6a82d9e8
branch_name: refs/heads/master
visit_date: 2021-05-28T17:56:50.357510
revision_date: 2015-01-18T14:50:00
committer_date: 2015-01-18T14:50:00
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5662691593170166, "alphanum_fraction": 0.5706984400749207, "avg_line_length": 24.521739959716797, "blob_id": "10f7f1851b4fa0e227d1adc2f82763dccdb6bfd4", "content_id": "bbcde809c7342fba763c84f2c225e2bf500697e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2935, "license_type": "no_license", "max_line_length": 117, "num_lines": 115, "path": "/app.py", "repo_name": "ldct/raymond-backend", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport binascii, os\n\nimport bottle\nfrom bottle import response, request\n\nfrom binascii import unhexlify\n\nimport json\n\[email protected]('after_request')\ndef enable_cors():\n response.headers['Access-Control-Allow-Origin'] = '*'\n response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'\n response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'\n\[email protected]('/')\ndef health():\n response.content_type = 'application/json'\n if os.path.isdir(\"./task_data\"):\n return { 'status': 'ok' }\n else:\n return {\n 'status': 'bad',\n 'message': 'task_data directory does not exist'\n\t}\[email protected]('/task')\ndef create_task():\n response.content_type = 'application/json'\n\n task_id = binascii.hexlify(os.urandom(8))\n category = request.query.get('category')\n img_bytes = request.body.read()\n\n os.mkdir('./task_data/' + task_id)\n\n with open('./task_data/' + task_id + '/status', 'a') as f:\n f.write('queued\\n')\n\n with open('./task_data/' + task_id + '/image.jpg', 'wb') as f:\n f.write(img_bytes)\n\n with open('./task_data/' + task_id + '/category', 'w') as f:\n f.write(category)\n\n with open('./task_data/' + task_id + '/result.json', 'w') as f:\n f.write('{}')\n\n print(task_id)\n return task_id\n\[email protected]('/task_data/<filename:path>')\ndef get_task_data(filename):\n return bottle.static_file(filename, root='./task_data')\n\[email protected]('/tasks.html')\ndef list_tokens():\n\n def make_li(dirname):\n return \"<li > <a href='/task_data/%s/status'> %s </a> </li>\" % (dirname, dirname)\n\n response.content_type = 'text/html'\n return \"\"\"\n <h1> Hi </h1>\n\n <ol>\n\n \"\"\" + \\\n \" \".join(make_li(dirname) for dirname in os.listdir('./task_data')) + \\\n \"\"\"\n\n </ol>\n\n \"\"\"\n\[email protected]('/batch_tasks')\ndef batch_tasks():\n\n response.content_type = \"application/json\"\n\n def get_refresh(token):\n try:\n with open('./task_data/' + token + '/status', 'r') as f:\n if 'done' not in f.read():\n return None\n else:\n\n with open('./task_data/' + token + '/result.json', 'r') as f:\n data = json.load(f)\n\n return (token, data)\n except IOError:\n pass\n\n tokens = request.query.get('tokens').split(',')\n refreshed = list(get_refresh(token) for token in tokens)\n\n refreshed = [r for r in refreshed if r is not None]\n\n ret = {}\n for (token, res) in refreshed:\n ret[token] = res\n\n return ret\n\[email protected]('/task/<name>')\ndef get_task(name):\n with open ('./task_data/' + name + '/status', 'r') as f:\n return '\\n'.join(f.readlines())\n\ntry:\n bottle.run(host='0.0.0.0', port=80, debug=False )\nexcept:\n bottle.run(host='localhost', port=8080, debug=True)\n" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 17, "blob_id": "452cf307190535e88e849d689a6f9e3ab9824d78", "content_id": "8f2d1a28502a050700ea44795943198afa3a507b", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Markdown", "length_bytes": 18, "license_type": "no_license", "max_line_length": 17, "num_lines": 1, "path": "/README.md", "repo_name": "ldct/raymond-backend", "src_encoding": "UTF-8", "text": "# raymond-backend\n" } ]
num_files: 2

repo_name: PorterDalton1/Text_Editor
repo_url: https://github.com/PorterDalton1/Text_Editor
snapshot_id: 988430f0862fc851d70a820fc5e0c45fcf789724
revision_id: c8b54582c1e259968a69b805916b8f116045cce1
directory_id: 8c6d2c46bd8ec8f445e2090dbccbaa7cef648fa2
branch_name: refs/heads/master
visit_date: 2021-01-03T05:18:43.461307
revision_date: 2020-02-12T06:36:33
committer_date: 2020-02-12T06:36:33
github_id: 239,938,960
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6021034121513367, "alphanum_fraction": 0.605609118938446, "avg_line_length": 30.72222137451172, "blob_id": "7faac4ac18f4531d8501d24d921f15f4f3bbd79b", "content_id": "1483d4e52616e1805d0eaed0d0b1c92a4c915505", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1141, "license_type": "no_license", "max_line_length": 79, "num_lines": 36, "path": "/texteditorGUI.py", "repo_name": "PorterDalton1/Text_Editor", "src_encoding": "UTF-8", "text": "import tkinter as tk\n\nclass MainWindow:\n def __init__(self, root):\n self.root = root\n self.textWidget = tk.Text(self.root, height = 30, width = 70)\n self.textWidget.pack(side = tk.LEFT, fill = tk.BOTH, expand = tk.YES)\n #Create Menu\n self.topMenu = tk.Menu(self.root)\n self.root.config(menu=self.topMenu)\n #Add menus to menu\n self.fileMenu = tk.Menu(self.topMenu)\n self.topMenu.add_cascade(label=\"File\", menu=self.fileMenu)\n\n self.fileMenu.add_command(label=\"Open\", command = self.open)\n self.fileMenu.add_command(label=\"Save\", command = self.save)\n self.fileMenu.add_command(label=\"Save As\", command = self.saveAs)\n self.fileMenu.add_command(label=\"New Tab\", command = self.newTab)\n self.fileMenu.add_command(label=\"New Window\", command = self.newWindow)\n def save(self):\n pass\n def saveAs(self):\n pass\n def open(self):\n pass\n def newTab(self):\n pass\n def newWindow(self):\n pass\ndef main():\n root = tk.Tk()\n MainWindow(root)\n root.mainloop()\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.5208675861358643, "alphanum_fraction": 0.5218534469604492, "avg_line_length": 33.57954406738281, "blob_id": "57ce9993b7d850871d9ecd964635061610481eb6", "content_id": "0f34f6f38b813150c93da0f3a4d7dc55d1fcc3b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3043, "license_type": "no_license", "max_line_length": 94, "num_lines": 88, "path": "/textStack.py", "repo_name": "PorterDalton1/Text_Editor", "src_encoding": "UTF-8", "text": "\"\"\"\nThis is a stack created by a linked list. This will be used for\nthe text editor logic.\n\"\"\"\nclass Stack:\n \"\"\"Class used with a first in first out basis. Uses a linked list as the data structure\"\"\"\n\n def __init__(self, initStack=[]):\n \"\"\"Initializer for the stack class. 
Defualts with an empty linked list\"\"\"\n self.head = None\n if not isinstance(initStack, list):\n raise ValueError(\"When initializing the stack, you must use a list object\")\n if initStack: #If there are items in the stack\n for i in initStack[::-1]:\n self.append(i)\n\n class __Node:\n \"\"\"A private node class used as the data points in the linked list\"\"\"\n def __init__(self, data, next = None):\n \"\"\"Initializer for the Node\"\"\"\n self.data = data\n self.next = next\n\n def append(self, item):\n \"\"\"Adds an item to the stack\"\"\"\n self.head = self.__Node(item, self.head)\n\n def pop(self):\n \"\"\"Removes the top element in the stack and returns the value\"\"\"\n if self.head is None:\n raise IndexError(\"You can't use pop() with no elements in the stack\")\n tmp = self.head\n self.head = self.head.next\n return tmp.data\n\n def peek(self):\n \"\"\"Returns, but doesn't remove, the top element from the stack\"\"\"\n if self.head is None:\n raise IndexError(\"You can't use peek() with no elements in the stack\")\n tmp = self.head.data\n return tmp\n \n def __str__(self):\n \"\"\"Returns the stack as a string object\"\"\"\n output = \"[\"\n if self.head is None:\n return output + \"]\"\n cursor = self.head\n while cursor.next is not None:\n if isinstance(cursor.data, str):\n output += \"'\" + str(cursor.data) + \"', \"\n else:\n output += str(cursor.data) + \", \"\n cursor = cursor.next\n if cursor.next is None:\n if isinstance(cursor.data, str):\n output += \"'\" + str(cursor.data) + \"'\"\n else:\n output += str(cursor.data)\n return output + \"]\"\n \n def __repr__(self):\n \"\"\"Returns the stack as a string\"\"\"\n output = \"[\"\n if self.head is None:\n return output + \"]\"\n cursor = self.head\n while cursor.next is not None:\n if isinstance(cursor.data, str):\n output += \"'\" + str(cursor.data) + \"', \"\n else:\n output += str(cursor.data) + \", \"\n cursor = cursor.next\n if cursor.next is None:\n if isinstance(cursor.data, str):\n output += \"'\" + str(cursor.data) + \"'\"\n else:\n output += str(cursor.data)\n return output + \"]\"\n\n def __len__(self):\n \"\"\"Returns the length of the stack\"\"\"\n cursor = self.head\n length = 0\n while cursor is not None:\n length += 1\n cursor = cursor.next\n return length\n" } ]
num_files: 2

repo_name: dajose/code_challenges
repo_url: https://github.com/dajose/code_challenges
snapshot_id: eab6d5c7ef3e5247caa6ab62e0707831a5984630
revision_id: b888df3e7e5d0353ed144857ff2f8e9e82fe20d8
directory_id: 73f7ed83e9d98c31e2712bdbcb3f7816b02a5308
branch_name: refs/heads/master
visit_date: 2020-05-29T08:51:47.190449
revision_date: 2018-03-17T01:50:06
committer_date: 2018-03-18T22:19:30
github_id: 69,635,636
star_events_count: 9
fork_events_count: 11
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5454545617103577, "alphanum_fraction": 0.5454545617103577, "avg_line_length": 16.600000381469727, "blob_id": "b03b49e1c01a57b42b5d2e906ee896c2b7d7ec57", "content_id": "1a70a17350dc5a5225c2b0d1e361add303c54e4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 88, "license_type": "no_license", "max_line_length": 57, "num_lines": 5, "path": "/elevator_maintenance/solution.py", "repo_name": "dajose/code_challenges", "src_encoding": "UTF-8", "text": "def answer(l):\n\n l.sort(key=lambda a : [int(b) for b in a.split('.')])\n\n return l\n" }, { "alpha_fraction": 0.5182390213012695, "alphanum_fraction": 0.5333333611488342, "avg_line_length": 30.760000228881836, "blob_id": "f866561dabd312939536e9d3f03f6fd7ac059995", "content_id": "83c03c101f8c563cc4c80b5395d70ad2e34266d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 795, "license_type": "no_license", "max_line_length": 92, "num_lines": 25, "path": "/please_pass_the_coded_messages/solution.py", "repo_name": "dajose/code_challenges", "src_encoding": "UTF-8", "text": "def answer(l):\n # The idea is that the sum of all digits must be divisible by 3\n # All multiples of 3 meet that so, lets start there\n res = [i for i in l if i % 3 == 0]\n\n # remove then from l (there should be a better way)\n l = [i for i in l if i % 3 != 0]\n l.sort(reverse=True)\n\n partial = []\n start = 0\n\n for start, _ in enumerate(l):\n end = len(l)\n while end >= start:\n window = l[0:start] + l[end:len(l)]\n the_sum = sum(window)\n if the_sum % 3 == 0:\n\t\tif len(window) > len(partial) or (len(window) == len(partial) and the_sum > sum(partial)):\n partial = window\n end -= 1\n res = res + partial\n res.sort(reverse=True)\n res = int(''.join(str(d) for d in res) or 0)\n return res\n\n" }, { "alpha_fraction": 0.3663771152496338, "alphanum_fraction": 0.4404700994491577, "avg_line_length": 19.38541603088379, "blob_id": "13688d2a5382e55b51c32eb2bb66a9a8fa77a566", "content_id": "42959e29be61a1f593aad9a3f5eb47ea8a1ac7ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1957, "license_type": "no_license", "max_line_length": 68, "num_lines": 96, "path": "/doomsday_fuel/solution.py", "repo_name": "dajose/code_challenges", "src_encoding": "UTF-8", "text": "from fractions import gcd\nfrom fractions import Fraction as f\n\n\ndef compute_probabilies(m):\n res = [f(0, 1)] * len(m)\n terminal_states = []\n for i, row in enumerate(m):\n if sum(row) == 0:\n # It is a terminal state\n terminal_states.append(i)\n continue\n\n total = sum(row)\n p_past = []\n for j, element in enumerate(row):\n res[j] = f(element, total)\n if i == 0:\n continue\n\n if j < i and m[j][i]:\n p_past.append(f(m[j][i], (1 - res[j] * m[j][i])))\n continue\n\n last = 0\n ii = 0\n while ii < i:\n last += f(m[ii][j], (1 - (res[ii] * m[ii][ii + 1])))\n ii += 1\n\n res[j] = (res[j] * sum(p_past)) + last\n\n print('partial res {}: '.format(res[:]))\n m[i] = res[:]\n\n print(terminal_states)\n return [e for i, e in enumerate(res) if i in terminal_states]\n\n\ndef answer(m):\n probabilities = compute_probabilies(m)\n print(probabilities)\n denominator = reduce(gcd, probabilities)\n print(denominator)\n return [\n (f(p, denominator)).numerator for p in probabilities\n ] + [denominator.denominator]\n\n\nprint(1)\nm = [\n [0, 1, 0, 0, 0, 1],\n [4, 0, 0, 3, 2, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n 
[0, 0, 0, 0, 0, 0],\n ]\nres = answer(m)\nassert res == [0, 3, 2, 9, 14], res\n\nprint(2)\nm = [\n [0, 2, 1, 0, 0],\n [0, 0, 0, 3, 4],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]\n]\nres = answer(m)\n\nassert res == [7, 6, 8, 21], res\n\nprint(3)\nm = [\n [0, 1, 0, 0, 1],\n [0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [1, 0, 0, 0, 0],\n [0, 0, 3, 1, 0]\n]\n\nres = answer(m)\nassert res == [1, 1], res\n\nprint(4444)\nm = [\n [0, 1, 0, 1, 0],\n [0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0],\n [0, 0, 1, 0, 0]\n]\n\nres = answer(m)\nassert res == [1, 100], res\n" }, { "alpha_fraction": 0.30219781398773193, "alphanum_fraction": 0.4403846263885498, "avg_line_length": 26.57575798034668, "blob_id": "2c34b40035f2bff0ba5621796be87bd76cec353d", "content_id": "6d956a7127a815ccf6486c3b1c3bd5bbb4813478", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3640, "license_type": "no_license", "max_line_length": 71, "num_lines": 132, "path": "/prepare_the_bunnies_escape/solution.py", "repo_name": "dajose/code_challenges", "src_encoding": "UTF-8", "text": "from logging import getLogger\nlog = getLogger(__name__)\n\nfrom collections import deque\n\n\ndef answer(maze):\n return search_path(maze, [len(maze) - 1, len(maze[0]) - 1], [0, 0])\n\n\ndef search_path(maze, start, goal):\n queue = deque([(start, 0)])\n weight = {str(start): 1}\n walked_walls = {str(start): 0}\n while queue:\n position, walls = queue.popleft()\n for option, wall in step(maze, position, allow_wall=walls < 1):\n if (\n # First time here\n str(option) not in weight or\n # There is less weight by this new route\n weight[str(option)] > weight[str(position)] + 1 or\n # Same weight but less walls\n (weight[str(option)] == weight[str(position)] + 1 and\n walked_walls[str(option)] > walls + wall)\n ):\n weight[str(option)] = weight[str(position)] + 1\n walked_walls[str(option)] = walls + wall\n queue.append([option, walls + wall])\n\n return weight.get(str(goal), 999999999)\n\n\ndef step(maze, position, allow_wall=True):\n \"\"\"\n Get a list of posible next step\n\n :rtype: list(tuple(tuple(int), bool))\n :return: list with tuples (next step, there-is-wall)\n \"\"\"\n directions = [(0, 1), (1, 0), [-1, 0], [0, -1]]\n options = filter(\n lambda x: (\n all([\n # ignore positions that are off the grid\n x[0] >= 0, x[1] >= 0,\n x[0] < len(maze), x[1] < len(maze[0])\n ]) and\n # go through wall\n (maze[x[0]][x[1]] == 0 or allow_wall)\n ),\n # Search all posible directions\n [\n map(sum, zip(position, direction))\n for direction in directions\n ]\n )\n\n return [\n (option, maze[option[0]][option[1]])\n for option in options\n ]\n\n\nfrom logging import basicConfig, DEBUG\n\nbasicConfig(level=DEBUG)\n\nmaze1 = [\n [0, 1, 1, 0],\n [0, 0, 0, 1],\n [1, 1, 0, 0],\n [1, 1, 1, 0]\n]\n\nmaze2 = [\n [0, 0, 1, 1, 0, 0, 0],\n [1, 0, 0, 0, 1, 1, 0],\n [1, 1, 1, 1, 1, 0, 0],\n [0, 1, 1, 1, 1, 0, 0],\n [0, 1, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0]\n]\n\nmaze3 = [\n [0, 0, 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0],\n [1, 1, 1, 1, 1, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0,0,0,0,0,0,0,0,0,0],\n [0, 1, 1, 1, 1, 1, 0, 0, 0,0,0,0,0,0,0,0,0,0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0],\n 
[0, 0, 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0,0,0,0,0,0,0,0,0,0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n]\n\nmaze4 = [\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1],\n [0, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 0]\n]\n\nmaze5 = [\n [0, 0],\n [0, 0]\n]\n\nmaze6 = [\n [0, 1, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 0],\n [1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 0]\n]\nassert answer(maze1) == 7\nassert answer(maze2) == 14\nassert answer(maze3) == 34\nassert answer(maze4) == 11\nassert answer(maze5) == 3\nassert answer(maze6) == 21\n" } ]
num_files: 4

repo_name: BullsEye34/NextUPProject
repo_url: https://github.com/BullsEye34/NextUPProject
snapshot_id: 61e6a346ff3fd54357e5b90d9165ec31ac81915d
revision_id: 147e0b925514f57a0005c4238f9f5f5d6a2e382a
directory_id: cbfb6db4051218045f779626d50fab2aab5d4bc4
branch_name: refs/heads/master
visit_date: 2023-05-02T23:41:49.355834
revision_date: 2020-07-26T13:38:20
committer_date: 2020-07-26T13:38:20
github_id: 280,177,464
star_events_count: 0
fork_events_count: 2
gha_license_id: null
gha_created_at: 2020-07-16T14:36:12
gha_updated_at: 2020-07-26T13:38:23
gha_pushed_at: 2023-04-20T17:53:16
gha_language: Jupyter Notebook
files:
[ { "alpha_fraction": 0.6175752878189087, "alphanum_fraction": 0.6383238434791565, "avg_line_length": 21.345455169677734, "blob_id": "890667006a76b18b24dd2b2318d9cd3f485c3539", "content_id": "232ca121375a765ebf26f0842c177cee7900f598", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2458, "license_type": "no_license", "max_line_length": 91, "num_lines": 110, "path": "/Web/next-up/src/scripts/main.py", "repo_name": "BullsEye34/NextUPProject", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\n\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\n\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.ensemble import RandomForestRegressor\n\nfrom sklearn.preprocessing import LabelEncoder\n\nfrom flask import Flask\nfrom flask import request\nimport json\n\nfrom flask_cors import CORS\n\n\n\"\"\" from sklearn.tree import DecisionTreeRegressor \"\"\"\n\napp = Flask(__name__)\ncors = CORS(app)\n\n\ndataset = pd.read_csv('cleandataset.csv')\nprint(dataset)\nle = LabelEncoder()\n\ndataset.Location = le.fit_transform(dataset.Location)\nprint(dataset)\n\nct = ColumnTransformer(\n [(\"Location\", OneHotEncoder(), [0])], remainder='passthrough')\ndataset = ct.fit_transform(dataset)\nprint(dataset)\n\nX = dataset[:, 1:-1]\ny = dataset[:, -1]\n\nprint(X)\nprint(y)\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.5, random_state=0)\n\nmodel = RandomForestRegressor(max_depth=5, random_state=14)\n\n\"\"\" model = DecisionTreeRegressor(max_depth=10, random_state=14) \"\"\"\nmodel.fit(X_train, y_train)\n\n\nprint(\"RANDOM FOREST REGRESSOR\")\n\nprint(\"TRAINING DATA ACCURACY - \")\nprint(model.score(X_train, y_train))\n\nprint(\"TESTING DATA ACCURACY - \")\nprint(model.score(X_test, y_test))\n\nYpred = model.predict([[0, 0, 0, 3, 1, 2050, 5, 3]])\nprint(Ypred)\n\n\n\"\"\" @app.route('/time')\ndef get_current_time():\n return {'prediction': dataset}\napp.run(debug=True) \"\"\"\n\n\ndef myconverter(o):\n if isinstance(o, np.float32):\n return float(o)\n\n\nage = 0\nfur = 1\[email protected]('/predict',)\ndef get_prediction():\n city = request.args.get('city')\n age = request.args.get('age')\n floor = request.args.get('floor')\n bhk = request.args.get('bhk')\n sqft = request.args.get('sqft')\n furnish = request.args.get('furnish')\n\n if furnish == \"true\":\n fur = 1\n else:\n fur = 0\n\n if city == \"Jayanagar\":\n return {\n 'prediction': str(str(model.predict([[0, 1, 0, bhk, fur, sqft, age, floor]]))),\n }\n elif city == \"MG Road\":\n return {\n 'prediction': str(str(model.predict([[0, 0, 1, bhk, fur, sqft, age, floor]]))),\n }\n elif city == \"Bommanahalli\":\n return {\n 'prediction': str(str(model.predict([[1, 0, 0, bhk, fur, sqft, age, floor]]))),\n }\n else:\n return {\n 'prediction': str(str(model.predict([[0, 0, 0, 3, 1, 2050, 5, 3]]))),\n }\n\n\napp.run(debug=True)\n" }, { "alpha_fraction": 0.4555858373641968, "alphanum_fraction": 0.47465941309928894, "avg_line_length": 33.9523811340332, "blob_id": "581e77533725c46b0246871595e74b8fa3b67224", "content_id": "6a9531efcbb7b06554ff50697f559567cbd18bc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3672, "license_type": "no_license", "max_line_length": 199, "num_lines": 105, "path": "/Web/next-up/src/result.js", "repo_name": "BullsEye34/NextUPProject", "src_encoding": "UTF-8", "text": "import React from 'react';\nimport 
'./App.css';\nimport { makeStyles } from '@material-ui/core/styles';\nimport back from './assets/house.png';\nimport Grid from '@material-ui/core/Grid';\nimport CardContent from '@material-ui/core/CardContent';\nimport Button from \"@material-ui/core/Button\";\nimport Typography from '@material-ui/core/Typography';\nimport Card from '@material-ui/core/Card';\n\n\n\nconst useStyles = makeStyles({\n root: {\n minHeight: 700,\n zIndex: 10,\n backgroundColor: 'white',\n borderRadius: 20,\n boxShadow: 'rgba(0, 0, 0, 0.1) 0px 1px 350px, rgba(0, 0, 0, 0.1) 0px 1px 350px'\n },\n bullet: {\n display: 'inline-block',\n margin: '0 2px',\n transform: 'scale(0.8)',\n },\n title: {\n fontSize: 20,\n },\n pos: {\n marginBottom: 12,\n },\n});\n\nfunction Result() {\n const [predict, setPredict] = React.useState('');\n\n function getUser() {\n\n fetch(`http://127.0.0.1:5000/predict`)\n .then(function (response) {\n return response.json();\n })\n .then(function (json) {\n console.log(json);\n setPredict(json.prediction)\n });\n };\n getUser();\n if (window.innerWidth <= 500) {\n\n } else if (window.innerWidth > 500) {\n\n }\n\n const classes = useStyles();\n\n return (\n <div className=\"App\">\n <link\n rel=\"stylesheet\"\n href=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.5.0/css/bootstrap.min.css\"\n integrity=\"sha384-9aIt2nRpC12Uk9gS9baDl411NQApFmC26EwAOH8WgZl5MYYxFfc+NcPb1dKGj7Sk\"\n crossorigin=\"anonymous\"\n />\n <header className=\"background\">\n\n <div className=\"Background-image\">\n\n <Grid container spacing={3}\n justify=\"center\" style={{ zIndex: 1, }}>\n <Grid item xs={9}>\n <Typography variant=\"h2\" component=\"h2\" style={{ color: \"grey\" }}>\n House Price Detection\n </Typography>\n <br></br>\n <Card className={classes.root} >\n <CardContent>\n <Typography className={classes.title} color=\"textSecondary\" gutterBottom>\n Well, based on Whatever you've given us.... <br></br>We think that your dream property might cost you around<br></br><br></br></Typography>\n <Typography style={{ fontSize: 50 }} color=\"textSecondary\" gutterBottom>₹ {predict.toString().substring(1, 10)}<br></br><br></br></Typography>\n <Typography className={classes.title} color=\"textSecondary\" gutterBottom>You can click on the button below, so that we can get you in touch with one of our agents.\n </Typography><br></br>\n <br></br>\n <Button\n variant=\"contained\"\n color=\"primary\"\n style={{ backgroundColor: \"#681AFF\", color: 'white', fontSize: 20, width: (window.innerWidth / 4) }}\n disableElevation>\n Real Estate Agent</Button>\n </CardContent>\n\n </Card><br></br>\n </Grid>\n </Grid>\n\n <img src={back} className=\"image\"></img>\n </div>\n\n </header>\n </div>\n );\n}\n\n\nexport default Result;\n" }, { "alpha_fraction": 0.7882353067398071, "alphanum_fraction": 0.7941176295280457, "avg_line_length": 27.33333396911621, "blob_id": "e8d4504e31e0802f71221a42c6d1db31868096bb", "content_id": "1b4c294f1e718fde55d283c412c06e6d58f499e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 170, "license_type": "no_license", "max_line_length": 46, "num_lines": 6, "path": "/main.py", "repo_name": "BullsEye34/NextUPProject", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\ndataset = pd.read_csv(\"assets/data.csv\")\nprint(dataset.describe())\ndataset.shape[0]\n" } ]
num_files: 3

repo_name: amegahed1994/poc-cli
repo_url: https://github.com/amegahed1994/poc-cli
snapshot_id: 0796ab0c5f3460473d321b38af2c56a2e58fc99c
revision_id: ad1e09a412fe7e02e07140b800b04b8fd0ae280c
directory_id: 6b20e525d4ea863dad76d310b754278fabc54559
branch_name: refs/heads/main
visit_date: 2023-04-01T21:25:49.304480
revision_date: 2021-04-05T05:54:03
committer_date: 2021-04-05T05:54:03
github_id: 354,620,180
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6200000047683716, "alphanum_fraction": 0.6200000047683716, "avg_line_length": 9, "blob_id": "1c837cea175fda8f98eecfa9c69a53d49d015adb", "content_id": "0fac529d4d1890225295fa05e6a4b1454344ad30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50, "license_type": "no_license", "max_line_length": 24, "num_lines": 5, "path": "/__init__.py", "repo_name": "amegahed1994/poc-cli", "src_encoding": "UTF-8", "text": "from src.cli import root\n\n\ndef main():\n root()\n" }, { "alpha_fraction": 0.6164565086364746, "alphanum_fraction": 0.6164565086364746, "avg_line_length": 30.39583396911621, "blob_id": "30d078c78a8b28266b86a22797f0069a766e10c4", "content_id": "9dc951aae8253d35b64ad0562c329032450faffd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1507, "license_type": "no_license", "max_line_length": 183, "num_lines": 48, "path": "/src/scripts/bq/datasets/compare.py", "repo_name": "amegahed1994/poc-cli", "src_encoding": "UTF-8", "text": "import logging\n\nfrom google.cloud import bigquery\n\nlogger = logging.getLogger(__name__)\n\n\ndef list_tables(dataset_id):\n tables = bigquery.Client().list_tables(dataset_id)\n\n return {table.table_id for table in tables}\n\n\ndef compare(src_dataset_id, dest_dataset_id):\n equals = True # init\n\n src_tables = list_tables(src_dataset_id)\n dest_tables = list_tables(dest_dataset_id)\n\n if src_tables.symmetric_difference(dest_tables):\n equals = False\n logger.info(\n f\"These datasets contain the following symmetric difference: {src_tables ^ dest_tables}\"\n )\n\n client = bigquery.Client() \n tables_intersection = src_tables.intersection(dest_tables)\n\n for table in tables_intersection:\n src_table = client.get_table(f\"{src_dataset_id}.{table}\")\n dest_table = client.get_table(f\"{dest_dataset_id}.{table}\")\n\n if (\n src_table.num_rows == dest_table.num_rows\n and src_table.num_bytes == dest_table.num_bytes\n ):\n logger.debug(f\"{src_dataset_id}.{table} equals {dest_dataset_id}.{table}\")\n else:\n equals = False\n logger.info(\n f\"{src_dataset_id}.{table} *doesn't* equal {dest_dataset_id}.{table}.\"\n )\n\n logger.info(\n f\"Stats: the former contains {src_table.num_rows} rows totalling {src_table.num_bytes} bytes as compared to {dest_table.num_rows} rows & {dest_table.num_bytes} bytes.\"\n )\n\n return equals\n" }, { "alpha_fraction": 0.57337886095047, "alphanum_fraction": 0.5904436707496643, "avg_line_length": 21.538461685180664, "blob_id": "605dc0273c991bc7aede4a72095b60f2306a3893", "content_id": "caf984b6e85930a3cdfced56ada63ca596dbc05c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 293, "license_type": "no_license", "max_line_length": 56, "num_lines": 13, "path": "/setup.py", "repo_name": "amegahed1994/poc-cli", "src_encoding": "UTF-8", "text": "from setuptools import setup, find_packages\n\nsetup(\n name=\"poc-cli\",\n version=\"0.0.1\",\n packages=find_packages(),\n python_requires='>=3.6',\n install_requires=[\"click\", \"google-cloud-bigquery\"],\n entry_points=\"\"\"\n [console_scripts]\n mvt=__init__:main\n \"\"\",\n)\n" }, { "alpha_fraction": 0.6292335391044617, "alphanum_fraction": 0.6372548937797546, "avg_line_length": 20.169811248779297, "blob_id": "cf760d5dbe19c397437978dfd4668d4cedbaf571", "content_id": "1856419d0e1b7064a7e216fe81572e1386f14be4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", 
"length_bytes": 1122, "license_type": "no_license", "max_line_length": 154, "num_lines": 53, "path": "/README.md", "repo_name": "amegahed1994/poc-cli", "src_encoding": "UTF-8", "text": "## mvt-cli:\n\nA command line interface (built using Click) whose's goal is to unify all of our scripts. Click reduces boilerplate code significantly and is easy to use.\n\n## Installation\n\n1. Clone this repository\n ```sh\n git clone <URL>\n ```\n2. Create a virtual environment\n ```sh\n virtualenv venv\n ```\n3. Activate the environment\n ```sh\n . venv/bin/activate\n ```\n5. Install the python application\n ```sh\n python3 install ./poc-cli\n ```\n4. Start mvt-cli, hurray!\n ```sh\n mvt\n ```\n \n## Example usage:\n\nLet's use this cli to compare 2 BigQuery datasets; to see if they are equal (in terms of data).\n\n ```sh\n mvt bq datasets compare --from-json=sample.json\n ```\n\nwhere `sample.json` needs to conform to the following format:\n\n```json\n[\n {\n \"src_dataset_name\":\"mydataset\",\n \"src_project_id\":\"sourceproject\",\n \"dest_dataset_name\":\"mydataset\",\n \"dest_project_id\":\"destinationproject\"\n },\n {\n \"src_dataset_name\":\"mydataset2\",\n \"src_project_id\":\"sourceproject\",\n \"dest_dataset_name\":\"mydataset2\",\n \"dest_project_id\":\"destinationproject\"\n }\n]\n```\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.738095223903656, "avg_line_length": 20.5, "blob_id": "e7a397cb50dbc67a85be065cc1807293eb95a6c3", "content_id": "685d221e32ba9b7d460e6ea8197a7a8dfcbc6194", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 42, "license_type": "no_license", "max_line_length": 29, "num_lines": 2, "path": "/requirements.txt", "repo_name": "amegahed1994/poc-cli", "src_encoding": "UTF-8", "text": "click==7.1.2\ngoogle-cloud-bigquery==2.13.1" }, { "alpha_fraction": 0.6510663628578186, "alphanum_fraction": 0.6510663628578186, "avg_line_length": 23.114286422729492, "blob_id": "5290c11e8b501cfe1d88e1538f51c735f8945d1c", "content_id": "6b03bb27338622bf9afbdb6611d12608d5957b8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1688, "license_type": "no_license", "max_line_length": 88, "num_lines": 70, "path": "/src/cli.py", "repo_name": "amegahed1994/poc-cli", "src_encoding": "UTF-8", "text": "import json\nimport logging\n\nimport click\n\nfrom .scripts.bq.datasets.compare import compare as bq_datasets_compare\n\n\[email protected]()\[email protected](\n \"--log-level\",\n type=click.Choice([\"ERROR\", \"WARNING\", \"INFO\", \"DEBUG\"]),\n default=\"INFO\",\n show_default=True,\n help=\"The desired logging level.\",\n)\ndef root(log_level):\n \"Goal of this command-line interface is to centralize mmvt's scripts.\"\n logging.basicConfig(level=log_level)\n\n\[email protected]()\ndef bq():\n \"Scripts that performs QA on datasets post copy/migration.\"\n pass\n\n\[email protected]()\ndef datasets():\n pass\n\n\[email protected]()\[email protected](\n \"-f\",\n \"--from-json\",\n required=True,\n type=click.File(\"r\"),\n help=\"Path to the input json file.\",\n)\ndef compare(from_json):\n \"Compares datasets lazily by checking their metadata.\"\n\n for dataset in json.load(from_json):\n src_dataset_id = f\"{dataset['src_project_id']}.{dataset['src_dataset_name']}\"\n dest_dataset_id = f\"{dataset['dest_project_id']}.{dataset['dest_dataset_name']}\"\n result = bq_datasets_compare(src_dataset_id, dest_dataset_id)\n click.echo(f\"{src_dataset_id} == {dest_dataset_id} 
resolves to: {result}\")\n\n\[email protected]()\ndef bqts():\n \"Scripts that generates bqts transfer-configs to an output file.\"\n pass\n\n\[email protected]()\[email protected](\n \"-f\",\n \"--from-json\",\n type=click.File(\"r\"),\n required=True,\n help=\"Path to the json file.\",\n)\[email protected](\n \"-f\", \"--to-json\", type=click.File(\"w\"), default=\"-\", help=\"Path to the json file.\"\n)\ndef create(from_json, to_json):\n \"Creates transfer configurations using params loaded from a file.\"\n to_json.write(from_json.read())\n" } ]
num_files: 6

repo_name: g2-inc/openc2-oif-orchestrator
repo_url: https://github.com/g2-inc/openc2-oif-orchestrator
snapshot_id: 819ec4a02dd9c475e1279fc38b53a05e39f021b8
revision_id: 85102bb41aa0d558a3fa088e4fd6f51613599ad0
directory_id: 6ff44f941c5e6486eae3e26a4e3371b2c6b547c4
branch_name: refs/heads/master
visit_date: 2020-05-17T09:49:39.800493
revision_date: 2020-04-30T19:10:24
committer_date: 2020-04-30T19:10:24
github_id: 183,642,877
star_events_count: 1
fork_events_count: 0
gha_license_id: Apache-2.0
gha_created_at: 2019-04-26T14:27:15
gha_updated_at: 2019-04-19T14:10:32
gha_pushed_at: 2018-04-03T14:42:07
gha_language: null
files:
[ { "alpha_fraction": 0.5770533680915833, "alphanum_fraction": 0.5795935392379761, "avg_line_length": 25.539325714111328, "blob_id": "31a78336d22b2de83ef9adafd87d535374d4ec18", "content_id": "a4624adc2be3b88db6f4b962fcf2132bb25a3fdc", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2362, "license_type": "permissive", "max_line_length": 117, "num_lines": 89, "path": "/orchestrator/gui/client/src/components/command/pages/generate/lib/json_field/map.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\n\nimport {\n Button,\n Collapse,\n Form,\n FormGroup,\n FormText,\n Input,\n Label,\n} from 'reactstrap'\n\nimport {\n isOptional_json,\n Field\n} from './'\n\nimport { FontAwesomeIcon } from '@fortawesome/react-fontawesome'\nimport { faMinusSquare, faPlusSquare } from '@fortawesome/free-solid-svg-icons'\n\nimport * as GenActions from '../../../../../../actions/generate'\n\n\nclass MapField extends Component {\n constructor(props, context) {\n super(props, context)\n\n this.state = {\n open: false,\n }\n }\n\n render() {\n let parent = \"\"\n if (this.props.parent) {\n parent = [this.props.parent, this.props.name].join('.')\n } else if (this.props.name.match(/^[a-z]/)) {\n parent = this.props.name\n }\n\n let def_opts = []\n if (this.props.def.hasOwnProperty(\"properties\")) {\n def_opts = Object.keys(this.props.def.properties).map((field, i) => (\n <Field\n key={ i }\n parent={ parent }\n name={ field }\n def={ this.props.def.properties[field] }\n required={ isOptional_json(this.props.def.required, field) }\n optChange={ this.props.optChange }\n />\n ))\n }\n\n if (this.props.def.hasOwnProperty(\"patternProperties\")) {\n // TODO: Pattern Properties\n console.log(\"Map Pattern Props\", this.props.def.patternProperties)\n }\n\n\n return (\n <FormGroup tag=\"fieldset\" className=\"border border-dark p-2\">\n <legend>\n <Button\n color={ this.state.open ? \"primary\" : \"info\" }\n className='float-right p-1'\n onClick={ () => this.setState(prevState => ({ open: !prevState.open })) }\n >\n <FontAwesomeIcon icon={ this.state.open ? faMinusSquare : faPlusSquare } size=\"lg\" />\n </Button>\n { (this.props.required ? '*' : '') + this.props.name }\n </legend>\n { this.props.def.description != '' ? 
<FormText color=\"muted\">{ this.props.def.description }</FormText> : '' }\n <Collapse isOpen={ this.state.open }>\n <div className=\"col-12 my-1 px-0\">\n { def_opts }\n </div>\n </Collapse>\n </FormGroup>\n )\n }\n}\n\nconst mapStateToProps = (state) => ({\n schema: state.Generate.selectedSchema\n})\n\nexport default connect(mapStateToProps)(MapField)\n" }, { "alpha_fraction": 0.5646341443061829, "alphanum_fraction": 0.5701773762702942, "avg_line_length": 33.29657745361328, "blob_id": "9f761eb8590c572a44891a801b4d3573a69bf4fe", "content_id": "d4fac338b1ce31d3a23c3d1a5ab5c63ca9c4bf1a", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9020, "license_type": "permissive", "max_line_length": 123, "num_lines": 263, "path": "/orchestrator/core/orc_server/command/views/actions.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import bleach\nimport json\nimport time\nimport uuid\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom dynamic_preferences.registries import global_preferences_registry\n\n# Local imports\nfrom actuator.models import Actuator, ActuatorProfile\nfrom device.models import Device\nfrom orchestrator.models import Protocol, Serialization\nfrom tracking import log\nfrom utils import get_or_none, safe_cast\nfrom ..models import SentHistory, ResponseHistory\n\nglobal_preferences = global_preferences_registry.manager()\n\n\nclass Validator:\n _usr: get_user_model()\n _cmd: dict\n _actuator: str\n _channel: dict\n\n def __init__(self, usr, cmd: dict, actuator: str, channel: dict):\n \"\"\"\n Process a command prior to sending it to the specified actuator(s)/profile\n :param usr: user sending command\n :param cmd: OpenC2 command\n :param actuator: actuator/profile receiving command\n :param channel: serialization & protocol to send the command\n \"\"\"\n self._usr = usr\n self._cmd = cmd or {}\n self._actuator = actuator\n self._channel = channel or {}\n\n def validate(self):\n \"\"\"\n Validate given the class vars\n :return: response tuple or data tuple\n \"\"\"\n err = self._val_user()\n if err:\n return err\n\n err = self._val_cmd()\n if err:\n return err\n\n actuators = None\n err = self._val_actuator()\n if err:\n if isinstance(err, (Actuator, list)):\n actuators = err\n else:\n return err\n\n protocol, serialization = self._val_channel(actuators)\n\n return actuators, protocol, serialization\n\n def _val_user(self):\n if self._usr is None:\n log.error(msg=\"invalid user attempted to send a command\")\n return dict(\n detail=\"user invalid\",\n response=\"User Invalid: command must be send by a valid user\"\n ), 401\n return None\n\n def _val_cmd(self):\n if len(self._cmd.keys()) == 0:\n log.error(usr=self._usr, msg=\"User attempted to send an empty command\")\n return dict(\n detail=\"command invalid\",\n response=\"Command Invalid: command cannot be empty\"\n ), 400\n\n # TODO: Validate command\n return None\n\n def _val_actuator(self):\n if self._actuator is None: # TODO: Actuator broadcast??\n log.error(usr=self._usr, msg=\"User attempted to send to a null actuator\")\n return dict(\n detail=\"actuator invalid\",\n response=\"Actuator Invalid: actuator cannot be none\"\n ), 400\n\n act_type = self._actuator.split(\"/\", 1)\n if len(act_type) != 2:\n log.error(usr=self._usr, msg=f\"User attempted to send to an 
invalid actuator - {self._actuator}\")\n return dict(\n detail=\"actuator invalid\",\n response=\"Actuator Invalid: application error\"\n ), 400\n\n _type, _act_prof = act_type\n _type = bleach.clean(str(_type))\n _act_prof = bleach.clean(str(_act_prof).replace(\"_\", \" \"))\n\n if _type == \"actuator\": # Single Actuator\n actuators = get_or_none(Actuator, actuator_id=_act_prof)\n rtn = [actuators, ]\n if actuators is None:\n rtn = dict(\n detail=\"actuator invalid\",\n response=\"Actuator Invalid: actuator must be specified with a command\"\n ), 404\n return rtn\n\n if _type == \"profile\": # Profile Actuators\n actuators = get_or_none(ActuatorProfile, name__iexact=_act_prof)\n if actuators is None:\n return dict(\n detail=f\"profile cannot be found\",\n response=f\"Profile Invalid: profile must be a valid registered profile with the orchestrator\"\n ), 400\n return list(Actuator.objects.filter(profile__iexact=_act_prof.replace(\" \", \"_\")))\n\n return dict(\n detail=\"actuator invalid\",\n response=\"Actuator Invalid: application error\"\n ), 400\n\n def _val_channel(self, act: Actuator):\n if len(act) == 1:\n act = act[0]\n if isinstance(act, Actuator):\n dev = get_or_none(Device, device_id=act.device.device_id)\n\n proto = self._channel.get(\"protocol\", None)\n if proto:\n proto = get_or_none(dev.transport, protocol__name=bleach.clean(str(proto)))\n proto = proto.protocol if proto else None\n\n serial = self._channel.get(\"serialization\", None)\n if serial:\n serial = get_or_none(Serialization, name=bleach.clean(str(serial)))\n\n return proto, serial\n return None, None\n\n\ndef get_headers(proto: Protocol, com: SentHistory, proto_acts, serial: Serialization):\n orc_ip = global_preferences.get(\"orchestrator__host\", \"127.0.0.1\")\n orc_id = global_preferences.get(\"orchestrator__id\", \"\")\n corr_id = com.coap_id or str(com.command_id)\n\n headers = dict(\n source=dict(\n orchestratorID=orc_id,\n transport=dict(\n type=proto.name,\n socket=f\"{orc_ip}:{proto.port}\"\n ),\n correlationID=corr_id,\n date=f\"{com.received_on:%a, %d %b %Y %X %Z}\"\n ),\n destination=[]\n )\n\n for act in proto_acts:\n com.actuators.add(act)\n trans = act.device.transport.filter(protocol__name=proto.name).first()\n encoding = (serial if serial else trans.serialization.first()).name.lower()\n\n dev = [d for d in headers[\"destination\"] if d[\"deviceID\"] == str(act.device.device_id)]\n profile = str(act.profile).lower()\n\n if len(dev) == 1:\n idx = headers[\"destination\"].index(dev[0])\n headers[\"destination\"][idx][\"profile\"].append(profile)\n\n else:\n dest = dict(\n deviceID=str(act.device.device_id),\n socket=f\"{trans.host}:{trans.port}\",\n profile=[profile],\n encoding=encoding\n )\n if trans.protocol.pub_sub:\n dest.update(\n topic=trans.topic,\n channel=trans.channel\n )\n\n headers[\"destination\"].append(dest)\n return headers\n\n\ndef action_send(usr, cmd: dict, actuator: str, channel: dict):\n \"\"\"\n Process a command prior to sending it to the specified actuator(s)/profile\n :param usr: user sending command\n :param cmd: OpenC2 command\n :param actuator: actuator/profile receiving command\n :param channel: serialization & protocol to send the command\n :return: response Tuple(dict, int)\n \"\"\"\n val = Validator(usr, cmd, actuator, channel)\n actuators, protocol, serialization = val.validate()\n\n # Store command in db\n cmd_id = cmd.get(\"id\", uuid.uuid4())\n if get_or_none(SentHistory, command_id=cmd_id):\n return dict(\n command_id=[\n \"This ID is used by another 
command.\"\n ]\n ), 400\n\n com = SentHistory(command_id=cmd_id, user=usr, command=cmd)\n try:\n com.save()\n except ValueError as e:\n return dict(\n detail=\"command error\",\n response=str(e)\n ), 400\n\n # Process Actuators that should receive command\n processed_acts = set()\n\n # Process Protocols\n for proto in [protocol] if protocol else Protocol.objects.all():\n proto_acts = [a for a in actuators if a.device.transport.filter(protocol__name=proto.name).exists()]\n proto_acts = list(filter(lambda a: a.id not in processed_acts, proto_acts))\n processed_acts.update({act.id for act in proto_acts})\n\n if len(proto_acts) >= 1:\n if proto.name.lower() == \"coap\" and com.coap_id == b\"\":\n com.gen_coap_id()\n com.save()\n\n # Send command to transport\n log.info(usr=usr, msg=f\"Send command {com.command_id}/{com.coap_id.hex()} to buffer\")\n settings.MESSAGE_QUEUE.send(\n msg=json.dumps(cmd),\n headers=get_headers(proto, com, proto_acts, serialization),\n routing_key=proto.name.lower().replace(\" \", \"_\")\n )\n\n wait = safe_cast(global_preferences.get(\"command__wait\", 1), int, 1)\n rsp = None\n for _ in range(wait):\n rsp = get_or_none(ResponseHistory, command=com)\n if rsp:\n break\n time.sleep(1)\n\n rsp = [r.response for r in rsp] if hasattr(rsp, \"__iter__\") else ([rsp.response] if hasattr(rsp, \"response\") else None)\n\n return dict(\n detail=f\"command {'received' if rsp is None else 'processed'}\",\n response=rsp if rsp else \"pending\",\n command_id=com.command_id,\n command=com.command,\n wait=wait\n ), 200\n" }, { "alpha_fraction": 0.6268116235733032, "alphanum_fraction": 0.6280193328857422, "avg_line_length": 27.586206436157227, "blob_id": "ef7ed17914e5488eb063e7026b1aa53e562eea48", "content_id": "0107d4c0be9367f094afa20c60474b70021a9d38", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 828, "license_type": "permissive", "max_line_length": 97, "num_lines": 29, "path": "/orchestrator/gui/client/src/store/refreshMiddleware.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "// Automatically refresh the authentication JWT before it expires\nimport { isRSAA } from 'redux-api-middleware'\nimport * as AuthActions from '../actions/auth'\nimport {\n differenceInMinutes,\n fromUnixTime,\n toDate\n} from 'date-fns'\n\nexport default ({ getState }) => {\n return next => action => {\n if (isRSAA(action)) {\n let auth = getState().Auth\n\n if (auth.access) {\n let exp = fromUnixTime(auth.access.exp)\n let orig_iat = fromUnixTime(auth.access.orig_iat)\n let diff = differenceInMinutes(exp, orig_iat)\n\n if (differenceInMinutes(new Date(), orig_iat) > (diff-5) && !auth.refresh) {\n return next(AuthActions.refreshAccessToken(auth.access.token)).then(() => next(action))\n } else {\n return next(action)\n }\n }\n }\n return next(action)\n }\n}" }, { "alpha_fraction": 0.6468926668167114, "alphanum_fraction": 0.6468926668167114, "avg_line_length": 23.136363983154297, "blob_id": "91fcf702ff365d1edeba8029cf59bb7be3da2162", "content_id": "ac4fe4fbda58b08448665eb414d94fd92e247d91", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1062, "license_type": "permissive", 
"max_line_length": 98, "num_lines": 44, "path": "/orchestrator/core/orc_server/orchestrator/apps.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import atexit\nimport sys\n\nfrom django.apps import AppConfig\nfrom django.conf import settings\n\nfrom utils import MessageQueue\n\n\nclass OrchestratorConfig(AppConfig):\n name = 'orchestrator'\n _FALSE_READY = (\n 'runserver',\n 'orchestrator.wsgi',\n 'uwsgi'\n )\n\n def ready(self):\n \"\"\"\n App ready, init runtime objects\n :return: None\n \"\"\"\n if all(state not in sys.argv for state in self._FALSE_READY):\n return\n\n from command.processors import command_response # pylint: disable=import-outside-toplevel\n settings.MESSAGE_QUEUE = MessageQueue(**settings.QUEUE, callbacks=[command_response])\n\n\[email protected]\ndef shutdown(*args, **kwargs):\n \"\"\"\n App shutdown and cleanup\n :return: None\n \"\"\"\n\n if isinstance(settings.MESSAGE_QUEUE, MessageQueue):\n settings.MESSAGE_QUEUE.shutdown()\n\n try:\n import uwsgi # pylint: disable=import-outside-toplevel\n print(f\"worker {uwsgi.worker_id()} has passed\")\n except ModuleNotFoundError:\n pass\n" }, { "alpha_fraction": 0.7585139274597168, "alphanum_fraction": 0.7585139274597168, "avg_line_length": 20.53333282470703, "blob_id": "a0af7d76c610a968390fedccb2f91c146bf2b720", "content_id": "8b67d623e3eb3b455c2d7a0fae2a42a7b6539a72", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 323, "license_type": "permissive", "max_line_length": 55, "num_lines": 15, "path": "/orchestrator/core/orc_server/backup/urls.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.urls import include, path\nfrom rest_framework_files import routers\n\nfrom . 
import views\n\nrouter = routers.ImportExportRouter()\nrouter.register('actuator', views.ActuatorImportExport)\nrouter.register('device', views.DeviceImportExport)\n\n\nurlpatterns = [\n # Router Views\n path('', include(router.urls)),\n\n]\n" }, { "alpha_fraction": 0.6736658215522766, "alphanum_fraction": 0.6736658215522766, "avg_line_length": 30.77777862548828, "blob_id": "decf89a3ba529e9478918c5f5146b01ba7beac62", "content_id": "7dfbfff599ee2204980bd09772e7272c081126c2", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1143, "license_type": "permissive", "max_line_length": 126, "num_lines": 36, "path": "/orchestrator/gui/server/gui_server/webApp/jwt_handlers.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom calendar import timegm\nfrom rest_framework_jwt.settings import api_settings\n\n\ndef jwt_payload_handler(user):\n \"\"\"\n Custom payload handler\n Token encrypts the dictionary returned by this function, and can be decoded by rest_framework_jwt.utils.jwt_decode_handler\n :param user: user instance to create a JWT token\n :return: create JWT token dict\n \"\"\"\n return dict(\n username=user.username,\n email=user.email,\n admin=(user.is_staff or user.is_superuser),\n exp=datetime.utcnow() + api_settings.JWT_EXPIRATION_DELTA,\n orig_iat=timegm(datetime.utcnow().utctimetuple())\n )\n\n\ndef jwt_response_payload_handler(token, user=None, request=None):\n \"\"\"\n Custom response payload handler.\n This function controls the custom payload after login or token refresh. This data is returned through the web API.\n :param token: JWT token to validate\n :param user: user the token is created\n :param request: request instance\n :return:\n \"\"\"\n return {\n 'token': token,\n 'user': {\n 'username': user.username,\n }\n }" }, { "alpha_fraction": 0.6403287053108215, "alphanum_fraction": 0.6460176706314087, "avg_line_length": 28.849056243896484, "blob_id": "a042201482dd2717a1c9dda0eedad7bf86c33ee4", "content_id": "7a128bfd4742802cf74655f98cc196920aeadc92", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1582, "license_type": "permissive", "max_line_length": 103, "num_lines": 53, "path": "/orchestrator/core/orc_server/account/views/api.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import bleach\nimport coreapi\nimport coreschema\n\nfrom django.contrib.auth.models import Group, User\nfrom rest_framework import permissions\nfrom rest_framework.decorators import api_view, permission_classes, schema\nfrom rest_framework.exceptions import ParseError\nfrom rest_framework.response import Response\n\n# Local imports\nfrom utils import OrcSchema\n\n\n@api_view(['DELETE'])\n@permission_classes((permissions.IsAdminUser,))\n@schema(OrcSchema(\n fields=[\n coreapi.Field(\n \"username\",\n required=True,\n location=\"path\",\n schema=coreschema.String(\n description='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.'\n )\n ),\n coreapi.Field(\n \"actuator_id\",\n required=True,\n location=\"path\",\n schema=coreschema.String(\n description='Required. 150 characters or fewer. 
Letters, digits, and spaces only.'\n )\n )\n ]\n))\ndef actuatorDelete(request, username, actuator_id, *args, **kwargs): # pylint: disable=unused-argument\n \"\"\"\n API endpoint that removes an actuator from a users access.\n \"\"\"\n user = User.objects.get(username=username)\n if user is None:\n return ParseError(detail='User cannot be found', code=404)\n\n rtn = []\n actuator = bleach.clean(actuator_id)\n\n group = Group.objects.exclude(actuatorgroup__isnull=True).filter(name=actuator).first()\n if group is not None:\n rtn.append(group.name)\n user.groups.remove(group)\n\n return Response(rtn)\n" }, { "alpha_fraction": 0.6116328239440918, "alphanum_fraction": 0.6142957210540771, "avg_line_length": 26.44230842590332, "blob_id": "eeb3a177fe15634a0e79553db21bf9a843fe921e", "content_id": "a62faec1c876c26f9c53f822434d6002fc6ca05c", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7135, "license_type": "permissive", "max_line_length": 120, "num_lines": 260, "path": "/orchestrator/core/orc_server/actuator/models.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import uuid\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.db.models.signals import post_save, pre_save\nfrom django.dispatch import receiver\nfrom drf_queryfields import QueryFieldsMixin\nfrom jsonfield import JSONField\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\n\n# Local imports\nfrom device.models import Device, DeviceSerializer\nfrom utils import prefixUUID\n\n\ndef defaultName():\n \"\"\"\n Unique name generation\n :return: 30 character\n \"\"\"\n return prefixUUID('Actuator', 30)\n\n\nclass Actuator(models.Model):\n \"\"\"\n Actuator instance base\n \"\"\"\n actuator_id = models.UUIDField(\n default=uuid.uuid4,\n help_text=\"Unique UUID of the actuator\",\n unique=True\n )\n name = models.CharField(\n default=defaultName,\n help_text=\"Unique display name of the actuator\",\n max_length=30,\n unique=True\n )\n device = models.ForeignKey(\n Device,\n blank=True,\n default=None,\n help_text=\"Device the actuator is located on\",\n null=True,\n on_delete=models.CASCADE\n )\n schema = JSONField(\n blank=True,\n help_text=\"Schema of the actuator\",\n null=True\n )\n schema_format = models.CharField(\n choices=((f.lower(), f.upper()) for f in settings.SCHEMA_FORMATS),\n default='jadn',\n help_text=f\"Format of the schema ({'|'.join(f.upper() for f in settings.SCHEMA_FORMATS)}), set from the schema\",\n max_length=4\n )\n profile = models.CharField(\n default='N/A',\n help_text=\"Profile of the actuator, set from the schema\",\n max_length=60\n )\n\n @property\n def url_name(self):\n return self.name.lower().replace(' ', '_')\n\n def __str__(self):\n return '{} on {}'.format(self.name, self.device)\n\n\nclass AbstractGroup(models.Model):\n \"\"\"\n Actuator Group base model\n \"\"\"\n name = models.CharField(\n max_length=80,\n help_text=\"Unique name of the group\",\n unique=True\n )\n\n @property\n def actuator_count(self):\n if hasattr(self, \"actuators\"):\n return self.actuators.count()\n return 0\n\n def __str__(self):\n return self.name\n\n def natural_key(self):\n return self.name\n\n class Meta:\n abstract = True\n\n\nclass ActuatorGroup(AbstractGroup):\n \"\"\"\n Actuator 
Group instance base\n \"\"\"\n users = models.ManyToManyField(\n User,\n blank=True,\n help_text=\"Users in the group\"\n )\n\n actuators = models.ManyToManyField(\n Actuator,\n blank=True,\n help_text=\"Actuators available to users in the group\"\n )\n\n @property\n def user_count(self):\n \"\"\"\n get the number of users in the group\n :return: users count of group\n \"\"\"\n if hasattr(self, \"users\"):\n return self.users.count()\n return 0\n\n class Meta:\n verbose_name = 'group'\n verbose_name_plural = 'groups'\n\n\nclass ActuatorProfile(AbstractGroup):\n \"\"\"\n Actuator Profile instance base\n \"\"\"\n actuators = models.ManyToManyField(\n Actuator,\n blank=True,\n help_text=\"Actuators of the groups profile\"\n )\n\n class Meta:\n verbose_name = 'profile'\n verbose_name_plural = 'profiles'\n\n\n@receiver(pre_save, sender=Actuator)\ndef actuator_pre_save(sender, instance=None, **kwargs):\n \"\"\"\n Set the profile name base on the actuators schema\n :param sender: model \"sending\" the action - Actuator\n :param instance: SENDER instance\n :param kwargs: key/value args\n :return: None\n \"\"\"\n profile = 'None'\n schema_keys = set(instance.schema.keys())\n\n if isinstance(instance.schema, dict):\n if len(schema_keys - {\"meta\", \"types\"}) == 0: # JADN\n instance.schema_format = 'jadn'\n profile = instance.schema.get('meta', {}).get('title', '').replace(' ', '_')\n profile = 'None' if profile in ('', ' ', None) else profile\n else: # JSON\n instance.schema_format = 'json'\n profile = instance.schema.get('title', '').replace(' ', '_')\n profile = 'None' if profile in ('', ' ', None) else profile\n\n instance.profile = profile\n\n\n@receiver(post_save, sender=Actuator)\ndef actuator_post_save(sender, instance=None, **kwargs):\n \"\"\"\n Set the profile group based on the saved schema\n :param sender: model \"sending\" the action - Actuator\n :param instance: SENDER instance\n :param kwargs: key/value args\n :return: None\n \"\"\"\n if instance is not None:\n profile_name = instance.profile.replace('_', ' ')\n\n # Check for old profile groups\n for old_profile in instance.actuatorprofile_set.all():\n old_profile.actuators.remove(instance)\n if old_profile.actuators.count() == 0:\n old_profile.delete()\n\n # Create Profile Group\n profile_group, _ = ActuatorProfile.objects.get_or_create(name=profile_name)\n\n # Add Actuator\n profile_group.actuators.add(instance)\n\n if instance.device is not None:\n # Meta Schema??\n # print(instance.device)\n pass\n\n\nclass ActuatorSerializer(QueryFieldsMixin, serializers.ModelSerializer):\n \"\"\"\n Actuator API Serializer\n \"\"\"\n actuator_id = serializers.UUIDField(format='hex_verbose')\n device = serializers.SlugRelatedField(\n queryset=Device.objects.all(),\n slug_field='device_id'\n )\n schema = serializers.JSONField()\n\n def create(self, validated_data):\n dev = validated_data.get('device')\n if dev:\n if dev.actuator_set.count() == 1 and not dev.multi_actuator:\n raise ValidationError(\"Cannot add actuators to a combination device/actuator\")\n\n return super().create(validated_data)\n\n class Meta:\n model = Actuator\n fields = ('actuator_id', 'name', 'device', 'profile', 'schema')\n read_only_fields = ('profile',)\n\n\nclass ActuatorSerializerReadOnly(ActuatorSerializer):\n \"\"\"\n Actuator Extra API Serializer\n \"\"\"\n device = serializers.SerializerMethodField()\n\n def get_device(self, instance):\n d = DeviceSerializer(instance.device).data\n d['transport'] = list(map(dict, d['transport']))\n return d\n\n class Meta:\n model = 
Actuator\n fields = ('actuator_id', 'name', 'device', 'profile', 'schema')\n read_only_fields = ('actuator_id', 'name', 'device', 'profile', 'schema')\n\n\nclass ActuatorGroupSerializer(QueryFieldsMixin, serializers.ModelSerializer):\n \"\"\"\n Actuator Group API Serializer\n \"\"\"\n name = serializers.CharField(max_length=80)\n users = serializers.SlugRelatedField(\n queryset=User.objects.all(),\n slug_field='username'\n )\n actuators = serializers.SlugRelatedField(\n queryset=Actuator.objects.all(),\n slug_field='name'\n )\n\n class Meta:\n model = ActuatorGroup\n fields = ('name', 'users', 'actuators')\n read_only_fields = ('profile',)\n" }, { "alpha_fraction": 0.6023374199867249, "alphanum_fraction": 0.6146239042282104, "avg_line_length": 51.96825408935547, "blob_id": "da152f603f227df04825dd794658b417af9859ad", "content_id": "8609f52d8a5fb7339a8f0af6742a94d23938f32c", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3337, "license_type": "permissive", "max_line_length": 175, "num_lines": 63, "path": "/orchestrator/core/orc_server/device/migrations/0001_initial.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2 on 2019-04-04 18:39\n\nimport device.models\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('orchestrator', '0001_initial'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Device',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('device_id', models.UUIDField(default=uuid.uuid4, help_text='Unique ID of the device', unique=True)),\n ('name', models.CharField(default=device.models.defaultName, help_text='Unique display name of the device', max_length=30, unique=True)),\n ('note', models.TextField(blank=True, help_text='Extra information about the device', null=True)),\n ],\n options={\n 'permissions': (('use_device', 'Can use device'),),\n },\n ),\n migrations.CreateModel(\n name='Transport',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('transport_id', models.CharField(default=device.models.shortID, editable=False, help_text='Unique ID of the transport', max_length=30, unique=True)),\n ('host', models.CharField(default='127.0.0.1', help_text='Hostname/IP of the device', max_length=60)),\n ('port', models.IntegerField(default=8080, help_text='Port of the device')),\n ('exchange', models.CharField(default='exchange', help_text='Exchange for the specific device, only necessary for Pub/Sub protocols', max_length=30)),\n ('routing_key', models.CharField(default='routing_key', help_text='Routing Key for the specific device, only necessary for Pub/Sub protocols', max_length=30)),\n ('protocol', models.ForeignKey(help_text='Protocol supported by the device', on_delete=django.db.models.deletion.CASCADE, to='orchestrator.Protocol')),\n ('serialization', models.ManyToManyField(help_text='Serialization(s) supported by the device', to='orchestrator.Serialization')),\n ],\n ),\n migrations.CreateModel(\n name='DeviceGroup',\n fields=[\n ('id', models.AutoField(auto_created=True, 
primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(help_text='Unique display name of the device group', max_length=80, unique=True)),\n ('devices', models.ManyToManyField(blank=True, help_text='Devices available to users in the group', to='device.Device')),\n ('users', models.ManyToManyField(blank=True, help_text='Users in the group', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'group',\n 'verbose_name_plural': 'groups',\n },\n ),\n migrations.AddField(\n model_name='device',\n name='transport',\n field=models.ManyToManyField(help_text='Transports the device supports', to='device.Transport'),\n ),\n ]\n" }, { "alpha_fraction": 0.6838046312332153, "alphanum_fraction": 0.6947301030158997, "avg_line_length": 26.298246383666992, "blob_id": "887f71caa1e7116fb9dff253abec83416ed9d05b", "content_id": "fc55838ab83ee13e34193f7da55e529e50bd331e", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1556, "license_type": "permissive", "max_line_length": 94, "num_lines": 57, "path": "/orchestrator/gui/server/gui_server/webApp/urls.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.conf import settings\nfrom django.contrib import admin\nfrom django.urls import include, path\n\nfrom rest_framework.schemas import get_schema_view\n\nfrom rest_framework_swagger.views import get_swagger_view\n\nfrom . import views\n\nadmin.site.site_title = 'OpenC2 Orchestrator Administration'\nadmin.site.site_header = 'OpenC2 Orchestrator Admin'\nadmin.site.index_title = 'OpenC2 Orchestrator'\n\n# Catch all URL\nhandler400 = views.bad_request\nhandler403 = views.permission_denied\nhandler404 = views.page_not_found\nhandler500 = views.server_error\n\napi_patterns = [\n # Root Info\n path('', views.api_root, name='api.root'),\n\n # Account App\n path('account/', include('account.urls')),\n\n # Orchestrator App\n # path('orchestrator/', include('orchestrator.urls.api'), name='orchestrator.api_root'),\n\n # Schema\n path('schema/', include([\n path('', get_schema_view(title='OpenC2 Orchestrator API'), name='api.schema'),\n path('swagger/', get_swagger_view(title='OpenC2 Orchestrator API'), name='api.schema')\n ])),\n]\n\ngui_patterns = [\n # Account URLs - Needed for schema views by user permissions\n path('account/', include('django.contrib.auth.urls')),\n]\n\nif settings.ADMIN_GUI is True:\n # Admin GUI URLs\n gui_patterns.append(path('admin/', admin.site.urls))\nelse:\n # Admin GUI Redirect\n gui_patterns.append(path(r'admin/', views.gui_redirect))\n\n\nurlpatterns = [\n # API Patterns\n path('api/', include(api_patterns), name='api'),\n\n # GUI Patterns\n path('', include(gui_patterns), name='gui')\n]\n" }, { "alpha_fraction": 0.6993007063865662, "alphanum_fraction": 0.6993007063865662, "avg_line_length": 30.77777862548828, "blob_id": "65b937cdda19a751d00aaaa35eca79bf675bb8d3", "content_id": "c8da2de9bd623f305f188df1cd1d1a47152ad538", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 286, "license_type": "permissive", "max_line_length": 66, "num_lines": 9, "path": "/logger/gui/config/config.eslint.js", "repo_name": "g2-inc/openc2-oif-orchestrator", 
"src_encoding": "UTF-8", "text": "/* eslint import/no-unresolved: off, import/no-self-import: off */\nrequire('@babel/register');\nconst package = require('../package.json');\n\nconst config = require('./dev.config.babel').default;\nconfig.externals = [...Object.keys(package.dependencies || {})];\n\n\nmodule.exports = config;\n" }, { "alpha_fraction": 0.7580645084381104, "alphanum_fraction": 0.7580645084381104, "avg_line_length": 11.600000381469727, "blob_id": "f9acb3628e3da3e439e8efe108724326de61cd29", "content_id": "71cc951c5828d1e7824f64b9468472cffca2416c", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 62, "license_type": "permissive", "max_line_length": 35, "num_lines": 5, "path": "/orchestrator/gui/client/src/components/actuator/lib/index.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import ActuatorModal from './modal'\n\nexport {\n ActuatorModal\n}" }, { "alpha_fraction": 0.5218750238418579, "alphanum_fraction": 0.534375011920929, "avg_line_length": 16.29729652404785, "blob_id": "527ceb320a2ce9c4cfb0750d7f97a7031e130cf0", "content_id": "1c3b2a9be53e72260d8d6d1f1a38ee3bc7db61e0", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 640, "license_type": "permissive", "max_line_length": 49, "num_lines": 37, "path": "/logger/gui/.eslintrc.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "module.exports = {\n env: {\n browser: true,\n es6: true,\n node: true\n },\n parserOptions: {\n allowImportExportEverywhere: true,\n ecmaFeatures: {\n generators: false,\n jsx: true,\n objectLiteralDuplicateProperties: false\n },\n ecmaVersion: 2018,\n sourceType: 'module'\n },\n plugins: [\n 'compat',\n 'flowtype',\n 'import',\n 'jsx-a11y',\n 'prettier',\n 'promise',\n 'react'\n ],\n settings: {\n 'import/resolver': {\n webpack: {\n config: require('./config/config.eslint')\n }\n }\n },\n rules: {\n ...require('./config/eslint_rules')\n // 'semi': [2, 'always']\n }\n}\n" }, { "alpha_fraction": 0.6455773711204529, "alphanum_fraction": 0.6474201679229736, "avg_line_length": 34.0107536315918, "blob_id": "32a21253aa3534fa02dc5c2a1ef5ef90d4c727f1", "content_id": "aba8b3cac5461b427afacafb7c0a567cbb514d67", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3256, "license_type": "permissive", "max_line_length": 111, "num_lines": 93, "path": "/orchestrator/core/orc_server/account/models.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.contrib.auth.models import User\nfrom rest_framework import serializers\nfrom rest_framework.authtoken.models import Token\n\n# Local imports\nfrom actuator.models import ActuatorGroup\nfrom device.models import DeviceGroup\nfrom .exceptions import EditException\n\n\nclass UserSerializer(serializers.ModelSerializer):\n \"\"\"\n Users API Serializer\n \"\"\"\n auth_groups = serializers.SerializerMethodField()\n actuator_groups = serializers.SerializerMethodField()\n device_groups = 
serializers.SerializerMethodField()\n    token = serializers.SerializerMethodField()\n\n    class Meta:\n        model = User\n        fields = ('username', 'password', 'email', 'first_name', 'last_name', 'token', 'is_active', 'is_staff',\n                  'auth_groups', 'actuator_groups', 'device_groups')\n        extra_kwargs = {\n            'password': {'write_only': True},\n            'is_active': {'default': False},\n            'is_staff': {'default': False}\n        }\n\n    def get_auth_groups(self, obj):\n        return [g.name for g in obj.groups.all()]\n\n    def get_actuator_groups(self, obj):\n        return [g.name for g in ActuatorGroup.objects.filter(users__in=[obj.id])]\n\n    def get_device_groups(self, obj):\n        return [g.name for g in DeviceGroup.objects.filter(users__in=[obj.id])]\n\n    def get_token(self, obj):\n        # filter().first() returns None for a missing token instead of raising DoesNotExist\n        token = Token.objects.filter(user=obj).first()\n        return token.key if token is not None and hasattr(token, 'key') else 'N/A'\n\n    def create(self, validated_data):\n        validated_data.setdefault('is_superuser', False)\n        user = super(UserSerializer, self).create(validated_data)\n        if 'password' in validated_data:\n            user.set_password(validated_data['password'])\n            user.save()\n        return user\n\n    def update(self, instance, validated_data):\n        validated_data.setdefault('is_superuser', False)\n\n        super_users = list(User.objects.filter(is_superuser=True))\n        staff_users = list(User.objects.filter(is_staff=True))\n\n        if instance in super_users:\n            if len(super_users) == 1:\n                raise EditException(\"Cannot edit last super user\")\n\n        if instance in staff_users:\n            if len(staff_users) == 1:\n                raise EditException(\"Cannot edit last admin user\")\n\n        if 'password' in validated_data:\n            password = validated_data.pop('password')\n            instance.set_password(password)\n        return super(UserSerializer, self).update(instance, validated_data)\n\n\nclass PasswordSerializer(serializers.Serializer):\n    \"\"\"\n    Serializer for password change endpoint\n    \"\"\"\n    old_password = serializers.CharField(required=True)\n    new_password_1 = serializers.CharField(required=True)\n    new_password_2 = serializers.CharField(required=True)\n\n    def create(self, validated_data):\n        pass\n\n    def update(self, instance, validated_data):\n        pass\n\n    def validate(self, attrs):\n        \"\"\"\n        Validate the old password given is correct and the two new passwords match\n        :param attrs: data to validate\n        :return: data/exception\n        \"\"\"\n        if attrs['new_password_1'] != attrs['new_password_2']:\n            raise serializers.ValidationError(\"New Passwords do not match\")\n        return attrs\n" }, { "alpha_fraction": 0.5245786309242249, "alphanum_fraction": 0.5365168452262878, "avg_line_length": 26.384614944458008, "blob_id": "a4e7fdca53af08a0aa834713798ddb99d9e23466", "content_id": "4eaa3a8666f95b6891d524a38364de990a63707b", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2848, "license_type": "permissive", "max_line_length": 114, "num_lines": 104, "path": "/orchestrator/gui/client/src/components/static/error.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\nimport { Helmet } from 'react-helmet-async'\nimport { toast } from 'react-toastify'\n\nimport { FontAwesomeIcon } from '@fortawesome/react-fontawesome'\nimport { faCog } from '@fortawesome/free-solid-svg-icons'\n\nconst str_fmt = require('string-format')\n\n\nclass Error extends Component {\n  constructor(props, 
context) {\n super(props, context)\n\n this.meta = {\n title: str_fmt('{base} | {page}', {base: this.props.siteTitle, page: 'Oops...'}),\n canonical: str_fmt('{origin}{path}', {origin: window.location.origin, path: window.location.pathname})\n }\n\n this.iconGeneral = {\n display: 'inline-block',\n width: 1+'em',\n height: 1+'em',\n fontSize: 4+'em',\n textAlign: 'center',\n position: 'absolute',\n top: 0,\n left: 0,\n MozAnimationDuration: 5+'s',\n OAnimationDuration: 5+'s',\n WebkitAnimationDuration: 5+'s',\n animationDuration: 5+'s'\n }\n\n this.reverseAnimation = {\n MozAnimationDirection: 'reverse',\n OAnimationDirection: 'reverse',\n WebkitAnimationDirection: 'reverse',\n animationDirection: 'reverse'\n }\n }\n\n goBack() {\n if (this.props.history.length === 1) {\n toast(<p>Looks like this is a new tab, try closing it instead of going back</p>, {type: toast.TYPE.WARNING})\n } else {\n this.props.history.goBack()\n }\n }\n\n render() {\n return (\n <div className=\"jumbotron well col-md-10 col-12 mx-auto\">\n <Helmet>\n <title>{ this.meta.title }</title>\n <link rel=\"canonical\" href={ this.meta.canonical } />\n </Helmet>\n <h1>Whoops</h1>\n <h3>This isn't a valid page, are you sure this is where you wanted to go?</h3>\n <div className='mx-auto' style={{\n height: 9+'em',\n width: 9+'em',\n fontSize: 1+'em',\n position: 'relative'\n }}>\n <FontAwesomeIcon\n icon={ faCog }\n spin\n style={ this.iconGeneral }\n />\n <FontAwesomeIcon\n icon={ faCog }\n spin\n style={{\n ...this.iconGeneral,\n ...this.reverseAnimation,\n fontSize: 6+'em',\n top: 0.53+'em',\n left: 0.25+'em'\n }}\n />\n <FontAwesomeIcon\n icon={ faCog }\n spin\n style={{\n ...this.iconGeneral,\n fontSize: 3+'em',\n top: 0.25+'em',\n left: 1.7+'em'\n }}\n />\n </div>\n <button className='btn btn-primary' onClick={ () => { this.goBack() } }>Go Back</button>\n </div>\n )\n }\n}\n\nconst mapStateToProps = (state) => ({\n siteTitle: state.Util.site_title\n})\n\nexport default connect(mapStateToProps)(Error)\n" }, { "alpha_fraction": 0.6845607757568359, "alphanum_fraction": 0.7076308727264404, "avg_line_length": 52.02941131591797, "blob_id": "c3e7e792b1985ec8138371a77b38a990dcf74052", "content_id": "2cf37e9f4576bbc4bab3dc48db0e05885a6feae5", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 9016, "license_type": "permissive", "max_line_length": 646, "num_lines": 170, "path": "/docs/Transport.md", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# Adding your own transport to O.I.F.\n\nThis is a tutorial on adding additional, custom transport mechanisms to the O.I.F.\n\n## Adding Transport to Docker Stack\n\nOpen the [Orchestrator Compose file](orchestrator-compose.yaml) to add your transport to the stack. You can copy-paste either the `transport-https` or `transport-mqtt` images and replace it with your own transport's info. 
Read more on Docker Compose [here](https://docs.docker.com/compose/overview/).\n\nHere is what our HTTPS transport looks like:\n\n```yaml\ntransport-https: # container name\n    hostname: transport-https # hostname of container\n    image: oif/transport:orchestrator-https # image name\n    build:\n        context: ./orchestrator/transport/https # location of dockerfile\n        dockerfile: Dockerfile # dockerfile name\n    working_dir: /opt/transport # directory internal to the image which it calls from\n    env_file:\n        - ./environment/queue.connect.env # path to shared environment variables\n    external_links:\n        - queue # link to internal buffer (used to send/receive commands internally within O.I.F.)\n    ports:\n        - 5000:5000 # port exposed for HTTP\n    depends_on:\n        - queue # indicates that this container should wait for queue to exist before running\n    entrypoint:\n        - sh\n        - dev_start.sh # script used to start up the desired functionality within the image\n```\n\nOnce added to the compose file, your transport will be brought up as a part of the docker-compose stack and added to the stack's docker network.\n\n## Adding port information to the O.I.F.\n\nWhen a transport sends a command while residing on a [Docker Network](https://docs.docker.com/network/), the Docker Network will obscure the IP and port such that the message appears to come from the location of the Docker Stack and not the originating machine. To ensure that the response is able to return to the originating machine, we need to send the IP and port as a part of the headers.\n\nThe IP can be set in the O.I.F. Admin Page > Global Preferences > Orchestrator Host. The default value is `127.0.0.1`.\n\nThe port for each transport needs to be set in the data fixtures file [orchestrator.json](../orchestrator/core/orc_server/data/fixtures/orchestrator.json). (See the example at the bottom of this page.) The port is specified in the \"fields\" section of an `orchestrator.protocol` entry once you have added your transport to the list of `orchestrator.protocol` records.\n\n## Listening to the Internal Buffer\n\nThe Orchestrator and Device route messages to the correct transport by using an internal AMQP broker. This buffer is a structure that is part of the O.I.F. for routing messages to the correct locations, but NOT a part of OpenC2 itself. Note that the port does not appear in the docker-compose file: although the image utilizes the default port 5672 for AMQP, the port is not exposed. The [sb_utils](../modules/utils/sb_utils/amqp_tools.py) module has a Consumer wrapper that makes this easy to implement for your transport. You can view an example [here](../orchestrator/transport/https/https/https_transport.py), which looks like this:\n\n```python\nfrom sb_utils import Consumer\n\nprint(\"Connecting to RabbitMQ...\")\nconsumer = None\ntry:\n    consumer = Consumer(\n        exchange=\"transport\",\n        routing_key=\"https\",\n        callbacks=[process_message])\n\nexcept Exception as error:\n    print(error)\n    if consumer is not None:\n        consumer.shutdown()\n```\n\n### Listening to Orchestrator\n\nAs you can see, the O.I.F. utilizes the convention of `exchange=\"transport\"` and `routing_key=transportProtocolName`. Upon receiving a message, the consumer triggers a callback method. It is within this callback that you execute your own transport methods in order to send the OpenC2 Command over the desired protocol.\n\n### Listening to the Actuator\n\nTo add the transport to the device side, the process is the same, except the transport listens for commands and writes them to the actuator (as in our demo actuators). 
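\n\nFor reference, a device-side listener can be just as small as the orchestrator-side one. The sketch below is illustrative rather than repository code: it assumes the same `sb_utils` Consumer wrapper shown above, and the profile name and `process_message` body are placeholders to swap for your own:\n\n```python\nfrom sb_utils import Consumer\n\ndef process_message(body, message):\n    # Placeholder: hand the received OpenC2 Command to the local actuator here\n    print(f\"Received command: {body}\")\n\nconsumer = Consumer(\n    exchange=\"actuator\",\n    routing_key=\"openc2_isr_actuator_profile\",  # example actuator profile name\n    callbacks=[process_message])\n```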
\n\nThey listen on `exchange=\"actuator\"` and `routing_key=actuatorProfileName` (e.g. OpenC2_ISR_Actuator_Profile), with the transports following the same convention as on the orchestrator side. When the device-side transport receives a message from the orchestrator-side transport, it forwards it to the correct actuator by checking the actuator profile name and placing the message onto that queue. After the actuator has processed the command and performed the desired action, it sends its response to the transport, which forwards it back to the orchestrator.\n\n## Responding to the Orchestrator\n\nTo send a response/error message back to the Orchestrator, you will instantiate a Producer, which can also be found in [sb_utils](modules/utils/sb_utils/amqp_tools.py). You can find a response example [here](../transport/https/https/main.py) which looks like this:\n\n```python\nfrom sb_utils import Producer\n\nproducer = Producer()\nproducer.publish(message=data, header=headers, exchange=\"orchestrator\", routing_key=\"response\")\n```\n\n* `message`: The response or error message to be sent.\n* `header`: Any headers that were included in the response (such as the correlation ID for tracking the command).\n* `exchange` and `routing_key`: The exchange and routing key on the broker that the Orchestrator listens on.\n\n## Utilizing and Formatting the Headers\n\n### In order to make sure that we route all messages properly, the O.I.F. sends *custom* headers to each of the transports.\n\n```json\n{\n    \"source\":\n    {\n        \"orchestratorID\": \"600661ba-1977-420e-8c21-92aff67b900f\",\n        \"transport\":\n        {\n            \"type\": \"HTTPS\",\n            \"socket\": \"127.0.0.1:5000\"\n        },\n        \"correlationID\": \"79472795-81e8-4d94-b229-bee114bc7a7f\",\n        \"date\": \"Wed, 27 Feb 2019 16:12:23 UTC\"\n    },\n    \"destination\":\n    [{\n        \"deviceID\": \"337917b5-9330-4107-94d2-5d7929019c23\",\n        \"socket\": \"127.0.0.1:5001\",\n        \"profile\": [\"openc2_isr_actuator_profile\"],\n        \"encoding\": \"json\"\n    }]\n}\n```\n\n`Source`: Information for the O.I.F. orchestrator to allow responses back to it.\n* `orchestratorID`: Identifies this specific orchestrator so that actuators and transports can send responses appropriately.\n* `transport`: The desired transport mechanism as well as the location to reach it.\n* `correlationID`: Identifier for this specific command being sent.\n* `date`: Timestamp for the created message.\n\n`Destination`: A list of locations to send the commands to.\n* `deviceID`: Identifier for the device that contains the desired actuator.\n* `socket`: Location of the device which is ready to receive the command.\n* `profile`: The actuator profile name.\n* `encoding`: Format in which the message is encoded. 
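\n\nAs a rough sketch (not repository code), iterating over the `destination` list above to derive the per-destination send parameters could look like this, where `headers` stands in for the parsed O.I.F. header dictionary:\n\n```python\nfor dest in headers[\"destination\"]:\n    socket = dest[\"socket\"]      # ip:port the device-side transport listens on\n    profiles = dest[\"profile\"]   # actuator profile name(s) to address\n    encoding = dest[\"encoding\"]  # serialization to encode the command with\n    # open a connection to the socket and transmit the command encoded as requested\n```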
\n\nFrom this information, you are able to build the headers for your transport as needed, following existing transport specs as closely as possible.\n\n### Example OpenC2 Headers for HTTPS (constructed from the headers sent by the O.I.F.):\n\n```json\n{\n    \"Host\": \"openc2_isr_actuator_profile@127.0.0.1:5001\",\n    \"From\": \"600661ba-1977-420e-8c21-92aff67b900f@127.0.0.1:5000\",\n    \"Content-type\": \"application/openc2-cmd+json;version=1.0\",\n    \"X-Correlation-ID\": \"79472795-81e8-4d94-b229-bee114bc7a7f\"\n}\n```\n\n* `Host`: Composed of the actuator profile name of the destination actuator + the socket where the actuator is listening.\n* `From`: Composed of the OrchestratorID + the socket where the orchestrator-side transport is listening.\n* `Content-type`: The content type of the message.\n* `X-Correlation-ID`: ID used to track the OpenC2 message with its response.\n\n## Making the transport usable in the O.I.F. GUI\n\nIn order to make the transport you have created selectable in the \"Register Device\" section of the GUI, you will need to add it to the [fixtures file](../orchestrator/core/orc_server/data/fixtures/orchestrator.json) as an additional `orchestrator.protocol`. The other transports look like this:\n\n```json\n    {\n        \"model\": \"orchestrator.protocol\",\n        \"pk\": 1,\n        \"fields\": {\n            \"name\": \"HTTPS\",\n            \"port\": 5000\n        }\n    },\n    {\n        \"model\": \"orchestrator.protocol\",\n        \"pk\": 2,\n        \"fields\": {\n            \"name\": \"MQTT\",\n            \"pub_sub\": true,\n            \"port\": 1883\n        }\n    }\n```\n\n* `pub_sub`: If the transport being added follows a publish/subscribe model and utilizes a broker (or similar), set `pub_sub` to true. This allows multiple transports to share the same ip:port, since they may be connecting to the same broker under different queues/exchanges/topics/etc.\n* `pk` (primary key): Increment this value to the next unused integer for the `orchestrator.protocol` model.\n* `name`: Set this value to the desired protocol name.\n* `port`: The port your transport will be listening on and receiving responses from.\n\n" }, { "alpha_fraction": 0.5907522439956665, "alphanum_fraction": 0.591442346572876, "avg_line_length": 25.345455169677734, "blob_id": "4e49cf9cce05c5ba44dbe1e75c0b0d0e8c7e0f0a", "content_id": "12b38071390f47cb66982d029ada7b2a3f93c86e", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1449, "license_type": "permissive", "max_line_length": 70, "num_lines": 55, "path": "/orchestrator/core/orc_server/backup/views/api.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.apps import apps\nfrom django.http import JsonResponse\nfrom rest_framework import permissions\nfrom rest_framework.decorators import api_view, permission_classes\n\nexclude = {\n    \"actuator\": (\"actuatorprofile\", ),\n    \"admin\": (),\n    \"auth\": (),\n    \"contenttypes\": (),\n    \"sessions\": (),\n    \"rest_framework.authtoken\": (),\n}\n\nbackupModels = None\n\n\n@api_view(['GET'])\n@permission_classes((permissions.IsAdminUser,))\ndef backupRoot(request):\n    \"\"\"\n    API endpoint that lists available apps for backup\n    \"\"\"\n    global backupModels  # pylint: disable=global-statement\n\n    if backupModels is None:\n        backupModels = {}\n        for app in apps.get_app_configs():\n            app_name = app.name.replace(\"django.contrib.\", \"\").lower()\n            if app_name in exclude and len(exclude[app_name]) == 0:\n                continue\n\n            
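# Apps mapped to an empty tuple in 'exclude' above are skipped entirely;\n            # a non-empty tuple only hides the named models of that app.\n            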
exclude_models = exclude.get(app_name, ())\n models = [m.__name__.lower() for m in app.get_models()]\n models = [m for m in models if m not in exclude_models]\n\n if models:\n backupModels[app_name] = models\n\n backupModels = {k: v for k, v in backupModels.items() if v}\n\n return JsonResponse({\n \"backupFormats\": [\n \"json\",\n \"xml\",\n \"yaml\"\n ],\n \"models\": backupModels\n })\n\n\n@api_view(['GET'])\n@permission_classes((permissions.IsAdminUser,))\ndef backupFile(request):\n return \"\"\n" }, { "alpha_fraction": 0.5486268401145935, "alphanum_fraction": 0.5512116551399231, "avg_line_length": 25.913043975830078, "blob_id": "e0bc05755cd56391c4dba236ac33db04725a7e53", "content_id": "687d5264c829e0a79e97bc9bebcf0fa136a5069c", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3095, "license_type": "permissive", "max_line_length": 187, "num_lines": 115, "path": "/orchestrator/gui/client/src/components/admin/pages/users.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\nimport { Helmet } from 'react-helmet-async'\n\nimport { FontAwesomeIcon } from '@fortawesome/react-fontawesome'\nimport { faCheck, faTimes } from '@fortawesome/free-solid-svg-icons'\n\nimport { UserModal } from '../lib'\n\nimport { RemotePageTable } from '../../utils'\n\nimport * as AccountActions from '../../../actions/account'\n\nconst str_fmt = require('string-format')\n\nclass Users extends Component {\n constructor(props, context) {\n super(props, context)\n\n this.meta = {\n title: str_fmt('{base} | {page}', {base: this.props.siteTitle, page: 'Admin - Users'}),\n canonical: str_fmt('{origin}{path}', {origin: window.location.origin, path: window.location.pathname})\n }\n\n this.tableColumns = [\n {\n text: 'Active',\n dataField: 'is_active',\n sort: true,\n formatter: (cell, row) => ( <div className=\"d-flex justify-content-center\"><FontAwesomeIcon icon={ cell ? faCheck : faTimes } size=\"lg\" color={ cell ? 
\"green\" : \"red\" } /></div> )\n },{\n text: 'Username',\n dataField: 'username',\n sort: true\n },{\n text: 'First Name',\n dataField: 'first_name',\n sort: true\n },{\n text: 'Last Name',\n dataField: 'last_name',\n sort: true\n },{\n text: 'Email',\n dataField: 'email',\n sort: false\n }\n ]\n\n this.editOptions = {\n modal: UserModal,\n delete: this.props.deleteAccount\n }\n\n if (this.props.accounts.loaded === 0) {\n this.props.getAccounts()\n }\n\n this.props.getAccounts()\n }\n\n render() {\n return (\n <div className=\"row mx-auto\">\n <Helmet>\n <title>{ this.meta.title }</title>\n <link rel=\"canonical\" href={ this.meta.canonical } />\n </Helmet>\n <div className=\"col-12\">\n <div className=\"col-12\">\n <UserModal register className=\"float-right mt-2\" />\n <h1>Users</h1>\n </div>\n\n <RemotePageTable\n keyField='username'\n dataKey='Account.accounts'\n dataGet={ this.props.getAccounts }\n columns={ this.tableColumns }\n editRows\n editOptions={ this.editOptions }\n defaultSort={[\n {\n dataField: 'username',\n order: 'desc'\n },{\n dataField: 'last_name',\n order: 'desc'\n },{\n dataField: 'first_name',\n order: 'desc'\n }\n ]}\n />\n </div>\n </div>\n )\n }\n}\n\nconst mapStateToProps = (state) => ({\n siteTitle: state.Util.site_title,\n accounts: {\n users: state.Account.accounts,\n loaded: state.Account.accounts.length,\n total: state.Account.count\n }\n})\n\nconst mapDispatchToProps = (dispatch) => ({\n getAccounts: () => dispatch(AccountActions.getAccounts()),\n deleteAccount: (acnt) => dispatch(AccountActions.deleteAccount(acnt))\n})\n\nexport default connect(mapStateToProps, mapDispatchToProps)(Users)\n" }, { "alpha_fraction": 0.6511565446853638, "alphanum_fraction": 0.6568047404289246, "avg_line_length": 27.38167953491211, "blob_id": "8a1dd3d586ccffd2067eb0dc92dabdaa8af20b25", "content_id": "2336e1ce94b4d44c3f2ce14f0f8dcf523a022d73", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3718, "license_type": "permissive", "max_line_length": 117, "num_lines": 131, "path": "/base/modules/utils/root/sb_utils/message/serialize.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "\"\"\"\nMessage Conversion functions\n\"\"\"\nimport base64\nimport bson\nimport cbor2\nimport edn_format\nimport json\nimport msgpack\nimport shutil\nimport toml\nimport ubjson\nimport yaml\n\nfrom typing import Union\n\n\nfrom . import (\n helpers,\n pybinn,\n pysmile\n)\nfrom .. 
import (\n ext_dicts,\n general\n)\n\n\ntry:\n from yaml import CLoader as Loader, CDumper as Dumper\nexcept ImportError:\n from yaml import Loader, Dumper\n\noptionals = dict(\n encode={},\n decode={}\n)\n\nif shutil.which(\"json-to-vpack\") and shutil.which(\"vpack-to-json\"):\n optionals[\"encode\"][\"vpack\"] = helpers.vpack_encode\n optionals[\"decode\"][\"vpack\"] = helpers.vpack_decode\n\n\nserializations = ext_dicts.FrozenDict(\n encode=ext_dicts.FrozenDict(\n binn=pybinn.dumps,\n bencode=helpers.bencode_encode,\n bson=bson.dumps,\n cbor=cbor2.dumps,\n edn=edn_format.dumps,\n json=json.dumps,\n msgpack=lambda m: msgpack.packb(m, use_bin_type=True),\n s_expression=helpers.sp_encode,\n smile=pysmile.encode,\n toml=toml.dumps,\n xml=helpers.xml_encode,\n ubjson=ubjson.dumpb,\n yaml=lambda m: yaml.dump(m, Dumper=Dumper),\n **optionals[\"encode\"]\n ),\n decode=ext_dicts.FrozenDict(\n binn=pybinn.loads,\n bencode=helpers.bencode_decode,\n bson=bson.loads,\n cbor=cbor2.loads,\n edn=edn_format.loads,\n json=json.loads,\n msgpack=msgpack.unpackb,\n s_expression=helpers.sp_decode,\n smile=pysmile.decode,\n toml=toml.loads,\n xml=helpers.xml_decode,\n ubjson=ubjson.loadb,\n yaml=lambda m: yaml.load(m, Loader=Loader),\n **optionals[\"decode\"]\n )\n)\n\ndel optionals\n\n\ndef encode_msg(msg: dict, enc: str, raw: bool = False) -> Union[bytes, str]:\n \"\"\"\n Encode the given message using the serialization specified\n :param msg: message to encode\n :param enc: serialization to encode\n :param raw: message is in raw form (bytes/string) or safe string (base64 bytes as string)\n :return: encoded message\n \"\"\"\n enc = enc.lower()\n msg = general.default_encode(msg)\n\n if enc not in serializations.encode:\n raise ReferenceError(f\"Invalid encoding specified, must be one of {', '.join(serializations.encode.keys())}\")\n\n if not isinstance(msg, dict):\n raise TypeError(f\"Message is not expected type {dict}, got {type(msg)}\")\n\n if len(msg.keys()) == 0:\n raise KeyError(\"Message should have at minimum one key\")\n\n encoded = serializations[\"encode\"].get(enc, serializations.encode[\"json\"])(msg)\n if raw:\n return encoded\n return base64.b64encode(encoded).decode(\"utf-8\") if isinstance(encoded, bytes) else encoded\n\n\ndef decode_msg(msg: Union[bytes, str], enc: str, raw: bool = False) -> dict:\n \"\"\"\n Decode the given message using the serialization specified\n :param msg: message to decode\n :param enc: serialization to decode\n :param raw: message is in raw form (bytes/string) or safe string (base64 bytes as string)\n :return: decoded message\n \"\"\"\n enc = enc.lower()\n\n if isinstance(msg, dict):\n return msg\n\n if enc not in serializations.decode:\n raise ReferenceError(f\"Invalid encoding specified, must be one of {', '.join(serializations.decode.keys())}\")\n\n if not isinstance(msg, (bytes, bytearray, str)):\n raise TypeError(f\"Message is not expected type {bytes}/{bytearray}/{str}, got {type(msg)}\")\n\n if not raw and general.isBase64(msg):\n msg = base64.b64decode(msg if isinstance(msg, bytes) else msg.encode())\n\n msg = serializations[\"decode\"].get(enc, serializations.decode[\"json\"])(msg)\n return general.default_encode(msg, {bytes: bytes.decode})\n" }, { "alpha_fraction": 0.6762743592262268, "alphanum_fraction": 0.6786762475967407, "avg_line_length": 36.0990104675293, "blob_id": "fba5f8db84aee86c192858b6d325a25b2293bb37", "content_id": "da941fdfde91e9d733de47723c22585004cae4e6", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", 
"LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3747, "license_type": "permissive", "max_line_length": 130, "num_lines": 101, "path": "/orchestrator/gui/client/src/actions/account.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "// Actions for account API\nimport { RSAA } from 'redux-api-middleware'\nimport { withGUIAuth } from './util'\n\nconst str_fmt = require('string-format')\n\n// API Base URL\nconst baseAPI = '/api/account'\n\n// Helper Functions\n// None\n\n// API Calls\n// GET - /api/account/ - all users\nconst GET_ACCOUNTS_REQUEST = '@@account/GET_ACCOUNTS_REQUEST'\nexport const GET_ACCOUNTS_SUCCESS = '@@account/GET_ACCOUNTS_SUCCESS'\nexport const GET_ACCOUNTS_FAILURE = '@@account/GET_ACCOUNTS_FAILURE'\nexport const getAccounts = ({page=1, count=10, sort='name', refresh=false}={}) => ({\n [RSAA]: {\n endpoint: str_fmt('{base}?page={page}&length={count}&ordering={sort}', {base: baseAPI, page: page, count: count, sort: sort}),\n method: 'GET',\n headers: withGUIAuth({'Content-Type': 'application/json'}),\n types: [\n GET_ACCOUNTS_REQUEST,\n {\n type: GET_ACCOUNTS_SUCCESS,\n meta: {\n sort: sort,\n refresh: refresh\n }\n }, GET_ACCOUNTS_FAILURE\n ]\n }\n})\n\n// POST - /api/account/ - create user (username, password, email, first_name, last_name, is_active, is_staff)\nconst CREATE_ACCOUNT_REQUEST = '@@account/CREATE_ACCOUNT_REQUEST'\nexport const CREATE_ACCOUNT_SUCCESS = '@@account/CREATE_ACCOUNT_SUCCESS'\nexport const CREATE_ACCOUNT_FAILURE = '@@account/CREATE_ACCOUNT_FAILURE'\nexport const createAccount = (user) => ({\n [RSAA]: {\n endpoint: str_fmt('{base}/', {base: baseAPI}),\n method: 'POST',\n headers: withGUIAuth({'Content-Type': 'application/json'}),\n body: JSON.stringify(user),\n types: [\n CREATE_ACCOUNT_REQUEST, CREATE_ACCOUNT_SUCCESS, CREATE_ACCOUNT_FAILURE\n ]\n }\n})\n\n// PATCH - /api/account/{username} - update specific user\nconst UPDATE_ACCOUNT_REQUEST = '@@account/UPDATE_ACCOUNT_REQUEST'\nexport const UPDATE_ACCOUNT_SUCCESS = '@@account/UPDATE_ACCOUNT_SUCCESS'\nexport const UPDATE_ACCOUNT_FAILURE = '@@account/UPDATE_ACCOUNT_FAILURE'\nexport const updateAccount = (username, user) => ({\n [RSAA]: {\n endpoint: str_fmt('{base}/{username}/', {base: baseAPI, username: username}),\n method: 'PATCH',\n headers: withGUIAuth({'Content-Type': 'application/json'}),\n body: JSON.stringify(user),\n types: [\n UPDATE_ACCOUNT_REQUEST, UPDATE_ACCOUNT_SUCCESS, UPDATE_ACCOUNT_FAILURE\n ]\n }\n})\n\n// DELETE - /api/account/{username} - delete specific user\nconst DELETE_ACCOUNT_REQUEST = '@@account/DELETE_ACCOUNT_REQUEST'\nexport const DELETE_ACCOUNT_SUCCESS = '@@account/DELETE_ACCOUNT_SUCCESS'\nexport const DELETE_ACCOUNT_FAILURE = '@@account/DELETE_ACCOUNT_FAILURE'\nexport const deleteAccount = (username) => ({\n [RSAA]: {\n endpoint: str_fmt('{base}/{username}/', {base: baseAPI, username: username}),\n method: 'DELETE',\n headers: withGUIAuth({'Content-Type': 'application/json'}),\n types: [\n DELETE_ACCOUNT_REQUEST, DELETE_ACCOUNT_SUCCESS, DELETE_ACCOUNT_FAILURE\n ]\n }\n})\n\n// POST - /api/account/{username}/change_password - change specific users password\nconst CHANGE_ACCOUNT_PASSWORD_REQUEST = '@@account/CHANGE_ACCOUNT_PASSWORD_REQUEST'\nexport const CHANGE_ACCOUNT_PASSWORD_SUCCESS = '@@account/CHANGE_ACCOUNT_PASSWORD_SUCCESS'\nexport const CHANGE_ACCOUNT_PASSWORD_FAILURE = 
'@@account/CHANGE_ACCOUNT_PASSWORD_FAILURE'\nexport const changeAccountPassword = (username, old_pass, new_pass1, new_pass2) => ({\n [RSAA]: {\n endpoint: str_fmt('{base}/{username}/change_password/', {base: baseAPI, username: username}),\n method: 'POST',\n headers: withGUIAuth(),\n body: JSON.stringify({\n old_password: old_pass || '',\n new_password_1: new_pass1 || '',\n new_password_2: new_pass2 || ''\n }),\n types: [\n CHANGE_ACCOUNT_PASSWORD_REQUEST, CHANGE_ACCOUNT_PASSWORD_SUCCESS, CHANGE_ACCOUNT_PASSWORD_FAILURE\n ]\n }\n})\n" }, { "alpha_fraction": 0.6173688769340515, "alphanum_fraction": 0.62482088804245, "avg_line_length": 63.61111068725586, "blob_id": "253ddcb324606a7b23acd9b19dd1062a185bfc06", "content_id": "b09bdd815c7ab2b8c1dc3e0c86b9fdd7192614b3", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3489, "license_type": "permissive", "max_line_length": 203, "num_lines": 54, "path": "/orchestrator/core/orc_server/tracking/migrations/0001_initial.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2 on 2019-04-04 18:39\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='RequestLog',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('requested_at', models.DateTimeField(db_index=True, default=django.utils.timezone.now, help_text='Time the initial request was received')),\n ('response_ms', models.PositiveIntegerField(default=0, help_text='Time it took to process the request in milliseconds')),\n ('path', models.CharField(db_index=True, help_text='URL path for the request', max_length=200)),\n ('view', models.CharField(blank=True, db_index=True, help_text='Method that was called to process the request', max_length=200, null=True)),\n ('view_method', models.CharField(blank=True, db_index=True, help_text='HTTP Method of the request', max_length=30, null=True)),\n ('remote_addr', models.GenericIPAddressField(blank=True, help_text='Remote IP Address of the system that made the requested', null=True)),\n ('host', models.URLField(blank=True, help_text='Host of the system that received the request', null=True)),\n ('method', models.CharField(help_text='HTTP Method of the request', max_length=10)),\n ('query_params', models.TextField(blank=True, help_text='Data received in the URL as Query Parameters', null=True)),\n ('data', models.TextField(blank=True, help_text='Data received in the Body/JSON of the request', null=True)),\n ('response', models.TextField(blank=True, help_text='Data sent back to the remote system', null=True)),\n ('errors', models.TextField(blank=True, help_text='Errors raised in processing the request', null=True)),\n ('status_code', models.PositiveIntegerField(blank=True, help_text='HTTP response status code', null=True)),\n ('user', models.ForeignKey(blank=True, help_text='User that requested the page', null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'Request Log',\n },\n ),\n migrations.CreateModel(\n 
name='EventLog',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('occurred_at', models.DateTimeField(default=django.utils.timezone.now, help_text='Time the event occurred')),\n ('level', models.CharField(choices=[('D', 'Debug'), ('E', 'Error'), ('F', 'Fatal'), ('I', 'Info'), ('T', 'Trace'), ('W', 'Warn')], help_text='Level of severity the event', max_length=1)),\n ('message', models.TextField(blank=True, help_text='Event message', null=True)),\n ('user', models.ForeignKey(blank=True, help_text='User that caused the event', null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'Event Log',\n },\n ),\n ]\n" }, { "alpha_fraction": 0.5784140825271606, "alphanum_fraction": 0.5806167125701904, "avg_line_length": 32.13868713378906, "blob_id": "8abfb89410d3ff8b4b886528b66b9adc6e92b2d6", "content_id": "e8edea07e422eb1fbe77892f0def6b86abafb529", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4540, "license_type": "permissive", "max_line_length": 114, "num_lines": 137, "path": "/orchestrator/core/orc_server/es_mirror/utils/general.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import string\n\nfrom django.db.models import signals, Model\nfrom django.db.models.fields.related import ForeignKey, ManyToManyField\nfrom elasticsearch.exceptions import NotFoundError\nfrom elasticsearch_dsl import connections, Document, Field, Nested, Object\nfrom typing import (\n Dict,\n List,\n Tuple,\n Union\n)\n\n\nFIELDS = Union[str, Union[None, 'FIELDS']]\n_ignore_keys = [\n '__doc__',\n '__module__',\n '_state',\n 'password'\n]\n\n\ndef es_dict(model: Model, fields: FIELDS = None) -> Union[dict, None]:\n if not isinstance(model, Model):\n return model\n\n fields = fields or {}\n data = dict(\n _id=model.pk\n )\n opts = getattr(model, '_meta')\n model_fields = [*opts.concrete_fields, *opts.private_fields, *opts.many_to_many]\n for f in model_fields:\n if isinstance(f, ForeignKey):\n val = es_dict(getattr(model, f.name), fields.get(f.name))\n elif isinstance(f, ManyToManyField):\n val = [es_dict(mod, fields.get(f.name)) for mod in getattr(model, f.name).all()]\n else:\n val = getattr(model, f.name)\n data[f.name] = val\n data = {k: v for k, v in data.items() if k not in _ignore_keys}\n\n if fields:\n return {k: v for k, v in data.items() if k in fields}\n\n return data\n\n\ndef get_nestedFields(fields: List[Tuple[str, Field, bool]]) -> dict:\n nested_fields = {}\n for f_name, f_type, _ in fields:\n if isinstance(f_type, (Nested, Object)):\n nested_fields[f_name] = get_nestedFields(list(f_type._doc_class._ObjectBase__list_fields()))\n else:\n nested_fields[f_name] = None\n\n return nested_fields if nested_fields else None\n\n\nclass ElasticHooks:\n _mirror: bool\n _exists: set\n _models: Dict[Model, Document]\n _prefix: str\n\n def __init__(self, host: Union[list, str] = None, prefix: str = '', **kwargs) -> None:\n self._exists = set()\n self._mirror = False\n self._models = {}\n self._prefix = self._clean_string(prefix)\n\n if host is not None:\n self._mirror = True\n connections.configure(\n default=dict(\n hosts=host,\n timeout=kwargs.get('timeout', 60)\n )\n )\n\n def add_model(self, model: Model, doc: Document) -> None:\n # print(f\"{model.__name__} add 
-> {doc.Index.name}\")\n self._models[model] = doc\n # Listen to all model saves\n signals.post_save.connect(self.handle_save, sender=model)\n signals.post_delete.connect(self.handle_delete, sender=model)\n\n # Use to manage related objects update\n m2m = getattr(model, '_meta').many_to_many\n if m2m:\n for field in m2m:\n # print(f\"M2M thought - {field}\")\n signals.m2m_changed.connect(self.handle_m2m_changed, sender=getattr(model, field.attname).through)\n\n def handle_save(self, sender, instance=None, **kwargs):\n # print(f\"{sender.__name__} save\")\n if self._mirror:\n doc = self._check_mirror(sender)\n d = doc.model_init(instance)\n d.save(index=self._prefix_index(doc.Index.name))\n\n def handle_delete(self, sender, instance=None, **kwargs):\n # print(f\"{sender.__name__} delete\")\n if self._mirror:\n doc = self._check_mirror(sender)\n d = doc.model_init(instance)\n try:\n d.delete(index=self._prefix_index(doc.Index.name))\n except (NotFoundError, TypeError):\n pass\n\n def handle_m2m_changed(self, sender, instance, action, **kwargs):\n if action.startswith('post_') and self._mirror:\n # print(f\"{sender.__name__} m2m change - {action} - {instance}\")\n self.handle_save(instance.__class__, instance)\n\n # Helper functions\n def _check_mirror(self, model) -> Document:\n doc = self._models[model]\n\n # Create model index if not exists...\n if self._mirror:\n conn = connections.get_connection()\n if not conn.indices.exists(index=self._prefix_index(doc.Index.name)):\n doc.init(index=self._prefix_index(doc.Index.name))\n self._exists.add(model.__name__)\n\n return doc\n\n def _clean_string(self, s: str) -> str:\n cleaned = filter(lambda c: c in string.printable, s)\n return ''.join(cleaned).replace(\" \", \"-\")\n\n def _prefix_index(self, index: str) -> str:\n prefix = self._prefix.replace('/', '_')\n return f\"{prefix}_{index}\"\n" }, { "alpha_fraction": 0.5636856555938721, "alphanum_fraction": 0.5921409130096436, "avg_line_length": 31.086956024169922, "blob_id": "f653cee7610de0f41736cb19ce6216d5f12f1922", "content_id": "977d34a791e866fef34ef37da60136220183b51a", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 738, "license_type": "permissive", "max_line_length": 178, "num_lines": 23, "path": "/orchestrator/core/orc_server/actuator/migrations/0002_auto_20190417_1319.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2 on 2019-04-17 13:19\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('actuator', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='actuator',\n name='schema_format',\n field=models.CharField(choices=[('jadn', 'JADN'), ('json', 'JSON')], default='jadn', help_text='Format of the schema (JADN|JSON), set from the schema', max_length=4),\n ),\n migrations.AlterField(\n model_name='actuator',\n name='profile',\n field=models.CharField(default='N/A', help_text='Profile of the actuator, set from the schema', max_length=60),\n ),\n ]\n" }, { "alpha_fraction": 0.6049469709396362, "alphanum_fraction": 0.6137809157371521, "avg_line_length": 24.963302612304688, "blob_id": "f5d4e6239cfe9be5cf78905fced84de0b411ac09", "content_id": "0a775afd0ed325ac7ac31ac3bd91fc3498c518e4", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", 
"LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2830, "license_type": "permissive", "max_line_length": 79, "num_lines": 109, "path": "/orchestrator/gui/server/gui_server/orchestrator/models.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport uuid\n\nfrom django.db import models\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.validators import MaxValueValidator, MinValueValidator\n\nfrom rest_framework import serializers\n\nPROTO_CHOICES = (\n (\"http\", \"HTTP\"),\n (\"https\", \"HTTPS\"),\n (\"ws\", \"WebSocket\"),\n (\"wss\", \"WebSocket Secure\")\n)\n\n\nclass Orchestrator(models.Model):\n orc_id = models.UUIDField(\n default=uuid.uuid4,\n help_text=\"Unique ID of the transport\",\n unique=True\n )\n name = models.CharField(\n default=\"Orchestrator\",\n max_length=30,\n unique=True\n )\n host = models.CharField(\n default=\"127.0.0.1\",\n help_text=\"Hostname/IP of the orchestrator\",\n max_length=60\n )\n port = models.IntegerField(\n default=8080,\n help_text=\"Port of the device\",\n validators=[\n MinValueValidator(1),\n MaxValueValidator(65535)\n ]\n )\n proto = models.CharField(\n choices=PROTO_CHOICES,\n default=\"http\",\n help_text=\"Protocol supported by the device\",\n max_length=5\n )\n\n class Meta:\n verbose_name = \"Orchestrator\"\n unique_together = ((\"host\", \"port\"),)\n\n def __str__(self):\n return f\"Orchestrator - {self.host}:{self.port}\"\n\n\nclass OrchestratorAuth(models.Model):\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n blank=False,\n help_text=\"User the saved token is associated\",\n on_delete=models.CASCADE,\n null=False\n )\n orchestrator = models.ForeignKey(\n Orchestrator,\n blank=False,\n help_text=\"Orchestrator the saved token is associated\",\n on_delete=models.CASCADE,\n null=False,\n )\n token = models.CharField(\n help_text=\"Users authentication token for the associated orchestrator\",\n max_length=50,\n unique=True\n )\n\n class Meta:\n verbose_name = \"Orchestrator Auth\"\n unique_together = ((\"user\", \"orchestrator\"),)\n\n def __str__(self):\n return f\"Event - {self.user} - {self.orchestrator}\"\n\n\nclass OrchestratorSerializer(serializers.ModelSerializer):\n class Meta:\n model = Orchestrator\n fields = (\"orc_id\", \"name\", \"proto\", \"host\", \"port\")\n\n\nclass OrchestratorAuthSerializer(serializers.ModelSerializer):\n user = serializers.SlugRelatedField(\n allow_null=True,\n queryset=get_user_model().objects.all(),\n slug_field=\"username\"\n )\n orchestrator = serializers.SlugRelatedField(\n allow_null=True,\n queryset=Orchestrator.objects.all(),\n slug_field=\"orc_id\"\n )\n\n class Meta:\n model = OrchestratorAuth\n fields = (\"user\", \"orchestrator\", \"token\")\n" }, { "alpha_fraction": 0.6551724076271057, "alphanum_fraction": 0.6673427820205688, "avg_line_length": 28, "blob_id": "1e0265e4e8fc5bc59e217f8a910b54b5c50f00b4", "content_id": "7704d560f7fcbe0c05d2b8e1b10e3879873d2a88", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 986, "license_type": "permissive", "max_line_length": 107, "num_lines": 34, "path": 
"/orchestrator/gui/server/gui_server/utils/model.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\n\ndef get_or_none(model, *args, **kwargs):\n tmp_qry = model.objects.filter(*args, **kwargs)\n\n if len(tmp_qry) == 0:\n return None\n elif len(tmp_qry) == 1:\n return tmp_qry.first()\n else:\n return tmp_qry\n\n\nclass ReadOnlyModelAdmin(admin.ModelAdmin):\n \"\"\"\n ModelAdmin class that prevents modifications through the admin.\n The changelist and the detail view work, but a 403 is returned if one actually tries to edit an object.\n Source: https://gist.github.com/aaugustin/1388243\n \"\"\"\n actions = None\n\n def get_readonly_fields(self, request, obj=None):\n return self.fields or [f.name for f in self.model._meta.fields]\n\n def has_add_permission(self, request):\n return False\n\n # Allow viewing objects but not actually changing them.\n def has_change_permission(self, request, obj=None):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n" }, { "alpha_fraction": 0.6072953343391418, "alphanum_fraction": 0.6096236109733582, "avg_line_length": 23.074766159057617, "blob_id": "93deebc2251216393c165d12090e63393ca1edaa", "content_id": "a6d6eaa4e463afee0cf8874c8a9df3869998291a", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2577, "license_type": "permissive", "max_line_length": 107, "num_lines": 107, "path": "/orchestrator/gui/client/src/components/command/pages/generate/lib/jadn_field/index.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\n\nimport JSONPretty from 'react-json-pretty'\n\nimport {\n Button,\n Form,\n FormGroup,\n FormText,\n Input,\n Label\n} from 'reactstrap'\n\nimport ArrayOf from './arrayOf'\nimport Basic from './basicField'\nimport Choice from './choice'\nimport Enumerated from './enumerated'\nimport Map from './map'\nimport Record from './record'\n\nimport {\n isOptional_jadn,\n keys,\n opts2arr,\n zip\n} from '../'\n\nimport { safeGet } from '../../../../../utils'\n\n\nclass Field extends Component {\n constructor(props, context) {\n super(props, context)\n this.schema_types = safeGet(safeGet(this.props, \"schema\", {}), \"types\", [])\n this.ignore_fields = [\"Enumerated\"]\n }\n\n shouldComponentUpdate(nextProps, nextState) {\n let props_update = this.props != nextProps\n let state_update = this.state != nextState\n\n if (props_update) {\n this.schema_types = safeGet(safeGet(this.props, 'schema', {}), 'types', [])\n }\n return props_update || state_update\n }\n\n buildField(def, key=null) {\n let fieldArgs = {\n key: key,\n name: def.name,\n parent: this.props.parent || '',\n def: def,\n optChange: (k, v) => this.props.optChange(k, v, this.props.idx)\n }\n\n if (def.hasOwnProperty(\"id\")) {\n let typeDef = this.schema_types.filter((type) => type[0] == def.type )\n if (typeDef.length === 1) {\n fieldArgs.def = zip(keys.Structure, typeDef[0])\n fieldArgs.def.desc = def.desc\n }\n }\n\n switch(fieldArgs.def.type) {\n case 'Array':\n return <FormText key={ key } >Array: { fieldArgs.name }</FormText>\n case 'ArrayOf':\n return <ArrayOf { ...fieldArgs } />\n case 'Choice':\n return <Choice { ...fieldArgs } />\n case 'Enumerated':\n return <Enumerated { 
...fieldArgs } />\n case 'Map':\n return <Map { ...fieldArgs } />\n case 'Record':\n return <Record { ...fieldArgs } />\n default:\n return <Basic { ...fieldArgs } />\n }\n }\n\n render() {\n if (this.props.def.hasOwnProperty(\"fields\") && this.ignore_fields.indexOf(this.props.def.type) == -1) {\n return this.props.def.fields.map((def, i) => this.buildField(zip(keys.Gen_Def, def), i))\n } else {\n return this.buildField(this.props.def)\n }\n }\n}\n\nconst mapStateToProps = (state) => ({\n schema: state.Generate.selectedSchema\n})\n\nconst connectedField = connect(mapStateToProps)(Field)\n\nexport {\n connectedField as default,\n connectedField as Field,\n isOptional_jadn,\n keys,\n opts2arr,\n zip,\n Map\n}\n\n" }, { "alpha_fraction": 0.641791045665741, "alphanum_fraction": 0.641791045665741, "avg_line_length": 18.14285659790039, "blob_id": "2e0f97e550d9646a10aefef56b30d03f93bd070e", "content_id": "83487df8c0d9859a56fe2219c40f64a2f9cf6654", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "permissive", "max_line_length": 61, "num_lines": 7, "path": "/base/modules/utils/root/sb_utils/message/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from .serialize import decode_msg, encode_msg, serializations\n\n__all__ = [\n 'decode_msg',\n 'encode_msg',\n 'serializations'\n]\n" }, { "alpha_fraction": 0.7122905254364014, "alphanum_fraction": 0.7122905254364014, "avg_line_length": 27.639999389648438, "blob_id": "5169a661b0a4b5f856d632b5a042c7772f3116fc", "content_id": "281497e3a02b4a3b0ace0a0bee4d78b45f927825", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 716, "license_type": "permissive", "max_line_length": 91, "num_lines": 25, "path": "/logger/gui/src/index.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React from 'react';\nimport ReactDOM from 'react-dom';\nimport { HelmetProvider } from 'react-helmet-async';\n\n// Styles\nimport 'bootstrap';\nimport { ThemeSwitcher } from './components/utils';\nimport './components/dependencies/css/searchkit.less';\n\nimport App from './app';\nimport registerServiceWorker from './registerServiceWorker';\n\n// Theme Options\nconst validThemes = ['cyborg', 'darkly', 'lumen', 'slate', 'solar', 'superhero'];\n\nconst Root = () => (\n <HelmetProvider>\n <ThemeSwitcher storeThemeKey=\"theme\" defaultTheme=\"lumen\" themeOptions={ validThemes }>\n <App />\n </ThemeSwitcher>\n </HelmetProvider>\n);\n\nReactDOM.render(<Root />, document.getElementById('root'));\nregisterServiceWorker();\n" }, { "alpha_fraction": 0.5807722806930542, "alphanum_fraction": 0.5823482871055603, "avg_line_length": 24.389999389648438, "blob_id": "f1853ba958534cfac04935345772188883ecdb43", "content_id": "ed25aa23530c81393a2ed895417a5da9cf706df2", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2538, "license_type": "permissive", "max_line_length": 131, "num_lines": 100, "path": 
"/orchestrator/gui/client/src/components/auth/login.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\nimport { Redirect } from 'react-router-dom'\nimport { toast } from 'react-toastify'\nimport {\n Alert,\n Button,\n Form,\n FormGroup,\n FormFeedback,\n Input,\n Jumbotron,\n Label,\n} from 'reactstrap'\n\nimport { InputField } from '../utils'\n\nimport * as AuthActions from '../../actions/auth'\n\nclass Login extends Component {\n constructor(props, context) {\n super(props, context)\n\n this.errPopup = null\n\n this.state = {\n username: '',\n password: ''\n }\n }\n\n handleInputChange(event) {\n const target = event.target\n const value = target.type === 'checkbox' ? target.checked : target.value\n const name = target.name\n\n this.setState({\n [name]: value\n });\n }\n\n onSubmit(event) {\n event.preventDefault()\n this.props.onSubmit(this.state.username, this.state.password)\n }\n\n render() {\n let { from } = this.props.location.state || { from: { pathname: '/' } }\n\n if (this.props.isAuthenticated) {\n return (\n <Redirect to={ from } />\n )\n } else {\n const errors = this.props.authErrors || {}\n if (errors.non_field_errors && !this.errPopup) {\n this.errPopup = toast(<p>{ errors.non_field_errors }</p>, {type: toast.TYPE.INFO, onClose: () => { this.errPopup = null }})\n }\n\n return (\n <Jumbotron className=\"col-md-4 col-lg-3 mx-auto\">\n <Form onSubmit={ this.onSubmit.bind(this) }>\n <h1>Login</h1>\n\n <InputField\n name=\"username\"\n label=\"Username\"\n error={ errors.username }\n onChange={ this.handleInputChange.bind(this) }\n />\n\n <InputField\n name=\"password\"\n label=\"Password\"\n error={ errors.password }\n type=\"password\"\n onChange={ this.handleInputChange.bind(this) }\n />\n\n <div className=\"float-right btn-group\" role=\"group\">\n <Button type=\"submit\" color=\"primary\">Log In</Button>\n <Button type=\"reset\" color=\"warning\">Reset</Button>\n </div>\n </Form>\n </Jumbotron>\n )\n }\n }\n}\n\nconst mapStateToProps = (state) => ({\n authErrors: state.Auth.errors,\n isAuthenticated: AuthActions.isAuthenticated(state.Auth)\n})\n\nconst mapDispatchToProps = (dispatch) => ({\n onSubmit: (username, password) => dispatch(AuthActions.login(username, password))\n})\n\nexport default connect(mapStateToProps, mapDispatchToProps)(Login)" }, { "alpha_fraction": 0.8916256427764893, "alphanum_fraction": 0.8916256427764893, "avg_line_length": 16, "blob_id": "b526dd360f320de49b3d6709b258f6b476c0b2c7", "content_id": "3ef6c752f1139b715a8a2bc38b0259a2b5965614", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 203, "license_type": "permissive", "max_line_length": 30, "num_lines": 12, "path": "/orchestrator/gui/server/requirements.txt", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "bleach\nchannels\ndaphne\ndjango\ndjango-cors-headers\ndjangorestframework\ndjangorestframework-datatables\ndjangorestframework-jwt\ndjango-rest-swagger\nmysql-connector-python\nsimple-rest-client\nwebsocket_client" }, { "alpha_fraction": 0.5707762837409973, "alphanum_fraction": 0.6073059439659119, "avg_line_length": 22.105262756347656, "blob_id": "d4690fc65dfd48e7529727427b8e064957f70f54", "content_id": "0722d65c7f2663eb0cf17404a580d26eafc5ccd5", 
"detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 438, "license_type": "permissive", "max_line_length": 70, "num_lines": 19, "path": "/orchestrator/gui/client/src/components/utils/array.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "// Array utility functions\n\nexport const mergeByProperty = (arr1, arr2, prop) => {\n let tmp = [ ...(arr1 || []) ]\n\n arr2.forEach(arr2obj => {\n let obj = tmp.find(arr1obj => arr1obj[prop] === arr2obj[prop])\n obj ? Array.prototype.push.apply(obj, arr2obj) : tmp.push(arr2obj)\n })\n return tmp\n}\n\nexport const updateArray = (arr1={}, arr2={}) => {\n Object.keys(arr2).forEach(key => {\n arr1[key] = arr2[key]\n })\n\n return arr1\n}" }, { "alpha_fraction": 0.6828086972236633, "alphanum_fraction": 0.6828086972236633, "avg_line_length": 23.294116973876953, "blob_id": "ffac65447a27a92582a44006a06182aa9afccb69", "content_id": "9b70fa7619a8c12730401d1957579b0b42147d7c", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 413, "license_type": "permissive", "max_line_length": 54, "num_lines": 17, "path": "/orchestrator/transport/mqtt/MQTT/mqtt_transport.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# mqtt_transport.py\n\nfrom sb_utils import Consumer\nfrom callbacks import Callbacks\n\n# Begin consuming messages from internal message queue\nprint(\"Connecting to RabbitMQ...\")\ntry:\n consumer = Consumer(\n exchange=\"transport\",\n routing_key=\"mqtt\",\n callbacks=[Callbacks.send_mqtt],\n debug=True\n )\nexcept Exception as err:\n print(f\"Consumer Error: {err}\")\n consumer.shutdown()\n" }, { "alpha_fraction": 0.5334405303001404, "alphanum_fraction": 0.535048246383667, "avg_line_length": 28.619047164916992, "blob_id": "4eebcdbd096b1a340fe95da36da02a835cef770a", "content_id": "f871044ce988bc8cae029493899594d336ae13f0", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3110, "license_type": "permissive", "max_line_length": 105, "num_lines": 105, "path": "/orchestrator/core/orc_server/backup/utils/xls.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import json\nimport re\n\nfrom collections import OrderedDict\nfrom io import BytesIO as StringIO\nfrom pyexcel_xls import get_data, save_data\nfrom rest_framework.exceptions import ParseError\nfrom rest_framework.parsers import BaseParser\nfrom rest_framework.renderers import BaseRenderer\n\n\ndef simpleType(obj):\n if isinstance(obj, (str, float, int)):\n return obj\n\n try:\n return json.dumps(obj)\n except TypeError:\n return str(obj)\n\n\nclass XLSParser(BaseParser):\n media_type = 'application/vnd.ms-excel'\n\n def parse(self, stream, media_type=None, parser_context=None):\n \"\"\"\n Parses the incoming bytestream as XLS and return resulting data\n \"\"\"\n stream_data = dict(get_data(stream))\n sheet = list(stream_data.keys())[0] if len(stream_data) == 1 else None\n\n if sheet is None or sheet != 'Data':\n raise ParseError('XLS parse 
error - spreadsheet should contain one sheet named `Data`')\n\n stream_data = stream_data[sheet]\n headers = stream_data[0]\n data = []\n\n try:\n for row in stream_data[1:]:\n row = {k: self._json_loads(v) for k, v in zip(headers, row)}\n row.update({key: \"\" for key in set(headers).difference(set(row.keys()))})\n data.append(row)\n\n except ValueError as e:\n raise ParseError(f\"XLS parse error - invalid data in spreadsheet {getattr(e, 'message', e)}\")\n\n return data\n\n def _json_loads(self, val):\n \"\"\"\n Attempt to load the value as json\n \"\"\"\n try:\n return json.loads(val)\n except ValueError:\n if isinstance(val, str):\n if re.match(r\"^[\\[{].*[\\]}]\", val):\n raise ParseError(f\"XLS parse error - data appears to be JSON, cannot load\")\n return val\n\n\nclass XLSRenderer(BaseRenderer):\n media_type = 'application/vnd.ms-excel'\n format = 'xls'\n charset = None\n\n def render(self, data, accepted_media_type=None, renderer_context=None):\n \"\"\"\n Render `data` into XLS\n \"\"\"\n xls_file = StringIO()\n xls_data = OrderedDict()\n\n if data is None:\n xls_data.update({\"Data\": [[\"No Data\"]]})\n\n else:\n try:\n data = dict(data)\n # TODO: change to more specific exceptions\n except Exception: # pylint: disable=broad-except\n data = list(data)\n\n if isinstance(data, list):\n headers = list(dict(data[0]).keys())\n rows = []\n\n for row in data:\n row = dict(row)\n rows.append([simpleType(row[c]) for c in headers])\n\n xls_data.update({\n \"Data\": [\n headers,\n *rows\n ]\n })\n else:\n xls_data.update({\n \"Data\": [[k, simpleType(v)] for k, v in data.items()]\n })\n\n save_data(xls_file, xls_data)\n return xls_file.getvalue()\n" }, { "alpha_fraction": 0.5558176040649414, "alphanum_fraction": 0.5735062956809998, "avg_line_length": 24.450000762939453, "blob_id": "cbc1cb932cab138e0d70d396879edf669a829b23", "content_id": "71978a813ddb311a8b2fc44764def50b27ba6f0a", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2544, "license_type": "permissive", "max_line_length": 46, "num_lines": 100, "path": "/logger/gui/config/eslint_rules/import_rules.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "// Import Rules\n\nmodule.exports = {\n 'import/default': 0,\n 'import/dynamic-import-chunkname': [0, {\n importFunctions: [],\n webpackChunknameFormat: '[0-9a-zA-Z-_/.]+'\n }],\n 'import/export': 2,\n 'import/exports-last': 0,\n 'import/extensions': [2, 'ignorePackages', {\n js: 'never',\n jsx: 'never',\n mjs: 'never'\n }],\n 'import/first': 2,\n 'import/group-exports': 0,\n 'import/imports-first': 0,\n 'import/max-dependencies': [0, {\n max: 10\n }],\n 'import/named': 2,\n 'import/namespace': 0,\n 'import/newline-after-import': 2,\n 'import/no-absolute-path': 2,\n 'import/no-amd': 2,\n 'import/no-anonymous-default-export': [0, {\n allowAnonymousClass: false,\n allowAnonymousFunction: false,\n allowArray: false,\n allowArrowFunction: false,\n allowLiteral: false,\n allowObject: false\n }],\n 'import/no-commonjs': 0,\n 'import/no-cycle': [2, {\n // maxDepth: null\n }],\n 'import/no-default-export': 0,\n 'import/no-deprecated': 0,\n 'import/no-duplicates': 2,\n 'import/no-dynamic-require': 2,\n 'import/no-extraneous-dependencies': [0, {\n devDependencies: [\n 'test/**',\n 'tests/**',\n 'spec/**',\n '**/__tests__/**',\n '**/__mocks__/**',\n 'test.{js,jsx}',\n 
'test-*.{js,jsx}',\n '**/*{.,_}{test,spec}.{js,jsx}',\n '**/jest.config.js',\n '**/jest.setup.js',\n '**/vue.config.js',\n '**/webpack.config.js',\n '**/webpack.config.*.js',\n '**/rollup.config.js',\n '**/rollup.config.*.js',\n '**/gulpfile.js',\n '**/gulpfile.*.js',\n '**/Gruntfile{,.js}',\n '**/protractor.conf.js',\n '**/protractor.conf.*.js'\n ],\n optionalDependencies: false\n }],\n 'import/no-internal-modules': [0, {\n allow: []\n }],\n 'import/no-mutable-exports': 2,\n 'import/no-named-as-default': 2,\n 'import/no-named-as-default-member': 2,\n 'import/no-named-default': 2,\n 'import/no-named-export': 0,\n 'import/no-namespace': 0,\n 'import/no-nodejs-modules': 0,\n 'import/no-relative-parent-imports': 0,\n 'import/no-restricted-paths': 0,\n 'import/no-self-import': 2,\n 'import/no-unassigned-import': 0,\n 'import/no-unresolved': [2, {\n caseSensitive: true,\n commonjs: true\n }],\n 'import/no-unused-modules': [0, {\n ignoreExports: [],\n missingExports: true,\n unusedExports: true\n }],\n 'import/no-useless-path-segments': 2,\n 'import/no-webpack-loader-syntax': 2,\n 'import/order': [2, {\n groups: [\n ['builtin', 'external', 'internal']\n ]\n }],\n 'import/prefer-default-export': 0,\n 'import/unambiguous': 0\n}" }, { "alpha_fraction": 0.7617449760437012, "alphanum_fraction": 0.7617449760437012, "avg_line_length": 23.83333396911621, "blob_id": "2879dab51c04579dddda2612296b9388921eba9f", "content_id": "0fa85af8e22848199d7ea3ccadf03f5abb77bec3", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 298, "license_type": "permissive", "max_line_length": 65, "num_lines": 12, "path": "/orchestrator/core/orc_server/conformance/urls.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.urls import include, path\nfrom rest_framework import routers\n\nfrom . 
import views\n\nrouter = routers.DefaultRouter()\nrouter.register('unittest', views.UnitTests, basename='unittest')\nrouter.register('test', views.ConformanceViewSet)\n\nurlpatterns = [\n path('', include(router.urls))\n]\n" }, { "alpha_fraction": 0.6631473898887634, "alphanum_fraction": 0.6645418405532837, "avg_line_length": 33.620689392089844, "blob_id": "22bb25641145a2f15e5c24f69e1a2c59f249234e", "content_id": "f7aa212fa330a6d5b09e80d7acb0ea6adac06114", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5020, "license_type": "permissive", "max_line_length": 106, "num_lines": 145, "path": "/orchestrator/core/orc_server/conformance/views/viewsets.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import threading\nimport unittest\nimport uuid\n\nfrom io import StringIO\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework import viewsets, filters\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\n# Local imports\nfrom actuator.models import Actuator, ActuatorSerializerReadOnly\nfrom utils import FrozenDict\nfrom ..models import ConformanceTest, ConformanceTestSerializer\nfrom ..tests import get_tests, load_test_suite, tests_in_suite, TestResults\n\n\ndef test_thread(test_suite, db_test):\n test_log = StringIO()\n results = unittest.TextTestRunner(\n stream=test_log,\n failfast=False,\n resultclass=TestResults\n ).run(test_suite)\n\n db_test.test_results = results.getReport(verbose=True)\n db_test.save()\n\n\ndef toFrozen(o) -> FrozenDict:\n if isinstance(o, dict):\n return FrozenDict({k: toFrozen(v) for k, v in o.items()})\n if isinstance(o, list):\n return tuple(map(toFrozen, o))\n return o\n\n\nclass ConformanceViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = (IsAuthenticated,)\n serializer_class = ConformanceTestSerializer\n lookup_field = 'test_id'\n\n permissions = {\n 'retrieve': (IsAuthenticated,),\n }\n\n queryset = ConformanceTest.objects.order_by('-test_time')\n filter_backends = (filters.OrderingFilter,)\n ordering_fields = ('test_id', 'actuator_tested', 'test_time', 'tests_run', 'test_results')\n\n def get_permissions(self):\n \"\"\"\n Instantiates and returns the list of permissions that this view requires.\n \"\"\"\n return [permission() for permission in self.permissions.get(self.action, self.permission_classes)]\n\n def list(self, request, *args, **kwargs):\n \"\"\"\n Return a list of all conformance tests that the user has executed, all tests if admin\n \"\"\"\n self.pagination_class.page_size_query_param = 'length'\n self.pagination_class.max_page_size = 100\n queryset = self.filter_queryset(self.get_queryset())\n\n # if not request.user.is_staff: # Standard User\n # queryset = queryset.filter(user=request.user)\n\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n\n def retrieve(self, request, *args, **kwargs):\n \"\"\"\n Return a specific conformance test that the user has executed, any test if admin\n \"\"\"\n command = self.get_object()\n\n # if not request.user.is_staff: # Standard User\n # if 
command.user is not request.user:\n # raise PermissionDenied(detail='User not authorised to access command', code=401)\n\n serializer = self.get_serializer(command)\n return Response(serializer.data)\n\n\nclass UnitTests(viewsets.ViewSet):\n permission_classes = (IsAuthenticated,)\n serializer_class = None # ConformanceTestSerializer\n lookup_field = 'profile'\n\n permissions = {\n 'create': (IsAuthenticated,),\n 'retrieve': (IsAuthenticated,),\n }\n\n # Custom attributes\n unittest_Suite = load_test_suite()\n loaded_tests = tests_in_suite(unittest_Suite)\n\n # Override methods\n def get_permissions(self):\n \"\"\"\n Instantiates and returns the list of permissions that this view requires.\n \"\"\"\n return [permission() for permission in self.permissions.get(self.action, self.permission_classes)]\n\n # View methods\n def create(self, request, *args, **kwargs):\n \"\"\"\n Create and run a new conformance test based on the given tests\n \"\"\"\n actuator = request.data.get('actuator', None)\n try:\n uuid.UUID(actuator, version=4)\n actuator = Actuator.objects.get(actuator_id=actuator)\n except (ObjectDoesNotExist, ValueError):\n raise NotFound(f\"Actuator uuid not valid/found\")\n\n act = toFrozen(ActuatorSerializerReadOnly(actuator).data)\n testSuite = get_tests(self.unittest_Suite, request.data.get('tests', {}), actuator=act)\n test = ConformanceTest(\n actuator_tested=actuator,\n tests_run=tests_in_suite(testSuite)\n )\n test.save()\n threading.Thread(target=test_thread, args=(testSuite, test)).start()\n\n return Response(ConformanceTestSerializer(test).data)\n\n def list(self, request, *args, **kwargs):\n \"\"\"\n Return a list of all conformance tests that the user has executed, all tests if admin\n \"\"\"\n return Response(tests_in_suite(self.unittest_Suite))\n\n def retrieve(self, request, *args, **kwargs):\n \"\"\"\n Return specific conformance tests given the profile specified\n \"\"\"\n return Response(self.loaded_tests.get(kwargs['profile'], {}))\n" }, { "alpha_fraction": 0.593794047832489, "alphanum_fraction": 0.5966149568557739, "avg_line_length": 29.17021369934082, "blob_id": "9dc80caa91cd54660cef276c0d0c9245b1058cad", "content_id": "311f0670324551e6b500616ecfa9dcff9ecd75e8", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1418, "license_type": "permissive", "max_line_length": 119, "num_lines": 47, "path": "/orchestrator/gui/client/src/components/command/pages/generate/lib/jadn_field/enumerated.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\n\nimport {\n FormGroup,\n FormText,\n Label,\n Input\n} from 'reactstrap';\n\nimport { isOptional_jadn } from './'\n\nimport * as GenActions from '../../../../../../actions/generate'\n\nclass EnumeratedField extends Component {\n render() {\n let name = this.props.name || this.props.def.name\n let msgName = (this.props.parent ? [this.props.parent, name] : [name]).join('.')\n let def_opts = this.props.def.fields.map(opt => <option key={ opt[0] } data-subtext={ opt[2] }>{ opt[1] }</option>)\n\n return (\n <FormGroup tag=\"fieldset\" className=\"border border-dark p-2\">\n <legend>{ (isOptional_jadn(this.props.def) ? '' : '*') + name }</legend>\n { this.props.def.desc != '' ? 
<FormText color=\"muted\">{ this.props.def.desc }</FormText> : '' }\n <Input\n type=\"select\"\n name={ name }\n title={ name }\n className=\"selectpicker\"\n onChange={ e => this.props.optChange(msgName, e.target.value) }\n >\n <option data-subtext={ name + ' options' } value='' >{ name + ' options' }</option>\n { def_opts }\n </Input>\n </FormGroup>\n )\n }\n}\n\nconst mapStateToProps = (state) => ({\n schema: state.Generate.selectedSchema\n})\n\nconst mapDispatchToProps = (dispatch) => ({\n})\n\nexport default connect(mapStateToProps, mapDispatchToProps)(EnumeratedField)\n" }, { "alpha_fraction": 0.613007664680481, "alphanum_fraction": 0.6141847968101501, "avg_line_length": 35.1489372253418, "blob_id": "29f4b2fef4341b19c860320615b0d64beb4b5ca4", "content_id": "14d85cd76a0157e39bc8c27d4ac16f1c1ed79e4a", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3398, "license_type": "permissive", "max_line_length": 163, "num_lines": 94, "path": "/logger/gui/internals/scripts/DownloadBootswatch.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env node\nconst path = require('path');\nconst fs = require('fs-extra');\nconst NamedRegExp = require('named-regexp-groups');\nconst download = require('download-file');\nconst request = require('sync-request');\nconst csso = require('csso');\n\nconst ROOT_DIR = path.join(__dirname, '..', '..', 'src', 'components', 'utils', 'theme-switcher');\nconst CHECK_DIRS = ['assets', 'assets/css', 'assets/fonts'];\n\nconst THEME_API = 'https://bootswatch.com/api/4.json';\nconst THEME_FONT_DIR = '/assets/';\nconst THEME_FONT_URL = '../assets/';\n\nconst CSS_URL_IMPORT = new NamedRegExp(/^@import url\\([\"'](:<url>.*?)[\"']\\);\\s*?$/);\nconst FILE_URL_IMPORT = new NamedRegExp(/\\s*?src:( local\\(.*?\\),)? 
local\\(['\"](:<name>.*?)['\"]\\), url\\(['\"]?(:<url>.*?)['\"]?\\) format\\(['\"](:<format>.*?)['\"]\\);/);\nconst URL_REPLACE = new NamedRegExp(/url\\([\"\"]?(:<url>.*?)[\"\"]?\\)/);\n\nCHECK_DIRS.forEach(d => {\n const dir = path.join(ROOT_DIR, d);\n if (!fs.pathExistsSync(dir)) {\n fs.mkdirSync(dir);\n }\n});\n\nlet BootswatchThemes = request('GET', THEME_API);\nBootswatchThemes = JSON.parse(BootswatchThemes.getBody('utf8'));\nconst themeNames = [];\n\nBootswatchThemes.themes.forEach(theme => {\n console.log(`Downloading Theme: ${theme.name}`);\n const themeName = theme.name.toLowerCase();\n themeNames.push(themeName);\n\n let preProcessCss = [];\n const css = request('GET', theme.css).getBody('utf8');\n\n css.split(/\\n\\r?/gm).forEach(line => {\n if (line.startsWith('@import url(')) {\n const cssImportURL = line.replace(CSS_URL_IMPORT, '$+{url}');\n const cssImport = request('GET', cssImportURL).getBody('utf8');\n\n preProcessCss.push(`/* ${line} */`);\n preProcessCss = preProcessCss.concat(cssImport.split(/\\n\\r?/g));\n } else {\n preProcessCss.push(line);\n }\n });\n\n // set imports to local & download files\n const postProcessCss = preProcessCss.map(line => {\n let processedLine = line;\n if (line.match(/\\s*?src:.*url\\([\"']?https?:\\/\\/.*/) && !line.startsWith('/*')) {\n const src = FILE_URL_IMPORT.exec(line).groups;\n const ext = path.extname(src.url);\n const fileName = `fonts/${src.name}${ext}`;\n\n if (!fs.existsSync(path.join(ROOT_DIR, THEME_FONT_DIR, fileName))) {\n const opts = {\n directory: path.join(ROOT_DIR, THEME_FONT_DIR, 'fonts'),\n filename: src.name + ext\n };\n download(src.url, opts, err => {\n if (err) throw err;\n console.log(`Downloaded reference: ${opts.filename}`);\n });\n }\n processedLine = processedLine.replace(URL_REPLACE, `url('${THEME_FONT_URL}${fileName}')`);\n }\n\n processedLine = processedLine.replace(/\\\\[^\\\\]/g, '\\\\\\\\');\n processedLine = processedLine.replace(/^\\s+\\*/, '*');\n processedLine = processedLine.replace(/^\\s+/, '\\t');\n return processedLine;\n });\n\n const themeFile = fs.createWriteStream(path.join(ROOT_DIR, 'assets', 'css', `${themeName}.css`), {flags: 'w'});\n\n const minStyles = csso.minify(postProcessCss.join(''), {\n comments: false,\n restructure: true,\n sourceMap: false\n }).css;\n\n themeFile.write(minStyles);\n themeFile.end();\n});\n\n// make theme index file\nconst themeIndexFile = fs.createWriteStream(path.join(ROOT_DIR, 'themes.js'), {flags: 'w'});\nthemeIndexFile.write(`const validThemes = [\\n '${themeNames.join(\"',\\n '\")}'\\n];\\n`);\nthemeIndexFile.write('export default validThemes;');\nthemeIndexFile.end();\n" }, { "alpha_fraction": 0.5361841917037964, "alphanum_fraction": 0.5460526347160339, "avg_line_length": 15.88888931274414, "blob_id": "3a478a5660e814aad44c77016dc3518542e91af9", "content_id": "8b943d1e632778c5d7d5dfbb19feacca98f087e1", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 304, "license_type": "permissive", "max_line_length": 57, "num_lines": 18, "path": "/base/modules/utils/root/setup.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from setuptools import setup\n\nversion = dict(\n major=0,\n minor=1,\n bugfix=0\n)\n\nsetup(\n name='ScreamingBunny Utils',\n version='{major}.{minor}.{bugfix}'.format(**version),\n package_data={\n 'SB_Utils': [\n 
'./sb_utils/*',\n ]\n },\n include_package_data=True\n)\n" }, { "alpha_fraction": 0.5406976938247681, "alphanum_fraction": 0.542151153087616, "avg_line_length": 19.235294342041016, "blob_id": "ebd33cb669c35364398ccef5d5760613e9c38dc4", "content_id": "6fa08556efbd16fc06b01c874f586b74be94e7da", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 688, "license_type": "permissive", "max_line_length": 69, "num_lines": 34, "path": "/orchestrator/gui/server/gui_server/webApp/apps.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport sys\n\nfrom django.apps import AppConfig\n\nfrom utils import OrchestratorAPI\n\n\nclass WebAppConfig(AppConfig):\n name = 'webApp'\n _FALSE_READY = (\n 'runserver',\n 'orchestrator.wsgi',\n 'uwsgi'\n )\n\n def ready(self):\n \"\"\"\n App ready, init runtime objects\n :return: None\n \"\"\"\n if all(state not in sys.argv for state in self._FALSE_READY):\n return\n\n '''\n orc_api = OrchestratorAPI()\n resp = orc_api.root.info()\n print(f\"\\n{resp.body}\\n\")\n\n resp = orc_api.root.api()\n print(f\"\\n{resp.body}\\n\")\n '''\n" }, { "alpha_fraction": 0.6060078144073486, "alphanum_fraction": 0.607313871383667, "avg_line_length": 37.28333282470703, "blob_id": "f8875adb36c4acd49a3050bf7578d603d4e97a0f", "content_id": "3bc4b58699172d0111af99177e3828599a58fbe4", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2297, "license_type": "permissive", "max_line_length": 126, "num_lines": 60, "path": "/orchestrator/gui/server/gui_server/utils/schema.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.urls import resolve\nfrom django.utils.six.moves.urllib import parse as urlparse\n\nfrom rest_framework.compat import coreapi\nfrom rest_framework.schemas import AutoSchema\n\n\nclass OrcSchema(AutoSchema):\n \"\"\"\n Schema View creation based on HTTP Method\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(OrcSchema, self).__init__(manual_fields=kwargs.get('manual_fields', []))\n self.methods_fields = {k: v for k, v in kwargs.items() if k != 'manual_fields'}\n self.all_fields = kwargs.get('fields', [])\n\n for _, fields in self.methods_fields.items():\n self.all_fields.extend(field for field in fields if field not in self.all_fields)\n\n def get_link(self, path, method, base_url):\n fields = [\n *self.get_path_fields(path, method),\n *self.get_serializer_fields(path, method),\n *self.get_pagination_fields(path, method),\n *self.get_filter_fields(path, method)\n ]\n\n manual_fields = self.get_manual_fields(path, method)\n fields = self.update_fields(fields, manual_fields)\n\n http_method_fields = self.methods_fields.get(f\"{method.lower()}_fields\", [])\n fields = self.update_fields(fields, http_method_fields)\n\n try:\n url_name = resolve(path).url_name\n if url_name is not None:\n url_name = '_'.join(url_name.split('-')[1:]).lower()\n view_method_fields = tuple(self.methods_fields.get(f\"{url_name}_fields\", []))\n fields = self.update_fields(fields, view_method_fields)\n except 
Exception as e:\n # print(f\"URL Error: {e}\")\n pass\n\n path = path[1:] if base_url and path.startswith('/') else path\n encoding = self.get_encoding(path, method) if fields and any(f.location in ('form', 'body') for f in fields) else None\n\n return coreapi.Link(\n url=urlparse.urljoin(base_url, path),\n action=method.lower(),\n encoding=encoding,\n fields=fields,\n description=self.get_description(path, method)\n )\n\n @property\n def query_fields(self):\n return [field for field in self.all_fields if field.location == 'query']\n" }, { "alpha_fraction": 0.7554585337638855, "alphanum_fraction": 0.7598253488540649, "avg_line_length": 24.44444465637207, "blob_id": "169450e085d81c5bdb9ded36350e7d3eeb29def4", "content_id": "f05618de7f45a7e493ab4624d071c3d5f4729aca", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 229, "license_type": "permissive", "max_line_length": 52, "num_lines": 9, "path": "/orchestrator/gui/server/gui_server/webApp/views/gui.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import reverse\n\n\ndef gui_redirect(request):\n return HttpResponseRedirect(reverse('api.root'))\n" }, { "alpha_fraction": 0.7393183708190918, "alphanum_fraction": 0.7585203647613525, "avg_line_length": 34.17836380004883, "blob_id": "2bf3f6edec3b9633d41f3688f76c7fc634b375d5", "content_id": "1fb60771840a434027dd9757e701d6efb2e394e6", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 12030, "license_type": "permissive", "max_line_length": 122, "num_lines": 342, "path": "/base/modules/utils/.pylintrc", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "[MASTER]\n# Add files or directories to the blacklist. They should be base names, not paths.\nignore=dev_test.py,conversion_examples.py\n\n# Pickle collected data for later comparisons.\npersistent=no\n\n# Use multiple processes to speed up Pylint.\njobs=4\n\n# A comma-separated list of package or module names from where C extensions may be loaded. Extensions are loading into\n# the active Python interpreter and may run arbitrary code\nextension-pkg-whitelist=\n\nprofile=no\ncache-size=500\n\n[MESSAGES CONTROL]\n# Only show warnings with the listed confidence levels. Leave empty to show all.\n# Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED\n#confidence=\n\n# Enable the message, report, category or checker with the given id(s). You can either give multiple identifier\n# separated by comma (,) or put this option multiple time (only on the command line, not in the configuration file\n# where it should appear only once). See also the \"--disable\" option for examples.\n#enable=\n\n# Disable the message, report, category or checker with the given id(s). You can either give multiple identifiers\n# separated by comma (,) or put this option multiple times (only on the command line, not in the configuration file\n# where it should appear only once). You can also use \"--disable=all\" to disable everything first and then reenable\n# specific checks. 
For example, if you want to run only the similarities checker, you can use\n# \"--disable=all --enable=similarities\". If you want to run only the classes checker, but have no Warning level\n# messages displayed, use \"--disable=all --enable=classes --disable=W\"\n#\n# run `pylint --list-msgs` to view all available options\ndisable=C1001,C0103,C0111,C0411,I0011,I0012,R0201,R0901,W0142,W0212,W0232,W0613,W0702,W0704\n\n\n[REPORTS]\n# Set the output format. Available formats are text, parseable, colorized, msvs (visual studio) and html. You can also\n# give a reporter class, eg mypackage.mymodule.MyReporterClass.\noutput-format=colorized\n\n# Put messages in a separate file for each module / package specified on the command line instead of printing them on\n# stdout. Reports (if any) will be written in a file name \"pylint_global.[txt|html]\". This option is deprecated and\n# it will be removed in Pylint 2.0.\nfiles-output=no\n\n# Tells whether to display a full report or only the messages\nreports=no\n\n# Python expression which should return a note less than 10 (10 is the highest note). You have access to the variables\n# errors warning, statement which respectively contain the number of errors / warnings messages and the total number\n# of statements analyzed. This is used by the global evaluation report (RP0004).\nevaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)\n\n# Template used to display messages. This is a python new-style format string used to format the message information.\n# See doc for all details\nmsg-template={path}:{line}: [{msg_id}({symbol}), {obj}] {msg}\n\ninclude-ids=yes\n\n\n[BASIC]\n# Good variable names which should always be accepted, separated by a comma\ngood-names=_,i,j,k,e,qs,pk,setUp,tearDown\n\n# Bad variable names which should always be refused, separated by a comma\nbad-names=foo,bar,baz,toto,tutu,tata\n\n# Colon-delimited sets of names that determine each other's naming style when the name regexes allow several styles.\nname-group=\n\n# Include a hint for the correct naming format with invalid-name\ninclude-naming-hint=no\n\n# List of decorators that produce properties, such as abc.abstractproperty. 
Add to this list to register other\n# decorators that produce valid properties.\n# property-classes=abc.abstractproperty\n\n# Regular expression matching correct function names\nfunction-rgx=[a-zA_][a-zA-Z0-9_]{2,70}$\n\n# Naming hint for function names\nfunction-name-hint=[a-zA_][a-zA-Z0-9_]{2,70}$\n\n# Regular expression matching correct variable names\nvariable-rgx=[a-z_][a-z0-9_]{2,30}$\n\n# Naming hint for variable names\nvariable-name-hint=[a-z_][a-z0-9_]{2,30}$\n\n# Regular expression matching correct constant names\nconst-rgx=(([A-Z_][A-Z0-9_]*)|([a-z_][a-z0-9_]*)|(__.*__)|register|urlpatterns)$\n\n# Naming hint for constant names\nconst-name-hint=(([A-Z_][A-Z0-9_]*)|([a-z_][a-z0-9_]*)|(__.*__)|register|urlpatterns)$\n\n# Regular expression matching correct attribute names\nattr-rgx=[a-z_][a-z0-9_]{2,30}$\n\n# Naming hint for attribute names\nattr-name-hint=[a-z_][a-z0-9_]{2,30}$\n\n# Regular expression matching correct argument names\nargument-rgx=[a-z_][a-z0-9_]{2,30}$\n\n# Naming hint for argument names\nargument-name-hint=[a-z_][a-z0-9_]{2,30}$\n\n# Regular expression matching correct class attribute names\nclass-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$\n\n# Naming hint for class attribute names\nclass-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$\n\n# Regular expression matching correct inline iteration names\ninlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$\n\n# Naming hint for inline iteration names\ninlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$\n\n# Regular expression matching correct class names\nclass-rgx=[A-Z_][a-zA-Z0-9_]+$\n\n# Naming hint for class names\nclass-name-hint=[A-Z_][a-zA-Z0-9]+$\n\n# Regular expression matching correct module names\nmodule-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$\n\n# Naming hint for module names\nmodule-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$\n\n# Regular expression matching correct method names\nmethod-rgx=[a-z_][a-zA-Z0-9_]{2,70}$\n\n# Naming hint for method names\nmethod-name-hint=[a-z_][a-zA-Z0-9_]{2,70}$\n\n# Regular expression which should only match function or class names that do\n# not require a docstring.\nno-docstring-rgx=__.*__|_.*\n\n# Minimum line length for functions/classes that require docstrings, shorter\n# ones are exempt.\ndocstring-min-length=-1\n\n\n[ELIF]\n# Maximum number of nested blocks for function / method body\nmax-nested-blocks=10\n\n\n[TYPECHECK]\n# Tells whether missing members accessed in mixin class should be ignored. A mixin class is detected if its name ends\n# with \"mixin\" (case insensitive).\nignore-mixin-members=yes\n\n# List of classes names for which member attributes should not be checked\n# (useful for classes with attributes dynamically set).\n# ignored-classes=SQLObject,WSGIRequest\n\n# List of decorators that produce context managers, such as contextlib.contextmanager.\n# Add to this list to register other decorators that produce valid context managers.\ncontextmanager-decorators=contextlib.contextmanager\n\n# When zope mode is activated, add a predefined set of Zope acquired attributes\n# to generated-members.\nzope=no\n\n# List of members which are set dynamically and missed by pylint inference\n# system, and so shouldn't trigger E0201 when accessed.\ngenerated-members=objects,DoesNotExist,id,pk,_meta,base_fields,context\n\n# List of method names used to declare (i.e. 
assign) instance attributes\ndefining-attr-methods=__init__,__new__,setUp\n\n\n[FORMAT]\n# Maximum number of characters on a single line.\nmax-line-length=128\n\n# Regexp for a line that is allowed to be longer than the limit.\nignore-long-lines=^\\s*?(#|<?https?://\\S+>)?.*$\n\n# Allow the body of an if to be on the same line as the test if there is no else.\nsingle-line-if-stmt=no\n\n# List of optional constructs for which whitespace checking is disabled. `dict- separator` is used to allow tabulation\n# in dicts, etc.: {1 : 1,\\n222: 2}. `trailing-comma` allows a space between comma and closing bracket: (a, ).\n# `empty-line` allows space-only lines.\nno-space-check=trailing-comma,dict-separator\n\n# Maximum number of lines in a module\nmax-module-lines=850\n\n# String used as indentation unit. This is usually \" \" (4 spaces) or \"\\t\" (1 tab).\n# Use 2 spaces consistent with TensorFlow style.\nindent-string=' '\n\n# Number of spaces of indent required inside a hanging or continued line.\nindent-after-paren=4\n\n# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.\nexpected-line-ending-format=\n\n\n[MISCELLANEOUS]\n# List of note tags to take in consideration, separated by a comma.\nnotes=FIXME,XXX,TODO\n\n\n[VARIABLES]\n# Tells whether we should check for unused import in __init__ files.\ninit-import=no\n\n# A regular expression matching the name of dummy variables (i.e. expectedly not used).\ndummy-variables-rgx=_|dummy\n\n# List of additional names supposed to be defined in builtins. Remember that\n# you should avoid to define new builtins when possible.\nadditional-builtins=\n\n# List of strings which can identify a callback function by name. A callback\n# name must start or end with one of those strings.\ncallbacks=cb_,_cb\n\n# List of qualified module names which can have objects that can redefine builtins.\nredefining-builtins-modules=six.moves,future.builtins\n\n[LOGGING]\n# Logging modules to check that the string format arguments are in logging function parameter format\nlogging-modules=logging\n\n[SIMILARITIES]\n# Minimum lines number of a similarity.\nmin-similarity-lines=6\n\n# Ignore comments when computing similarities.\nignore-comments=yes\n\n# Ignore docstrings when computing similarities.\nignore-docstrings=yes\n\n# Ignore imports when computing similarities.\nignore-imports=no\n\n[SPELLING]\n# Spelling dictionary name. Available dictionaries: none. To make it working\n# install python-enchant package.\nspelling-dict=\n\n# List of comma separated words that should not be checked.\nspelling-ignore-words=\n\n# A path to a file that contains private dictionary; one word per line.\nspelling-private-dict-file=\n\n# Tells whether to store unknown words to indicated private dictionary in\n# --spelling-private-dict-file option instead of raising a message.\nspelling-store-unknown-words=no\n\n\n[IMPORTS]\n# Deprecated modules which should not be used, separated by a comma\ndeprecated-modules=regsub,TERMIOS,Bastion,rexec\n\n# Create a graph of every (i.e. 
internal and external) dependencies in the given file (report RP0402 must not be disabled)\nimport-graph=\n\n# Create a graph of external dependencies in the given file (report RP0402 must not be disabled)\next-import-graph=\n\n# Create a graph of internal dependencies in the given file (report RP0402 must not be disabled)\nint-import-graph=\n\n# Force import order to recognize a module as part of the standard compatibility libraries.\nknown-standard-library=\n\n# Force import order to recognize a module as part of a third party library.\nknown-third-party=enchant\n\n# Analyse import fallback blocks. This can be used to support both Python 2 and 3 compatible code, which means that\n# the block might have code that exists only in one or another interpreter, leading to false positives when analysed.\nanalyse-fallback-blocks=no\n\n\n[DESIGN]\n# Maximum number of arguments for function / method\nmax-args=10\n\n# Argument names that match this expression will be ignored. Default to name with leading underscore\nignored-argument-names=_.*\n\n# Maximum number of locals for function / method body\nmax-locals=20\n\n# Maximum number of return / yield for function / method body\nmax-returns=10\n\n# Maximum number of branch for function / method body\nmax-branches=15\n\n# Maximum number of statements in function / method body\nmax-statements=50\n\n# Maximum number of parents for a class (see R0901).\nmax-parents=7\n\n# Maximum number of attributes for a class (see R0902).\nmax-attributes=15\n\n# Minimum number of public methods for a class (see R0903).\nmin-public-methods=0\n\n# Maximum number of public methods for a class (see R0904).\nmax-public-methods=50\n\n# Maximum number of boolean expressions in a if statement\nmax-bool-expr=5\n\n\n[CLASSES]\n# List of method names used to declare (i.e. assign) instance attributes.\ndefining-attr-methods=__init__,__new__,setUp\n\n# List of valid names for the first argument in a class method.\nvalid-classmethod-first-arg=cls\n\n# List of valid names for the first argument in a metaclass class method.\nvalid-metaclass-classmethod-first-arg=mcs\n\n# List of member names, which should be excluded from the protected access\n# warning.\nexclude-protected=_asdict,_fields,_replace,_source,_make\n\n\n[EXCEPTIONS]\n# Exceptions that will emit a warning when being caught. 
Defaults to\n# \"Exception\"\novergeneral-exceptions=Exception" }, { "alpha_fraction": 0.5872842669487, "alphanum_fraction": 0.5891008377075195, "avg_line_length": 33.841773986816406, "blob_id": "6df75aeac7277265499ed274a5f915d29bb33923", "content_id": "fb6adfe7918bc4b72d72c4e21783c327b8563ff3", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5505, "license_type": "permissive", "max_line_length": 117, "num_lines": 158, "path": "/orchestrator/core/orc_server/tracking/middleware.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import ast\nimport re\n\nfrom django.utils.deprecation import MiddlewareMixin\nfrom django.utils.timezone import now\n\nfrom .conf import settings, TrackingConfig\nfrom .models import RequestLog\n\n\nclass LoggingMiddleware(MiddlewareMixin):\n \"\"\"\n Adapted from DRF-Tracking - drf-tracking.readthedocs.io\n Applied as middleware to catch all API requests rather than per view/apiview\n \"\"\"\n _CLEANED_SUBSTITUTE = \"********************\"\n _SENSITIVE_FIELDS = {\"api\", \"token\", \"key\", \"secret\", \"password\", \"password1\", \"password2\", \"signature\"}\n _PREFIX = TrackingConfig.Meta.prefix\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.log = {}\n\n self._SENSITIVE_FIELDS.update({f.lower() for f in getattr(settings, f\"{self._PREFIX}_SENSITIVE_FIELDS\")})\n\n def process_request(self, request):\n \"\"\"\n Begin processing request, make initial log key/values\n :param request: request instance\n :return: None\n \"\"\"\n self.log = dict(\n requested_at=now(),\n method=request.method,\n path=request.path,\n host=request.get_host(),\n data=self._clean_data(getattr(request, request.method, request.body))\n )\n\n def process_response(self, request, response):\n \"\"\"\n Finish processing request, make final log key/values and save log to database\n :param request: request instance\n :param response: response instance\n :return: None\n \"\"\"\n if self._should_log(request, response):\n self.log.update(dict(\n remote_addr=self._get_ip_address(request),\n view=self._get_view_name(request),\n view_method=self._get_view_method(request),\n query_params=self._clean_data(getattr(request, \"query_params\", {})),\n user=self._get_user(request),\n response_ms=self._get_response_ms(),\n response=response.rendered_content if hasattr(response, \"rendered_content\") else response.getvalue(),\n status_code=response.status_code\n ))\n\n RequestLog.objects.create(**self.log)\n\n return response\n\n def process_exception(self, request, exception):\n \"\"\"\n Gracefully process the exception that was raised\n :param request: request instance\n :param exception: exception raised\n :return:\n \"\"\"\n print(f\"Tracking Exception - {exception.__class__.__name__} - {exception}\")\n\n def _should_log(self, request, response):\n \"\"\"\n Check if the request should be logged\n :param request: request instance\n :param response: response instance\n \"\"\"\n\n log_prefixes = getattr(settings, f\"{self._PREFIX}_URL_PREFIXES\")\n log_levels = getattr(settings, f\"{self._PREFIX}_REQUEST_LEVELS\")\n\n return (\n any(re.compile(prefix).match(request.path) for prefix in log_prefixes)\n and\n any(response.status_code in levels for levels in log_levels)\n )\n\n def _get_user(self, request):\n \"\"\"\n Get requesting user, if 
authenticated\n        :param request: request instance\n        :return: user of the request or None\n        \"\"\"\n        user = request.user\n        return None if user.is_anonymous else user\n\n    def _get_ip_address(self, request):\n        \"\"\"\n        Get the remote ip address the request was generated from\n        :param request: request instance\n        :return: remote IP Address\n        \"\"\"\n        ipaddr = request.META.get(\"HTTP_X_FORWARDED_FOR\", None)\n        # X_FORWARDED_FOR returns client1, proxy1, proxy2,...\n        return ipaddr.split(\",\")[0].strip() if ipaddr else request.META.get(\"REMOTE_ADDR\", \"\")\n\n    def _get_view_method(self, request):\n        \"\"\"\n        Get view method\n        :param request: request instance\n        :return: method of the request\n        \"\"\"\n        return getattr(self, \"action\", request.method).lower()\n\n    def _get_view_name(self, request):\n        \"\"\"\n        Get view name\n        :param request: request instance\n        :return: function name that was called\n        \"\"\"\n        return getattr(request.resolver_match, 'view_name', None)\n\n    def _get_response_ms(self):\n        \"\"\"\n        Get the duration of the request response cycle in milliseconds, 0 if negative\n        :return: duration of the response in milliseconds\n        \"\"\"\n        response_timedelta = now() - self.log[\"requested_at\"]\n        response_ms = int(response_timedelta.total_seconds() * 1000)\n        return max(response_ms, 0)\n\n    def _clean_data(self, data):\n        \"\"\"\n        Clean a dictionary of data of potentially sensitive info before sending to the database\n        :param data: dictionary to clean\n        :return: cleaned dictionary\n        \"\"\"\n        if isinstance(data, list):\n            return [self._clean_data(d) for d in data]\n\n        if isinstance(data, dict):\n            clean_data = dict(data)\n\n            for key, value in clean_data.items():\n                try:\n                    value = ast.literal_eval(value)\n                except (ValueError, SyntaxError):\n                    pass\n\n                if isinstance(value, (dict, list)):\n                    clean_data[key] = self._clean_data(value)\n\n                if key.lower() in self._SENSITIVE_FIELDS:\n                    clean_data[key] = self._CLEANED_SUBSTITUTE\n\n            return clean_data\n\n        return data\n" }, { "alpha_fraction": 0.683700442314148, "alphanum_fraction": 0.6977973580360413, "avg_line_length": 44.41999816894531, "blob_id": "8c5316807ccf638c40deaf6d439306d6476ace7a", "content_id": "1c2ad61ec6cca230dd58d887f16a7893069a159f", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2270, "license_type": "permissive", "max_line_length": 124, "num_lines": 50, "path": "/logger/ReadMe.md", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# OASIS TC Open: oif-logger\n\n## Server\n### About this Image\n- This image is Alpine 3.10 with a simple syslog server for use with [Elasticsearch](https://hub.docker.com/_/elasticsearch)\n- Listens on port 514 by default, configurable\n- Logs to file and Elasticsearch\n- Caches logs if Elasticsearch cannot be found\n\n### How to use this image\nNote: Pulling an image requires using a specific tag (server or gui); the latest tag is not supported.\n\nEnvironment Variables\n\n| Variable | Type | Description | Default |\n| ----------- | ----------- | ----------- | ----------- |\n| ES_HOST | String | Host/IP of the Elasticsearch node | es_logger\n| ES_PORT | Integer | Port of the Elasticsearch node | 9200\n| LOG_PREFIX | String | Prefix for the index in the format of `log_{LOG_PREFIX}-{YYYY.mm.dd}` | logger\n| ES_TRIES | Integer | Max attempts to connect to Elasticsearch in 1s intervals | 60\n\n
### Resources\n- Based on [Gist from marcelom](https://gist.github.com/marcelom/4218010)\n\n## GUI\n### About this Image\n- This image is Alpine 3.10 with a simple GUI for use with [Elasticsearch](https://hub.docker.com/_/elasticsearch)\n- UI port - 8081\n\n### How to use this image\nNote: Pulling an image requires using a specific tag (server or gui); the latest tag is not supported.\n\nEnvironment Variables\n\n| Variable | Type | Description | Default |\n| ----------- | ----------- | ----------- | ----------- |\n| ES_HOST | String | Host/IP of the Elasticsearch node | es_logger\n| ES_PORT | Integer | Port of the Elasticsearch node | 9200\n\n### Resources\n- [Lodash](https://www.npmjs.com/package/lodash) - Lodash library for Node\n- [Moment](https://www.npmjs.com/package/moment) - DateTime formatting/parsing\n- [Query String](https://www.npmjs.com/package/query-string) - Parse and stringify URL query strings\n- [React](https://reactjs.org/) - Core Framework\n  - [DOM](https://www.npmjs.com/package/react-dom)\n  - [Moment](https://www.npmjs.com/package/react-moment) - Date/Time Formatting\n  - [Base Scripts](https://www.npmjs.com/package/react-scripts)\n- [Reactstrap](https://www.npmjs.com/package/reactstrap) - Bootstrap v4 components for React\n- [SearchKit](http://www.searchkit.co/) - Elasticsearch UI components\n- [SearchKit DateFilter](https://www.npmjs.com/package/searchkit-datefilter)" }, { "alpha_fraction": 0.5928633809089661, "alphanum_fraction": 0.5968119502067566, "avg_line_length": 27.260330200195312, "blob_id": "6e61a28bc4965d67f99bc924c26bab7a966a27ff", "content_id": "7264f93787a7e1bf898c07561c10f4cbf9c8c2e8", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6838, "license_type": "permissive", "max_line_length": 156, "num_lines": 242, "path": "/orchestrator/gui/client/src/components/utils/remotePageTable.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\nimport { confirmAlert } from 'react-confirm-alert'\nimport BootstrapTable from 'react-bootstrap-table-next'\nimport paginationFactory from 'react-bootstrap-table2-paginator'\nimport { Button } from 'reactstrap'\n\nimport 'react-confirm-alert/src/react-confirm-alert.css'\nimport 'react-bootstrap-table-next/dist/react-bootstrap-table2.min.css'\n\nimport {\n  getMultiKey,\n  setMultiKey\n} from './'\n\nconst str_fmt = require('string-format')\n\n\nconst RemotePagination = ({ keyField, columns, data, page, pageSize, totalSize, defaultSort, onTableChange }) => {\n  const pagination = paginationFactory({\n    page: page,\n    sizePerPage: pageSize,\n    totalSize: totalSize,\n    hidePageListOnlyOnePage: true,\n    showTotal: true,\n    paginationTotalRenderer: (from, to, size) => (\n      <span className=\"react-bootstrap-table-pagination-total ml-3\">\n        Showing { from } to { to } of { size } Results\n      </span>\n    ),\n    withFirstAndLast: false,\n  })\n\n  // check for null before reading .length\n  if (defaultSort == null || defaultSort.length == 0) {\n    defaultSort = [{\n      dataField: keyField, // if dataField does not match any column defined, it will be ignored.\n      order: 'desc' // desc or asc\n    }]\n  }\n\n  return (\n    <BootstrapTable\n      remote\n      hover\n      striped\n      condensed\n      bootstrap4\n      keyField={ keyField }\n      columns={ columns }\n      data={ data }\n      pagination={ pagination }\n      onTableChange={ onTableChange 
}\n      defaultSorted={ defaultSort }\n      defaultSortDirection='desc'\n    />\n  )\n}\n\nclass RemotePageTable extends Component {\n  constructor(props, context) {\n    super(props, context)\n    this.handleTableChange = this.handleTableChange.bind(this)\n    this.editable = this.props.editRows == true\n    this.keyField = this.props.keyField || 'id'\n    let columns = this.props.columns || []\n\n    if (this.editable) {\n      columns.push({\n        text: 'Options',\n        dataField: 'options',\n        isDummyField: true,\n        keyField: this.keyField,\n        navigate: this.props.navigate,\n        formatter: this.optionsFormatter,\n        deleteConfirm: this.deleteData.bind(this),\n        options: this.props.editOptions\n      })\n    }\n\n    this.state = {\n      page: 1,\n      pageSize: 10,\n      total: 0,\n      columns: columns,\n      sort: 'name',\n      displayData: (this.props.data || []).slice(0, 10)\n    }\n  }\n\n  shouldComponentUpdate(nextProps, nextState) {\n    let props_update = this.props != nextProps\n    let state_update = this.state != nextState\n\n    if (props_update) {\n      this.setState((state, props) => {\n        let startIndex = (state.page - 1) * state.pageSize\n        let endIndex = startIndex + state.pageSize\n\n        return {\n          total: nextProps.total,\n          displayData: this.editData(nextProps.data.sort(this.dynamicSort(this.state.sort)).slice(startIndex, endIndex))\n        }\n      })\n    }\n    return props_update || state_update\n  }\n\n  dynamicSort(property) {\n    let sortOrder = 1\n    if (property[0] === \"-\") {\n      sortOrder = -1\n      property = property.substr(1)\n    }\n    return (a, b) => ((a[property] < b[property]) ? -1 : (a[property] > b[property]) ? 1 : 0) * sortOrder\n  }\n\n  tablePagination(page, sizePerPage, currentSize) {\n    let startIndex = (page - 1) * sizePerPage\n    let endIndex = startIndex + sizePerPage\n    let minRows = startIndex + 1\n\n    if ((this.props.data.length < minRows || currentSize < sizePerPage) && this.props.total != this.props.data.length) {\n      this.props.dataGet(page, sizePerPage, this.state.sort)\n    }\n\n    this.setState((state, props) => ({\n      page: page,\n      displayData: this.editData(this.props.data.sort(this.dynamicSort(this.state.sort)).slice(startIndex, endIndex)),\n      pageSize: sizePerPage\n    }))\n  }\n\n  tableSort(column, order) {\n    let sort = (order == 'desc' ? '' : '-') + column\n\n    this.setState((state, props) => {\n      this.props.dataGet(state.page, state.pageSize, sort)\n      let startIndex = (state.page - 1) * state.pageSize\n      let endIndex = startIndex + state.pageSize\n\n      return {\n        sort: sort,\n        displayData: this.editData(this.props.data.sort(this.dynamicSort(sort)).slice(startIndex, endIndex))\n      }\n    })\n  }\n\n  handleTableChange(type, args) {\n    switch(type) {\n      case 'filter':\n        break;\n      case 'pagination':\n        this.tablePagination(args.page, args.sizePerPage, args.data.length)\n        break;\n      case 'sort':\n        this.tableSort(args.sortField, args.sortOrder)\n        break;\n      case 'cellEdit':\n        break;\n      default:\n        break;\n    }\n  }\n\n  editData(data) {\n    return this.editable ? data.map(d => { return { ...d, options: 'options'}}) : data\n  }\n\n  deleteData(delFun, key) {\n    confirmAlert({\n      title: 'Confirm Delete',\n      childrenElement: () => (\n        <div>\n          <p>{ str_fmt('Are you sure you want to delete: {key}?', {key: key}) }</p>\n          {\n            this.props.dataKey.match(/^[Dd]evice/) ? 
(\n <p>This will also remove actuators associated with this device</p>\n ): ''\n }\n </div>\n\n ),\n buttons: [\n {\n label: 'Delete',\n onClick: () => delFun(key)\n }, {\n label: 'Cancel'\n }\n ]\n })\n }\n\n optionsFormatter(cell, row) {\n let i = 1\n let rtn = []\n\n if (Object.keys(this.options).length == 0) {\n rtn.push(<p key={ i++ } >Options not configured</p>)\n } else {\n if (this.options.modal) {\n rtn.push(<this.options.modal key={ i++ } data={ row } />)\n }\n\n if (this.options.navigate) {\n rtn.push(<Button key={ i++ } color='primary' size='sm' onClick={ () => this.options.navigate('/' + row[this.keyField]) }>{ cell }</Button>)\n }\n\n if (this.options.info) {\n rtn.push(<Button key={ i++ } color='info' size='sm' onClick={ () => this.options.info(row[this.keyField]) }>Info</Button>)\n }\n\n if (this.options.delete) {\n rtn.push(<Button key={ i++ } color='danger' size='sm' onClick={ () => this.deleteConfirm(this.options.delete, row[this.keyField]) }>Delete</Button>)\n }\n }\n\n return (<div>{ rtn }</div>)\n }\n\n render() {\n return (\n <RemotePagination\n keyField={ this.keyField }\n columns={ this.state.columns }\n data={ this.state.displayData }\n page={ this.state.page }\n pageSize={ this.state.pageSize }\n totalSize={ this.props.total }\n defaultSort={ this.props.defaultSort || [] }\n onTableChange={ this.handleTableChange }\n />\n )\n }\n}\n\nconst mapStateToProps = (state, props) => ({\n data: getMultiKey(state, props.dataKey) || [],\n total: getMultiKey(state, [props.dataKey.split('.')[0], 'count'].join('.')) || 0\n})\n\nexport default connect(mapStateToProps)(RemotePageTable)" }, { "alpha_fraction": 0.5604053139686584, "alphanum_fraction": 0.5604053139686584, "avg_line_length": 20.383333206176758, "blob_id": "f0fb7dd13b4dded45dbe38517858f2416e0a5af8", "content_id": "e988c4ff8de2422a31876d3ba9674a6135ff3d35", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1283, "license_type": "permissive", "max_line_length": 91, "num_lines": 60, "path": "/orchestrator/gui/client/src/reducers/auth.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import jwtDecode from 'jwt-decode'\nimport * as auth from '../actions/auth'\n\nconst initialState = {\n access: undefined,\n errors: {},\n refresh: false\n}\n\nexport default (state=initialState, action=null) => {\n switch(action.type) {\n case auth.LOGIN_SUCCESS:\n return {\n ...state,\n access: {\n token: action.payload.token,\n ...jwtDecode(action.payload.token)\n },\n errors: {}\n }\n\n case auth.LOGOUT_REQUEST:\n case auth.LOGOUT_SUCCESS:\n return {\n access: undefined,\n errors: {}\n }\n\n case auth.TOKEN_REFRESH:\n console.log('Token Refresh')\n return {\n ...state,\n refresh: true\n }\n\n case auth.TOKEN_REFRESHED:\n console.log('Token Refreshed')\n return {\n ...state,\n access: {\n token: action.payload.token,\n ...jwtDecode(action.payload.token)\n },\n errors: {},\n refresh: false\n }\n\n case auth.LOGIN_FAILURE:\n case auth.LOGOUT_FAILURE:\n case auth.TOKEN_FAILURE:\n console.log('Auth Failure', action.type, action)\n return {\n access: undefined,\n errors: action.payload.response || {'non_field_errors': action.payload.statusText},\n }\n\n default:\n return state\n }\n}\n" }, { "alpha_fraction": 0.6274510025978088, "alphanum_fraction": 0.6535947918891907, "avg_line_length": 18.25, "blob_id": 
"13fa16299b87cc28e370b555f09134a9a867fc0d", "content_id": "22f437e7e7cd59f4ca5ec07b5d8fe02e361a82eb", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 153, "license_type": "permissive", "max_line_length": 31, "num_lines": 8, "path": "/logger/gui/config/eslint_rules/promise_rules.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "// Promise Rules\n\nmodule.exports = {\n 'promise/always-return': 2,\n 'promise/catch-or-return': 2,\n 'promise/no-native': 0,\n 'promise/param-names': 2\n}" }, { "alpha_fraction": 0.6518375277519226, "alphanum_fraction": 0.6518375277519226, "avg_line_length": 18.884614944458008, "blob_id": "079a8fbb38f52bd70c4fa1e4a2b145942b9e474b", "content_id": "2a2f3dbfb1da8e4c00128e7f89226b0174f673b6", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 517, "license_type": "permissive", "max_line_length": 60, "num_lines": 26, "path": "/orchestrator/gui/server/gui_server/orchestrator/admin.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Orchestrator, OrchestratorAuth\n\n# Register your models here.\n\n\nclass OrchestratorAdmin(admin.ModelAdmin):\n list_display = (\n 'orc_id',\n 'name',\n 'proto',\n 'host',\n 'port',\n )\n\n\nclass OrchestratorAuthAdmin(admin.ModelAdmin):\n list_display = (\n 'user',\n 'token',\n 'orchestrator'\n )\n\n\nadmin.site.register(Orchestrator, OrchestratorAdmin)\nadmin.site.register(OrchestratorAuth, OrchestratorAuthAdmin)\n" }, { "alpha_fraction": 0.6110062003135681, "alphanum_fraction": 0.6126247644424438, "avg_line_length": 33.00917434692383, "blob_id": "9bcdbc26524e77f89d032d47552d5beba0e97ce0", "content_id": "3a48aeb3207f780bf61b646beef7742343526d1b", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3707, "license_type": "permissive", "max_line_length": 106, "num_lines": 109, "path": "/orchestrator/core/orc_server/command/views/viewsets.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import coreapi\nimport coreschema\n\nfrom rest_framework import viewsets, filters\nfrom rest_framework.permissions import IsAdminUser, IsAuthenticated\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.response import Response\n\n# Local imports\nimport utils\nfrom .actions import action_send\nfrom ..models import SentHistory, HistorySerializer\n\n\nclass HistoryViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows Command History to be viewed or edited.\n \"\"\"\n permission_classes = (IsAuthenticated,)\n serializer_class = HistorySerializer\n lookup_field = 'command_id'\n\n permissions = {\n 'create': (IsAuthenticated,),\n 'destroy': (IsAdminUser,),\n 'partial_update': (IsAdminUser,),\n 'retrieve': (IsAuthenticated,),\n 'update': (IsAdminUser,),\n # Custom Views\n 'send': (IsAuthenticated,),\n }\n\n queryset = SentHistory.objects.order_by('-received_on')\n 
filter_backends = (filters.OrderingFilter,)\n ordering_fields = ('command_id', 'user', 'received_on', 'actuators', 'status', 'details')\n\n schema = utils.OrcSchema(\n send_fields=[\n coreapi.Field(\n \"actuator\",\n required=False,\n location=\"json\",\n schema=coreschema.String(\n description='Actuator/Type that is to receive the command.'\n )\n ),\n coreapi.Field(\n \"command\",\n required=True,\n location=\"json\",\n schema=coreschema.Object(\n description='Command that is to be sent to the actuator(s).'\n )\n )\n ]\n )\n\n def get_permissions(self):\n \"\"\"\n Instantiates and returns the list of permissions that this view requires.\n \"\"\"\n return [permission() for permission in self.permissions.get(self.action, self.permission_classes)]\n\n def list(self, request, *args, **kwargs):\n \"\"\"\n Return a list of all commands that the user has executed, all commands if admin\n \"\"\"\n self.pagination_class.page_size_query_param = 'length'\n self.pagination_class.max_page_size = 100\n queryset = self.filter_queryset(self.get_queryset())\n\n if not request.user.is_staff: # Standard User\n queryset = queryset.filter(user=request.user)\n\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n\n def retrieve(self, request, *args, **kwargs):\n \"\"\"\n Return a specific command that the user has executed, command if admin\n \"\"\"\n command = self.get_object()\n\n if not request.user.is_staff: # Standard User\n if command.user is not request.user:\n raise PermissionDenied(detail='User not authorised to access command', code=401)\n\n serializer = self.get_serializer(command)\n return Response(serializer.data)\n\n @action(methods=['PUT'], detail=False)\n def send(self, request, *args, **kwargs):\n \"\"\"\n Sends the specified command to the specified orchestrator in the command or in the request\n \"\"\"\n rslt = action_send(\n usr=request.user,\n cmd=request.data.get('command', {}),\n actuator=request.data.get('actuator', None),\n channel=request.data.get('channel', {}),\n )\n\n return Response(*rslt)\n" }, { "alpha_fraction": 0.5124468803405762, "alphanum_fraction": 0.5239830017089844, "avg_line_length": 21.88888931274414, "blob_id": "f72fcd3b28ebd996f95087e295c65a07c9201ca2", "content_id": "c011225290c7e84de9b8efff0dbc3fe7904a0d25", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1647, "license_type": "permissive", "max_line_length": 62, "num_lines": 72, "path": "/orchestrator/gui/client/src/components/command/pages/generate/lib/utils.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "export const keys = {\n // Structures\n Structure: [\n 'name', // 0 - TNAME - Datatype name\n 'type', // 1 - TTYPE - Base type - built-in or defined\n 'opts', // 2 - TOPTS - Type options\n 'desc', // 3 - TDESC - Type description\n 'fields' // 4 - FIELDS - List of fields\n ],\n // Field Definitions\n Enum_Def: [\n 'id', // 0 - FTAG - Element ID\n 'value', // 1 - FNAME - Element name\n 'desc' // 2 - EDESC - Enumerated value description\n ],\n Gen_Def: [\n 'id', // 0 - FTAG - Element ID\n 'name', // 1 - FNAME - Element name\n 'type', // 2 - FTYPE - Datatype of field\n 'opts', // 
3 - FOPTS - Field options\n 'desc' // 4 - FDESC - Field Description\n ]\n}\n\n\nexport const isOptional_jadn = (def) => {\n if (def.hasOwnProperty(\"opts\")) {\n return def.opts.indexOf('[0') >= 0\n } else {\n return false\n }\n}\n\nexport const isOptional_json = (req, field) => {\n if (req && Array.isArray(req)) {\n return req.indexOf(field) >= 0\n }\n return false\n}\n\nexport const opts2arr = (opts) => {\n let rtn_opts = {}\n let jadn_opts = {\n // Type Options\n '=': 'compact',\n '[': 'min',\n ']': 'max',\n '*': 'rtype',\n '$': 'pattern',\n '@': 'format'\n }\n\n opts.forEach(opt => {\n let opt_char = opt.charAt(0)\n let opt_val = opt.substr(1)\n\n if (jadn_opts.hasOwnProperty(opt_char)) {\n rtn_opts[jadn_opts[opt_char]] = opt_val\n } else {\n console.warn('Unknown option', opt_char)\n }\n })\n return rtn_opts\n}\n\nexport const zip = (keys, arr) => {\n let arr_obj = {}\n for (let i in arr) {\n arr_obj[keys[i]] = arr[i]\n }\n return arr_obj\n}" }, { "alpha_fraction": 0.571310818195343, "alphanum_fraction": 0.5762572288513184, "avg_line_length": 27.23255729675293, "blob_id": "c86b3b6ca7dd0448e2bd75bd9ac6904ce2b89ef8", "content_id": "66723c01039126bb72db1be18f7b0f2e3f25e614", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1213, "license_type": "permissive", "max_line_length": 70, "num_lines": 43, "path": "/orchestrator/gui/client/src/store/socketMiddleware.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "// Translate a Redux API call to a WebSocket\nimport { isRSAA, RSAA } from 'redux-api-middleware'\nimport * as SocketActions from '../actions/socket'\n\nconst NO_CONNECTION = null\n\n\nexport default ({getState, dispatch}) => {\n setTimeout(() => dispatch(SocketActions.setupSocket(dispatch)), 100)\n\n return next => action => {\n let auth = getState().Auth\n let socket = getState().Socket\n\n if (isRSAA(action)) {\n if (socket.connected && true) { // socket open & not api force\n const callAPI = action[RSAA];\n let message = {\n endpoint: callAPI.endpoint,\n method: callAPI.method,\n jwt: auth.access ? 
auth.access.token : '',\n data: JSON.parse(callAPI.body || '{}'),\n types: {\n success: callAPI.types[1],\n failure: callAPI.types[2]\n }\n }\n // console.log(callAPI, message)\n socket.connection.send(message)\n\n return next({\n type: callAPI.types[0],\n asyncDispatch: callAPI.asyncDispatch\n })\n } else if (true) { // TODO: Force api over socket??\n return next(action)\n }\n }\n if (action.hasOwnProperty('type')) {\n return next(action)\n }\n }\n}" }, { "alpha_fraction": 0.5423114895820618, "alphanum_fraction": 0.5541653037071228, "avg_line_length": 31.427045822143555, "blob_id": "0bb1e12e74a4ea2cfcf697f4256c891f91f020f3", "content_id": "8ac0a5d9e15ae2f24843c51783af3bb58ccf98b6", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 9111, "license_type": "permissive", "max_line_length": 135, "num_lines": 281, "path": "/orchestrator/gui/server/gui_server/data/static/js/openc2.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "/*jslint devel: true */\n/*jslint nomen: true */\n/*\nOpenC2 Message Creator\n*/\n\n(function ($) { //an IIFE so safely alias jQuery to $\n\t\"use strict\";\n\t\n $.OpenC2 = function (messageSelect, messageFields, alertFun) {\n this.messageSelect = (messageSelect instanceof $) ? messageSelect : $(messageSelect);\n this.messageFields = (messageFields instanceof $) ? messageFields : $(messageFields);\n\t\tthis.alertFun = (typeof alertFun === 'function') ? alertFun : alert.bind(window);\n\t\tthis.nulls = [null, undefined, '', ' '];\n\t\tthis.message = null;\n\t\t\n\t\tthis.Types = ['Record', 'Enumerated', 'Map', 'Choice', 'ArrayOf', 'Array'];\n\t\tthis.messageSelect.change(this._messageChange.bind(this));\n };\n\n //assigning an object literal to the prototype is a shorter syntax\n //than assigning one property at a time\n $.OpenC2.prototype = {\n\t\tinitSchema: function (schema) {\n\t\t\tthis.schema = (schema instanceof Object) ? 
schema : {};\n\t\t\tthis.messageSelect.find('option.schema_record').remove();\n\t\t\tvar schema_records = [];\n\t\t\t\n\t\t\ttry {\n\t\t\t\tschema_records = $.map(this.schema.meta.exports, function (v) { return v; });\n\t\t\t} catch (e) {\n\t\t\t\t$('#message-list').append($('<option/>').addClass('schema_record').attr('disabled', '').text('Cannot Load, Invalid Schema'));\n\t\t\t\tthis.alertFun('Schema Invalid, cannot load message types');\n\t\t\t}\n\t\t\t\n\t\t\tschema_records.sort();\n\t\t\t\n\t\t\tschema_records.forEach(function (rec) {\n\t\t\t\tthis.messageSelect.append($('<option/>').addClass('schema_record').attr('value', rec).text(rec));\n\t\t\t}, this);\n\t\t},\n\t\t_messageChange: function (e) {\n\t\t\tvar selected = $(e.target).val(),\n\t\t\t\tmsg = this.schema.types.filter(function (type) { return type[0] === selected; });\n\t\t\t\n\t\t\tif (selected === '') {\n\t\t\t\treturn;\n\t\t\t} else if (msg.length === 1) {\n\t\t\t\tthis.message = msg[0];\n\t\t\t} else {\n\t\t\t\tthis.messageFields.empty().append($('<p/>').text(selected + ' could not be found in the schema'));\n\t\t\t\tthis.alertFun(selected + ' could not be found in the schema');\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tthis._addFields();\n\t\t},\n\t\t_choiceChange: function (e) {\n\t\t\tvar id = $(e.target).attr('id').replace('-choice', ''),\n\t\t\t\tchoiceCont = $(e.target).parent().find('.choiceOptions'),\n\t\t\t\tselected = $(e.target).val(),\n\t\t\t\tselectedDef = this.schema.types.filter(function (type) {\n\t\t\t\t\treturn type[0] === id;\n\t\t\t\t});\n\t\t\t\n\t\t\tchoiceCont.empty();\n\t\t\tselectedDef = (selectedDef.length === 1) ? selectedDef[0][selectedDef[0].length - 1] : [];\n\t\t\t\n\t\t\tselectedDef = selectedDef.filter(function (type) {\n\t\t\t\treturn Number(type[0]) === Number(selected);\n\t\t\t});\n\t\t\tselectedDef = (selectedDef.length === 1) ? selectedDef[0] : [];\n\t\t\t\n\t\t\tchoiceCont.append(this._defField(selectedDef, id.toLowerCase(), true));\n\t\t},\n\t\t_addFields: function () {\n\t\t\tthis.messageFields\n\t\t\t\t.empty()\n\t\t\t\t.append($('<p/>').append($('<b/>').text('Comment: ')).append($('<span/>').text(this.message[this.message.length - 2])))\n\t\t\t\t.append($('<div/>').attr('id', 'fieldDefs'));\n\t\t\t\n\t\t\tvar defs = this.message[this.message.length - 1],\n\t\t\t\tfieldDefs = this.messageFields.find('#fieldDefs');\n\t\t\t\n\t\t\tdefs.forEach(function (def) {\n\t\t\t\tfieldDefs.append(this._defType(def));\n\t\t\t}, this);\n\t\t},\n\t\t_defType: function (def, parent, field) {\n\t\t\tparent = ($.inArray(parent, this.nulls) < 0 ? parent : null);\n\t\t\tfield = (typeof field === 'boolean') ? field : false;\n\t\t\tvar defType = this.schema.types.filter(function (type) {\n\t\t\t\t\treturn type[0] === def[2];\n\t\t\t\t}),\n\t\t\t\tdefs = [],\n\t\t\t\trtnCont = $('<div/>'),\n\t\t\t\tsel = null,\n\t\t\t\trec = false;\n\t\t\t\n\t\t\tdefType = defType.length === 1 ? defType[0] : def;\n\t\t\tdefs = defType[defType.length - 1];\n\t\t\t\n\t\t\tswitch (defType[1]) {\n\t\t\tcase 'Enumerated':\n\t\t\t\trtnCont = $('<div/>')\n\t\t\t\t\t.addClass('form-group' + (field ? '' : ' col-sm-6'))\n\t\t\t\t\t.attr({\n\t\t\t\t\t\t'aria-describedby': def[0] + '_help',\n\t\t\t\t\t\toptional: this._isOptional(def)\n\t\t\t\t\t})\n\t\t\t\t\t.append($('<label/>').attr('for', (parent !== null ? parent + '.' 
: '') + def[1]).text(def[1]))\n\t\t\t\t\t.append($('<small/>').attr('id', def[0] + '_help').addClass('form-text text-muted').text(def[def.length - 1]))\n\t\t\t\t\t.append($('<div/>').addClass('col-12 my-1'));\n\t\t\t\t\n\t\t\t\tsel = $('<select/>')\n\t\t\t\t\t.addClass('form-control')\n\t\t\t\t\t.attr({\n\t\t\t\t\t\tid: (parent !== null ? parent + '.' : '') + def[1],\n\t\t\t\t\t\tname: (parent !== null ? parent + '.' : '') + def[1]\n\t\t\t\t\t})\n\t\t\t\t\t.append($('<option/>').attr('value', '').attr('selected', '').text(def[1] + ' Options'));\n\t\t\t\t\t\n\t\t\t\tdefs.forEach(function (def) {\n\t\t\t\t\tsel.append($('<option/>').attr('value', def[1]).text(def[1] + ' - ' + def[2]));\n\t\t\t\t}, this);\n\n\t\t\t\trtnCont.append(sel);\n\t\t\t\tbreak;\n\t\t\t\t\t\t\t\n\t\t\tcase 'Choice':\n\t\t\t\trtnCont = $('<fieldset/>')\n\t\t\t\t\t.addClass('border border-secondary' + (field ? '' : ' col-sm-6'))\n\t\t\t\t\t.attr({\n\t\t\t\t\t\tid: defType[0],\n\t\t\t\t\t\t'aria-describedby': def[0] + '_help'\n\t\t\t\t\t})\n\t\t\t\t\t.append($('<legend/>').text(def[1]))\n\t\t\t\t\t.append($('<small/>').attr('id', def[0] + '_help').addClass('form-text text-muted').text(def[def.length - 1]))\n\t\t\t\t\t.append($('<div/>').addClass('col-12 my-1'));\n\t\t\t\t\t\t\t\n\t\t\t\tsel = $('<select/>')\n\t\t\t\t\t.addClass('form-control')\n\t\t\t\t\t.attr('id', defType[0] + '-choice')\n\t\t\t\t\t.append($('<option/>').attr('value', '').attr('selected', '').text(def[1] + ' Options'));\n\t\t\t\t\t\t\n\t\t\t\tdefs.forEach(function (def) {\n\t\t\t\t\tvar com = def[def.length - 1];\n\t\t\t\t\tsel.append($('<option/>').attr('value', def[0]).text(def[1] + (com === '' ? '' : ' - ' + com)));\n\t\t\t\t});\n\t\t\t\t\t\t\t\n\t\t\t\trtnCont.append(sel).append($('<div>').addClass('col-sm-12 py-2 choiceOptions'));\n\t\t\t\t\t\t\t\t\n\t\t\t\tsel.change(this._choiceChange.bind(this));\n\t\t\t\tbreak;\n\t\t\t\t\t\t\n\t\t\tcase 'Record':\n\t\t\t\trec = true;\n\t\t\tcase 'Map':\n\t\t\t\trtnCont = $('<fieldset/>')\n\t\t\t\t\t.addClass('border border-' + (rec ? 'primary' : 'light') + (field ? '' : ' col-sm-6'))\n\t\t\t\t\t.attr('aria-describedby', def[0] + '_help')\n\t\t\t\t\t.append($('<legend/>').text(def[1]))\n\t\t\t\t\t.append($('<small/>').attr('id', def[0] + '_help').addClass('form-text text-muted').text(def[def.length - 1]))\n\t\t\t\t\t.append($('<div/>').addClass('col-12 my-1'));\n\t\t\t\t\t\n\t\t\t\tdefs.forEach(function (d) {\n\t\t\t\t\trtnCont.append(this._defField(d, ($.inArray(parent, this.nulls) < 0 ? parent + '.' : '') + def[1]).addClass(field ? 'mx-2' : ''));\n\t\t\t\t}, this);\n\t\t\t\tbreak;\n\t\t\t\t\n\t\t\tcase 'ArrayOf':\n\t\t\t\tconsole.log('Array', defType);\n\t\t\t\tbreak;\n\t\t\t\t\n\t\t\tcase 'Array':\n\t\t\t\tconsole.log('Array', defType);\n\t\t\t\tbreak;\n\t\t\t\t\t\t\t\n\t\t\tdefault:\n\t\t\t\trtnCont = this._defField(defType, parent, false);\n\t\t\t}\n\n\t\t\treturn rtnCont;\n\t\t},\n\t\t_defField: function (def, parent, field) {\n\t\t\tparent = ($.inArray(parent, this.nulls) < 0 ? parent : null);\n\t\t\tfield = (typeof field === 'boolean') ? field : true;\n\t\t\t\n\t\t\tvar defType = this.schema.types.filter(function (type) {\n\t\t\t\treturn type[0] === def[2];\n\t\t\t});\n\t\t\t\n\t\t\tdefType = (defType.length === 1 ? defType[0] : def);\n\t\t\t\n\t\t\tif ($.inArray(defType[1], this.Types) >= 0) {\n\t\t\t\treturn this._defType(def, parent, true);\n\t\t\t\t\n\t\t\t} else {\n\t\t\t\tdef = ($.isNumeric(def[0]) ? def.slice(1) : def);\n\t\t\t\t\n\t\t\t\treturn $('<div/>').addClass('form-group' + (field ? 
'' : ' col-sm-6'))\n\t\t\t\t\t.append($('<label/>').attr('for', (parent !== null ? parent + '.' : '') + def[0]).text(def[0]))\n\t\t\t\t\t.append($('<input/>').attr({\n\t\t\t\t\t\ttype: 'text',\n\t\t\t\t\t\tname: (parent !== null ? parent + '.' : '') + def[0],\n\t\t\t\t\t\t'aria-describedby': def[0] + '_help',\n\t\t\t\t\t\tplaceholder: 'Enter ' + def[0],\n\t\t\t\t\t\toptional: this._isOptional(def)\n\t\t\t\t\t}).addClass('form-control'))\n\t\t\t\t\t.append($('<small/>').attr('id', def[0] + '_help').addClass('form-text text-muted').text(def[def.length - 1]));\n\t\t\t}\n\t\t},\n\t\t_isOptional: function (def) {\n\t\t\tconsole.log(def);\n\t\t\tconsole.log(def.length);\n\t\t\tswitch (def.length) {\n\t\t\t\tcase 5:\n\t\t\t\t\treturn $.inArray('[0', def[3]) >= 0\n\t\t\t\t\t\n\t\t\t\tcase 4:\n\t\t\t\t\treturn $.inArray('[0', def[2]) >= 0\n\t\t\t\t\t\n\t\t\t\tdefault:\n\t\t\t\t\tconsole.log('default optional - ' + def[0] + ' - ' + def[1]);\n\t\t\t\t\treturn false;\n\t\t\t}\n\t\t},\n\t\tgenMsg: function () {\n\t\t\tvar msg = {},\n\t\t\t\tmsgFields = this.messageFields.serializeArray();\n\t\t\t\n\t\t\tmsgFields.forEach(function (field) {\n\t\t\t\tvar optional = this.messageFields.find('[name=\\'' + field.name + '\\']').first().attr('optional') || false;\n\t\t\t\t\n\t\t\t\tif (optional) {\n\t\t\t\t\tif ($.inArray(field.value, this.nulls) < 0) {\n\t\t\t\t\t\tthis._setMultiKey(msg, field.name, field.value);\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tthis._setMultiKey(msg, field.name, field.value);\n\t\t\t\t}\n\t\t\t}, this);\n\t\t\t\n\t\t\tconsole.log(msg);\n\t\t},\n\t\t_setMultiKey: function (a, k, v) {\n\t\t\tvar keys = k.split('.');\n\t\t\t\n\t\t\tif (keys.length > 1) {\n\t\t\t\tif (!a.hasOwnProperty(keys[0])) {\n\t\t\t\t\ta[keys[0]] = {};\n\t\t\t\t}\n\t\t\t\tthis._setMultiKey(a[keys[0]], keys.slice(1).join('.'), v);\n\t\t\t\t\n\t\t\t} else {\n\t\t\t\ta[k] = v;\n\t\t\t}\n\t\t},\n\t\t_getMultiKey: function (a, k) {\n\t\t\tvar keys = k.split('.');\n\t\t\t\n\t\t\tif (keys.length > 1) {\n\t\t\t\treturn (a.hasOwnProperty(keys[0]) ? this._getMultiKey(a[keys[0]], keys.slice(1).join('.')) : '');\n\t\t\t} else {\n\t\t\t\treturn (a.hasOwnProperty(k) ? a[k] : '');\n\t\t\t}\n\t\t}\n };\n\n $.OpenC2.defaultOptions = {\n schema: {},\n\t\tmessageSelect: 'message-select',\n\t\tmessageFields: 'message-fields'\n };\n\t\n}(jQuery));\n\n/* \n* so you can use it as:\nvar oc2 = new $.OpenC2($('#message-select'), $('#message-fields'))\noc2.InitSchema(OpenC2Schema)\n*/" }, { "alpha_fraction": 0.607032060623169, "alphanum_fraction": 0.6075491309165955, "avg_line_length": 20.977272033691406, "blob_id": "36740e16ecc4e905825dce714a7f92fbc4089d78", "content_id": "cad78b2eaed777548e8dedf3e64b733a5d9622de", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1934, "license_type": "permissive", "max_line_length": 65, "num_lines": 88, "path": "/orchestrator/gui/server/gui_server/tracking/log.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom . 
import EVENT_LEVELS\nfrom .models import EventLog\nfrom .settings import TRACKING\n\nlog_levels = TRACKING['EVENT_LEVELS']\n\n\ndef log(level=EVENT_LEVELS.Info, usr=None, msg=''):\n \"\"\"\n Log a message at the specified level\n :param level: severity level of the message\n :param usr: user that caused the message\n :param msg: message to log\n :return: None\n \"\"\"\n level = level if level in EVENT_LEVELS else EVENT_LEVELS.Info\n usr = None if usr is None or usr.is_anonymous else usr\n\n if level in log_levels:\n print(f\"{level} Log: {usr} - {msg}\")\n EventLog.objects.create(\n user=usr,\n level=level,\n message=msg\n )\n\n\ndef debug(usr=None, msg=''):\n \"\"\"\n Log debug message\n :param usr: user that caused the message\n :param msg: message to log\n :return: None\n \"\"\"\n log(EVENT_LEVELS.Debug, usr, msg)\n\n\ndef error(usr=None, msg=''):\n \"\"\"\n Log error message\n :param usr: user that caused the message\n :param msg: message to log\n :return: None\n \"\"\"\n log(EVENT_LEVELS.Error, usr, msg)\n\n\ndef fatal(usr=None, msg=''):\n \"\"\"\n Log fatal message\n :param usr: user that caused the message\n :param msg: message to log\n :return: None\n \"\"\"\n log(EVENT_LEVELS.Fatal, usr, msg)\n\n\ndef info(usr=None, msg=''):\n \"\"\"\n Log info message\n :param usr: user that caused the message\n :param msg: message to log\n :return: None\n \"\"\"\n log(EVENT_LEVELS.Info, usr, msg)\n\n\ndef trace(usr=None, msg=''):\n \"\"\"\n Log trace message\n :param usr: user that caused the message\n :param msg: message to log\n :return: None\n \"\"\"\n log(EVENT_LEVELS.Trace, usr, msg)\n\n\ndef warn(usr=None, msg=''):\n \"\"\"\n Log warning message\n :param usr: user that caused the message\n :param msg: message to log\n :return: None\n \"\"\"\n log(EVENT_LEVELS.Warn, usr, msg)\n" }, { "alpha_fraction": 0.7488151788711548, "alphanum_fraction": 0.7630331516265869, "avg_line_length": 29.14285659790039, "blob_id": "46b7f1a2e9d587fb78ea85743b9cda11be408ba7", "content_id": "d1ad40349c468f3a6e38a75ea4370af87d319c6c", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 211, "license_type": "permissive", "max_line_length": 61, "num_lines": 7, "path": "/orchestrator/core/orc_server/account/exceptions.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from rest_framework.exceptions import APIException\n\n\nclass EditException(APIException):\n status_code = 403\n default_detail = 'Permission Denied, cannot alter object'\n default_code = 'permission_denied'\n" }, { "alpha_fraction": 0.6898148059844971, "alphanum_fraction": 0.6990740895271301, "avg_line_length": 19.636363983154297, "blob_id": "68262f13865bd6593c71d6a7726de19e2887d3ae", "content_id": "cb87be50cf725ec892ff4388b871151d9374c370", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 216, "license_type": "permissive", "max_line_length": 60, "num_lines": 11, "path": "/orchestrator/core/orc_server/conformance/tests/sbom_tests.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "\"\"\"\nOpenC2 Software Bill of Materials Profile (SBOM) Conformance\n\"\"\"\nfrom test_setup import SetupTestCase\n\n\nclass SBOM_UnitTests(SetupTestCase):\n
\"\"\"\n SBOM OpenC2 Conformance Tests\n \"\"\"\n profile = \"SBOM\"\n" }, { "alpha_fraction": 0.5741565823554993, "alphanum_fraction": 0.5805219411849976, "avg_line_length": 23.546875, "blob_id": "9092683e35e3c8fbfbed506aa6d6c5a6b9b6a31e", "content_id": "209f68a8fb63e20108ddcfe58ac1bb01ccee4c9b", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1571, "license_type": "permissive", "max_line_length": 105, "num_lines": 64, "path": "/orchestrator/gui/client/src/reducers/command.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import * as command from '../actions/command'\n\nimport {\n mergeByProperty\n} from '../components/utils'\n\nconst initialState = {\n commands: [],\n sort: '',\n count: 0,\n errors: {}\n}\n\nexport default (state=initialState, action=null) => {\n switch(action.type) {\n case command.GET_COMMANDS_SUCCESS:\n let newActs = action.payload.results || []\n\n return {\n ...state,\n count: action.payload.count || 0,\n commands: action.meta.refresh ? newActs : mergeByProperty(state.commands, newActs, 'command_id'),\n sort: action.meta.sort,\n errors: {\n ...state.errors,\n [command.GET_COMMANDS_FAILURE]: {}\n }\n }\n\n case command.SEND_COMMAND_SUCCESS:\n setTimeout(() => {\n action.asyncDispatch(command.getCommand(action.payload.command_id))\n }, action.payload.wait * 1000 || 1000)\n\n return {\n ...state,\n errors: {\n ...state.errors,\n [command.SEND_COMMAND_FAILURE]: {}\n }\n }\n\n case command.GET_COMMAND_SUCCESS:\n return {\n ...state,\n commands:mergeByProperty(state.commands, [action.payload], 'command_id')\n }\n\n case command.GET_COMMANDS_FAILURE:\n case command.SEND_COMMAND_FAILURE:\n case command.GET_COMMAND_FAILURE:\n console.log('Command Failure', action.type, action)\n return {\n ...state,\n errors: {\n ...state.errors,\n [action.type]: action.payload.response || {'non_field_errors': action.payload.statusText},\n }\n }\n\n default:\n return state\n }\n}\n" }, { "alpha_fraction": 0.7053571343421936, "alphanum_fraction": 0.7053571343421936, "avg_line_length": 11.55555534362793, "blob_id": "039964ed6e76ea893bb369a85baf5e8a687e2416", "content_id": "b21814546be8b26209beb4d8a80a0ca0a649c26d", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 112, "license_type": "permissive", "max_line_length": 26, "num_lines": 9, "path": "/logger/gui/src/components/utils/index.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import {\n ThemeChooser,\n ThemeSwitcher\n} from './theme-switcher';\n\nexport {\n ThemeChooser,\n ThemeSwitcher\n};" }, { "alpha_fraction": 0.6996309757232666, "alphanum_fraction": 0.7099630832672119, "avg_line_length": 22.771930694580078, "blob_id": "929f0afdc943be5a4b382f0f79c09b302f6cf198", "content_id": "dd47a0e58de363e0f1f7c3a955f2d570595c4d5e", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 1355, "license_type": "permissive", "max_line_length": 78, "num_lines": 57, "path": "/orchestrator/core/Dockerfile", 
"repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "FROM g2inc/oif-python\nRUN apk upgrade --update && apk add --no-cache dos2unix && rm /var/cache/apk/*\n\nMAINTAINER Screaming_Bunny\n\nLABEL name=\"Orchestrator Core\" \\\nvendor=\"OpenC2\" \\\nlicense=\"BSD\" \\\nversion=\"2.0\" \\\ndescription=\"This is the Orchestrator Core container\"\n\n# Add files to container\nADD requirements.txt /tmp/requirements.txt\nADD orc_server /opt/orc_server\nADD docker-entrypoint.sh /docker-entrypoint.sh\n\n# Set environment vars\n# DJANGO Settings\nENV DJANGO_SETTINGS_MODULE=orchestrator.settings \\\n DJANGO_ENV=\"prod\"\n\n# Requirements install\n# System packages\nRUN apk add --no-cache --virtual .build-deps \\\n gcc \\\n libc-dev \\\n python3-dev \\\n linux-headers && \\\n# python/pip packages\npip3 install -r /tmp/requirements.txt && \\\n#\n# Collect Static Filesz\ncd /opt/orc_server && \\\npython3 manage.py collectstatic --noinput && \\\ncd && \\\n#\n# Mod Entrypoint Script\nchmod +x /docker-entrypoint.sh && \\\nchmod +x /opt/orc_server/dev_start.sh && \\\ndos2unix /docker-entrypoint.sh && \\\ndos2unix /opt/orc_server/dev_start.sh&& \\\n#\n# Cleanup\napk del .build-deps && \\\nrm -rf /var/cache/apk/* *.tar.gz* /usr/src /root/.gnupg /tmp/*\n\n# Ports\nEXPOSE 8080/tcp\n\n# Orchestrator Core Working Directory\nWORKDIR /opt/orc_server\n\n# entrypoint Command\nENTRYPOINT [\"/docker-entrypoint.sh\"]\n\n# Startup Command\nCMD [\"uwsgi\", \"--ini\", \"/opt/orc_server/uwsgi.ini\"]\n" }, { "alpha_fraction": 0.592383623123169, "alphanum_fraction": 0.5952045321464539, "avg_line_length": 23.44827651977539, "blob_id": "d9f2fd77c533f1ac8c8ddfaf8008e4763b850bcf", "content_id": "6a02b3bb5338e117db3cf31b39d551316ff9b8f4", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 709, "license_type": "permissive", "max_line_length": 78, "num_lines": 29, "path": "/orchestrator/gui/server/gui_server/tracking/settings.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\n\nfrom . 
import EVENT_LEVELS, REQUEST_LEVELS\n\n\n_DEFAULT = {\n \"URL_PREFIXES\": [\n # \"^/(?!admin)\" # Don\"t log /admin/*\n \".*\" # Log Everything\n ],\n \"EVENT_LEVELS\": [getattr(EVENT_LEVELS, err) for err in EVENT_LEVELS],\n \"REQUEST_LEVELS\": [getattr(REQUEST_LEVELS, err) for err in REQUEST_LEVELS]\n}\n\n_SETTINGS = getattr(settings, \"TRACKING\", {})\nTRACKING = dict()\n\nfor k in _DEFAULT:\n attr = _SETTINGS.get(k, [])\n\n if not isinstance(attr, (list, tuple)):\n TRACKING[k] = list(attr)\n elif len(attr) >= 1:\n TRACKING[k] = attr\n else:\n TRACKING[k] = _DEFAULT[k]\n" }, { "alpha_fraction": 0.6869999766349792, "alphanum_fraction": 0.6970000267028809, "avg_line_length": 32.33333206176758, "blob_id": "b21080412df77a29dcb21e7fb9bbcc6642480e2f", "content_id": "590e619395a141606e7660ac03a4bcb43bee3899", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1000, "license_type": "permissive", "max_line_length": 100, "num_lines": 30, "path": "/orchestrator/core/orc_server/command/preferences_registry.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.forms import ValidationError\nfrom dynamic_preferences.types import IntegerPreference\nfrom dynamic_preferences.preferences import Section\nfrom dynamic_preferences.registries import global_preferences_registry as global_registry\n\ncommand = Section('command')\n\n\n@global_registry.register\nclass CommandWait(IntegerPreference):\n \"\"\"\n Dynamic Preference for Command wait time\n Time before checking the database after sending a command for a response\n \"\"\"\n section = command\n name = 'wait'\n help_text = 'The amount of time to wait, in seconds, for a response to a command (0-30 seconds)'\n default = 5\n\n def validate(self, value):\n \"\"\"\n Validate the wait time when updated\n :param value: new value to validate\n :return: None/exception\n \"\"\"\n if value < 0:\n raise ValidationError('Wait cannot be less than 0 seconds')\n\n if value > 30:\n raise ValidationError('Wait cannot be greater than 30 seconds')\n" }, { "alpha_fraction": 0.7209077477455139, "alphanum_fraction": 0.7238049507141113, "avg_line_length": 31.375, "blob_id": "cbba10ac80a0f1345ced21cfce2016280a701c25", "content_id": "3aa49af02bad48754676aa6dfa37892346c19870", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2071, "license_type": "permissive", "max_line_length": 166, "num_lines": 64, "path": "/base/modules/utils/ReadMe.md", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# Screaming Bunny Utils\n\n## Installing\n### On a standalone System via pip\n- Install requires Python 3.6+ and pip\n\n- Install via pip\n ```bash\n pip install git+https://gitlab.labs.g2-inc.net/ScreamingBunny/Utils.git\n ```\n \n- To update if already installed\n\t \n ```bash\n pip install --upgrade git+https://gitlab.labs.g2-inc.net/ScreamingBunny/Utils.git\n ```\n\n### On a standalone System via submodule source\n- Install requires Python 2.7+ and pip\n\n- Add the submodule to the repo\n - Add `--branch=BRANCH` to use a branch instead of master\n - DIR - the directory to add the submodule to, recommended to use `./module/NAME`\n - After adding 
a submodule, it is recommended to edit the `./.gitmodules` file and change the absolute url to a relative url, unless the submodule is hosted on a different git server\n\n ```bash\n git submodule add \"REPO-URL.git\" \"DIR\"\n ```\n \n- Initialize the submodule and pull the repo\n\t\n\t```bash\n\tgit submodule init\n\tgit submodule update\n\t```\n\n\t- Updating the submodule\n\t\t- The submodule is a nested repo; `git fetch` and `git merge` will work within the submodule directory\n\t\t- For easier updating, especially if there are multiple submodules, use the following command to update the submodule to the latest commit\n\t\t\t\n\t\t\t```bash\n\t\t\tgit submodule update --remote\n\t\t\t```\n\n- Installing via source\n\t- Be sure to be in the submodule directory to be installed\n\t- To update, run the same command again; it will override the currently installed version\n\n\t```bash\n\tpython setup.py install\n\t```\n\n \n### Gitlab CI\n- Submodule init and update are handled by the CI Runner\n- The submodule will be available where its folder is specified\n- This repo is recommended to be used as a Python package, the following is how to install and use it\n\t- Standalone\n\t\t- See the Installing on a standalone system via submodule source\n\n\t- Docker\n\t\t- Add the submodule directory to the image, a tmp directory is preferred\n\t\t- See the Installing on a standalone system via submodule source\n\t\t- Cleanup the tmp directory and remove the submodule directory" }, { "alpha_fraction": 0.6629629731178284, "alphanum_fraction": 0.6703703999519348, "avg_line_length": 13.550000190734863, "blob_id": "25cdb1034be053b21dd45fbd38517858f2416e0a5af8" }
"detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4200, "license_type": "permissive", "max_line_length": 218, "num_lines": 109, "path": "/docs/Orchestrator.md", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# O.I.F. (OpenC2 Integration Fabric)\n\n## Container/Services ReadMe\n### Orchestrator\n- [Core](../orchestrator/core/ReadMe.md)\n- [GUI](../orchestrator/gui/client/ReadMe.md)\n\n### Transport\n- [HTTPS](orchestrator/transport/https/README.md)\n- [MQTT](orchestrator/transport/mqtt/ReadMe.md)\n\n### Logger\n- [GUI](../logger/gui/ReadMe.md)\n- [Server](../logger/server/ReadMe.md)\n\n#### Default Container/Service\n#### Credentials\n- OIF GUI - admin/password\n\t- Note: Admin and User GUI use the same credentials but not the same login\n\n##### Ports\n- Logger GUI - HOST:8081\n- OIF GUI - HOST:8080\n- OIF API - HOST:8080/api\n- HTTPS - Orchestrator: HOST:5000(default)\n\n## Requirements\n- Docker v18+\n- Docker-Compose v1.20+\n- Python 3.6+\n- pip 18+\n\n## Configuration\n- Run `configure.py` with the desired options prior to starting the Orchestrator for the first time\n\t- Options\n\t\t- `-b` or `--build-image` -- Build base containers\n\t\t- `-d` or `--dev` -- Build using the development python image\n \t- `-f FILE` or `--log_file FILE` -- Enables logging to the designated file\n \t- `-h` or `--help` -- Shows the help and exits\n \t- `-v` or `--verbose` -- Enables verbose output \t\n ```bash\n python configure.py [OPTIONS]\n ```\n\n## Running the Compose\n### General Info\n- Options\n\t- * `-f FILE` or `--file FILE` -- Specify an alternate compose file (default: docker-compose.yml)\n\t- `-p NAME` or `--project-name NAME` -- Specify an alternate project name (default: directory name)\n\t- `d` or `--detach` -- Detached mode: Run containers in the background, print new container names. Incompatible with --abort-on-container-exit.\n- Starting\n\t- Run the `docker-compose` command for the Orchestrator as shown below\n\n- Stoping\n\t- If running attatched (showing log output, no -d option)\n\t\t- Use 'Ctrl + C' \n\t- If running detatched (not showing log output, -d option)\n\t\t- Run the `docker-compose` that was used to start the Orchestrator **except** replace `up ...` with `down`\n\t\t\t\n\t\t\t```bash\n\t\t\tdocker-compose ...... down\n\t\t\t```\n- Building Images\n\t- Run the `docker-compose` that was used to start the Orchestrator **except** replace `up ...` with `build`\n\t- Options\n\t\t- SERVICE_NAME - The name of the service to rebuild the image, if not specified all will build\n\t- Notes\n\t\t- Does not need to be run prior to starting, the containers will autobuild if not available\n\t\t- Should be run after adding a new Protocol or Serialization\n\t\n\t```bash\n\tdocker-compose ...... 
build [SERVICE_NAME]\n\t```\n\n### Docker Compose Files\n### Central Logging\n- __Still in Beta__\n- Run the `docker-compose` as normal with the additional option of a second '-f/--file'\n- Allows for a central location for logging rather than the docker default of per container\n- Runs on default port of 8081 for logger web GUI\n\n\t```bash\n\tdocker-compose -f orchestrator-compose.yaml -f orchestrator-compose.log.yaml ...\n\t```\n\n#### Orchestrator\n- Use [`docker-compose`](https://docs.docker.com/compose/reference/overview/) to start the orchestrator on the system\n\n\t```bash\n\tdocker-compose -f orchestrator-compose.yaml [-p NAME] up [-d]\n ```\n\n### Registration\n#### Registering a device with the OIF\n- Give Device a name and generate a UUID for it.\n- Select a transport\n - HTTPS: Enter host and port (Default Port 5001)\n - MQTT: Enter host and port of the broker (Default Port 1883)\n- Select which serializations in which the device utilizes.\n - Default included device supports JSON, CBOR, and XML.\n- Note: include a note about what type of device you are adding.\n\n#### Registering an actuator with the OIF\n- Give actuator a name and generate a UUID for it.\n- Select a parent device.\n - Note: device should be registered before the actuator.\n- Upload/Copy-Paste schema. Schema for the default included ISR actuator can be found at [device/actuator/isr/act_server/schema.json](../device/actuator/isr/act_server/schema.json).\n- This information can also be found under the [ISR Actuator](../device/actuator/isr/ReadMe.md) page.\n- If you are registering a new actuator for the first time while utilizing the MQTT transport you may need to update the `MQTT_TOPICS` environment variable. Read the MQTT Topics section [here](transport/mqtt/ReadMe.md)" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6774193644523621, "avg_line_length": 14.666666984558105, "blob_id": "cfd09df86a27f478b2b196923d66f5fed0e0db7e", "content_id": "a3d94b57da564518bc0c4c258ec2a50fbd537800", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 93, "license_type": "permissive", "max_line_length": 32, "num_lines": 6, "path": "/logger/gui/config/eslint_rules/ext_rules.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "// Extended Rules\n// not enough for their own file\n\nmodule.exports = {\n 'compat/compat': 2\n}" }, { "alpha_fraction": 0.6312752366065979, "alphanum_fraction": 0.6353144645690918, "avg_line_length": 32.32692337036133, "blob_id": "0b4e04af5dfd3131f7b5ae179e27ec0af0437eff", "content_id": "da808a328cac8d28e8139e2a13e776f27ed8c8e1", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1733, "license_type": "permissive", "max_line_length": 117, "num_lines": 52, "path": "/orchestrator/gui/server/gui_server/account/models.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib.auth.models import User\n\nfrom rest_framework import serializers\n\n\nclass UserSerializer(serializers.ModelSerializer):\n \"\"\"\n Model Serializer for Users\n \"\"\"\n auth_groups = 
serializers.SerializerMethodField()\n\n class Meta:\n model = User\n fields = ('username', 'password', 'email', 'first_name', 'last_name', 'is_active', 'is_staff', 'auth_groups')\n extra_kwargs = {\n 'password': {'write_only': True},\n 'is_active': {'default': 0, 'write_only': True},\n 'is_staff': {'default': 0, 'write_only': True},\n }\n\n def get_auth_groups(self, obj):\n return [g.name for g in obj.groups.all()]\n\n def create(self, validated_data):\n user = super(UserSerializer, self).create(validated_data)\n if 'password' in validated_data:\n user.set_password(validated_data['password'])\n user.save()\n return user\n\n def update(self, instance, validated_data):\n if 'password' in validated_data:\n password = validated_data.pop('password')\n instance.set_password(password)\n return super(UserSerializer, self).update(instance, validated_data)\n\n\nclass PasswordSerializer(serializers.Serializer):\n \"\"\"\n Serializer for password change endpoint.\n \"\"\"\n old_password = serializers.CharField(required=True)\n new_password_1 = serializers.CharField(required=True)\n new_password_2 = serializers.CharField(required=True)\n\n def validate(self, data):\n if data['new_password_1'] != data['new_password_2']:\n raise serializers.ValidationError(\"New Passwords do not match\")\n return data\n" }, { "alpha_fraction": 0.6839814186096191, "alphanum_fraction": 0.6848520040512085, "avg_line_length": 34.89583206176758, "blob_id": "68f90caba2b5790490df216d39891c970adc78f9", "content_id": "3abd672b65ea9b3edfd30d4b843eb7a8b2f69326", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3446, "license_type": "permissive", "max_line_length": 130, "num_lines": 96, "path": "/orchestrator/gui/client/src/actions/actuator.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "// Actions for actuator API\nimport { RSAA } from 'redux-api-middleware'\nimport { withGUIAuth } from './util'\n\nconst str_fmt = require('string-format')\n\n// API Base URL\nconst baseAPI = '/api/actuator'\n\n// Helper Functions\n// None\n\n// API Calls\n// GET - /api/actuator/ - all actuators\nconst GET_ACTUATORS_REQUEST = '@@actuator/GET_ACTUATORS_REQUEST'\nexport const GET_ACTUATORS_SUCCESS = '@@actuator/GET_ACTUATORS_SUCCESS'\nexport const GET_ACTUATORS_FAILURE = '@@actuator/GET_ACTUATORS_FAILURE'\nexport const getActuators = ({page=1, count=10, sort='name', refresh=false}={}) => ({\n [RSAA]: {\n endpoint: str_fmt('{base}?page={page}&length={count}&ordering={sort}', {base: baseAPI, page: page, count: count, sort: sort}),\n method: 'GET',\n headers: withGUIAuth({'Content-Type': 'application/json'}),\n types: [\n GET_ACTUATORS_REQUEST,\n {\n type: GET_ACTUATORS_SUCCESS,\n meta: {\n sort: sort,\n refresh: refresh\n }\n }, GET_ACTUATORS_FAILURE\n ]\n }\n})\n\n// POST - /api/actuator/ - create actuator (name, host, port, protocol, serialization, profile)\nconst CREATE_ACTUATOR_REQUEST = '@@actuator/CREATE_ACTUATOR_REQUEST'\nexport const CREATE_ACTUATOR_SUCCESS = '@@actuator/CREATE_ACTUATOR_SUCCESS'\nexport const CREATE_ACTUATOR_FAILURE = '@@actuator/CREATE_ACTUATOR_FAILURE'\nexport const createActuator = (actuator) => ({\n [RSAA]: {\n endpoint: str_fmt('{base}/', {base: baseAPI}),\n method: 'POST',\n headers: withGUIAuth(),\n body: JSON.stringify(actuator),\n types: [\n CREATE_ACTUATOR_REQUEST, CREATE_ACTUATOR_SUCCESS, 
CREATE_ACTUATOR_FAILURE\n ]\n }\n})\n\n// GET - /api/actuator/{name} - specific actuators\nconst GET_ACTUATOR_REQUEST = '@@actuator/GET_ACTUATOR_REQUEST'\nexport const GET_ACTUATOR_SUCCESS = '@@actuator/GET_ACTUATOR_SUCCESS'\nexport const GET_ACTUATOR_FAILURE = '@@actuator/GET_ACTUATOR_FAILURE'\nexport const getActuator = (actuatorUUID) => ({\n [RSAA]: {\n endpoint: str_fmt('{base}/{actuator}/', {base: baseAPI, actuator: actuatorUUID}),\n method: 'GET',\n headers: withGUIAuth(),\n types: [\n GET_ACTUATOR_REQUEST, GET_ACTUATOR_SUCCESS, GET_ACTUATOR_FAILURE\n ]\n }\n})\n\n// PATCH - /api/actuator/{name} - update specific actuator (name, host, port, protocol, serialization, profile)\nconst UPDATE_ACTUATOR_REQUEST = '@@actuator/UPDATE_ACTUATOR_REQUEST'\nexport const UPDATE_ACTUATOR_SUCCESS = '@@actuator/UPDATE_ACTUATOR_SUCCESS'\nexport const UPDATE_ACTUATOR_FAILURE = '@@actuator/UPDATE_ACTUATOR_FAILURE'\nexport const updateActuator = (actuatorUUID, actuator) => ({\n [RSAA]: {\n endpoint: str_fmt('{base}/{actuator}/', {base: baseAPI, actuator: actuatorUUID}),\n method: 'PATCH',\n headers: withGUIAuth(),\n body: JSON.stringify(actuator),\n types: [\n UPDATE_ACTUATOR_REQUEST, UPDATE_ACTUATOR_SUCCESS, UPDATE_ACTUATOR_FAILURE\n ]\n }\n})\n\n// DELETE - /api/actuator/{name} - delete specific actuator\nconst DELETE_ACTUATOR_REQUEST = '@@actuator/DELETE_ACTUATOR_REQUEST'\nexport const DELETE_ACTUATOR_SUCCESS = '@@actuator/DELETE_ACTUATOR_SUCCESS'\nexport const DELETE_ACTUATOR_FAILURE = '@@actuator/DELETE_ACTUATOR_FAILURE'\nexport const deleteActuator = (actuatorUUID) => ({\n [RSAA]: {\n endpoint: str_fmt('{base}/{actuator}/', {base: baseAPI, actuator: actuatorUUID}),\n method: 'DELETE',\n headers: withGUIAuth(),\n types: [\n DELETE_ACTUATOR_REQUEST, DELETE_ACTUATOR_SUCCESS, DELETE_ACTUATOR_FAILURE\n ]\n }\n})\n" }, { "alpha_fraction": 0.5880058407783508, "alphanum_fraction": 0.5894685387611389, "avg_line_length": 21.538461685180664, "blob_id": "303d234b59467b9bbc83c04cb1a1e76989c89f97", "content_id": "efa40fe5a589ba66afd1e09c5c98d6c09943606a", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2051, "license_type": "permissive", "max_line_length": 108, "num_lines": 91, "path": "/orchestrator/gui/client/src/components/account/index.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\nimport { Helmet } from 'react-helmet-async'\nimport { toast } from 'react-toastify'\n\nimport {\n Button,\n Modal,\n ModalBody,\n ModalFooter,\n ModalHeader\n} from 'reactstrap'\n\nimport { confirmAlert } from 'react-confirm-alert'\nimport 'react-confirm-alert/src/react-confirm-alert.css'\n\nimport {\n ChangePassword\n} from './lib'\n\nimport * as AccountActions from '../../actions/account'\nimport { withGUIAuth } from '../../actions/util'\n\nconst str_fmt = require('string-format')\n\nclass Account extends Component {\n constructor(props, context) {\n super(props, context)\n\n this.meta = {\n title: str_fmt('{base} | {page}', {base: this.props.siteTitle, page: 'Account'}),\n canonical: str_fmt('{origin}{path}', {origin: window.location.origin, path: window.location.pathname})\n }\n\n console.log(this.props.match.params.page)\n this.validPages = ['all', 'change_password']\n let page = this.props.match.params.page || 
'all'\n\n if (this.validPages.indexOf(page) === -1) {\n page = 'all'\n }\n\n this.state = {\n activeTab: page\n }\n }\n\n toggleTab(tab) {\n if (this.state.activeTab !== tab) {\n this.props.history.push({\n pathname: str_fmt('/account/{tab}', {tab: tab})\n })\n this.setState({\n activeTab: tab\n })\n }\n }\n\n render() {\n let page = null\n switch (this.state.activeTab) {\n case 'change_password':\n page = <ChangePassword />\n break;\n default:\n page = (\n <div className=\"row mx-auto\">\n <h1>Account Options</h1>\n <p>Todo</p>\n </div>\n )\n }\n\n return (\n <div >\n <Helmet>\n <title>{ this.meta.title }</title>\n <link rel=\"canonical\" href={ this.meta.canonical } />\n </Helmet>\n { page }\n </div>\n )\n }\n}\n\nconst mapStateToProps = (state) => ({\n errors: state.Account.errors,\n siteTitle: state.Util.site_title\n})\n\nexport default connect(mapStateToProps)(Account)\n" }, { "alpha_fraction": 0.6686695218086243, "alphanum_fraction": 0.6789699792861938, "avg_line_length": 29.657894134521484, "blob_id": "aef7c741cb57abdbe007e8530707f9748dd0074c", "content_id": "f6ebe158810b029be0e786c8ca1f5bb27a9fde6d", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1165, "license_type": "permissive", "max_line_length": 107, "num_lines": 38, "path": "/orchestrator/core/orc_server/utils/model.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "\"\"\"\nDjango Model Utilities\n\"\"\"\nfrom django.contrib import admin\n\n\ndef get_or_none(model, **kwargs):\n \"\"\"\n Get filtered results from the given model\n :param model: model to filter\n :param kwargs: field/value to match\n :return: matching row(s) from the model\n \"\"\"\n base_model = getattr(model, 'objects', model)\n qry = base_model.filter(**kwargs)\n return None if len(qry) == 0 else (qry.first() if len(qry) == 1 else qry)\n\n\nclass ReadOnlyModelAdmin(admin.ModelAdmin):\n \"\"\"\n ModelAdmin class that prevents modifications through the admin.\n The changelist and the detail view work, but a 403 is returned if one actually tries to edit an object.\n Source: https://gist.github.com/aaugustin/1388243\n \"\"\"\n actions = None\n\n def get_readonly_fields(self, request, obj=None):\n return self.fields or [f.name for f in self.model._meta.fields]\n\n def has_add_permission(self, request):\n return False\n\n # Allow viewing objects but not actually changing them.\n def has_change_permission(self, request, obj=None):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n" }, { "alpha_fraction": 0.6401564478874207, "alphanum_fraction": 0.6414602398872375, "avg_line_length": 19.1842098236084, "blob_id": "7813166033a392c3b3dd3d4a265e854b6c128573", "content_id": "4a612136a4ee9a95cb9a76ed967728d0dee58f57", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 767, "license_type": "permissive", "max_line_length": 51, "num_lines": 38, "path": "/orchestrator/gui/server/gui_server/webApp/templatetags/common.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.template import Library\n\nregister = 
Library()\n\n\[email protected]_tag\ndef define(val):\n \"\"\"\n Dynamically define a variable within a template\n :param val: value to assign\n :return: value\n \"\"\"\n return val\n\n\[email protected](name='split')\ndef spit(st, tok):\n \"\"\"\n Split a the given string at the given toekn\n :param st: string to split\n :param tok: token to split at\n :return: split string\n \"\"\"\n return st.split(tok)\n\n\[email protected](name='get_idx')\ndef get_idx(lst, idx):\n \"\"\"\n get the item at the given index\n :param lst: iterative object\n :param idx: index to get the item\n :return: item at hte given index\n \"\"\"\n return lst[idx]\n" }, { "alpha_fraction": 0.5842558145523071, "alphanum_fraction": 0.5857933759689331, "avg_line_length": 36.8139533996582, "blob_id": "de9c2bdbbe00d007bb387a56ea65085a7f0cf60b", "content_id": "82ae615e0e96a618423f33cd68a3261a5d5ec693", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6504, "license_type": "permissive", "max_line_length": 183, "num_lines": 172, "path": "/orchestrator/gui/client/src/components/static/nav.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport ReactDOM from 'react-dom'\nimport { connect } from 'react-redux'\nimport qs from 'query-string'\n\nimport { FontAwesomeIcon } from '@fortawesome/react-fontawesome'\nimport { faHome } from '@fortawesome/free-solid-svg-icons'\n\nimport { ThemeChooser } from '../utils'\nimport * as AuthActions from '../../actions/auth'\n\nclass NavItem extends Component {\n constructor(props, context) {\n super(props, context)\n this.external = this.props.external || false\n this.dropdown = this.props.dropdown || false\n }\n\n render() {\n let active = (this.props.href === this.props.active)\n let href = (this.props.href || '').endsWith('/') ? this.props.href : this.props.href + '/'\n\n return (\n <li onClick={ this.external ? () => {} : this.props.click } className={ this.props.liClassName + active ? ' active' : '' } >\n <a href={ href } target={ this.props.target } onClick={ this.external ? () => {} : (e) => { e.preventDefault() } } className={ this.dropdown ? 'dropdown-item' : 'nav-link' } >\n { this.props.icon ? <FontAwesomeIcon icon={ this.props.icon } size='lg' /> : '' } { this.props.text }\n </a>\n </li>\n );\n }\n}\n\nclass Nav extends Component {\n constructor(props, context) {\n super(props, context)\n let act = (this.props.history.location.pathname === this.prefix)\n this.topNav = null\n this.bottomNav = null\n this.navigate = this.navigate.bind(this)\n this.setSize = this.setSize.bind(this)\n\n this.themeOptionStyles = {\n position: 'fixed',\n bottom: '5px',\n right: '5px'\n }\n\n this.state = {\n active: (act ? 
'/' : this.props.history.location.pathname),\n }\n }\n\n setSize() {\n setTimeout(() => {\n if (!this.topNav || !this.bottomNav) { return; }\n let topHeight = this.topNav.getBoundingClientRect().height\n let bottomHeight = this.bottomNav.getBoundingClientRect().height\n\n this.bottomNav.style.marginTop = topHeight + 'px'\n document.body.style.paddingTop = (topHeight + bottomHeight + 10) + 'px'\n }, 30)\n }\n\n navigate(e) {\n e.preventDefault()\n if (e.target.href === null || e.target.href === undefined ) { return }\n let href = e.target.href.replace(window.location.origin, '')\n let query = {}\n\n this.props.history.push({\n pathname: href,\n search: qs.stringify(query)\n })\n\n this.setState({ active: href })\n }\n\n NavTop() {\n return (\n <nav className=\"navbar navbar-light bg-light border-0 fixed-top\" ref={ (elm) => this.topNav = elm} >\n <div className=\"container-fluid\">\n <a href=\"/\" className=\"navbar-brand\" onClick={ this.navigate }>{ this.props.site_title }</a>\n <p className=\"navbar-text float-right m-0 p-0\">OpenC2 Orchestrator<br/>GUI Prototype</p>\n </div>\n </nav>\n )\n }\n\n NavBottom() {\n return (\n <nav className=\"navbar navbar-expand-lg navbar-dark bg-primary fixed-top\" ref={ (elm) => this.bottomNav = elm}>\n <div className=\"container-fluid\">\n <button className=\"navbar-toggler\" type=\"button\" data-toggle=\"collapse\" data-target=\"#navMain\" aria-controls=\"navMain\" aria-expanded=\"false\" aria-label=\"Toggle navigation\">\n <span className=\"navbar-toggler-icon\"></span>\n </button>\n <div className=\"collapse navbar-collapse\" id=\"navMain\">\n <ul className=\"nav navbar-nav mr-auto\">\n <NavItem href=\"/\" text=\"Home\" active={ this.state.active } click={ this.navigate }/>\n\n {/* <NavItem href=\"/orchestrator\" text=\"Orchestrators\" active={ this.state.active } click={ this.navigate.bind(this) }/> */}\n\n <NavItem href=\"/device\" text=\"Devices\" active={ this.state.active } click={ this.navigate }/>\n\n <NavItem href=\"/actuator\" text=\"Actuators\" active={ this.state.active } click={ this.navigate }/>\n\n <NavItem href=\"/command\" text=\"Commands\" active={ this.state.active } click={ this.navigate }/>\n\n <NavItem href=\"/command/generate\" text=\"Command Generator\" active={ this.state.active } click={ this.navigate }/>\n </ul>\n <ul className=\"nav navbar-nav ml-auto\">\n <li className=\"nav-item dropdown\">\n <a className=\"nav-link dropdown-toggle\" data-toggle=\"dropdown\" href=\"#\" role=\"button\" aria-haspopup=\"true\" aria-expanded=\"false\">Hello, { this.props.username }</a>\n <ul className=\"dropdown-menu dropdown-menu-right\">\n {\n this.props.admin ? (\n <NavItem dropdown external href=\"/admin\" text=\"Admin\" target=\"_blank\" active={ this.state.active } />\n ) : ''\n }\n {/*\n this.props.admin ? 
(\n <NavItem dropdown href=\"/admin\" text=\"Admin\" active={ this.state.active } click={ this.navigate }/>\n ) : ''\n */}\n {/* <a className=\"dropdown-item\" href=\"/preferences/\" >Site Preferences</a> */}\n {/* if user.preferences\n <li><a className=\"dropdown-item\" href=\"/account/preferences/\" >User Preferences</a></li>\n endif */}\n <NavItem dropdown href=\"/account/change_password/\" text=\"Change Password\" active={ this.state.active } click={ this.navigate }/>\n\n <li className=\"dropdown-divider\" />\n\n <NavItem dropdown href=\"/logout\" text=\"Logout\" active={ this.state.active } click={ this.navigate }/>\n </ul>\n </li>\n </ul>\n </div>\n </div>\n </nav>\n );\n }\n\n render() {\n if (this.props.isAuthenticated) {\n this.setSize()\n return (\n <div className=\"navbar-double fixed-top\">\n { this.NavTop() }\n { this.NavBottom() }\n <div style={ this.themeOptionStyles }>\n <ThemeChooser size='sm' change={ this.setSize } />\n </div>\n </div>\n )\n } else {\n return (<div></div>)\n }\n }\n}\n\nconst mapStateToProps = (state) => ({\n errors: state.Auth.errors,\n isAuthenticated: AuthActions.isAuthenticated(state.Auth),\n username: state.Auth.access == undefined ? 'User' : state.Auth.access.username,\n admin: state.Auth.access ? state.Auth.access.admin : false,\n site_title: state.Util.site_title\n})\n\nconst mapDispatchToProps = (dispatch) => ({\n logout: () => dispatch(AuthActions.logout())\n})\n\nexport default connect(mapStateToProps, mapDispatchToProps)(Nav)\n" }, { "alpha_fraction": 0.555343508720398, "alphanum_fraction": 0.5576335787773132, "avg_line_length": 23.952381134033203, "blob_id": "64fd933fbc36eb93194c794c5b14b163448b12ff", "content_id": "cf14453eb60749450878e5eca1384e9d559d60bb", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2620, "license_type": "permissive", "max_line_length": 122, "num_lines": 105, "path": "/orchestrator/gui/client/src/components/device/index.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\nimport { Helmet } from 'react-helmet-async'\n\nimport {\n Button,\n Modal,\n ModalBody,\n ModalFooter,\n ModalHeader\n} from 'reactstrap'\n\nimport {\n DeviceModal\n} from './lib'\n\nimport {\n RemotePageTable\n} from '../utils'\n\nimport * as DeviceActions from '../../actions/device'\n\nconst str_fmt = require('string-format')\n\nclass Devices extends Component {\n constructor(props, context) {\n super(props, context)\n\n this.meta = {\n title: str_fmt('{base} | {page}', {base: this.props.siteTitle, page: 'Devices'}),\n canonical: str_fmt('{origin}{path}', {origin: window.location.origin, path: window.location.pathname})\n }\n\n this.tableColumns = [\n {\n text: 'Name',\n dataField: 'name',\n sort: true\n }, {\n text: 'Transport',\n dataField: 'transport',\n formatter: (cell) => ( <span>{ cell.map(t => str_fmt('{serialization} via {protocol}', t)).join(' | ') }</span> ),\n sort: true\n }\n ]\n\n this.editOptions = {\n modal: DeviceModal,\n delete: this.props.deleteDevice\n }\n // this.props.getDevices()\n }\n\n render() {\n return (\n <div className=\"row mx-auto\">\n <Helmet>\n <title>{ this.meta.title }</title>\n <link rel=\"canonical\" href={ this.meta.canonical } />\n </Helmet>\n <div className=\"col-12\">\n <div className=\"col-12\">\n { 
this.props.admin ? <DeviceModal register className=\"float-right\" /> : '' }\n <h1>{ this.props.orchestrator.name } Devices</h1>\n </div>\n\n <RemotePageTable\n keyField='device_id'\n dataKey='Device.devices'\n dataGet={ this.props.getDevices }\n columns={ this.tableColumns }\n editRows\n editOptions={ this.editOptions }\n defaultSort={[\n {\n dataField: 'name',\n order: 'desc'\n },\n {\n dataField: 'transport',\n order: 'desc'\n }\n ]}\n />\n </div>\n </div>\n )\n }\n}\n\nconst mapStateToProps = (state) => ({\n siteTitle: state.Util.site_title,\n orchestrator: {\n name: state.Util.name || 'N/A'\n },\n admin: state.Auth.access.admin\n})\n\n\nconst mapDispatchToProps = (dispatch) => ({\n getDevices: (page, sizePerPage, sort) => dispatch(DeviceActions.getDevices(page, sizePerPage, sort)),\n deleteDevice: (dev) => dispatch(DeviceActions.deleteDevice(dev))\n})\n\nexport default connect(mapStateToProps, mapDispatchToProps)(Devices)\n" }, { "alpha_fraction": 0.5845070481300354, "alphanum_fraction": 0.591549277305603, "avg_line_length": 14.333333015441895, "blob_id": "f8ad30264351407176f523ce24ed6c6505d47754", "content_id": "affd3787023b7c315a913c2ac4d40be80d30b5a1", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 142, "license_type": "permissive", "max_line_length": 32, "num_lines": 9, "path": "/orchestrator/transport/https/HTTPS/certs/README.md", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# OpenC2 HTTPS Transport\n\n- Place certs here if generated.\n\n - Flask app cert names:\n ```bash\n server.crt\n server.key\n ```\n " }, { "alpha_fraction": 0.6991314888000488, "alphanum_fraction": 0.6991314888000488, "avg_line_length": 26.32203483581543, "blob_id": "bebcc9aece3ca2657703ba76a7c8a42015480188", "content_id": "725a37ec57160c16b1a621e5584cf77c035d178b", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1612, "license_type": "permissive", "max_line_length": 87, "num_lines": 59, "path": "/orchestrator/core/orc_server/backup/views/import_export.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "\"\"\"\nSave and load data for the Orchestrator\n\"\"\"\nfrom rest_framework import permissions\nfrom rest_framework.parsers import JSONParser, MultiPartParser\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework_files.viewsets import ImportExportModelViewSet\n\n# Local imports\nfrom actuator.models import Actuator, ActuatorSerializer\nfrom device.models import Device, DeviceSerializer\nfrom ..utils import (\n # MsgPack\n MessagePackParser,\n MessagePackRenderer,\n # XLS\n XLSParser,\n XLSRenderer,\n # XLSX\n # XLSXParser,\n # XLSXRenderer,\n # XML\n XMLParser,\n XMLRenderer\n\n)\n\n\nclass ImportExportBase(ImportExportModelViewSet):\n permission_classes = (permissions.IsAdminUser,)\n parser_classes = (MultiPartParser,)\n renderer_classes = (JSONRenderer, MessagePackRenderer, XLSRenderer, XMLRenderer)\n file_content_parser_classes = (JSONParser, MessagePackParser, XLSParser, XMLParser)\n filename = 'Backup'\n\n _removeActions = [\n \"create\",\n \"retrieve\",\n \"update\",\n \"partial_update\",\n \"destroy\",\n \"list\"\n ]\n\n def 
__init__(self, *args, **kwargs):\n self.filename = self.__class__.__name__.replace(\"ImportExport\", \"\") + \"s\"\n super().__init__(*args, **kwargs)\n\n\nclass ActuatorImportExport(ImportExportBase):\n lookup_field = 'actuator_id'\n queryset = Actuator.objects.order_by('actuator_id')\n serializer_class = ActuatorSerializer\n\n\nclass DeviceImportExport(ImportExportBase):\n lookup_field = 'device_id'\n queryset = Device.objects.order_by('device_id')\n serializer_class = DeviceSerializer\n" }, { "alpha_fraction": 0.5994962453842163, "alphanum_fraction": 0.6042821407318115, "avg_line_length": 34.87349319458008, "blob_id": "f2e395a0ccccf267429cde88477702a0c180d5ed", "content_id": "64ed64df3e968f9eb5bc222f00861627b177a29e", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11910, "license_type": "permissive", "max_line_length": 130, "num_lines": 332, "path": "/base/modules/utils/twisted/sb_utils/twisted_tools.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# pylint: disable=C0111,C0103,R0205\n\"\"\"\n# based on:\n# - txamqp-helpers by Dan Siemon <[email protected]> (March 2010)\n# http://git.coverfire.com/?p=txamqp-twistd.git;a=tree\n# - Post by Brian Chandler\n# https://groups.google.com/forum/#!topic/pika-python/o_deVmGondk\n# - Pika Documentation\n# https://pika.readthedocs.io/en/latest/examples/twisted_example.html\nFire up this test application via `twistd -ny twisted_service.py`\nThe application will answer to requests to exchange \"foobar\" and any of the\nrouting_key values: \"request1\", \"request2\", or \"request3\"\nwith messages to the same exchange, but with routing_key \"response\"\nWhen a routing_key of \"task\" is used on the exchange \"foobar\",\nthe application can asynchronously run a maximum of 2 tasks at once\nas defined by PREFETCH_COUNT\n\"\"\"\nimport json\nimport pika\nimport requests\n\nfrom dataclasses import dataclass, field\nfrom pika.adapters import twisted_connection\nfrom twisted.internet import defer, protocol, reactor\nfrom typing import Callable, List, Tuple\n\nfrom sb_utils import (\n ObjectDict,\n safe_json\n)\n\nPREFETCH_COUNT = 2\n\n\n@dataclass\nclass SendMessage:\n exchange: str\n routing_key: str\n message: dict\n headers: dict = field(default_factory=dict)\n\n @property\n def message_bytes(self) -> bytes:\n return json.dumps(self.message).encode()\n\n\n@dataclass\nclass ListenQueue:\n routing_key: str\n callback: Callable\n exchange: str = \"\"\n\n\nclass PikaProtocol(twisted_connection.TwistedProtocolConnection):\n \"\"\"\n The protocol is created and destroyed each time a connection is created and lost\n \"\"\"\n connected: bool = False\n debug: bool = False\n name: str = \"Pika:AMQP:Protocol\"\n factory: 'PikaFactory'\n _channel: pika.adapters.twisted_connection.TwistedChannel\n\n @defer.inlineCallbacks\n def connectionReady(self):\n self._channel = yield self.channel()\n yield self._channel.basic_qos(prefetch_count=PREFETCH_COUNT)\n self.connected = True\n yield self._channel.confirm_delivery()\n for listener in self.factory.read_list:\n yield self.setup_read(listener)\n\n self.send()\n\n @defer.inlineCallbacks\n def read(self, listener: ListenQueue):\n \"\"\"\n Add an exchange to the list of exchanges to read from\n \"\"\"\n if self.connected:\n # Connection is already up. 
Add the reader\n            self.setup_read(listener)\n\n    # Send all messages that are queued in the factory\n    def send(self):\n        \"\"\"\n        If connected, send all waiting messages\n        \"\"\"\n        if self.connected:\n            while len(self.factory.queued_messages) > 0:\n                sender = self.factory.queued_messages.pop(0)\n                self.send_message(sender)\n\n    # Do all the work that configures a listener\n    @defer.inlineCallbacks\n    def setup_read(self, listener: ListenQueue):\n        \"\"\"\n        This function does the work to read from an exchange\n        \"\"\"\n        queue = listener.routing_key  # For now use the routing key as the queue name\n        consumer_tag = listener.exchange  # Use the exchange name for the consumer tag for now\n\n        # Declare the exchange in case it doesn't exist\n        yield self._channel.exchange_declare(\n            exchange=listener.exchange,\n            exchange_type=\"topic\",\n            durable=True,\n            auto_delete=False\n        )\n\n        # Declare the queue and bind to it\n        yield self._channel.queue_declare(\n            queue=queue,\n            durable=True,\n            exclusive=False,\n            auto_delete=False\n        )\n\n        yield self._channel.queue_bind(queue=queue, exchange=listener.exchange, routing_key=listener.routing_key)\n\n        # Consume\n        (queue, _) = yield self._channel.basic_consume(queue=queue, auto_ack=False, consumer_tag=consumer_tag)\n\n        # Now setup the readers\n        self._set_queue_read(queue, listener.callback)\n\n    def _read_item(self, item, queue, callback: Callable):\n        \"\"\"\n        Callback function which is called when an item is read\n        \"\"\"\n        # Setup another read of this queue\n        self._set_queue_read(queue, callback)\n\n        (channel, method_frame, header_frame, body) = item\n        headers = ObjectDict(header_frame.headers)\n        body = safe_json(body)\n\n        self._log(f\"{method_frame.exchange} ({method_frame.routing_key}): {body}\", system=self.name)\n        d = defer.maybeDeferred(callback, headers, body)\n        d.addCallbacks(\n            lambda _: channel.basic_ack(delivery_tag=method_frame.delivery_tag),\n            lambda _: channel.basic_nack(delivery_tag=method_frame.delivery_tag)\n        )\n\n    @staticmethod\n    def _read_item_err(error):\n        print(error)\n\n    @defer.inlineCallbacks\n    def send_message(self, sender: SendMessage):\n        \"\"\"\n        Send a single message\n        \"\"\"\n        self._log(f\"{sender.exchange} ({sender.routing_key}): {sender.message}\", system=self.name)\n\n        # First declare the exchange just in case it doesn't exist\n        if sender.exchange:\n            yield self._channel.exchange_declare(\n                exchange=sender.exchange,\n                exchange_type=\"topic\",\n                durable=True,\n                auto_delete=False\n            )\n\n        # Then declare the queue just in case it doesn't exist\n        yield self._channel.queue_declare(\n            queue=sender.routing_key,\n            durable=True,\n            auto_delete=False\n        )\n\n        try:\n            yield self._channel.basic_publish(\n                exchange=sender.exchange or \"\",\n                routing_key=sender.routing_key,\n                body=sender.message_bytes,\n                properties=pika.spec.BasicProperties(\n                    delivery_mode=2,\n                    headers=sender.headers\n                )\n            )\n        except Exception as error:  # pylint: disable=W0703\n            self._log(f\"Error while sending message: {error}\", system=self.name)\n\n    def _set_queue_read(self, queue, callback):\n        # Now setup the readers\n        d = queue.get()\n        d.addCallback(self._read_item, queue, callback)\n        d.addErrback(self._read_item_err)\n\n    def _log(self, msg, **kwargs):\n        if self.debug:\n            pre = kwargs.pop(\"system\") if \"system\" in kwargs else self.name\n            pre = f\"{pre} => \" if pre else \"\"\n\n            post = \", \".join([f\"{k}: \`{v}\`\" for k, v in kwargs.items()])\n            post = f\" {{{post}}}\" if post else \"\"\n\n            print(f\"{pre}{msg}{post}\")\n\n\nclass PikaFactory(protocol.ReconnectingClientFactory):\n    name = 
\"Pika:AMQP:Factory\"\n client: PikaProtocol = None # The protocol instance\n connected: bool = False\n protocol = PikaProtocol\n\n # Helper Vars\n debug: bool = False\n queued_messages: List[SendMessage]\n read_list: List[ListenQueue]\n\n def __init__(self, host: str = None, port: int = None, vhost: str = None, creds: Tuple[str, str] = None, debug: bool = False):\n self.host = host or 'localhost'\n self.port = port or 5672\n self.vhost = vhost or '/'\n self.creds = creds or (\"guest\", \"guest\")\n self.debug = debug\n\n self.queued_messages = [] # List of messages waiting to be sent\n self.read_list = [] # List of queues to listen on\n\n # Make the TCP connection\n reactor.connectTCP(self.host, self.port, self) # pylint: disable=no-member\n\n def buildProtocol(self, addr):\n parameters = pika.ConnectionParameters(\n host=self.host,\n port=self.port,\n virtual_host=self.vhost,\n credentials=pika.PlainCredentials(self.creds[0], self.creds[1])\n )\n p = self.protocol(parameters)\n p.factory = self # Tell the protocol about this factory\n p.debug = self.debug # Tell the protocol about this factory's debug status\n\n self.client = p # Store the protocol\n\n # Reset the reconnection delay since we're connected now\n self._log(\"Connected\", system=self.name)\n self.connected = True\n self.resetDelay()\n\n return p\n\n def startedConnecting(self, connector):\n self._log(\"Started to connect\", system=self.name)\n\n def clientConnectionFailed(self, connector, reason):\n self._log(f\"Connection failed. Reason: {reason.value}\", system=self.name)\n protocol.ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)\n\n def clientConnectionLost(self, connector, reason): # pylint: disable=W0221\n self._log(f\"Lost connection. Reason: {reason.value}\", system=self.name)\n protocol.ReconnectingClientFactory.clientConnectionLost(self, connector, reason)\n\n def send_message(self, exchange=None, routing_key=None, message=None, headers=None):\n sender = SendMessage(exchange=exchange, routing_key=routing_key, message=message, headers=headers)\n self.queued_messages.append(sender)\n if self.client is not None:\n self.client.send()\n\n def read_messages(self, exchange, routing_key, callback):\n \"\"\"\n Configure an exchange to be read from\n \"\"\"\n listen = ListenQueue(exchange=exchange, routing_key=routing_key, callback=callback)\n self.read_list.append(listen)\n if self.client is not None:\n self.client.read(listen)\n\n def get_exchanges(self):\n \"\"\"\n Get a list of exchange names on the queue\n :return: list of exchange names\n \"\"\"\n try:\n vhost = self.vhost[1:] if self.vhost.startswith(\"/\") else self.vhost\n url = f\"http://{self.host}:15672/api/exchanges/{vhost}?columns=name\"\n kwargs = dict(auth=self.creds) if self.creds else {}\n response = requests.get(url, **kwargs).json()\n return list(filter(None, [que.get(\"name\", \"\") for que in response]))\n except Exception: # pylint: disable=broad-except\n return []\n\n def get_queues(self):\n \"\"\"\n Get a list of queue names on the queue\n :return: list of queue names\n \"\"\"\n try:\n vhost = self.vhost[1:] if self.vhost.startswith(\"/\") else self.vhost\n url = f\"http://{self.host}:15672/api/queues/{vhost}?columns=name\"\n kwargs = dict(auth=self.creds) if self.creds else {}\n response = requests.get(url, **kwargs).json()\n return list(filter(None, [que.get(\"name\", \"\") for que in response]))\n except Exception: # pylint: disable=broad-except\n return []\n\n def get_binds(self):\n \"\"\"\n Get a list of exchange/topic 
bindings\n :return: list of exchange/topic bindings\n \"\"\"\n try:\n binds = []\n vhost = self.vhost[1:] if self.vhost.startswith(\"/\") else self.vhost\n url = f\"http://{self.host}:15672/api/bindings/{vhost}\"\n kwargs = dict(auth=self.creds) if self.creds else {}\n response = requests.get(url, **kwargs).json()\n for queue in self.get_queues():\n for bind in response:\n # for bind in manager.get_queue_bindings(vhost=\"/\", qname=queue):\n if bind.get(\"vhost\") == self.vhost and bind.get(\"destination\") == queue: # and\n binds.append({\n \"exchange\": bind.get(\"source\", \"\"),\n \"routing_key\": bind.get(\"routing_key\", \"\")\n })\n return binds\n except Exception: # pylint: disable=broad-except\n return []\n\n def _log(self, msg, **kwargs):\n if self.debug:\n pre = kwargs.pop(\"system\") if \"system\" in kwargs else self.name\n pre = f\"{pre} => \" if pre else \"\"\n\n post = \", \".join([f\"{k}: `{v}`\" for k, v in kwargs.items()])\n post = f\" {{{post}}}\" if post else \"\"\n\n print(f\"{pre}{msg}{post}\")\n" }, { "alpha_fraction": 0.5933962464332581, "alphanum_fraction": 0.6103773713111877, "avg_line_length": 24.85365867614746, "blob_id": "9c991e4e7b6ce7c42f49134f864f78ae26172abd", "content_id": "2549a7d8fffe010431af0aeef9a4be1ef454f437", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1060, "license_type": "permissive", "max_line_length": 100, "num_lines": 41, "path": "/orchestrator/gui/client/src/reducers/util.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import * as util from '../actions/util'\n\nimport {\n titleCase\n} from '../components/utils'\n\nconst initialState = {\n site_title: 'Orchestrator',\n name: 'Orchestrator',\n message: 'MESSAGE',\n id: '123456789',\n protocols: [],\n serializations: []\n}\n\nexport default (state=initialState, action=null) => {\n switch(action.type) {\n case util.INFO_SUCCESS:\n return {\n site_title: titleCase(action.payload.name.toLowerCase() || 'Orchestrator'),\n name: titleCase(action.payload.name || 'Orchestrator'),\n message: action.payload.message || 'MESSAGE',\n id: action.payload.id || '123456789',\n protocols: action.payload.protocols || [],\n serializations: action.payload.serializations || []\n }\n\n case util.INFO_FAILURE:\n console.log('Utils Failure', action.type, action)\n return {\n ...state,\n errors: {\n ...state.errors,\n [action.type]: action.payload.response || {'non_field_errors': action.payload.statusText},\n }\n }\n\n default:\n return state\n }\n}\n" }, { "alpha_fraction": 0.607937216758728, "alphanum_fraction": 0.6236371397972107, "avg_line_length": 35.39682388305664, "blob_id": "bdda694374ed247516549aff37ca7ed265fbd3bd", "content_id": "7593489ba2f1391beec688deeda87f6450af7771", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2293, "license_type": "permissive", "max_line_length": 118, "num_lines": 63, "path": "/orchestrator/transport/coap/COAP/coap_server.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import os\n\nfrom coapthon import defines\nfrom coapthon.resources.resource import Resource\nfrom coapthon.server.coap import CoAP\n\nfrom sb_utils import 
decode_msg, encode_msg, Producer\n\n\nclass TransportResource(Resource):\n def __init__(self, name=\"TransportResource\", coap_server=None):\n super(TransportResource, self).__init__(name, coap_server, visible=True, observable=True, allow_children=True)\n\n def render_POST_advanced(self, request, response):\n # retrieve Content_type stored as dict of types:values (ex. \"application/json\": 50)\n encoding = [k for k, v in defines.Content_types.items() if v == request.content_type]\n encoding = \"json\" if len(encoding) != 1 else encoding[0].split(\"/\")[1]\n\n # read custom options added for O.I.F. and retrieve them based on their number\n # opts = {o.name: o.value for o in request.options}\n\n # Create headers for the orchestrator from the request\n headers = dict(\n correlationID=f\"{request.mid:x}\",\n socket=(request.source[0] + \":\" + str(request.source[1])),\n encoding=encoding,\n transport=\"coap\",\n # orchestratorID=\"orchid1234\", # orchestratorID is currently an unused field, this is a placeholder\n )\n\n # Send response back to Orchestrator\n producer = Producer(os.environ.get(\"QUEUE_HOST\", \"localhost\"), os.environ.get(\"QUEUE_PORT\", \"5672\"))\n producer.publish(\n message=decode_msg(request.payload, encoding),\n headers=headers,\n exchange=\"orchestrator\",\n routing_key=\"response\"\n )\n\n # build and send response\n response.payload = encode_msg({\n \"status\": 200,\n \"status_text\": \"received\"\n }, encoding)\n response.code = defines.Codes.CONTENT.number\n return self, response\n\n\nclass CoAPServer(CoAP):\n def __init__(self, host, port):\n CoAP.__init__(self, (host, port))\n self.add_resource(\"transport/\", TransportResource())\n\n\nif __name__ == \"__main__\":\n server = CoAPServer(\"0.0.0.0\", 5683)\n try:\n print(\"Server listening on 0.0.0.0:5683\")\n server.listen(10)\n except KeyboardInterrupt:\n print(\"Server Shutdown\")\n server.close()\n print(\"Exiting...\")\n" }, { "alpha_fraction": 0.52163165807724, "alphanum_fraction": 0.550061821937561, "avg_line_length": 25.09677505493164, "blob_id": "bc1c1e0c78b054e483fc534bde5daa7fc652d49b", "content_id": "1d93dfc30adc46df66206423c79d217175a6fff8", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 809, "license_type": "permissive", "max_line_length": 73, "num_lines": 31, "path": "/orchestrator/core/orc_server/device/migrations/0005_auto_20191220_1353.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.7 on 2019-12-20 13:53\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('device', '0004_remove_device_multi_actuator'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='transport',\n name='exchange',\n ),\n migrations.RemoveField(\n model_name='transport',\n name='routing_key',\n ),\n migrations.AddField(\n model_name='transport',\n name='channel',\n field=models.CharField(default='routing_key', max_length=30),\n ),\n migrations.AddField(\n model_name='transport',\n name='topic',\n field=models.CharField(default='topic', max_length=30),\n ),\n ]\n" }, { "alpha_fraction": 0.6163890361785889, "alphanum_fraction": 0.6172894835472107, "avg_line_length": 33.16923141479492, "blob_id": "91b7a8eefa9bdd9b5cfaf585e319a7aa81560b21", "content_id": "c4d2a904d9f2a3d7ba8a1d84e6730bf75c50ee70", 
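The CoAP transport above recovers a serialization name by reverse-searching coapthon's defines.Content_types table, which maps MIME strings to numeric CoAP content-format codes (for example, "application/json" is 50). A minimal standalone sketch of that lookup; the helper name and the "json" fallback are chosen here for illustration and are not part of the project:

    from coapthon import defines

    def encoding_from_content_format(code, default="json"):
        # Find the MIME string registered for this numeric content-format code...
        matches = [mime for mime, num in defines.Content_types.items() if num == code]
        # ...and keep only the subtype, mirroring the server's split("/")[1] step,
        # e.g. encoding_from_content_format(50) -> "json"
        return matches[0].split("/")[1] if len(matches) == 1 else default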
"detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2221, "license_type": "permissive", "max_line_length": 104, "num_lines": 65, "path": "/orchestrator/core/orc_server/command/processors.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import random\n\n# Local imports\nfrom actuator.models import Actuator\nfrom orchestrator.models import Protocol\nfrom tracking import log\nfrom utils import decode_msg, get_or_none, isHex, safe_cast\nfrom .models import SentHistory, ResponseHistory\n\n\ndef command_response(body, message):\n \"\"\"\n Process a command received from an actuator\n :param body: command body\n :param message: complete message (headers, meta, ...)\n :return: None\n \"\"\"\n log.info(msg=f'Message response received: {body}')\n headers = getattr(message, \"headers\", {})\n actuator = None\n\n if headers.get('error', False):\n correlation_ID = headers['source'].get('correlationID', '')\n opts = {\n '_coap_id' if isHex(correlation_ID) else 'command_id': correlation_ID\n }\n\n command = get_or_none(SentHistory, **opts)\n log.error(msg=f'Message Failure: cmd - {command.command_id}, {body}')\n\n response = {\n 'error': body\n }\n\n else:\n act_host, act_port = headers.get('socket', '').split(':')[0:2]\n correlation_ID = headers.get('correlationID', '')\n opts = {\n '_coap_id' if isHex(correlation_ID) else 'command_id': correlation_ID\n }\n\n command = get_or_none(SentHistory, **opts)\n profile = headers.get('profile', '')\n\n encode = headers.get('encode', 'json')\n response = decode_msg(body, encode)\n\n actuator = get_or_none(\n model=Actuator,\n profile__iexact=profile,\n device__transport__host__iexact=act_host,\n device__transport__port=safe_cast(act_port, int),\n device__transport__protocol=get_or_none(Protocol, name__iexact=headers.get('transport', ''))\n )\n\n if hasattr(actuator, '__iter__'):\n log.warn(msg=f'Multiple actuators match for command response - {command.command_id}')\n actuator = random.choice(actuator)\n\n try:\n cmd_rsp = ResponseHistory(command=command, actuator=actuator, response=response)\n cmd_rsp.save()\n # TODO: change to more specific exceptions\n except Exception as e: # pylint: disable=broad-except\n log.error(msg=f'Message response failed to save: {e}')\n" }, { "alpha_fraction": 0.7459853887557983, "alphanum_fraction": 0.7459853887557983, "avg_line_length": 28.782608032226562, "blob_id": "be7a7950154f0b87cc9f952798219cddf44975e1", "content_id": "177a655e42c703849f2271374d788ea05bdf20d9", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 685, "license_type": "permissive", "max_line_length": 83, "num_lines": 23, "path": "/orchestrator/core/orc_server/tracking/views/viewsets.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from rest_framework import permissions, viewsets\n\nfrom ..models import EventLog, EventLogSerializer, RequestLog, RequestLogSerializer\n\n\nclass RequestLogViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n API endpoint that allows logs to be viewed\n \"\"\"\n permission_classes = (permissions.IsAdminUser, )\n serializer_class = RequestLogSerializer\n\n queryset = 
RequestLog.objects.order_by('-requested_at')\n\n\nclass EventLogViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n API endpoint that allows logs to be viewed\n \"\"\"\n permission_classes = (permissions.IsAdminUser, )\n serializer_class = EventLogSerializer\n\n queryset = EventLog.objects.order_by('-occurred_at')\n" }, { "alpha_fraction": 0.6735395193099976, "alphanum_fraction": 0.6735395193099976, "avg_line_length": 21.384614944458008, "blob_id": "2b00ee500745a62acc51e7c3e7cfd41aab20a717", "content_id": "e0ac50b21baffc369c8b1bd77a128fe592692146", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 291, "license_type": "permissive", "max_line_length": 42, "num_lines": 13, "path": "/orchestrator/core/orc_server/es_mirror/decorators.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.db.models import Model\nfrom elasticsearch_dsl import Document\n\nfrom .apps import ES_Hooks\n\n\ndef ElasticModel(doc: Document) -> Model:\n def wrapper(model: Model) -> Model:\n if ES_Hooks:\n ES_Hooks.add_model(model, doc)\n\n return model\n return wrapper\n" }, { "alpha_fraction": 0.6152828335762024, "alphanum_fraction": 0.6192523837089539, "avg_line_length": 29.836734771728516, "blob_id": "7b60b3d35ef64deafecd9df35af3775e63f69d18", "content_id": "fb094f3629a10798a1674ab684fd3b628b84585c", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3023, "license_type": "permissive", "max_line_length": 136, "num_lines": 98, "path": "/orchestrator/gui/client/src/actions/generate.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "// Actions for client side generate page\nimport { RSAA } from 'redux-api-middleware';\nimport { withGUIAuth } from './util'\n\nconst str_fmt = require('string-format')\n\n// Helper Functions\n// N/A - N/A - set schema locally\nconst SCHEMA_DEFINE = '@@generate/SCHEMA_DEFINE';\nexport const SCHEMA_SUCCESS = '@@generate/SCHEMA_SUCCESS';\nexport const SCHEMA_FAILURE = '@@generate/SCHEMA_FAILURE';\nexport const setSchema = (schema) => ({\n [RSAA]: {\n endpoint: '',\n method: 'OPTIONS',\n types: [\n SCHEMA_DEFINE,\n {\n type: SCHEMA_SUCCESS,\n meta: {\n schema: schema\n }\n },\n SCHEMA_FAILURE\n ]\n }\n})\n\n// API Calls\n// GET - /api/actuator?fields=actuator_id,name,profile,device - get base info of all actuators\nconst ACTUATOR_INFO_REQUEST = '@@generate/ACTUATOR_INFO_REQUEST'\nexport const ACTUATOR_INFO_SUCCESS = '@@generate/ACTUATOR_INFO_SUCCESS'\nexport const ACTUATOR_INFO_FAILURE = '@@generate/ACTUATOR_INFO_FAILURE'\nexport const actuatorInfo = (fields=['actuator_id', 'name', 'profile', 'device'], page=1, count=10) => ({\n [RSAA]: {\n endpoint: str_fmt('/api/actuator?fields={fields}&page={page}&length={count}', {fields: fields.join(','), page: page, count: count}),\n method: 'GET',\n headers: withGUIAuth(),\n types: [\n ACTUATOR_INFO_REQUEST,\n {\n type: ACTUATOR_INFO_SUCCESS,\n meta: {\n fields: fields,\n page: count === 10 ? 
1 : ++page,\n count: count\n }\n },\n ACTUATOR_INFO_FAILURE\n ]\n }\n})\n\n// GET - /api/actuator?fields=actuator_id,name,profile - get base info of all actuators\nconst ACTUATOR_SELECT_REQUEST = '@@generate/ACTUATOR_SELECT_REQUEST'\nexport const ACTUATOR_SELECT_SUCCESS = '@@generate/ACTUATOR_SELECT_SUCCESS'\nexport const ACTUATOR_SELECT_FAILURE = '@@generate/ACTUATOR_SELECT_FAILURE'\nexport const actuatorSelect = (actUUID, type='actuator') => ({\n [RSAA]: {\n endpoint: str_fmt('/api/actuator/{act}/?fields=schema,profile', {act: actUUID}),\n method: 'GET',\n headers: withGUIAuth(),\n types: [\n ACTUATOR_SELECT_REQUEST,\n {\n type: ACTUATOR_SELECT_SUCCESS,\n meta: {\n type: type\n }\n },\n ACTUATOR_SELECT_FAILURE\n ]\n }\n})\n\n// GET - /api/device?fields=device_id.name - get base info of all devices\nconst DEVICE_INFO_REQUEST = '@@generate/DEVICE_INFO_REQUEST'\nexport const DEVICE_INFO_SUCCESS = '@@generate/DEVICE_INFO_SUCCESS'\nexport const DEVICE_INFO_FAILURE = '@@generate/DEVICE_INFO_FAILURE'\nexport const deviceInfo = (fields=['device_id', 'name', 'transport'], page=1, count=10) => ({\n [RSAA]: {\n endpoint: str_fmt('/api/device?fields={fields}&page={page}&length={count}', {fields: fields.join(','), page: page, count: count}),\n method: 'GET',\n headers: withGUIAuth(),\n types: [\n DEVICE_INFO_REQUEST,\n {\n type: DEVICE_INFO_SUCCESS,\n meta: {\n fields,\n page: count === 10 ? 1 : ++page,\n count\n }\n },\n DEVICE_INFO_FAILURE\n ]\n }\n})\n\n" }, { "alpha_fraction": 0.7238805890083313, "alphanum_fraction": 0.7238805890083313, "avg_line_length": 23.363636016845703, "blob_id": "745027a06b7c7e33ade33189e2921cb05c7e0982", "content_id": "6444e0dc4f147e9a454c2ed05a11ad1f22580e2a", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 268, "license_type": "permissive", "max_line_length": 52, "num_lines": 11, "path": "/orchestrator/core/orc_server/orchestrator/views/gui.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.http import HttpResponseRedirect\nfrom django.shortcuts import reverse\n\n\ndef gui_redirect(request):\n \"\"\"\n GUI redirect to API\n :param request: request instance\n :return: HTTP Redirect\n \"\"\"\n return HttpResponseRedirect(reverse('api.root'))\n" }, { "alpha_fraction": 0.538297176361084, "alphanum_fraction": 0.5514902472496033, "avg_line_length": 35.56640625, "blob_id": "71fb1e5d63c1662e57dfb7e12681e6d6fb0dc023", "content_id": "8fc422dd5ee1f24763fe1745a7cd150fb1e68846", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18722, "license_type": "permissive", "max_line_length": 129, "num_lines": 512, "path": "/base/modules/utils/root/sb_utils/message/pysmile/decode.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\"\"\"\nSMILE Decode\n\"\"\"\nimport json\nimport logging\nimport struct\n\nfrom dataclasses import dataclass\nfrom enum import Enum\nfrom typing import (\n Callable,\n Dict,\n List,\n Union\n)\n\nfrom . 
import util\nfrom .constants import (\n BYTE_MARKER_END_OF_CONTENT,\n BYTE_MARKER_END_OF_STRING,\n HEADER_BIT_HAS_RAW_BINARY,\n HEADER_BIT_HAS_SHARED_NAMES,\n HEADER_BIT_HAS_SHARED_STRING_VALUES,\n HEADER_BIT_VERSION,\n HEADER_BYTE_1,\n HEADER_BYTE_2,\n HEADER_BYTE_3,\n INT_MISC_BINARY_7BIT,\n INT_MISC_BINARY_RAW,\n NULL_BIT,\n TOKEN_BYTE_FLOAT_32,\n TOKEN_BYTE_FLOAT_64,\n TOKEN_LITERAL_EMPTY_STRING,\n TOKEN_LITERAL_END_ARRAY,\n TOKEN_LITERAL_END_OBJECT,\n TOKEN_LITERAL_FALSE,\n TOKEN_LITERAL_NULL,\n TOKEN_LITERAL_START_ARRAY,\n TOKEN_LITERAL_START_OBJECT,\n TOKEN_LITERAL_TRUE,\n TOKEN_MISC_LONG_TEXT_ASCII,\n TOKEN_MISC_LONG_TEXT_UNICODE,\n TOKEN_PREFIX_FP,\n TOKEN_PREFIX_INTEGER,\n TOKEN_PREFIX_KEY_ASCII,\n TOKEN_PREFIX_KEY_SHARED_LONG,\n TOKEN_PREFIX_KEY_SHARED_SHORT,\n TOKEN_PREFIX_KEY_UNICODE,\n TOKEN_PREFIX_SHARED_STRING_LONG,\n TOKEN_RESERVED\n)\n\nlog = logging.getLogger()\nif not log.handlers:\n log.addHandler(logging.NullHandler())\n\n\nclass SMILEDecodeError(Exception):\n pass\n\n\nclass DecodeMode(Enum):\n HEAD = 0 # Waiting for magic header :)\n ROOT = 1 # Waiting for Root object\n ARRAY = 2 # In array context\n VALUE = 3 # Waiting for value\n KEY = 4 # Waiting for key\n DONE = 5 # Done -- remain here until reset\n BAD = 6 # Got a data error -- remain here until reset\n\n\n@dataclass\nclass SmileHeader:\n version: int\n raw_binary: bool = True\n shared_keys: bool = True\n shared_values: bool = True\n\n\nclass SmileDecoder:\n input: bytearray\n output: list # List[...]\n mode: DecodeMode\n error: str\n index: int\n nested_depth: int\n in_array: List[bool]\n first_array_element: List[bool]\n first_key: List[bool]\n header: SmileHeader\n shared_key_strings: List[str]\n shared_value_strings: List[str]\n _decoders: Dict[DecodeMode, Callable]\n\n def __init__(self, smile: Union[bytes, str] = None):\n self._init(smile)\n self._decoders = {\n DecodeMode.HEAD: self._decode_header,\n DecodeMode.ROOT: self._decode_RAV,\n DecodeMode.ARRAY: self._decode_RAV,\n DecodeMode.VALUE: self._decode_RAV,\n DecodeMode.KEY: self._decode_key\n }\n\n def _init(self, smile: Union[bytes, str] = None) -> None:\n # Input\n if smile:\n smile = smile if isinstance(smile, bytes) else bytes(smile, \"UTF-8\")\n self.input = bytearray(smile)\n else:\n self.input = bytearray()\n\n # Output\n self.output = []\n\n # Current Decoder State\n self.mode = DecodeMode.HEAD\n\n # Error message\n self.error = None\n\n # Current read index\n self.index = 0\n\n # current nest level\n self.nested_depth = 0\n\n # true if in array context\n self.in_array = [False] * 30\n\n # true if the next token is the first value of an array (used for printing)\n self.first_array_element = [False] * 30\n\n # true if the next token is the first key of an object (used for printing)\n self.first_key = [False] * 30\n\n # smile header\n self.header = None\n\n # Cached Keys for back references\n self.shared_key_strings = []\n\n # Cached Values for back references\n self.shared_value_strings = []\n\n @staticmethod\n def _escape(o: Union[bytearray, bytes, str]) -> str:\n o = o.decode(\"utf-8\") if isinstance(o, (bytearray, bytes)) else o\n return o.replace(\"\\n\", r\"\\n\").replace(\"\\\"\", r\"\\\"\")\n\n def get_value(self) -> str: # bytes:\n return \"\".join(map(str, self.output))\n\n # Decoding methods\n def copy_key_string(self, n: int = 0) -> None:\n key_str = self._escape(self.input[self.index:self.index + n])\n if self.header.shared_keys:\n self.save_key_string(key_str)\n self.write(f\"\\\"{key_str}\\\":\")\n self.index += n\n\n def 
copy_shared_key_string(self) -> None:\n if not self.header.shared_keys:\n raise SMILEDecodeError(\"Cannot lookup shared key, sharing disabled!\")\n try:\n sh_str = self.shared_key_strings[self.input[self.index - 1] - 0x40]\n except IndexError:\n log.debug(\"self.index: %d\", self.index)\n log.debug(\"self.shared_key_strings: %s\", self.shared_key_strings)\n else:\n self.write(f\"\\\"{sh_str}\\\":\")\n\n def copy_shared_value_string(self) -> None:\n if not self.header.shared_values:\n raise SMILEDecodeError(\"Cannot lookup shared value, sharing disabled!\")\n try:\n svr = self.shared_value_strings[self.input[self.index - 1] - 1]\n except IndexError:\n log.debug(\"self.index: %d\", self.index)\n log.debug(\"self.shared_value_strings: %s\", self.shared_value_strings)\n else:\n self.write(f\"\\\"{svr}\\\"\")\n\n def copy_value_string(self, n: int = 0) -> None:\n val_str = self._escape(self.input[self.index:self.index + n])\n if self.header.shared_values:\n self.save_value_string(val_str)\n self.write(f\"\\\"{val_str}\\\"\")\n self.index += n\n\n def copy_variable_length_string(self) -> None:\n i = self.input.index(b\"\\xfc\", self.index)\n self.write(f\"\\\"{self._escape(self.input[self.index:i])}\\\"\")\n self.index = i + 1\n\n def pull_bits(self, n: int) -> bytes:\n ret_s = b\"\"\n for _ in range(abs(n)):\n byt = self.pull_byte()\n if byt is None:\n break\n ret_s += bytes([byt])\n return ret_s\n\n def pull_byte(self) -> Union[int, None]:\n if len(self.input) > self.index:\n rtn = self.input[self.index]\n self.index += 1\n return rtn\n self.mode = DecodeMode.DONE\n return None\n\n def save_key_string(self, key_str: str) -> None:\n log.debug(\"key_str: %s\", key_str)\n self.shared_key_strings.append(key_str)\n\n def save_value_string(self, val_str: str) -> None:\n log.debug(\"val_str: %s\", val_str)\n self.shared_value_strings.append(val_str)\n\n def varint_decode(self) -> int:\n smile_zzvarint_decode = 0\n tmp = self.input[self.index:]\n for _, ch in enumerate(tmp):\n self.index += 1\n if ch & 0x80:\n smile_zzvarint_decode <<= 6\n smile_zzvarint_decode |= (ch & 0x3F)\n break\n smile_zzvarint_decode <<= 7\n smile_zzvarint_decode |= ch\n return smile_zzvarint_decode\n\n def write(self, *args) -> None:\n if args:\n self.output.extend(args)\n\n def zzvarint_decode(self) -> None:\n self.write(util.zigzag_decode(self.varint_decode()))\n\n # Actual decoding\n def _decode_header(self) -> None:\n head = self.pull_bits(3)\n if not (head and head.startswith(HEADER_BYTE_1 + HEADER_BYTE_2 + HEADER_BYTE_3)):\n self.mode = DecodeMode.BAD\n self.error = \"Invalid Header!\"\n return\n self.mode = DecodeMode.ROOT\n features = self.pull_byte()\n self.header = SmileHeader(\n version=features & HEADER_BIT_VERSION,\n raw_binary=bool((features & HEADER_BIT_HAS_RAW_BINARY) >> 2),\n shared_keys=bool(features & HEADER_BIT_HAS_SHARED_NAMES),\n shared_values=bool((features & HEADER_BIT_HAS_SHARED_STRING_VALUES) >> 1)\n )\n\n def _decode_RAV(self) -> None:\n byt = self.pull_byte()\n if byt is None:\n log.debug(\"No bytes left to read!\")\n self.mode = DecodeMode.DONE\n return\n log.debug(\"Pulled Byte: 0x{:x}\".format(byt))\n\n if self.in_array[self.nested_depth]:\n if self.first_array_element[self.nested_depth]:\n self.first_array_element[self.nested_depth] = False\n elif byt != TOKEN_LITERAL_END_ARRAY:\n self.write(\",\")\n\n if byt == NULL_BIT:\n log.debug(\"Token: Null Bit (skip)\")\n elif 0x01 <= byt <= 0x1F:\n log.debug(\"Token: Shared Value String\")\n self.copy_shared_value_string()\n elif 
TOKEN_LITERAL_EMPTY_STRING <= byt <= TOKEN_LITERAL_TRUE:\n # Simple literals, numbers\n msg, b = {\n TOKEN_LITERAL_EMPTY_STRING: (\"Token: Empty String\", \"\\\"\\\"\"),\n TOKEN_LITERAL_NULL: (\"Token: Literal Null\", \"null\"),\n TOKEN_LITERAL_FALSE: (\"Token: Literal False\", \"false\"),\n TOKEN_LITERAL_TRUE: (\"Token: Literal True\", \"true\"),\n }.get(byt)\n log.debug(msg)\n self.write(b)\n elif TOKEN_PREFIX_INTEGER <= byt < TOKEN_PREFIX_FP:\n # Integral numbers\n log.debug(\"Token: Integral Numbers\")\n smile_value_length = byt & 0x03\n if smile_value_length < 2:\n self.zzvarint_decode()\n elif smile_value_length == 2:\n # BigInteger\n log.warning(\"Not Yet Implemented: Value BigInteger\")\n else:\n # Reserved for future use\n log.warning(\"Reserved: integral numbers with length >= 3\")\n elif TOKEN_PREFIX_FP <= byt <= 0x2B:\n # Floating point numbers\n if byt == TOKEN_BYTE_FLOAT_32:\n b1, b2, b3, b4, b5 = self.pull_bits(5)\n byt = (b1 | (b2 << 7) | (b3 << 14) | (b4 << 21) | (b5 << 28))\n try:\n flt = util.bits_to_float(byt)\n except struct.error:\n flt = util.long_bits_to_float(byt)\n self.write(flt)\n elif byt == TOKEN_BYTE_FLOAT_64:\n b1, b2, b3, b4, b5, b6, b7, b8, b9 = self.pull_bits(9)\n byt = (b1 | (b2 << 7) | (b3 << 14) | (b4 << 21) | (b5 << 28) | (b6 << 35) | (b7 << 42) | (b8 << 49) | (b9 << 56))\n flt = util.long_bits_to_float(byt)\n self.write(flt)\n else:\n log.warning(\"Not Yet Implemented\")\n elif 0x2C <= byt <= 0x3F:\n # Reserved for future use\n log.warning(\"Reserved: 0x2C <= value <= 0x3F\")\n elif 0x40 <= byt <= 0x5F or 0x80 <= byt <= 0x9F:\n # Tiny ASCII/Unicode\n log.debug(\"Token: Tiny ASCII/Unicode\")\n smile_value_length = (byt & 0x1F) + 1\n self.copy_value_string(smile_value_length)\n elif 0x60 <= byt <= 0x7F or 0xA0 <= byt <= 0xBF:\n # Small ASCII/Unicode\n log.debug(\"Token: Small ASCII/Unicode\")\n smile_value_length = (byt & 0x1F) + 33\n self.copy_value_string(smile_value_length)\n elif 0xC0 <= byt <= 0xDF:\n # Small Integers\n log.debug(\"Token: Small Integer\")\n self.write(util.zigzag_decode(byt & 0x1F))\n else:\n # Misc binary / text / structure markers\n if TOKEN_MISC_LONG_TEXT_ASCII <= byt < TOKEN_MISC_LONG_TEXT_UNICODE:\n # Long (variable length) ASCII text\n log.debug(\"Token: Long (var length) ASCII Test\")\n self.copy_variable_length_string()\n elif TOKEN_MISC_LONG_TEXT_UNICODE <= byt < INT_MISC_BINARY_7BIT:\n log.warning(\"Not Yet Implemented: Value Long Unicode\")\n elif INT_MISC_BINARY_7BIT <= byt < TOKEN_PREFIX_SHARED_STRING_LONG:\n log.warning(\"Not Yet Implemented: Value Long Shared String Reference\")\n elif TOKEN_PREFIX_SHARED_STRING_LONG <= byt < HEADER_BIT_VERSION:\n # Binary, 7-bit encoded\n log.warning(\"Not Yet Implemented: Value Binary\")\n elif HEADER_BIT_VERSION <= byt < TOKEN_LITERAL_START_ARRAY:\n log.warning(\"Reserved: 0xF0 <= value <= 0xF8\")\n elif byt == TOKEN_LITERAL_START_ARRAY:\n # START_ARRAY\n log.debug(\"Token: Start Array\")\n self.write(\"[\")\n self.nested_depth += 1\n self.in_array[self.nested_depth] = True\n self.first_array_element[self.nested_depth] = True\n self.first_key[self.nested_depth] = False\n elif byt == TOKEN_LITERAL_END_ARRAY:\n # END_ARRAY\n log.debug(\"Token: End Array\")\n self.write(\"]\")\n self.nested_depth -= 1\n elif byt == TOKEN_LITERAL_START_OBJECT:\n # START_OBJECT\n log.debug(\"Token: Start Object\")\n self.write(\"{\")\n self.nested_depth += 1\n self.in_array[self.nested_depth] = False\n self.first_array_element[self.nested_depth] = False\n self.first_key[self.nested_depth] = True\n 
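# After START_OBJECT the next token must be a key, so the decoder switches to KEY mode\n                # and returns immediately; the array/value bookkeeping at the bottom of this method\n                # is deliberately skipped for this branch.\n                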
self.mode = DecodeMode.KEY\n return\n elif byt == TOKEN_LITERAL_END_OBJECT:\n log.debug(\"Token: End Object\")\n log.warning(\"Reserved: value == 0xFB\")\n elif byt == BYTE_MARKER_END_OF_STRING:\n log.error(\"Found end-of-String marker (0xFC) in value mode\")\n elif byt == INT_MISC_BINARY_RAW:\n log.warning(\"Not Yet Implemented: Raw Binary Data\")\n elif byt == BYTE_MARKER_END_OF_CONTENT:\n log.debug(\"Token: End Marker\")\n self.mode = DecodeMode.DONE\n return\n\n if not self.in_array[self.nested_depth]:\n self.mode = DecodeMode.KEY\n\n def _decode_key(self) -> None:\n byt = self.pull_byte()\n if byt is None or byt == BYTE_MARKER_END_OF_CONTENT:\n log.debug(\"No bytes left to read!\")\n self.mode = DecodeMode.DONE\n return\n log.debug(\"Pulled Byte: 0x{:x}\".format(byt))\n\n try:\n if self.first_key[self.nested_depth]:\n self.first_key[self.nested_depth] = False\n elif byt != TOKEN_LITERAL_END_OBJECT:\n self.write(\",\")\n except IndexError:\n self.first_key.append(False)\n\n # Byte ranges are divided in 4 main sections (64 byte values each)\n if 0x00 <= byt <= 0x1F:\n log.warning(\"Reserved: 0x01 <= key <= 0x1F\")\n elif byt == TOKEN_LITERAL_EMPTY_STRING:\n # Empty String\n log.debug(\"Token: Literal Empty String\")\n self.write(\"\\\"\\\"\")\n elif TOKEN_LITERAL_NULL <= byt <= 0x2F:\n log.warning(\"Reserved: 0x21 <= key <= 0x2F\")\n elif TOKEN_PREFIX_KEY_SHARED_LONG <= byt <= 0x33:\n # \"Long\" shared key name reference\n log.warning(\"Not Yet Implemented: Long Shared Key Name Reference\")\n elif byt == 0x32:\n # Long (not-yet-shared) Unicode name, 64 bytes or more\n log.warning(\"Not Yet Implemented: Long Key Name\")\n elif 0x35 <= byt <= 0x39:\n log.warning(\"Reserved: 0x35 <= key <= 0x39\")\n elif byt == 0x3A:\n log.error(\"0x3A NOT allowed in Key mode\")\n elif 0x3B <= byt <= 0x3F:\n log.warning(\"Reserved: 0x3B <= key <= 0x3F\")\n elif TOKEN_PREFIX_KEY_SHARED_SHORT <= byt <= 0x7F:\n # \"Short\" shared key name reference (1 byte lookup)\n log.debug(\"Token: Short Shared Key Name Reference\")\n self.copy_shared_key_string()\n elif TOKEN_PREFIX_KEY_ASCII <= byt <= 0xBF:\n # Short Ascii names\n # 5 LSB used to indicate lengths from 2 to 32 (bytes == chars)\n log.debug(\"Token: Short ASCII Name\")\n smile_key_length = (byt & 0x1F) + 1\n self.copy_key_string(smile_key_length)\n elif TOKEN_PREFIX_KEY_UNICODE <= byt <= TOKEN_RESERVED:\n # Short Unicode names\n # 5 LSB used to indicate lengths from 2 to 57\n log.debug(\"Token: Short Unicode Name\")\n smile_key_length = (byt - 0xC0) + 2\n self.copy_key_string(smile_key_length)\n elif TOKEN_LITERAL_START_ARRAY <= byt <= TOKEN_LITERAL_START_OBJECT:\n log.warning(\"Reserved: 0xF8 <= key <= 0xFA\")\n elif byt == TOKEN_LITERAL_END_OBJECT:\n log.debug(\"Token: Literal End Object\")\n self.write(\"}\")\n self.nested_depth -= 1\n try:\n in_arry = self.in_array[self.nested_depth]\n except IndexError:\n in_arry = False\n self.mode = DecodeMode.VALUE if in_arry else DecodeMode.KEY\n return\n elif byt >= BYTE_MARKER_END_OF_STRING:\n log.warning(\"Reserved: key >= 0xFC\")\n self.mode = DecodeMode.VALUE\n\n def decode(self, smile: Union[bytes, str] = None) -> Union[dict, list]:\n \"\"\"\n Decode SMILE format string into a Python Object\n :param smile: SMILE formatted data string\n :returns: Decoded python object\n \"\"\"\n if smile:\n self._init(smile)\n elif len(self.input) > 0:\n pass\n else:\n raise ValueError(\"Input not defined, cannot decode value\")\n\n while self.mode not in (DecodeMode.BAD, DecodeMode.DONE):\n decoder = 
self._decoders.get(self.mode, None)\n if decoder:\n decoder()\n if self.mode == DecodeMode.BAD:\n continue\n\n elif self.mode == DecodeMode.BAD:\n if self.error is None:\n self.error = \"Unknown Error!\"\n break\n\n elif self.mode == DecodeMode.DONE:\n log.debug(\"Decoding Done!\")\n break\n\n if self.mode == DecodeMode.BAD:\n raise SMILEDecodeError(f\"Bad State: {self.error}\", self.get_value())\n ret_val = self.get_value()\n try:\n jsonified = json.loads(ret_val)\n except (ValueError, UnicodeDecodeError):\n msg = f\"Unable to jsonify string: {ret_val}\"\n log.exception(msg)\n raise SMILEDecodeError(msg, ret_val)\n return jsonified\n\n @classmethod\n def decode_smile(cls, smile: Union[bytes, str]) -> Union[dict, list]:\n \"\"\"\n Decode SMILE format string into a Python Object\n :param smile: SMILE formatted data string\n :returns: Decoded python object\n \"\"\"\n return cls().decode(smile)\n\n\ndef decode(smile: Union[bytes, str]) -> Union[dict, list]:\n \"\"\"\n Decode SMILE format string into a Python Object\n :param smile: SMILE formatted data string\n :returns: Decoded python object\n \"\"\"\n log.debug(\"Decoding: %s\", smile)\n return SmileDecoder.decode_smile(smile)\n" }, { "alpha_fraction": 0.6930232644081116, "alphanum_fraction": 0.7023255825042725, "avg_line_length": 18.545454025268555, "blob_id": "830e96524277ecad88bbf2d5e1e0d12310c2e9a2", "content_id": "daf7f6c98f28290438892511dc9a252220196034", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 215, "license_type": "permissive", "max_line_length": 59, "num_lines": 11, "path": "/orchestrator/core/orc_server/conformance/tests/sfpf_tests.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "\"\"\"\nOpenC2 Stateful Packet Filtering Profile (SFPF) Conformance\n\"\"\"\nfrom test_setup import SetupTestCase\n\n\nclass SFPF_UnitTests(SetupTestCase):\n \"\"\"\n SFPF OpenC2 Conformance Tests\n \"\"\"\n profile = \"SFPF\"\n" }, { "alpha_fraction": 0.6346844434738159, "alphanum_fraction": 0.636394739151001, "avg_line_length": 28.831632614135742, "blob_id": "77306686d1dfe7bfaef6946214a1eb8a233d0b03", "content_id": "62af3a72e14af97f2865192d951b231e330c6025", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5847, "license_type": "permissive", "max_line_length": 111, "num_lines": 196, "path": "/orchestrator/core/orc_server/command/models.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import uuid\n\nfrom django.contrib.auth.models import User\nfrom django.dispatch import receiver\nfrom django.db import models\nfrom django.db.models.signals import pre_save\nfrom django.utils import timezone\nfrom jsonfield import JSONField\nfrom rest_framework import serializers\n\n# Local Imports\nfrom actuator.models import Actuator, ActuatorSerializer\nfrom es_mirror.decorators import ElasticModel\nfrom tracking import log\nfrom utils import randBytes, get_or_none\nfrom .documents import CommandDocument, ResponseDocument\n\n\n@ElasticModel(doc=CommandDocument)\nclass SentHistory(models.Model):\n \"\"\"\n Command Sent History model\n \"\"\"\n command_id = models.UUIDField(\n default=uuid.uuid4,\n editable=False,\n 
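# uuid.uuid4 is passed as a callable rather than called, so Django evaluates it once\n        # per row and every record gets a fresh v4 UUID; editable=False keeps the generated\n        # key out of model forms and the admin.\n        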
help_text=\"Unique UUID of the command\",\n primary_key=True,\n )\n _coap_id = models.CharField(\n blank=True,\n help_text=\"Unique 16-bit hex ID for CoAP\",\n max_length=10,\n null=True,\n unique=True\n )\n user = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n help_text=\"User that sent the command\"\n )\n received_on = models.DateTimeField(\n default=timezone.now,\n help_text=\"Time the command was received\"\n )\n actuators = models.ManyToManyField(\n Actuator,\n help_text=\"Actuators the command was sent to\"\n )\n command = JSONField(\n blank=True,\n help_text=\"Command that was received\",\n null=True\n )\n\n class Meta:\n verbose_name_plural = \"Sent History\"\n\n @property\n def responses(self):\n \"\"\"\n Command responses received from actuators\n :return: command responses\n \"\"\"\n return ResponseSerializer(ResponseHistory.objects.filter(command=self), many=True).data\n\n @property\n def coap_id(self):\n try:\n return bytes.fromhex(self._coap_id) if self._coap_id else b''\n except ValueError:\n return f\"Invalid hex bytes: {self.coap_id}\"\n\n @coap_id.setter\n def coap_id(self, val=None):\n if val and isinstance(val, (bytes, bytearray, int, str)):\n val = bytes.fromhex(val) if isinstance(val, str) else (f\"{val:x}\" if isinstance(val, int) else val)\n self._coap_id = val.hex()\n else:\n raise ValueError(\"invalid type for coap_id field\")\n\n def gen_coap_id(self):\n self._coap_id = randBytes(2).hex()\n return self._coap_id\n\n def __str__(self):\n return \"Sent History: {} - {}\".format(self.command_id, self.user)\n\n\n@ElasticModel(doc=ResponseDocument)\nclass ResponseHistory(models.Model):\n \"\"\"\n Command Response History model\n \"\"\"\n command = models.ForeignKey(\n SentHistory,\n on_delete=models.CASCADE,\n help_text=\"Command that was received\"\n )\n received_on = models.DateTimeField(\n default=timezone.now,\n help_text=\"Time the respose was received\"\n )\n actuator = models.ForeignKey(\n Actuator,\n help_text=\"Actuator response was received from\",\n null=True,\n on_delete=models.PROTECT\n )\n response = JSONField(\n blank=True,\n help_text=\"Response that was received\",\n null=True\n )\n\n class Meta:\n verbose_name_plural = \"Response History\"\n\n def __str__(self):\n return \"Response History: command_id {}\".format(self.command.command_id)\n\n\n@receiver(pre_save, sender=SentHistory)\ndef check_command_id(sender, instance=None, **kwargs):\n \"\"\"\n Validate the command id given is a UUID\n :param sender: sender instance - SentHistory\n :param instance: SENDER instance\n :param kwargs: key/value args\n :return: None\n \"\"\"\n if instance.command_id is None:\n log.info(msg=f\"Command submitted without command id, command id generated\")\n instance.command_id = uuid.uuid4()\n instance.command.update({\"id\": str(instance.command_id)})\n else:\n try:\n val = uuid.UUID(str(instance.command_id), version=4)\n except ValueError:\n log.info(msg=f\"Invalid command id received: {instance.command_id}\")\n raise ValueError(\"Invalid command id\")\n\n tmp = get_or_none(sender, command_id=val)\n if val is None and tmp is not None:\n log.info(msg=f\"Duplicate command id received: {instance.command_id}\")\n raise ValueError(\"command id has been used\")\n\n\nclass ResponseSerializer(serializers.ModelSerializer):\n \"\"\"\n Command Response API Serializer\n \"\"\"\n received_on = serializers.DateTimeField()\n actuator = serializers.SlugRelatedField(\n allow_null=True,\n read_only=True,\n slug_field=\"name\"\n )\n response = serializers.JSONField()\n\n class 
Meta:\n model = ResponseHistory\n fields = (\"received_on\", \"actuator\", \"response\")\n\n\nclass HistorySerializer(serializers.ModelSerializer):\n \"\"\"\n Command Sent API Serializer\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(HistorySerializer, self).__init__(*args, **kwargs)\n self.request = (kwargs.get(\"context\") or {}).get(\"request\", None)\n\n command_id = serializers.UUIDField(format=\"hex_verbose\")\n user = serializers.SlugRelatedField(\n read_only=True,\n slug_field=\"username\"\n )\n received_on = serializers.DateTimeField()\n actuators = ActuatorSerializer(read_only=True, many=True)\n command = serializers.JSONField()\n responses = serializers.JSONField()\n status = serializers.SerializerMethodField()\n\n class Meta:\n model = SentHistory\n fields = (\"command_id\", \"user\", \"received_on\", \"actuators\", \"command\", \"responses\", \"status\")\n\n def get_status(self, obj):\n rtn = \"processing\"\n\n num_rsps = len(obj.responses)\n if num_rsps >= 1:\n rtn = f\"processed {num_rsps} response{'s' if num_rsps > 1 else ''}\"\n\n return rtn\n" }, { "alpha_fraction": 0.6290963888168335, "alphanum_fraction": 0.6382833123207092, "avg_line_length": 28.526315689086914, "blob_id": "589e29ca840767bb287613fefc1487037ce56527", "content_id": "5cfdbd227e495211f832bcc04d596a856d7f108c", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5847, "license_type": "permissive", "max_line_length": 111, "num_lines": 196, "path": "/orchestrator/core/orc_server/device/models.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import uuid\n\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError as DjangoValidationError\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db import models\nfrom django.db.models.signals import m2m_changed, post_delete\nfrom django.db.models.query import QuerySet\nfrom django.db.utils import IntegrityError\nfrom django.dispatch import receiver\nfrom drf_queryfields import QueryFieldsMixin\nfrom drf_writable_nested import WritableNestedModelSerializer\nfrom rest_framework import serializers\n\n# Local imports\nfrom orchestrator.models import Protocol, Serialization\nfrom utils import get_or_none, prefixUUID\n\n\ndef defaultName():\n \"\"\"\n Unique name generation\n :return: 30 character unique name\n \"\"\"\n return prefixUUID(\"Device\", 30)\n\n\ndef shortID():\n \"\"\"\n Short ID generator\n :return: 16 character UUID\n \"\"\"\n return prefixUUID(\"\", 16)\n\n\nclass Transport(models.Model):\n \"\"\"\n Transport instance object base\n \"\"\"\n transport_id = models.CharField(\n default=shortID,\n editable=False,\n help_text=\"Unique ID of the transport\",\n max_length=30,\n unique=True,\n )\n host = models.CharField(\n default=\"127.0.0.1\",\n help_text=\"Hostname/IP of the device\",\n max_length=60\n )\n port = models.IntegerField(\n default=8080,\n help_text=\"Port of the device\",\n validators=[\n MinValueValidator(1),\n MaxValueValidator(65535)\n ]\n )\n protocol = models.ForeignKey(\n Protocol,\n help_text=\"Protocol supported by the device\",\n on_delete=models.CASCADE\n )\n serialization = models.ManyToManyField(\n Serialization,\n help_text=\"Serialization(s) supported by the device\"\n )\n topic = models.CharField(\n default=\"topic\",\n help_text=\"Topic for the specific 
device, only necessary for Pub/Sub protocols\",\n max_length=30\n )\n channel = models.CharField(\n default=\"channel\",\n help_text=\"Channel for the specific device, only necessary for Pub/Sub protocols\",\n max_length=30\n )\n\n def save(self, force_insert=False, force_update=False, using=None, update_fields=None):\n \"\"\"\n Override the save function for added validation\n :param force_insert: force an SQL INSERT\n :param force_update: force an SQL UPDATE\n :param using: database alias to save against\n :param update_fields: restrict the save to the given fields\n :return: None\n \"\"\"\n if not self.protocol.pub_sub:\n trans = get_or_none(Transport, host=self.host, port=self.port, protocol=self.protocol)\n trans = trans if isinstance(trans, (list, QuerySet)) else [trans]\n if len(trans) > 1:\n raise DjangoValidationError(\"host, port, and protocol must make a unique pair unless a pub/sub protocol\")\n\n super(Transport, self).save(force_insert, force_update, using, update_fields)\n\n def __str__(self):\n return \"{}:{} - {}\".format(self.host, self.port, self.protocol.name)\n\n\nclass Device(models.Model):\n \"\"\"\n Device instance object base\n \"\"\"\n device_id = models.UUIDField(\n default=uuid.uuid4,\n help_text=\"Unique ID of the device\",\n unique=True\n )\n name = models.CharField(\n default=defaultName,\n help_text=\"Unique display name of the device\",\n max_length=30,\n unique=True\n )\n transport = models.ManyToManyField(\n Transport,\n help_text=\"Transports the device supports\"\n )\n note = models.TextField(\n blank=True,\n help_text=\"Extra information about the device\",\n null=True\n )\n\n @property\n def url_name(self):\n \"\"\"\n URL Formatted device name\n :return: url name\n \"\"\"\n return self.name.lower().replace(\" \", \"_\")\n\n def __str__(self):\n return \"{}\".format(self.name)\n\n class Meta:\n permissions = (\n (\"use_device\", \"Can use device\"),\n )\n\n\nclass DeviceGroup(models.Model):\n \"\"\"\n Device Groups instance object base\n \"\"\"\n name = models.CharField(\n max_length=80,\n help_text=\"Unique display name of the device group\",\n unique=True\n )\n users = models.ManyToManyField(\n User,\n blank=True,\n help_text=\"Users in the group\"\n )\n\n devices = models.ManyToManyField(\n Device,\n blank=True,\n help_text=\"Devices available to users in the group\"\n )\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = \"group\"\n verbose_name_plural = \"groups\"\n\n\n@receiver(post_delete, sender=Device)\ndef remove_transports(sender, instance=None, **kwargs):\n \"\"\"\n Cleanup unused transports on device delete\n :param sender: model \"sending\" the action - Device\n :param instance: SENDER instance\n :param kwargs: key/value args\n :return: None\n \"\"\"\n for trans in Transport.objects.all():\n devs = list(trans.device_set.all())\n if len(devs) == 0:\n trans.delete()\n\n\n@receiver(m2m_changed, sender=Device.transport.through)\ndef verify_unique(sender, instance=None, **kwargs):\n \"\"\"\n On Device transport change, check the updated transport is unique\n :param sender: sender instance - Device\n :param instance: SENDER instance\n :param kwargs: key/value args\n :return: None\n \"\"\"\n action = kwargs.get(\"action\", None)\n transports = [get_or_none(Transport, pk=t) for t in (kwargs.get(\"pk_set\") or [])]\n transports = list(filter(None, transports))\n\n for trans in transports:\n count = trans.device_set.count()\n if action == \"pre_add\" and count > 1:\n raise IntegrityError(\"Transport cannot be associated with more than one device\")\n\n if action in (\"post_clear\", \"post_remove\") and count == 0:\n trans.delete()\n\n\nclass 
TransportSerializer(serializers.ModelSerializer):\n \"\"\"\n Transport API Serializer\n \"\"\"\n transport_id = serializers.CharField(max_length=30, default=shortID, read_only=True)\n host = serializers.CharField(max_length=60, default=\"127.0.0.1\")\n port = serializers.IntegerField(default=8080, min_value=1, max_value=65535)\n protocol = serializers.SlugRelatedField(\n queryset=Protocol.objects.all(),\n slug_field=\"name\"\n )\n topic = serializers.CharField(max_length=30, default=\"topic\")\n channel = serializers.CharField(max_length=30, default=\"channel\")\n pub_sub = serializers.SerializerMethodField()\n serialization = serializers.SlugRelatedField(\n queryset=Serialization.objects.all(),\n slug_field=\"name\",\n many=True\n )\n\n class Meta:\n model = Transport\n fields = (\"transport_id\", \"host\", \"port\", \"protocol\", \"topic\", \"channel\", \"pub_sub\", \"serialization\")\n\n def get_pub_sub(self, obj):\n ps = obj.protocol.pub_sub\n return ps if isinstance(ps, bool) else False\n\n\nclass DeviceSerializer(QueryFieldsMixin, WritableNestedModelSerializer):\n \"\"\"\n Device API Serializer\n \"\"\"\n device_id = serializers.UUIDField(format=\"hex_verbose\")\n transport = TransportSerializer(many=True)\n # schema = serializers.JSONField(required=False)\n note = serializers.CharField(allow_blank=True)\n\n class Meta:\n model = Device\n fields = (\"device_id\", \"name\", \"transport\", \"note\")\n" }, { "alpha_fraction": 0.7126263380050659, "alphanum_fraction": 0.7139840126037598, "avg_line_length": 58.72972869873047, "blob_id": "6f0e82b7407e71d21df31df21a8e79a0449264b8", "content_id": "d2e466d07892af9309c57db67f3e15a8d267350f", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6629, "license_type": "permissive", "max_line_length": 138, "num_lines": 111, "path": "/orchestrator/gui/server/ReadMe.md", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# OASIS TC Open: oif-orchestrator-gui\n\n## Server\n### About this Image\n- Work in progress. Not available as of yet.\n\n### How to use this image\n- Work in progress. Not available as of yet.\n\n## GUI\n### About this Image\n- This image is Alpine 3.10 with a simple GUI for use with the GUI Server\n- UI port - 80\n\n### How to use this image\nNote: Pulling an image requires using a specific tag (server or gui); the latest tag is not supported.\n\n- Prior to the GUI starting, the Core should be started and running.\n- Container Env Args:\n\t- `ORC_HOST` - Hostname/IP address of the system running the Core\n\t- `ORC_PORT` - Port the Core runs on (the Docker port, not the mapped port)\n\n- Adding Certs\n\t1. Create a directory in httpd named `conf`\n\t2. 
TODO...\n\nEnvironment Variables\n\n| Variable | Type | Description | Default |\n| ----------- | ----------- | ----------- | ----------- |\n| ORC_HOST | String | Hostname/IP address of the system running the Orchestrator Core | None |\n| ORC_PORT | Integer | Port the Orchestrator Core is running the API on | None |\n\n### Resources\n- General\n\t- [Another JSON Validator](https://www.npmjs.com/package/ajv) - JSON schema validator\n\t- [Bootstrap](https://getbootstrap.com/)\n\t - [Bootstrap](https://www.npmjs.com/package/bootstrap) - Bootstrap for node\n\t - [jQuery](https://www.npmjs.com/package/jquery) - jQuery for node\n\t - [Reactstrap](https://www.npmjs.com/package/reactstrap) - Bootstrap v4 components for React\n\t- [Django Channels](https://www.npmjs.com/package/django-channels) - WebSocket support for Django Channels\n \t- [FontAwesome](https://fontawesome.com/) - Additional Icons\n\t - [Core SVG Icons](https://www.npmjs.com/package/@fortawesome/fontawesome-svg-core)\n\t - [Solid SVG Icons](https://www.npmjs.com/package/@fortawesome/free-solid-svg-icons)\n\t - [React FontAwesome](https://www.npmjs.com/package/@fortawesome/react-fontawesome)\n\t- [History](https://www.npmjs.com/package/history) - History management for single page apps\n\t- [JWT Decode](https://www.npmjs.com/package/jwt-decode) - JSON Web Tokens\n\t- [Moment](https://www.npmjs.com/package/moment) - DateTime formatting/parsing\n\t- [Query String](https://www.npmjs.com/package/query-string) - Parse and stringify URL query strings\n\t- [React](https://reactjs.org/) - Core Framework\n \t- [Bootstrap Tables](https://www.npmjs.com/package/react-bootstrap-table-next/)\n \t- [Bootstrap Tables Paginator](https://www.npmjs.com/package/react-bootstrap-table2-paginator)\n \t- [Confirm Alert](https://www.npmjs.com/package/react-confirm-alert) - Confirmation popup\n\t\t- [Connected React Router](https://www.npmjs.com/package/connected-react-router) - Router State Sync, replacement for react-router-redux\n \t- [Document Meta](https://www.npmjs.com/package/react-document-meta) - Dynamic page metadata\n\t\t- [DOM](https://www.npmjs.com/package/react-dom)\n \t- [JSON Editor](https://www.npmjs.com/package/react-json-editor-ajrm) - JSON Syntax Editor\n \t- [JSON Pretty](https://www.npmjs.com/package/react-json-pretty) - JSON Pretty Format\n \t- [Moment](https://www.npmjs.com/package/react-moment) - Date/Time Formatting\n\t\t- [Redux](https://www.npmjs.com/package/react-redux) - React Redux Bindings\n\t\t- [Router DOM](https://www.npmjs.com/package/react-router-dom)\n\t\t- [Scripts](https://www.npmjs.com/package/react-scripts)\n\t\t- [Toastify](https://www.npmjs.com/package/react-toastify) - Notifications\n \t- [Transition Group](https://www.npmjs.com/package/react-transition-group)\n - [Redux](https://redux.js.org/) - State container\n \t- [API Middleware](https://www.npmjs.com/package/redux-api-middleware)\n \t- [Logger](https://www.npmjs.com/package/redux-logger)\n\t\t- [Persist](https://www.npmjs.com/package/redux-persist)\n\t\t- [Persist Transform Filter](https://www.npmjs.com/package/redux-persist-transform-filter)\n\t- [String Format](https://www.npmjs.com/package/string-format)\n\t- [vkBeautify](https://www.npmjs.com/package/vkbeautify) - JSON, XML, CSS, SQL pretty/minify\n\t- [Warning](https://www.npmjs.com/package/warning)\n\n- Development\n\t- [CSSO](https://www.npmjs.com/package/csso) - CSS Optimizer\n\t- [fs-extra](https://www.npmjs.com/package/fs-extra) - File system methods\n\t- [File 
Download](https://www.npmjs.com/package/download-file) - File Downloads\n\t- [Named Regex Groups](https://www.npmjs.com/package/named-regexp-groups) - Named group extraction\n\t- [Strict URI Encode](https://www.npmjs.com/package/strict-uri-encode)\n\t- [Sync Requests](https://www.npmjs.com/package/sync-requests) - Synchronous HTTP requests\n\t- [WebPack](https://www.npmjs.com/package/webpack) - Module bundler and builder\n\t\t- [Bundle Tracker](https://www.npmjs.com/package/webpack-bundle-tracker)\n\t\t- [CLI](https://www.npmjs.com/package/webpack-cli)\n\t\t- [Dev Server](https://www.npmjs.com/package/webpack-dev-server)\n\t\t- [HTTP Proxy Middleware](https://www.npmjs.com/package/http-proxy-middleware)\n\t\t- [Loaders](https://webpack.js.org/loaders) - Loader Info\n\t\t\t- [Babel](https://www.npmjs.com/package/babel-loader)\n\t\t\t\t- [Babel Core](https://www.npmjs.com/package/@babel/core)\n\t\t\t\t- [Proposal Object Rest Spread](https://www.npmjs.com/package/@babel/plugin-proposal-object-rest-spread)\n\t\t\t\t- [Preset Env](https://www.npmjs.com/package/@babel/preset-env)\n\t\t\t\t- [Preset React](https://www.npmjs.com/package/@babel/preset-react)\n\t\t\t- [CSS](https://www.npmjs.com/package/css-loader)\n\t\t\t- [File](https://www.npmjs.com/package/file-loader)\n\t\t\t- [Less](https://www.npmjs.com/package/less-loader) - Loads Less to CSS\n\t\t\t\t- [Less](https://www.npmjs.com/package/less) - Core package\n\t\t\t- [Style](https://www.npmjs.com/package/style-loader)\n\t\t\t- [URL](https://www.npmjs.com/package/url-loader)\n\t\t- [Merge](https://www.npmjs.com/package/webpack-merge) - Config Merge\n\t\t- [Plugins](https://webpack.js.org/plugins) - Plugin Info\n\t\t\t- [Clean](https://www.npmjs.com/package/clean-webpack-plugin)\n\t\t\t- [Copy](https://www.npmjs.com/package/copy-webpack-plugin)\n\t\t\t- [Deadcode](https://www.npmjs.com/package/webpack-deadcode-plugin) - Find and notify of unused code\n\t\t\t- [Favicons](https://www.npmjs.com/package/favicons-webpack-plugin)\n\t\t\t- [HTML](https://www.npmjs.com/package/html-webpack-plugin)\n\t\t\t- [Mini CSS](https://www.npmjs.com/package/mini-css-extract-plugin)\n\t\t\t- [Optimize CSS Assets](https://www.npmjs.com/package/optimize-css-assets-webpack-plugin)\n\t\t\t- [Terser](https://www.npmjs.com/package/terser-webpack-plugin)\n\n#### Interesting Modules\n- [Entity Editor](https://www.npmjs.com/package/react-entity-editor)\n- [SpreadSheet Grid](https://www.npmjs.com/package/react-spreadsheet-grid)\n- [React Admin](https://github.com/marmelab/react-admin)" }, { "alpha_fraction": 0.6918238997459412, "alphanum_fraction": 0.6918238997459412, "avg_line_length": 25.5, "blob_id": "631abd514f2759ed0957eb7ead4843cc8120b42f", "content_id": "adae754116c0120e212f22ae4221fee20d2b0b19", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 636, "license_type": "permissive", "max_line_length": 55, "num_lines": 24, "path": "/orchestrator/gui/client/src/reducers/index.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import { combineReducers } from 'redux'\nimport { connectRouter } from 'connected-react-router'\n\nimport actuator from './actuator'\nimport account from './account'\nimport auth from './auth'\nimport command from './command'\nimport device from './device'\nimport generate from './generate'\n// import socket from 
'./socket'\nimport util from './util'\n\nexport default (history) => combineReducers({\n 'router': connectRouter(history), // MUST BE 'router'\n // Custom Reducers\n 'Actuator': actuator,\n 'Account': account,\n 'Auth': auth,\n 'Command': command,\n 'Device': device,\n 'Generate': generate,\n // 'Socket': socket,\n 'Util': util\n})\n" }, { "alpha_fraction": 0.6193259358406067, "alphanum_fraction": 0.6241963505744934, "avg_line_length": 28.670520782470703, "blob_id": "56e819c7e651247f6a9fd31fadab7e4ec228e4ab", "content_id": "bcd3b65d9c0e74bcd69032f281dd72460d9c3725", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5133, "license_type": "permissive", "max_line_length": 170, "num_lines": 173, "path": "/base/modules/utils/root/sb_utils/general.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import base64\nimport binascii\nimport json\nimport re\nimport struct\nimport sys\nimport uuid\n\nfrom typing import (\n Any,\n Callable,\n Dict,\n Type,\n Union\n)\n\n\n# Util Functions\ndef toStr(s: Any) -> str:\n \"\"\"\n Convert a given type to a default string\n :param s: item to convert to a string\n :return: converted string\n \"\"\"\n return s.decode(sys.getdefaultencoding(), 'backslashreplace') if hasattr(s, 'decode') else str(s)\n\n\ndef prefixUUID(pre: str = 'PREFIX', max_len: int = 30) -> str:\n \"\"\"\n Prefix a uuid with the given prefix with a max length\n :param pre: prefix str\n :param max_len: max length of prefix + UUID\n :return: prefixed UUID\n \"\"\"\n uid_max = max_len - (len(pre) + 10)\n uid = str(uuid.uuid4()).replace('-', '')[:uid_max]\n return f'{pre}-{uid}'[:max_len]\n\n\ndef safe_cast(val: Any, to_type: Type, default: Any = None) -> Any:\n \"\"\"\n Cast the given value to the given type safely without an exception being thrown\n :param val: value to cast\n :param to_type: type to cast as\n :param default: default value if casting fails\n :return: casted value or given default/None\n \"\"\"\n try:\n return to_type(val)\n except (ValueError, TypeError):\n return default\n\n\ndef safe_json(msg: Union[dict, str], encoders: Dict[Type, Callable[[Any], Any]] = None, *args, **kwargs) -> Union[dict, str]: # pylint: disable=keyword-arg-before-vararg\n \"\"\"\n Load JSON data if given a str and able\n Dump JSON data otherwise, encoding using encoders & JSON Defaults\n :param msg: str JSON to attempt to load\n :param encoders: custom type encoding - Ex) -> {bytes: lambda b: b.decode('utf-8', 'backslashreplace')}\n :return: loaded JSON data or original str\n \"\"\"\n if isinstance(msg, str):\n try:\n return json.loads(msg, *args, **kwargs)\n except ValueError:\n return msg\n\n msg = default_encode(msg, encoders or {})\n return json.dumps(msg, *args, **kwargs)\n\n\ndef check_values(val: Any) -> Any:\n \"\"\"\n Check the given value and attempt to convert it to a bool, int, float\n :param val: value to check\n :return: converted/original value\n \"\"\"\n if isinstance(val, str):\n if val.lower() in (\"true\", \"false\"):\n return safe_cast(val, bool, val)\n\n if re.match(r\"^\\d+\\.\\d+$\", val):\n return safe_cast(val, float, val)\n\n if val.isdigit():\n return safe_cast(val, int, val)\n\n return val\n\n\ndef default_encode(itm: Any, encoders: Dict[Type, Callable[[Any], Any]] = None) -> Any:\n \"\"\"\n Default encode the given object to the predefined types\n :param 
itm: object to encode/decode,\n :param encoders: custom type encoding - Ex) -> {bytes: lambda b: b.decode('utf-8', 'backslashreplace')}\n :return: default system encoded object\n \"\"\"\n if encoders and isinstance(itm, tuple(encoders.keys())):\n return encoders[type(itm)](itm)\n\n if isinstance(itm, dict):\n return {default_encode(k): default_encode(v, encoders) for k, v in itm.items()}\n\n if isinstance(itm, (list, set, tuple)):\n return type(itm)(default_encode(i, encoders) for i in itm)\n\n if isinstance(itm, (int, float)):\n return itm\n\n return toStr(itm)\n\n\ndef default_decode(itm: Any, decoders: Dict[Type, Callable[[Any], Any]] = None) -> Any:\n \"\"\"\n Default decode the given object to the predefined types\n :param itm: object to encode/decode,\n :param decoders: custom type decoding - Ex) -> {bytes: lambda b: b.decode('utf-8', 'backslashreplace')}\n :return: default system decoded object\n \"\"\"\n if decoders and isinstance(itm, tuple(decoders.keys())):\n return decoders[type(itm)](itm)\n\n if isinstance(itm, dict):\n return {default_decode(k, decoders): default_decode(v, decoders) for k, v in itm.items()}\n\n if isinstance(itm, (list, set, tuple)):\n return type(itm)(default_decode(i, decoders) for i in itm)\n\n if isinstance(itm, (int, float)):\n return itm\n\n if isinstance(itm, str):\n return check_values(itm)\n\n return itm\n\n\ndef isBase64(sb: Union[bytes, str]) -> bool:\n try:\n if isinstance(sb, str):\n # If there's any unicode here, an exception will be thrown and the function will return false\n sb_bytes = bytes(sb, 'ascii')\n elif isinstance(sb, bytes):\n sb_bytes = sb\n else:\n raise ValueError(\"Argument must be string or bytes\")\n return base64.b64encode(base64.b64decode(sb_bytes)) == sb_bytes\n except (binascii.Error, ValueError):\n return False\n\n\ndef floatByte(num: Union[float, bytes]) -> Union[float, bytes]:\n if isinstance(num, float):\n return struct.pack(\"!f\", num)\n\n if isinstance(num, bytes) and len(num) == 4:\n return struct.unpack(\"!f\", num)[0]\n\n return num\n\n\ndef floatString(num: Union[float, str]) -> Union[float, str]:\n if isinstance(num, float):\n return f\"f{num}\"\n\n if isinstance(num, str) and num.startswith(\"f\") and num[1:].replace(\".\", \"\", 1).isdigit():\n return float(num[1:])\n\n return num\n\n\n# Utility Classes\n" }, { "alpha_fraction": 0.561475396156311, "alphanum_fraction": 0.5696721076965332, "avg_line_length": 26.22222137451172, "blob_id": "0b7c0297d72b739b6146aaa003602f0dafe8f7c0", "content_id": "59a6bdf3f8215ffb0d3a4f8f7d74d863cd2c6776", "detected_licenses": [ "Apache-2.0", 
"LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 411, "license_type": "permissive", "max_line_length": 89, "num_lines": 16, "path": "/orchestrator/core/orc_server/tracking/views/api.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from rest_framework import permissions\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\n\n\n@api_view(['GET'])\n@permission_classes((permissions.AllowAny,))\ndef api_root(request):\n \"\"\"\n Logging root\n \"\"\"\n rtn = dict(\n message=\"Hello, {}. You're at the logs api index.\".format(request.user.username),\n )\n\n return Response(rtn)\n" }, { "alpha_fraction": 0.6647173762321472, "alphanum_fraction": 0.6647173762321472, "avg_line_length": 23.428571701049805, "blob_id": "2bda0e460492ca8ca6310fb848a152b277529d62", "content_id": "f834011faf635642e783a4ed1709ae6cb1739de0", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 513, "license_type": "permissive", "max_line_length": 90, "num_lines": 21, "path": "/orchestrator/gui/server/gui_server/account/urls.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.urls import include, path\nfrom rest_framework import routers\n\nfrom rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token, verify_jwt_token\n\nfrom . import views\n\nrouter = routers.DefaultRouter()\nrouter.register(r'', views.UserViewSet)\n\nurlpatterns = [\n # JWT Tokens\n path('jwt/', include([\n path('', obtain_jwt_token),\n path('refresh/', refresh_jwt_token),\n path('verify/', verify_jwt_token),\n ])),\n\n # User Actions\n path('', include(router.urls)),\n]\n" }, { "alpha_fraction": 0.46277883648872375, "alphanum_fraction": 0.469614714384079, "avg_line_length": 30.520618438720703, "blob_id": "1e8c4ced058f9458520ecdefc98085a15d06090e", "content_id": "588ebb0e0717b184d37f584da4b904c6ee8bbb66", "detected_licenses": [ "MIT", "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 30574, "license_type": "permissive", "max_line_length": 145, "num_lines": 970, "path": "/orchestrator/gui/client/src/components/utils/jadn-editor/index.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport themes from 'react-json-editor-ajrm/themes'\n\nimport JSONInput from 'react-json-editor-ajrm'\nimport { identical, getType } from 'react-json-editor-ajrm/mitsuketa'\n\nimport err from 'react-json-editor-ajrm/err'\nimport { format } from 'react-json-editor-ajrm/locale'\nimport defaultLocale from 'react-json-editor-ajrm/locale/en'\n\nimport {\n // DomNode_Update\n quarkize,\n validToken,\n tokenFollowed,\n followedBySymbol,\n followsSymbol,\n typeFollowed,\n // JSON_Placeholder\n escape_character,\n determine_string,\n determine_value,\n stripQuotesFromKey,\n indent\n} from './utils'\n\nimport {\n updateArray\n} from '../'\n\nclass JADNInput extends JSONInput {\n DomNode_Update(obj) {\n const locale = this.props.locale || defaultLocale,\n 
containerNode = obj.cloneNode(true),\n hasChildren = containerNode.hasChildNodes(),\n children = containerNode.childNodes\n\n if (!hasChildren) {\n return '';\n }\n\n let buffer = {\n tokens_unknown: [],\n tokens_proto: [],\n tokens_split: [],\n tokens_fallback: [],\n tokens_normalize: [],\n tokens_merge: [],\n tokens_plainText: '',\n indented: '',\n json: '',\n jsObject: undefined,\n markup: ''\n }\n\n children.forEach(child => {\n switch (child.nodeName) {\n case 'SPAN':\n buffer.tokens_unknown.push({\n string: child.textContent,\n type: child.attributes.type.textContent\n });\n break;\n case 'DIV':\n buffer.tokens_unknown.push({\n string: child.textContent,\n type: 'unknown'\n });\n break;\n case 'BR':\n if (child.textContent === '') {\n buffer.tokens_unknown.push({\n string: '\\n',\n type: 'unknown'\n });\n }\n break;\n case '#text':\n buffer.tokens_unknown.push({\n string: child.wholeText,\n type: 'unknown'\n });\n break;\n case 'FONT' :\n buffer.tokens_unknown.push({\n string: child.textContent,\n type: 'unknown'\n });\n break;\n default :\n console.error('Unrecognized node:', {child})\n break;\n }\n })\n\n buffer.tokens_proto = buffer.tokens_unknown.map(token => quarkize(token.string, 'proto')).reduce((all, quarks) => all.concat(quarks))\n\n buffer.tokens_proto.forEach(token => {\n if (token.type.indexOf('proto') === -1 && !validToken(token.string, token.type)) {\n buffer.tokens_split = buffer.tokens_split.concat(quarkize(token.string,'split'));\n return;\n }\n buffer.tokens_split.push(token);\n })\n\n buffer.tokens_fallback = buffer.tokens_split.map(token => {\n let type = token.type,\n fallback = [];\n\n if (type.indexOf('-') > -1) {\n type = type.slice(type.indexOf('-') + 1);\n if (type!=='string') {\n fallback.push('string');\n }\n fallback.push('key', 'error');\n }\n\n return {\n string: token.string,\n length: token.string.length,\n type: type,\n fallback: fallback\n };\n })\n\n let buffer2 = {\n brackets: [],\n stringOpen: false,\n isValue: false\n };\n\n buffer.tokens_normalize = buffer.tokens_fallback.map((token, i) => {\n let normalToken = {\n type: token.type,\n string: token.string\n };\n\n switch (normalToken.type) {\n case 'symbol':\n case 'colon':\n if (buffer2.stringOpen) {\n normalToken.type = buffer2.isValue ? 'string' : 'key';\n break;\n }\n switch (normalToken.string) {\n case '[':\n case '{':\n buffer2.brackets.push(normalToken.string);\n buffer2.isValue = buffer2.brackets[buffer2.brackets.length - 1] === '[';\n break;\n case ']':\n case '}':\n buffer2.brackets.pop();\n buffer2.isValue = buffer2.brackets[buffer2.brackets.length - 1] === '[';\n break;\n case ',':\n if (tokenFollowed(buffer).type === 'colon') {\n break;\n }\n buffer2.isValue = buffer2.brackets[buffer2.brackets.length - 1] === '[';\n break;\n case ':':\n normalToken.type = 'colon';\n buffer2.isValue = true;\n break;\n }\n break;\n case 'delimiter':\n normalToken.type = buffer2.isValue ? 
'string' : 'key';\n if (!buffer2.stringOpen) {\n buffer2.stringOpen = normalToken.string;\n break;\n }\n if (i > 0) {\n const previousToken = buffer.tokens_fallback[i - 1],\n _string = previousToken.string,\n _type = previousToken.type,\n _char = _string.charAt(_string.length - 1);\n if (_type === 'string' && _char === '\\\\') {\n break;\n }\n }\n if (buffer2.stringOpen === normalToken.string) {\n buffer2.stringOpen = false;\n break;\n }\n break;\n case 'primitive':\n case 'string':\n if (['false', 'true', 'null', 'undefined'].indexOf(normalToken.string) > -1) {\n const lastIndex = buffer.tokens_normalize.length - 1;\n if (lastIndex >= 0) {\n if (buffer.tokens_normalize[lastIndex].type !== 'string') {\n normalToken.type = 'primitive';\n break;\n }\n normalToken.type = 'string';\n break;\n }\n normalToken.type = 'primitive';\n break;\n }\n if (normalToken.string === '\\n' && !buffer2.stringOpen) {\n normalToken.type = 'linebreak';\n break;\n }\n\n normalToken.type = buffer2.isValue ? 'string' : 'key';\n break;\n case 'space':\n case 'number':\n if (buffer2.stringOpen) {\n normalToken.type = buffer2.isValue ? 'string' : 'key';\n }\n break;\n default:\n break;\n }\n return normalToken;\n })\n\n for (var i = 0; i < buffer.tokens_normalize.length; i++) {\n const token = buffer.tokens_normalize[i];\n\n let mergedToken = {\n string: token.string,\n type: token.type,\n tokens: [i]\n };\n\n if (['symbol', 'colon'].indexOf(token.type) === -1 && i+1 < buffer.tokens_normalize.length) {\n let count = 0;\n\n for (var u = i+1; u < buffer.tokens_normalize.length; u++) {\n const nextToken = buffer.tokens_normalize[u];\n if (token.type !== nextToken.type) {\n break;\n }\n mergedToken.string += nextToken.string;\n mergedToken.tokens.push(u);\n count++;\n }\n i += count;\n }\n buffer.tokens_merge.push(mergedToken);\n }\n\n const quotes = '\\'\"',\n alphanumeric = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$';\n\n var error = false,\n line = buffer.tokens_merge.length > 0 ? 
1 : 0;\n\n buffer2 = {\n brackets: [],\n stringOpen: false,\n isValue: false\n };\n\n const setError = (tokenID, reason, offset=0) => {\n error = {\n token: tokenID,\n line: line,\n reason: reason\n };\n buffer.tokens_merge[tokenID + offset].type = 'error';\n }\n\n let bracketList = [];\n\n // Break apart??\n for (var i = 0; i < buffer.tokens_merge.length; i++) {\n if (error) break;\n let token = buffer.tokens_merge[i],\n string = token.string,\n type = token.type,\n found = false;\n\n switch (type) {\n case 'space':\n break;\n case 'linebreak':\n line++;\n break;\n case 'symbol':\n switch (string) {\n case '{':\n case '[':\n found = followsSymbol(buffer, i, ['}', ']']);\n if (found) {\n setError(i, format(locale.invalidToken.tokenSequence.prohibited, {\n firstToken: buffer.tokens_merge[found].string,\n secondToken: string\n }));\n break;\n }\n if (string === '[' && i > 0 && !followsSymbol(buffer, i,[':','[',','])) {\n setError(i,format(locale.invalidToken.tokenSequence.permitted, {\n firstToken: \"[\",\n secondToken: [\":\", \"[\", \",\"]\n }));\n break;\n }\n if (string === '{' && followsSymbol(buffer, i, ['{'])) {\n setError(i, format(locale.invalidToken.double, {\n token: \"{\"\n }));\n break;\n }\n buffer2.brackets.push(string);\n buffer2.isValue = buffer2.brackets[buffer2.brackets.length - 1] === '[';\n bracketList.push({\n i: i,\n line: line,\n string: string\n });\n break;\n case '}':\n case ']':\n if (string === '}') {\n if (buffer2.brackets[buffer2.brackets.length-1] !== '{'){\n setError(i, format(locale.brace.curly.missingOpen));\n break;\n }\n }\n if (string === '}') {\n if (followsSymbol(buffer, i, [','])) {\n setError(i, format(locale.invalidToken.tokenSequence.prohibited, {\n firstToken: \",\",\n secondToken: \"}\"\n }));\n break;\n }\n }\n if (string === ']') {\n if (buffer2.brackets[buffer2.brackets.length-1] !== '[') {\n setError(i, format(locale.brace.square.missingOpen));\n break;\n }\n }\n if (string === ']') {\n if (followsSymbol(buffer, i, [':'])) {\n setError(i, format(locale.invalidToken.tokenSequence.prohibited, {\n firstToken: \":\",\n secondToken: \"]\"\n }));\n break;\n }\n }\n buffer2.brackets.pop();\n buffer2.isValue = buffer2.brackets[buffer2.brackets.length - 1] === '[';\n bracketList.push({\n i: i,\n line: line,\n string: string\n });\n break;\n case ',':\n found = followsSymbol(buffer, i, ['{']);\n if (found) {\n if (followedBySymbol(buffer, i, ['}'])) {\n setError(i, format(locale.brace.curly.cannotWrap, {\n token: \",\"\n }));\n break;\n }\n setError(i, format(locale.invalidToken.tokenSequence.prohibited, {\n firstToken: \"{\",\n secondToken: \",\"\n }));\n break;\n }\n if (followedBySymbol(buffer, i, ['}', ',', ']'])) {\n setError(i, format(locale.noTrailingOrLeadingComma));\n break;\n }\n\n found = typeFollowed(buffer, i);\n switch (found) {\n case 'key':\n case 'colon':\n setError(i, format(locale.invalidToken.termSequence.prohibited, {\n firstTerm: found==='key' ? 
locale.types.key : locale.symbols.colon,\n secondTerm: locale.symbols.comma\n }));\n break;\n case 'symbol' :\n if (followsSymbol(buffer, i, ['{'])) {\n setError(i, format(locale.invalidToken.tokenSequence.prohibited, {\n firstToken: \"{\",\n secondToken: \",\"\n }));\n break;\n }\n break;\n }\n buffer2.isValue = buffer2.brackets[buffer2.brackets.length - 1] === '[';\n break;\n }\n buffer.json += string;\n break;\n case 'colon':\n found = followsSymbol(buffer, i, ['[']);\n if (found) {\n if (followedBySymbol(buffer, i, [']'])) {\n setError(i, format(locale.brace.square.cannotWrap, {\n token: \":\"\n }));\n break;\n }\n setError(i, format(locale.invalidToken.tokenSequence.prohibited, {\n firstToken: \"[\",\n secondToken: \":\"\n }));\n break;\n }\n if (typeFollowed(buffer, i) !== 'key') {\n setError(i, format(locale.invalidToken.termSequence.permitted, {\n firstTerm: locale.symbols.colon,\n secondTerm: locale.types.key\n }));\n break;\n }\n if (followedBySymbol(buffer, i, ['}', ']'])) {\n setError(i, format(locale.invalidToken.termSequence.permitted, {\n firstTerm: locale.symbols.colon,\n secondTerm: locale.types.value\n }));\n break;\n }\n buffer2.isValue = true;\n buffer.json += string;\n break;\n case 'key':\n case 'string':\n let firstChar = string.charAt(0),\n lastChar = string.charAt(string.length - 1),\n quote_primary = quotes.indexOf(firstChar);\n\n if (quotes.indexOf(firstChar) === -1 && quotes.indexOf(lastChar) !== -1) {\n setError(i,format(locale.string.missingOpen, {\n quote: firstChar\n }));\n break;\n }\n if (quotes.indexOf(lastChar) === -1 && quotes.indexOf(firstChar) !== -1) {\n setError(i, format(locale.string.missingClose, {\n quote: firstChar,\n }));\n break;\n }\n if (quotes.indexOf(firstChar) > -1 && firstChar !== lastChar) {\n setError(i, format(locale.string.missingClose, {\n quote: firstChar,\n }));\n break;\n }\n if ('string' === type && quotes.indexOf(firstChar) === -1 && quotes.indexOf(lastChar) === -1) {\n setError(i, format(locale.string.mustBeWrappedByQuotes));\n break;\n }\n if ('key' === type && followedBySymbol(buffer, i, ['}', ']'])) {\n setError(i, format(locale.invalidToken.termSequence.permitted, {\n firstTerm: locale.types.key,\n secondTerm: locale.symbols.colon\n }));\n }\n if (quotes.indexOf(firstChar) === -1 && quotes.indexOf(lastChar) === -1) {\n for (var h = 0; h < string.length; h++) {\n if (error) { break }\n const c = string.charAt(h);\n if (alphanumeric.indexOf(c) === -1) {\n setError(i, format(locale.string.nonAlphanumeric, {\n token: c,\n }));\n break;\n }\n }\n }\n string = firstChar === \"'\" ? '\"' + string.slice(1,-1) + '\"' : (firstChar !== '\"' ? 
'\"' + string + '\"' : string);\n if ('key' === type) {\n if ('key' === typeFollowed(buffer, i)) {\n if (i > 0 && !isNaN(buffer.tokens_merge[i-1])) {\n buffer.tokens_merge[i-1] += buffer.tokens_merge[i];\n setError(i, format(locale.key.numberAndLetterMissingQuotes));\n break;\n }\n setError(i, format(locale.key.spaceMissingQuotes));\n break;\n }\n if (!followsSymbol(buffer, i, ['{', ','])) {\n setError(i, format(locale.invalidToken.tokenSequence.permitted, {\n firstToken: type,\n secondToken: [\"{\", \",\"]\n }));\n break;\n }\n if (buffer2.isValue) {\n setError(i, format(locale.string.unexpectedKey));\n break;\n }\n }\n if ('string' === type) {\n if (!followsSymbol(buffer, i, ['[', ':', ','])) {\n setError(i, format(locale.invalidToken.tokenSequence.permitted, {\n firstToken: type,\n secondToken: [\"[\", \":\", \",\"]\n }));\n break;\n }\n if (!buffer2.isValue) {\n setError(i, format(locale.key.unexpectedString));\n break;\n }\n }\n buffer.json += string;\n break;\n case 'number':\n case 'primitive':\n if (followsSymbol(buffer, i, ['{'])) {\n buffer.tokens_merge[i].type = 'key';\n type = buffer.tokens_merge[i].type;\n string = '\"' + string + '\"';\n } else if (typeFollowed(buffer, i) === 'key') {\n buffer.tokens_merge[i].type = 'key';\n type = buffer.tokens_merge[i].type;\n } else if (!followsSymbol(buffer, i, ['[', ':', ','])) {\n setError(i, format(locale.invalidToken.tokenSequence.permitted, {\n firstToken: type,\n secondToken: [\"[\", \":\", \",\"]\n }));\n break;\n }\n if (type !== 'key' && !buffer2.isValue) {\n buffer.tokens_merge[i].type = 'key';\n type = buffer.tokens_merge[i].type;\n string = '\"' + string + '\"';\n }\n if (type === 'primitive' && string === 'undefined') {\n setError(i,format(locale.invalidToken.useInstead, {\n badToken: \"undefined\",\n goodToken: \"null\"\n }));\n }\n buffer.json += string;\n break;\n }\n }\n\n let noEscapedSingleQuote = '';\n\n for (var i = 0; i < buffer.json.length; i++) {\n let current = buffer.json.charAt(i),\n next = '';\n if (i+1 < buffer.json.length) {\n next = buffer.json.charAt(i+1);\n if (current === '\\\\' && next === \"'\") {\n noEscapedSingleQuote += next;\n i++;\n continue;\n }\n }\n noEscapedSingleQuote += current;\n }\n buffer.json = noEscapedSingleQuote;\n\n let _line = 1,\n _depth = 0;\n\n const newIndent = () => Array(_depth * 2).fill('&nbsp;').join(''),\n newLineBreak = (byPass=false) => {\n _line++;\n return (_depth > 0 || byPass) ? 
'<br>' : '';\n },\n newLineBreakAndIndent = (byPass=false) => newLineBreak(byPass) + newIndent();\n\n if (error) {\n let _line_fallback = 1;\n const countCarrigeReturn = (str) => (str.match(/[\\n\\r]/g) || []).length;\n _line = 1;\n\n for (var i = 0; i < buffer.tokens_merge.length; i++) {\n const token = buffer.tokens_merge[i],\n type = token.type,\n string = token.string;\n\n if (type === 'linebreak') { _line++ }\n buffer.markup += this.newSpan(i,token,_depth);\n _line_fallback += countCarrigeReturn(string);\n }\n\n _line++;\n _line_fallback++;\n if (_line < _line_fallback) {\n _line = _line_fallback;\n }\n } else {\n const maxIterations = Math.ceil(bracketList.length / 2);\n let round = 0,\n delta = false;\n\n const removePair = (index) => {\n bracketList.splice(index + 1,1);\n bracketList.splice(index,1);\n if (!delta) { delta = true }\n }\n\n while (bracketList.length > 0) {\n delta = false;\n for (var tokenCount = 0; tokenCount < bracketList.length - 1; tokenCount++) {\n const pair = bracketList[tokenCount].string + bracketList[tokenCount+1].string;\n if (['[]', '{}'].indexOf(pair) > -1) {\n removePair(tokenCount);\n }\n }\n round++;\n if (!delta) { break }\n if (round >= maxIterations) { break }\n }\n\n if (bracketList.length > 0) {\n const _tokenString = bracketList[0].string,\n _tokenPosition = bracketList[0].i,\n _closingBracketType = _tokenString === '[' ? ']' : '}';\n line = bracketList[0].line;\n setError(_tokenPosition, format(locale.brace[_closingBracketType === ']' ? 'square' : 'curly'].missingClose));\n }\n\n if ([undefined, ''].indexOf(buffer.json) === -1) {\n try {\n buffer.jsObject = JSON.parse(buffer.json);\n } catch (err) {\n const errorMessage = err.message,\n subsMark = errorMessage.indexOf('position');\n\n if (subsMark === -1) {\n throw new Error('Error parsing failed');\n }\n\n const errPositionStr = errorMessage.substring(subsMark + 9,errorMessage.length),\n errPosition = parseInt(errPositionStr);\n let charTotal = 0,\n tokenIndex = 0,\n token = false,\n exitWhile = false;\n\n while (charTotal < errPosition && !exitWhile) {\n token = buffer.tokens_merge[tokenIndex];\n if ('linebreak' === token.type) _line++;\n if (['space', 'linebreak'].indexOf(token.type) === -1) {\n charTotal += token.string.length;\n }\n if (charTotal >= errPosition) break;\n tokenIndex++;\n if (!buffer.tokens_merge[tokenIndex+1]) {\n exitWhile = true;\n }\n }\n\n line = _line;\n let backslashCount = 0;\n\n for (let i = 0; i < token.string.length; i++) {\n const char = token.string.charAt(i);\n if (char==='\\\\') {\n backslashCount = backslashCount > 0 ? backslashCount + 1 : 1;\n } else {\n if ((backslashCount % 2 !== 0 || backslashCount === 0) && '\\'\"bfnrt'.indexOf(char) === -1) {\n setError(tokenIndex,format(locale.invalidToken.unexpected, {\n token: '\\\\'\n }));\n }\n backslashCount = 0;\n }\n }\n if (!error) {\n setError(tokenIndex,format(locale.invalidToken.unexpected, {\n token: token.string\n }));\n }\n }\n }\n\n for (var i = 0; i < buffer.tokens_merge.length; i++) {\n const token = buffer.tokens_merge[i],\n string = token.string,\n type = token.type;\n\n switch (type) {\n case 'space':\n case 'linebreak':\n break;\n case 'string':\n case 'number':\n case 'primitive':\n case 'error':\n buffer.markup += ((followsSymbol(buffer, i,[',','[']) ? 
newLineBreakAndIndent() : '') + this.newSpan(i,token,_depth));\n break;\n case 'key':\n buffer.markup += (newLineBreakAndIndent() + this.newSpan(i,token,_depth));\n break;\n case 'colon':\n buffer.markup += (this.newSpan(i,token,_depth) + '&nbsp;');\n break;\n case 'symbol':\n switch (string) {\n case '[':\n case '{':\n buffer.markup += ((!followsSymbol(buffer, i,[':']) ? newLineBreakAndIndent() : '') + this.newSpan(i,token,_depth)); _depth++;\n break;\n case ']':\n case '}':\n _depth--;\n const islastToken = i === buffer.tokens_merge.length - 1,\n _adjustment = i > 0 ? (['[','{'].indexOf(buffer.tokens_merge[i-1].string) > -1 ? '' : newLineBreakAndIndent(islastToken)) : '';\n buffer.markup += _adjustment + this.newSpan(i,token,_depth);\n break;\n case ',':\n buffer.markup += this.newSpan(i,token,_depth);\n break;\n }\n break;\n }\n }\n }\n\n for (var i = 0; i < buffer.tokens_merge.length; i++) {\n let token = buffer.tokens_merge[i];\n buffer.indented += token.string;\n if (['space', 'linebreak'].indexOf(token.type) === -1) {\n buffer.tokens_plainText += token.string;\n }\n }\n\n if (error) {\n const isFunction = (functionToCheck) => functionToCheck && {}.toString.call(functionToCheck) === '[object Function]';\n if ('modifyErrorText' in this.props && isFunction(this.props.modifyErrorText)) {\n error.reason = this.props.modifyErrorText(error.reason);\n }\n }\n\n return {\n tokens: buffer.tokens_merge,\n noSpaces: buffer.tokens_plainText,\n indented: buffer.indented,\n json: buffer.json,\n jsObject: buffer.jsObject,\n markup: buffer.markup,\n lines: _line,\n error: error\n };\n }\n\n JSON_Placeholder(obj) {\n const locale = this.props.locale || defaultLocale;\n let buffer = {\n inputText: JSON.stringify(obj),\n position: 0,\n currentChar: '',\n tokenPrimary: '',\n tokenSecondary: '',\n brackets: [],\n isValue: false,\n stringOpen: false,\n stringStart: 0,\n tokens: []\n };\n\n buffer.inputText.split('').forEach((char, i) => {\n buffer.position = i;\n buffer.currentChar = char;\n if (!determine_value(buffer) && !determine_string(buffer) && !escape_character(buffer)) {\n if (!buffer.stringOpen) {\n buffer.tokenSecondary += buffer.currentChar;\n }\n }\n })\n\n let buffer2 = {\n brackets: [],\n isValue: false,\n tokens: []\n };\n\n buffer2.tokens = buffer.tokens.map(token => {\n let rtn = {\n type: '',\n string: '',\n value: '',\n depth: 0\n }\n\n switch(token) {\n case ',':\n updateArray(rtn, {\n type: 'symbol',\n string: token,\n value: token\n })\n buffer2.isValue = (buffer2.brackets[buffer2.brackets.length-1] === '[');\n break;\n case ':':\n updateArray(rtn, {\n type: 'symbol',\n string: token,\n value: token\n })\n buffer2.isValue = true;\n break;\n case '{':\n case '[':\n updateArray(rtn, {\n type: 'symbol',\n string: token,\n value: token\n })\n buffer2.brackets.push(token);\n buffer2.isValue = (buffer2.brackets[buffer2.brackets.length-1] === '[');\n break;\n case '}':\n case ']':\n updateArray(rtn, {\n type: 'symbol',\n string: token,\n value: token\n })\n buffer2.brackets.pop();\n buffer2.isValue = (buffer2.brackets[buffer2.brackets.length-1] === '[');\n break;\n case 'undefined':\n updateArray(rtn, {\n type: 'primitive',\n string: token,\n value: undefined\n })\n break;\n case 'null':\n updateArray(rtn, {\n type: 'primitive',\n string: token,\n value: null\n })\n break;\n case 'false':\n updateArray(rtn, {\n type: 'primitive',\n string: token,\n value: false\n })\n break;\n case 'true':\n updateArray(rtn, {\n type: 'primitive',\n string: token,\n value: true\n })\n break;\n default:\n 
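// default: a quoted token becomes a key or string value, a purely numeric token becomes a number,\n // and any remaining bare word outside a value position is treated as a key\n 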
if ('\\'\"'.indexOf(token.charAt(0)) > -1) {\n rtn.type = buffer2.isValue ? 'string' : 'key';\n rtn.string = rtn.type === 'key' ? stripQuotesFromKey(token) : rtn.string;\n\n if (rtn.type === 'string') {\n const chars = token.slice(1, -1).split('');\n rtn.string = \"'\" + chars.map((c, ii) => ('\\'\\\"'.indexOf(c) > -1 ? '\\\\' : '') + c).join('') + \"'\";\n }\n rtn.value = rtn.string;\n break;\n }\n if (!isNaN(token)) {\n updateArray(rtn, {\n type: 'number',\n string: token,\n value: Number(token)\n })\n break;\n }\n if (token.length > 0 && !buffer2.isValue) {\n updateArray(rtn, {\n type: 'key',\n string: token,\n value: token.indexOf(' ') > -1 ? \"'\" + token + \"'\" : token\n })\n break;\n }\n }\n rtn.depth = buffer2.brackets.length\n return rtn\n });\n\n const clean = buffer2.tokens.map(t => t.string).join('')\n\n const indentation = buffer2.tokens.map((token, i) => {\n switch (token.string) {\n case '[':\n case '{':\n const nextToken = i < (buffer2.tokens.length - 1) - 1 ? buffer2.tokens[i+1] : '';\n return token.string + ('}]'.indexOf(nextToken.string) === -1 ? indent(token.depth) : '')\n case ']':\n case '}':\n const prevToken = i > 0 ? buffer2.tokens[i-1] : '';\n return ('[{'.indexOf(prevToken.string) === -1 ? indent(token.depth) : '') + token.string;\n case ':':\n return token.string + ' ';\n case ',':\n return token.string + indent(token.depth);\n default:\n return token.string;\n }\n }).join('')\n\n let lines = 1;\n const indentII = (number) => {\n if (number > 0 ) lines++;\n return (number > 0 ? '<br>' : '') + Array(number * 2).fill('&nbsp;').join('');\n }\n\n const lastIndex = buffer2.tokens.length - 1;\n const markup = buffer2.tokens.map((token, i) => {\n let span = this.newSpan(i, token, token.depth);\n\n switch (token.string) {\n case '{':\n case '[':\n const nextToken = i < (buffer2.tokens.length - 1) - 1 ? buffer2.tokens[i+1] : '';\n return span + ('}]'.indexOf(nextToken.string) === -1 ? indentII(token.depth) : '');\n case '}':\n case ']' :\n const prevToken = i > 0 ? buffer2.tokens[i-1] : '';\n return ('[{'.indexOf(prevToken.string) === -1 ? indentII(token.depth) + (lastIndex === i ? '<br>' : '') : '') + span;\n case ':':\n return span + ' ';\n case ',':\n return span + indentII(token.depth);\n default:\n return span;\n }\n }).join('')\n\n return {\n tokens: buffer2.tokens,\n noSpaces: clean,\n indented: indentation,\n json: JSON.stringify(obj),\n jsObject: obj,\n markup: markup,\n lines: lines + 2\n };\n }\n\n tokenize (obj) {\n let objType = getType(obj)\n\n if (objType !== 'object') {\n return console.error('tokenize() expects object type properties only. 
Got \\'' + objType + '\\' type instead.');\n }\n\n // DOM NODE || ONBLUR OR UPDATE\n if('nodeType' in obj){\n return this.DomNode_Update(obj)\n }\n\n // JS OBJECTS || PLACEHOLDER\n if (!('nodeType' in obj)) {\n return this.JSON_Placeholder(obj)\n }\n }\n}\n\nexport default JADNInput;" }, { "alpha_fraction": 0.5045542120933533, "alphanum_fraction": 0.509798526763916, "avg_line_length": 26.446969985961914, "blob_id": "9300b58ff9afd74c9ef5d6a765f33d05b3a91841", "content_id": "336c5c720a2437ba29f6d59757e96ebe5144e87d", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3623, "license_type": "permissive", "max_line_length": 114, "num_lines": 132, "path": "/orchestrator/gui/client/src/components/command/pages/info.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\nimport JSONPretty from 'react-json-pretty'\n\nimport {\n format,\n parseISO\n} from 'date-fns'\n\nimport {\n RemotePageTable\n} from '../../utils'\n\nimport * as CommandActions from '../../../actions/command'\n\nconst str_fmt = require('string-format')\n\nclass CommandInfo extends Component {\n constructor(props, context) {\n super(props, context)\n\n this.tableColumns = [\n {\n text: 'Command ID',\n dataField: 'command_id',\n sort: true\n },{\n text: 'Received',\n dataField: 'received_on',\n sort: true\n },{\n text: 'Status',\n dataField: 'status',\n sort: true\n },{\n text: 'Command',\n dataField: 'command',\n formatter: (cell, row) => <span>{ cell.action } - { Object.keys(cell.target)[0] || '' }</span>\n }\n ]\n\n if (this.props.command) {\n if (this.props.command.command_id !== this.props.command_id) {\n this.props.getCommand(this.props.command_id)\n }\n }\n }\n\n render() {\n let cmd = this.props.command\n let received = parseISO(cmd.received_on)\n let maxHeight = 500\n\n try {\n received = format(received, \"EEEE, MMMM do yyyy, h:mm:ss a zzzz\")\n } catch (e) {\n received = \"...\"\n }\n\n return (\n <div className=\"col-md-10 mx-auto jumbotron\">\n <h2>Command Info</h2>\n\n <p><strong>Command ID:</strong> { cmd.command_id }</p>\n\n <p><strong>Received:</strong> { received }</p>\n\n <div>\n <p><strong>Actuators:</strong></p>\n <ul className=\"list-group\">\n { (cmd.actuators || []).map((act, i) => <li key={ i } className=\"list-group-item\">{ act.name }</li>) }\n </ul>\n </div>\n\n <div>\n <p className=\"m-0\"><strong>Command:</strong></p>\n <div className='position-relative' style={{ maxHeight: maxHeight+'px' }}>\n <JSONPretty\n id='command'\n className='scroll-xl border'\n style={{ minHeight: 2.5+'em' }}\n json={ cmd.command }\n />\n </div>\n </div>\n\n <div>\n <p className=\"m-0\"><strong>Responses:</strong></p>\n\n <div className=\"p-1 border border-primary scroll\" style={{ maxHeight: maxHeight+'px' }}>\n {\n (cmd.responses || []).map((rsp, i) => {\n return (\n <div key={ i }>\n <p className=\"m-0\"><strong>{ rsp.actuator || 'Error' }:</strong></p>\n <div className='position-relative mb-2'>\n <JSONPretty\n id={ 'response-' + i }\n className='border'\n style={{ minHeight: 2.5+'em' }}\n json={ rsp.response }\n />\n </div>\n </div>\n )\n })\n }\n </div>\n </div>\n </div>\n )\n }\n}\n\nconst mapStateToProps = (state, props) => {\n let cmd = state.Command.commands.filter(c=> c.command_id === props.command_id)\n return {\n 
siteTitle: state.Util.site_title,\n orchestrator: {\n name: state.Util.name || 'N/A'\n },\n admin: state.Auth.access.admin,\n command: cmd.length == 1 ? cmd[0] : {}\n }\n}\n\nconst mapDispatchToProps= (dispatch) => ({\n getCommands: (page, sizePerPage, sort) => dispatch(CommandActions.getCommands(page, sizePerPage, sort)),\n getCommand: (cmd) => dispatch(CommandActions.getCommand(cmd))\n})\n\nexport default connect(mapStateToProps, mapDispatchToProps)(CommandInfo)\n" }, { "alpha_fraction": 0.5770202279090881, "alphanum_fraction": 0.5782828330993652, "avg_line_length": 32.70212936401367, "blob_id": "4c5084dcbbaddf326f309057080257dcd9ec2f76", "content_id": "a739480c1aab8e0ae91430e2c3fc001c6703273f", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3168, "license_type": "permissive", "max_line_length": 175, "num_lines": 94, "path": "/orchestrator/gui/client/src/components/utils/theme-switcher/download_themes.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env node\n\nconst path = require(\"path\");\nconst fs = require(\"fs-extra\");\nconst NamedRegExp = require(\"named-regexp-groups\");\nconst download = require(\"download-file\");\nconst request = require(\"sync-request\");\nconst csso = require(\"csso\");\n\nconst ROOT_DIR = __dirname\nconst CHECK_DIRS = [\"themes\", \"assets\", \"assets/fonts\"]\n\nconst THEME_API = \"https://bootswatch.com/api/4.json\"\nconst THEME_FONT_DIR = \"/assets/\"\n\nconst CSS_URL_IMPORT = new NamedRegExp(/^@import url\\([\\\"\\'](:<url>.*?)[\\\"\\']\\);\\s*?$/);\nconst FILE_URL_IMPORT = new NamedRegExp(/\\s*?src:( local\\(.*?\\),)? 
local\\([\\'\\\"](:<name>.*?)[\\'\\\"]\\), url\\([\\'\\\"]?(:<url>.*?)[\\'\\\"]?\\) format\\([\\'\\\"](:<format>.*?)[\\'\\\"]\\);/);\nconst URL_REPLACE = new NamedRegExp(/url\\([\\\"\\\"]?(:<url>.*?)[\\\"\\\"]?\\)/)\n\nfor (i in CHECK_DIRS) {\n let dir = path.join(ROOT_DIR, CHECK_DIRS[i])\n if (!fs.pathExistsSync(dir)) {\n fs.mkdirSync(dir);\n }\n}\n\nlet themes = request(\"GET\", THEME_API);\nthemes = JSON.parse(themes.getBody(\"utf8\"));\ntheme_names = []\n\nfor (let theme of themes[\"themes\"]) {\n console.log(\"Downloading Theme: \" + theme[\"name\"]);\n let theme_name = theme[\"name\"].toLowerCase();\n theme_names.push(theme_name)\n\n let css = request(\"GET\", theme[\"css\"]).getBody(\"utf8\"),\n pre_css_lines = [],\n post_css_lines = []\n\n for (let line of css.split(/\\n\\r?/gm)) {\n if (line.startsWith(\"@import url(\")) {\n let css_import_url = line.replace(CSS_URL_IMPORT, \"$+{url}\");\n css_import = request(\"GET\", css_import_url).getBody(\"utf8\");\n\n pre_css_lines.push(\"/* \" + line + \" */\")\n pre_css_lines = pre_css_lines.concat(css_import.split(/\\n\\r?/g))\n } else {\n pre_css_lines.push(line)\n }\n }\n\n // set imports to local & download files\n for (let line of pre_css_lines) {\n if (line.match(/\\s*?src:.*url\\([\\\"\\']?https?:\\/\\/.*/) && !line.startsWith('/*')) {\n let src = FILE_URL_IMPORT.exec(line)[\"groups\"]\n let ext = path.extname(src[\"url\"])\n let fileName = \"fonts/\" + src[\"name\"] + ext\n\n if (!fs.existsSync(path.join(ROOT_DIR, THEME_FONT_DIR, fileName))) {\n let opts = {\n directory: path.join(ROOT_DIR, THEME_FONT_DIR, \"fonts\"),\n filename: src[\"name\"] + ext\n }\n download(src[\"url\"], opts, (err) => {\n if (err) throw err\n console.log(\"Downloaded file: \" + opts[\"filename\"])\n });\n }\n line = line.replace(URL_REPLACE, \"url('\" + THEME_FONT_DIR + fileName + \"')\")\n }\n\n line = line.replace(/\\\\[^\\\\]/g, \"\\\\\\\\\")\n line = line.replace(/^\\s+\\*/, \"*\")\n line = line.replace(/^\\s+/, \"\\t\")\n post_css_lines.push(line)\n }\n\n let theme_css = fs.createWriteStream(path.join(ROOT_DIR, \"assets\", \"css\", theme_name + \".css\"), {flags: \"w\"});\n\n styles = csso.minify(post_css_lines.join(\"\"), {\n comments: false,\n restructure: true,\n sourceMap: false\n }).css\n\n theme_css.write(styles)\n theme_css.end()\n}\n\n// make theme index file\nlet theme_index_file = fs.createWriteStream(path.join(ROOT_DIR, \"themes.js\"), {flags: \"w\"});\ntheme_index_file.write(\"let validThemes = [\\n\\t'\"+ theme_names.join(\"',\\n\\t '\") +\"'\\n]\\n\")\ntheme_index_file.write(\"export default validThemes\")\ntheme_index_file.end()\n" }, { "alpha_fraction": 0.6548140048980713, "alphanum_fraction": 0.6668490171432495, "avg_line_length": 20.02298927307129, "blob_id": "d1c7cbea8eb90ede516c9efc184aad62b2096922", "content_id": "e1e6aa64992c6b6bcdb7b1485fb34b897b3e3fda", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1828, "license_type": "permissive", "max_line_length": 204, "num_lines": 87, "path": "/docs/Serializations.md", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "## Serializations\n- Currently implemented:\n\t- CBOR\n\t- JSON\n\t- XML\n\t- YAML\n\n- Adding Additional\n\n##### Note: Python is the default language used within the OIF, all python modules can be found on [PyPi](https://pypi.org/)\n1. 
Open the `modules/utils/sb_utils/message.py` file\n2. Add the serialization to the serializations dictionary\n\t- Note: The key should be lowercase and not begin with a number or special character for all serializations added\n\t- Simple Serializations, single function - BSON\n\t\n\t```python\n\timport bson\n\t...\n\tserializations = dict(\n\t\tencode=dict(\n\t\t\tbson=bson.dumps\n\t ),\n\t\tdecode=dict(\n\t\t\tbson=bson.loads\n\t \t)\n\t)\n\t```\n\t\n\t- Wrapped Serializations, multiple functions - CBOR\n\t\n\t```python\n\timport base64\n\timport cbor2\n\t...\n\tserializations = dict(\n\t\tencode=dict(\n\t\t\tcbor=lambda x: base64.b64encode(cbor2.dumps(x)).decode('utf-8'),\n\t ),\n\t\tdecode=dict(\n\t\t\tcbor=lambda x: base64.b64decode(cbor2.loads(x)),\n\t \t)\n\t)\n\t```\n\n3. Add the non standard packages used for the encoding to the `modules/utils/requirements.txt`\n\t- For BSON, bson\n\t- For CBOR, cbor2\n\n\t```text\n\t...\n\tbson\n\tcbor2\n\t...\n\t```\n\n4. Open the `orchestrator/core/orc_server/data/fixtures/orchestrator.json` file\n5. Add an entry for the new serialization to the file, incrementing the pk field\n\t- Note: The name field can be any combination of uppercase or lowercase with numbers and special characters, it however __must match__ the serialization key, from above, when all characters are lowercase\n\t- BSON\n\t\t\n\t```json\n\t...\n\t{\n \t\"model\": \"orchestrator.serialization\",\n \t\t\"pk\": X,\n\t\t\"fields\": {\n\t\t\t\"name\": \"BSON\"\n \t\t}\n\t},\n\t...\n\t```\n\t\t\t\n- CBOR\n\n\t```json\n\t...\n\t{\n \t\t\"model\": \"orchestrator.serialization\",\n \t\t\t\"pk\": X,\n\t\t\"fields\": {\n\t\t\t\"name\": \"CBOR\"\n \t\t}\n\t},\n\t...\n\t``` \n\t\n6. Rerun the `configure.py` script to add the additional serializations" }, { "alpha_fraction": 0.6861313581466675, "alphanum_fraction": 0.6861313581466675, "avg_line_length": 26.399999618530273, "blob_id": "bfcda324e2146841a33a074a649f4091601b08eb", "content_id": "2ce129c779c49684c53ef736bb86780e2adae301", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 274, "license_type": "permissive", "max_line_length": 71, "num_lines": 10, "path": "/orchestrator/gui/server/gui_server/tracking/urls/gui.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.urls import include, path\n\nfrom .. 
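# Editorial aside (an illustrative sketch, not part of the original record):
# following the Serializations guide above, a MessagePack entry could be
# registered the same way as BSON or CBOR. The `msgpack` PyPi package and the
# surrounding dict are assumptions drawn from the guide, not from the repo.
import msgpack

serializations = dict(
    encode=dict(
        msgpack=lambda x: msgpack.packb(x, use_bin_type=True),
    ),
    decode=dict(
        msgpack=lambda x: msgpack.unpackb(x, raw=False),
    )
)

# As with the other formats, `msgpack` would also be added to
# modules/utils/requirements.txt, and a fixture entry with "name": "MsgPack"
# (which lowercases to the dict key above) added to orchestrator.json.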
import views\n\n\nurlpatterns = [\n path('', views.gui_root, name='tracking.gui_root'),\n path('events', views.gui_events, name='tracking.gui_events'),\n path('requests', views.gui_requests, name='tracking.gui_requests'),\n]\n" }, { "alpha_fraction": 0.49556049704551697, "alphanum_fraction": 0.5110988020896912, "avg_line_length": 27.603174209594727, "blob_id": "667171b83b88d3555736b5d2f4ab6f8dc232a38d", "content_id": "c0ef2f66566209e3f0815719fe2cd114cec55340", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1802, "license_type": "permissive", "max_line_length": 49, "num_lines": 63, "path": "/orchestrator/core/orc_server/conformance/tests/general_tests.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "\"\"\"\nOpenC2 Generic Conformance\n\"\"\"\nimport random\n\nfrom test_setup import SetupTestCase\n\n\nclass General_UnitTests(SetupTestCase):\n \"\"\"\n General OpenC2 Conformance Tests\n \"\"\"\n profile = \"General\"\n\n def test_headers(self):\n \"\"\"\n Test of OpenC2 specific headers\n \"\"\"\n print(\"Test `content_type` header...\")\n if random.randint(0, 2) == 1:\n self.fail(\"No `content_type` header\")\n\n print(\"Test `request_id` header...\")\n if random.randint(0, 2) == 1:\n self.fail(\"No `request_id` header\")\n\n with self.subTest(header='msg_type'):\n print(\"Test `msg_type` header...\")\n if random.randint(0, 1) == 1:\n self.fail(\"No `msg_type` header\")\n\n with self.subTest(header='status'):\n print(\"Test `status` header...\")\n if random.randint(0, 1) == 1:\n self.fail(\"No `status` header\")\n\n with self.subTest(header='created'):\n print(\"Test `created` header...\")\n if random.randint(0, 1) == 1:\n self.fail(\"No `created` header\")\n\n with self.subTest(header='from'):\n print(\"Test `from` header...\")\n if random.randint(0, 1) == 1:\n self.fail(\"No `from` header\")\n\n with self.subTest(header='to'):\n print(\"Test `to` header...\")\n if random.randint(0, 1) == 1:\n self.fail(\"No `to` header\")\n\n '''\n with self.subTest(header='content'):\n print(\"Test `content` header...\")\n if random.randint(0, 1) == 1:\n self.fail(\"No `content` header\")\n '''\n\n def test_query_features(self):\n \"\"\"\n Test of basic OpenC2 conformance\n \"\"\"\n print(\"Test Query Features...\")\n" }, { "alpha_fraction": 0.7281830906867981, "alphanum_fraction": 0.7381974458694458, "avg_line_length": 26.920000076293945, "blob_id": "78bb13a10e4a6c0699b63be06b0331e5d1e1ee5d", "content_id": "5d59ac34172a4133de2cf421a26d1fa473823e94", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 699, "license_type": "permissive", "max_line_length": 78, "num_lines": 25, "path": "/orchestrator/transport/mqtt/Dockerfile", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "FROM g2inc/oif-python\nRUN apk upgrade --update && apk add --no-cache dos2unix && rm /var/cache/apk/*\n\nMAINTAINER Screaming_Bunny\n\nLABEL name=\"MQTT Transport Module\" \\\nvendor=\"OpenC2\" \\\nlicense=\"BSD\" \\\nversion=\"2.0\" \\\ndescription=\"This is the Transport Module container\"\n\nADD requirements.txt /tmp/requirements.txt\nADD MQTT/ /opt/transport/MQTT\n#ADD certs 
/opt/transport/MQTT/certs\nADD docker_dev_start.sh /opt/transport/MQTT/dev_start.sh\n\nRUN pip3 install -r /tmp/requirements.txt && \\\n chmod +x /opt/transport/MQTT/dev_start.sh && \\\n dos2unix /opt/transport/MQTT/dev_start.sh\n\n# Set working directory\nWORKDIR /opt/transport/MQTT\n\n# Run command when container launches\nCMD [\"./dev_start.sh\"]\n\n" }, { "alpha_fraction": 0.5458167195320129, "alphanum_fraction": 0.5577689409255981, "avg_line_length": 24.100000381469727, "blob_id": "41af4ad63a26a020e57a0a47abcffca4e79f2ca4", "content_id": "b479af88e77cdffe13c5509252fb414342ce7bf9", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1757, "license_type": "permissive", "max_line_length": 106, "num_lines": 70, "path": "/orchestrator/gui/client/src/components/command/pages/generate/lib/jadn_field/basicField.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\n\nimport {\n Button,\n Form,\n FormGroup,\n FormText,\n Input,\n Label,\n} from 'reactstrap'\n\nimport {\n isOptional_jadn,\n Field\n} from './'\n\nimport * as GenActions from '../../../../../../actions/generate'\n\n\nclass BasicField extends Component {\n inputOpts(type) {\n switch (type) {\n case 'duration':\n return {\n type: 'number',\n placeholder: 0,\n }\n case 'date-time':\n return {\n type: 'datetime',\n placeholder: '2000-01-01T00:00:00-00:00',\n }\n default:\n return {\n type: 'text'\n }\n }\n }\n\n render() {\n let name = this.props.name || this.props.def.name\n let msgName = (this.props.parent ? [this.props.parent, name] : [name]).join(\".\")\n\n if (this.props.def.name >= 0) { // name is type if not field\n return <Field def={ this.props.def } parent={ msgName } optChange={ this.props.optChange } />\n } else {\n let opts = this.inputOpts(this.props.def.type)\n\n return (\n <FormGroup tag=\"fieldset\" className=\"border border-dark p-2\">\n <legend>{ (isOptional_jadn(this.props.def) ? '' : '*') + name }</legend>\n <Input\n type={ opts.type || 'text' }\n placeholder={ opts.placeholder || '' }\n name={ name }\n onChange={ e => this.props.optChange(msgName, e.target.value, this.props.arr ? true : false) }\n />\n { this.props.def.desc ? 
<FormText color=\"muted\">{ this.props.def.desc }</FormText> : '' }\n </FormGroup>\n )\n }\n }\n}\n\nconst mapStateToProps = (state) => ({\n schema: state.Generate.selectedSchema\n})\n\nexport default connect(mapStateToProps)(BasicField)\n" }, { "alpha_fraction": 0.5774742364883423, "alphanum_fraction": 0.5783699154853821, "avg_line_length": 31.12949562072754, "blob_id": "4beeb55f20e4292d09a53de5e3b0bdc6b7cb0615", "content_id": "08a09b9a5d2875f670c888d70014582d7ace0f80", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4466, "license_type": "permissive", "max_line_length": 138, "num_lines": 139, "path": "/logger/gui/config/prod.config.babel.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import webpack from 'webpack';\nimport merge from 'webpack-merge';\nimport path from 'path';\n\nimport CopyWebpackPlugin from 'copy-webpack-plugin';\nimport FaviconsWebpackPlugin from 'favicons-webpack-plugin';\nimport MiniCssExtractPlugin from 'mini-css-extract-plugin';\nimport OptimizeCSSAssetsPlugin from 'optimize-css-assets-webpack-plugin';\nimport TerserPlugin from 'terser-webpack-plugin';\nimport { BundleAnalyzerPlugin } from 'webpack-bundle-analyzer';\nimport { CleanWebpackPlugin } from 'clean-webpack-plugin';\n\nimport baseConfig from './base.config.babel';\n\nconst env = 'production';\n\nconst ROOT_DIR = path.join(__dirname, '..');\nconst BUILD_DIR = path.join(ROOT_DIR, 'build');\nconst COMPONENTS_DIR = path.join(ROOT_DIR, 'src', 'components');\nconst DEPEND_DIR = path.join(COMPONENTS_DIR, 'dependencies');\n\nexport default merge.smart(baseConfig, {\n mode: env,\n devtool: 'source-map',\n resolve: {\n alias: {\n // 'jquery': 'jquery-min', // TODO: Verify the jquery-min version\n }\n },\n plugins: [\n new webpack.DefinePlugin({\n NODE_ENV: env\n }),\n new BundleAnalyzerPlugin({\n analyzerMode: 'static',\n generateStatsFile: true,\n openAnalyzer: false,\n statsFilename: path.join(ROOT_DIR, 'analyzer.stats.json'),\n reportFilename: path.join(ROOT_DIR, 'analyzer.stats.html')\n }),\n new MiniCssExtractPlugin({\n filename: 'css/[name].bundle.min.css',\n chunkFilename: 'css/[name].bundle.min.css',\n allChunks: true\n }),\n new CopyWebpackPlugin([\n { // Custom Assets\n from: path.join(DEPEND_DIR, 'assets'),\n to: path.join(BUILD_DIR, 'assets'),\n toType: 'dir'\n },\n { // Theme Assets\n from: path.join(COMPONENTS_DIR, 'utils', 'theme-switcher', 'assets'),\n to: path.join(BUILD_DIR, 'assets'),\n toType: 'dir'\n }\n ]),\n new FaviconsWebpackPlugin({\n logo: path.join(DEPEND_DIR, 'img', 'log-favicon.png'),\n cache: true,\n outputPath: 'img/favicons/',\n prefix: 'img/favicons/',\n statsFilename: 'favicons-[hash].json',\n inject: true,\n favicons: {\n appName: 'Logger UI',\n background: '#ffffff',\n theme_color: '#333',\n appleStatusBarStyle: 'black-translucent',\n pixel_art: false,\n icons: {\n android: true, // Create Android homescreen icon. `boolean` or `{ offset, background, mask, overlayGlow, overlayShadow }`\n appleIcon: true, // Create Apple touch icons. `boolean` or `{ offset, background, mask, overlayGlow, overlayShadow }`\n appleStartup: true, // Create Apple startup images. `boolean` or `{ offset, background, mask, overlayGlow, overlayShadow }`\n coast: false, // Create Opera Coast icon. 
`boolean` or `{ offset, background, mask, overlayGlow, overlayShadow }`\n favicons: true, // Create regular favicons. `boolean` or `{ offset, background, mask, overlayGlow, overlayShadow }`\n firefox: true, // Create Firefox OS icons. `boolean` or `{ offset, background, mask, overlayGlow, overlayShadow }`\n opengraph: false,\n twitter: false,\n windows: true, // Create Windows 8 tile icons. `boolean` or `{ offset, background, mask, overlayGlow, overlayShadow }`\n yandex: false // Create Yandex browser icon. `boolean` or `{ offset, background, mask, overlayGlow, overlayShadow }`\n }\n }\n }),\n // new LodashModuleReplacementPlugin(),\n new CleanWebpackPlugin({\n dry: false\n })\n ],\n optimization: {\n minimizer: [\n new TerserPlugin({\n cache: true,\n parallel: true,\n sourceMap: false,\n terserOptions: {\n output: {\n comments: false\n }\n }\n }),\n new OptimizeCSSAssetsPlugin({\n cssProcessorPluginOptions: {\n preset: [\n 'default',\n {\n discardComments: {\n removeAll: true\n }\n }\n ]\n },\n canPrint: true\n })\n ]\n },\n module: {\n rules: [\n {\n test: /\\.(c|le)ss$/,\n use: [\n MiniCssExtractPlugin.loader,\n {\n loader: 'css-loader',\n options: {\n url: false\n }\n },\n {\n loader: 'less-loader',\n options: {\n strictMath: true\n }\n }\n ]\n }\n ]\n }\n});\n" }, { "alpha_fraction": 0.5660377144813538, "alphanum_fraction": 0.5660377144813538, "avg_line_length": 28.064516067504883, "blob_id": "159949fb9cb92c946519ebcbf63c1447e11f49ba", "content_id": "e6805d43c576608dfefcf58c713608f7c7998b3a", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 901, "license_type": "permissive", "max_line_length": 86, "num_lines": 31, "path": "/orchestrator/gui/server/gui_server/account/views/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import os\nimport pkgutil\nimport sys\n\nfrom importlib import import_module\n\n\nfor _, name, _ in pkgutil.iter_modules([os.path.dirname(__file__)]):\n module = import_module(f'.{name}', package=__name__)\n\n exports = getattr(module, 'exports', None)\n\n if exports:\n for export in exports:\n if hasattr(module, export) and not hasattr(sys.modules[__name__], export):\n attr = getattr(module, export)\n setattr(sys.modules[__name__], export, attr)\n del attr\n del export\n\n else:\n for itm in list(filter(lambda x: not x.startswith('_'), dir(module))):\n if not hasattr(sys.modules[__name__], itm):\n attr = getattr(module, itm)\n setattr(sys.modules[__name__], itm, attr)\n del attr\n del itm\n\n del name, module, exports\n\ndel os, pkgutil, sys, import_module\n" }, { "alpha_fraction": 0.5735912322998047, "alphanum_fraction": 0.6013456583023071, "avg_line_length": 22.780000686645508, "blob_id": "7665605a085817499d9a4d0cc21f006bc0eb6713", "content_id": "6084bfa9a7a0f667bd5f38b3de7b0839e0a5d12f", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1189, "license_type": "permissive", "max_line_length": 80, "num_lines": 50, "path": "/orchestrator/core/orc_server/tracking/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "default_app_config = 'tracking.conf.TrackingConfig'\n\n\nclass FrozenDict(dict):\n def 
__init__(self, *args, **kwargs):\n self._hash = None\n super(FrozenDict, self).__init__(*args, **kwargs)\n\n def __hash__(self):\n if self._hash is None:\n self._hash = hash(tuple(sorted(self.items()))) # iteritems() on py2\n return self._hash\n\n def __getattr__(self, item):\n return self.get(item, None)\n\n def _immutable(self, *args, **kws):\n raise TypeError('cannot change object - object is immutable')\n\n __setitem__ = _immutable\n __delitem__ = _immutable\n pop = _immutable\n popitem = _immutable\n clear = _immutable\n update = _immutable\n setdefault = _immutable\n\n\nLEVELS = (\n 'Debug',\n 'Error',\n 'Fatal',\n 'Info',\n 'Trace',\n 'Warn'\n)\n\n_DB_LEVELS = tuple((l[0].upper(), l) for l in LEVELS)\n\nEVENT_LEVELS = FrozenDict({l: l[0].upper() for l in LEVELS})\n\nLEVEL_EVENTS = FrozenDict(map(reversed, EVENT_LEVELS.items()))\n\nREQUEST_LEVELS = FrozenDict(\n Information=range(100, 199),\n Success=range(200, 299),\n Redirect=range(300, 399),\n Client_Error=range(400, 499),\n Server_Error=range(500, 599)\n)\n" }, { "alpha_fraction": 0.5333235263824463, "alphanum_fraction": 0.5411586165428162, "avg_line_length": 34.763572692871094, "blob_id": "96b6b0a9b0e1ec2ffb593304893ef93d79024739", "content_id": "d9b43975bdf460f93d7f912ff7b8ac06c48dc72c", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 20421, "license_type": "permissive", "max_line_length": 302, "num_lines": 571, "path": "/orchestrator/gui/client/src/components/command/pages/generate/index.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport ReactDOM from 'react-dom'\nimport { connect } from 'react-redux'\nimport { toast } from 'react-toastify'\nimport JSONPretty from 'react-json-pretty'\nimport classnames from 'classnames'\n\nimport {\n Button,\n ButtonGroup,\n Form,\n FormGroup,\n FormText,\n Input,\n Label,\n Nav,\n NavItem,\n NavLink,\n TabContent,\n TabPane,\n Tooltip\n} from 'reactstrap'\n\nimport { FontAwesomeIcon } from '@fortawesome/react-fontawesome'\nimport { faLongArrowAltRight } from '@fortawesome/free-solid-svg-icons'\n\nimport {\n JADN_Field,\n JSON_Field,\n keys,\n zip\n} from './lib'\n\nimport {\n delMultiKey,\n escaped2cbor,\n format,\n getMultiKey,\n generateUUID4,\n hexify,\n loadURL,\n minify,\n safeGet,\n setMultiKey,\n validateUUID4,\n validURL\n} from '../../../utils'\n\nimport JADNInput from '../../../utils/jadn-editor'\nimport JSONInput from 'react-json-editor-ajrm'\nimport locale from 'react-json-editor-ajrm/locale/en'\n\nimport * as UtilActions from '../../../../actions/util'\nimport * as GenerateActions from '../../../../actions/generate'\nimport * as CommandActions from '../../../../actions/command'\n\nconst str_fmt = require('string-format')\nconst Ajv = require('ajv')\n\n\nclass GenerateCommands extends Component {\n constructor(props, context) {\n super(props, context)\n\n this.optChange = this.optChange.bind(this)\n this.selectChange = this.selectChange.bind(this)\n this.clearCommand = this.clearCommand.bind(this)\n this.sendCommand = this.sendCommand.bind(this)\n this.jadn_keys = [\"meta\", \"types\"]\n this.json_validator = new Ajv({\n unknownFormats: \"ignore\"\n })\n this.msg_form = null\n\n this.theme = {\n schema: { // Theming for JADN/JSON input\n default: '#D4D4D4',\n background: '#FCFDFD',\n background_warning: 
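// Editorial aside: a usage sketch (Python) for the FrozenDict class in the
// tracking/__init__.py record above; the values here are illustrative only.
//   levels = FrozenDict(Debug='D', Error='E')
//   levels.Debug             # -> 'D', attribute access via __getattr__
//   levels['Error']          # -> 'E'
//   hash(levels)             # hashable, so usable as a dict key or set member
//   levels['Info'] = 'I'     # raises TypeError: cannot change object - object is immutable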
'#FEECEB',\n string: '#FA7921',\n number: '#70CE35',\n colon: '#49B8F7',\n keys: '#59A5D8',\n keys_whiteSpace: '#835FB6',\n primitive: '#386FA4'\n },\n message: { // Theming for JSONPretty\n main: 'color:#D4D4D4;background:#FCFDFD;overflow:auto;',\n error: 'color:#f92672;background:#FEECEB;overflow:auto;',\n key: 'color:#59A5D8;',\n string: 'color:#FA7921;',\n value: 'color:#386FA4;',\n boolean: 'color:#386FA4;',\n }\n }\n\n this.state = {\n active_tab: 'creator',\n msg_record: '',\n channel: {\n serialization: '',\n protocol: ''\n },\n schema: {\n schema: {},\n selected: 'empty',\n type: '',\n jadn_fmt: false,\n exports: []\n },\n message: {},\n message_warnings: []\n }\n\n this.props.actuatorInfo()\n this.props.deviceInfo()\n }\n\n shouldComponentUpdate(nextProps, nextState) {\n let props_update = this.props != nextProps\n let state_update = this.state != nextState\n\n if (this.state.schema.schema != nextState.schema.schema) {\n this.props.setSchema(nextState.schema.schema)\n nextState.message = {}\n nextState.channel = {\n serialization: '',\n protocol: ''\n }\n\n let schema_keys = Object.keys(nextState.schema.schema)\n nextState.schema.jadn_fmt = (schema_keys.length === this.jadn_keys.length && schema_keys.every(v => this.jadn_keys.indexOf(v) !== -1))\n\n if (nextState.schema.jadn_fmt) {\n nextState.schema.exports = safeGet(safeGet(nextState.schema.schema, 'meta', {}), 'exports', [])\n } else {\n if (nextState.schema.schema.hasOwnProperty('properties')) {\n nextState.schema.exports = Object.keys(nextState.schema.schema.properties).map(k => {\n let def = safeGet(nextState.schema.schema.properties, k, {})\n return def.hasOwnProperty('$ref') ? def['$ref'].replace(/^#\\/definitions\\//, '') : ''\n })\n } else {\n nextState.schema.exports = safeGet(nextState.schema.schema, 'oneOf', []).map(exp => exp.hasOwnProperty('$ref') ? 
exp['$ref'].replace(/^#\\/definitions\\//, '') : '')\n }\n }\n nextState.schema.exports = nextState.schema.exports.filter(s => s)\n }\n return props_update || state_update\n }\n\n makeID() {\n this.setState(prevState => ({\n message: {\n ...prevState.message,\n command_id: generateUUID4()\n }\n }))\n }\n\n toggleTab(tab) {\n this.setState({\n active_tab: tab\n })\n }\n\n sendCommand() {\n if (this.state.schema.type == 'actuator') {\n if (this.state.channel.protocol == '') {\n toast(<div><p>Error:</p><p>Actuator protocol not set</p></div>, {type: toast.TYPE.WARNING})\n return\n }\n if (this.state.channel.serialization == '') {\n toast(<div><p>Error:</p><p>Actuator serialization not set</p></div>, {type: toast.TYPE.WARNING})\n return\n }\n }\n\n let actuator = str_fmt('{type}/{selected}', {\n type: this.state.schema.type,\n selected: this.state.schema.selected\n })\n\n toast.info(\"Request sent\");\n\n Promise.resolve(this.props.sendCommand(this.state.message, actuator, this.state.channel)).then(() => {\n let errs = safeGet(this.props.errors, CommandActions.SEND_COMMAND_FAILURE, {})\n\n if (Object.keys(errs).length !== 0) {\n if (errs.hasOwnProperty('non_field_errors')) {\n Object.values(errs).forEach((err) => {\n toast(<p>Error: { err }</p>, {type: toast.TYPE.WARNING})\n })\n } else {\n Object.keys(errs).forEach((err) => {\n toast(<div><p>Error { err }:</p><p>{ errs[err] }</p></div>, {type: toast.TYPE.WARNING})\n })\n }\n } else {\n \n // TODO: Process responses ??\n }\n })\n }\n\n clearCommand() {\n ReactDOM.findDOMNode(this.msg_form).reset()\n this.setState({\n message: {}\n })\n }\n\n optChange(k, v) {\n this.setState(prevState => {\n let msg = prevState.message || {}\n let keys = k.split('.')\n let errors = []\n keys = this.state.schema.exports.indexOf(keys[0]) == -1 ? 
keys : keys.slice(1)\n\n if (keys.length > 1 && msg[keys[0]] && !msg[keys[0]][keys[1]]) {\n delMultiKey(msg, keys[0])\n }\n if (['', ' ', null, undefined, [], {}].indexOf(v) == -1) {\n setMultiKey(msg, k, v)\n } else {\n delMultiKey(msg, k)\n }\n // TODO: Validate message - errors to state.message_warnings as array\n if (this.state.schema.jadn_fmt) {\n console.log(\"Generated from JADN\", msg)\n\n } else {\n // console.log(\"Generated from JSON\", this.state.msg_record, msg)\n let tmp_msg = msg\n if (this.state.schema.schema.hasOwnProperty('properties')) {\n let idx = this.state.schema.exports.indexOf(this.state.msg_record)\n let msg_wrapper = Object.keys(this.state.schema.schema.properties)[idx]\n tmp_msg = {\n [msg_wrapper]: msg\n }\n }\n\n var valid = this.json_validator.validate(this.state.schema.schema, tmp_msg)\n if (!valid) {\n errors = this.json_validator.errors\n }\n }\n\n return {\n message: msg,\n message_warnings: errors\n }\n })\n }\n\n selectChange(e) {\n let type = e.target.id.split('-')[0]\n let selected = e.target.value\n let idx = e.nativeEvent.target.selectedIndex\n let field = e.nativeEvent.target[idx].getAttribute('field')\n let schema_act = ''\n\n if (field == 'profile') {\n let act_profile = this.props.actuators.filter((act) => act.profile == selected)\n\n if (act_profile.length == 0) {\n toast(<p>Something happened, invalid profile</p>, {type: toast.TYPE.WARNING})\n return\n } else {\n act_profile = act_profile[Math.floor(Math.random()*act_profile.length)]\n }\n schema_act = act_profile.actuator_id\n\n } else if (field == 'actuator') {\n let act_name = this.props.actuators.filter((act) => act.actuator_id == selected)\n\n if (act_name.length == 0 || act_name.length > 1) {\n toast(<p>Something happened, invalid actuator</p>, {type: toast.TYPE.WARNING})\n return\n } else {\n act_name = act_name[0]\n }\n schema_act = act_name.actuator_id\n }\n\n this.setState(prevState => ({\n msg_record: '',\n message: {},\n schema: {\n ...prevState.schema,\n selected: selected,\n type: field\n }\n }), () => {\n Promise.resolve(this.props.actuatorSelect(schema_act, field)).then(() => {\n if (Object.keys(this.props.selected.schema).length === 0) {\n toast(<p>No schema defined</p>, {type: toast.TYPE.INFO})\n }\n this.setState(prevState => ({\n schema: {\n ...prevState.schema,\n schema: this.props.selected.schema,\n profile: this.props.selected.profile\n }\n }))\n })\n })\n }\n\n schema(maxHeight) {\n let profile_schemas = []\n let actuator_schemas = []\n let Editor = this.state.schema.jadn_fmt ? JADNInput : JSONInput\n\n this.props.actuators.forEach((act, i) => {\n let dev = this.props.devices.filter(d => d.device_id == act.device)\n dev = dev.length == 1 ? dev[0] : {}\n actuator_schemas.push(<option key={ i } value={ act.actuator_id } field='actuator' >{ dev ? 
dev.name + ' - ' : '' }{ act.name }</option>)\n if (profile_schemas.indexOf(act.profile) === -1) {\n profile_schemas.push(act.profile)\n }\n })\n\n profile_schemas = profile_schemas.map((p, i) => <option key={ i } value={ p } field='profile' >{ p }</option>)\n return (\n <div className=\"col-md-6\">\n <div id=\"schema-card\" className=\"tab-pane fade active show\">\n <div className=\"card\">\n <div className=\"card-header\">\n <div className=\"row float-left col-sm-10 pl-0\">\n <div className=\"form-group col-md-6 pr-0 pl-1\">\n <select id=\"schema-list\" name=\"schema-list\" className=\"form-control\" default=\"empty\" onChange={ this.selectChange }>\n <option value=\"empty\">Schema</option>\n <optgroup label=\"Profiles\">\n { profile_schemas }\n </optgroup>\n <optgroup label=\"Actuators\">\n { actuator_schemas }\n </optgroup>\n </select>\n </div>\n </div>\n </div>\n\n <div className=\"form-control border card-body p-0\" style={{ height: maxHeight+'px' }}>\n <Editor\n id='schema'\n placeholder={ this.state.schema.schema }\n colors={ this.theme.schema }\n locale={ locale }\n reset={ false }\n height='100%'\n width='100%'\n viewOnly={ true }\n />\n </div>\n </div>\n </div>\n </div>\n )\n }\n\n cmdCreator(maxHeight) {\n let export_records = this.state.schema.exports.map((rec, i) => <option key={ i } value={ rec }>{ rec }</option>)\n let Record_Def = \"\"\n let act_protos = []\n let act_serials = []\n\n if (this.props.selected.schema) {\n let record_def = {}\n if (this.state.schema.jadn_fmt) {\n record_def = this.props.selected.schema.hasOwnProperty('types') ? this.props.selected.schema.types.filter(type => type[0] == this.state.msg_record) : []\n record_def = zip(keys.Structure, record_def.length == 1 ? record_def[0] : [])\n Record_Def = <JADN_Field def={ record_def } optChange={ this.optChange } />\n } else {\n if (this.props.selected.schema.definitions && this.props.selected.schema.definitions.hasOwnProperty(this.state.msg_record)) {\n record_def = this.props.selected.schema.definitions[this.state.msg_record]\n Record_Def = <JSON_Field name={ this.state.msg_record } def={ record_def } root={ true } optChange={ this.optChange } />\n }\n }\n }\n\n if (this.state.schema.type === 'actuator') {\n let act = this.props.actuators.filter(act => act.actuator_id === this.state.schema.selected)\n act = act.length == 1 ? act[0] : {}\n let dev = this.props.devices.filter(dev => dev.device_id === act.device)\n dev = dev.length == 1 ? 
dev[0] : {}\n\n act_protos = dev.transport.map((trans, i) => {\n if (trans.protocol == this.state.channel.protocol) {\n act_serials = trans.serialization.map((serial, i) => <option key={ i } value={ serial }>{ serial }</option>)\n\n if (trans.serialization.indexOf(this.state.channel.serialization) == -1 && this.state.channel.serialization !== '') {\n this.setState(prevState => ({\n channel: {\n ...prevState.channel,\n serialization: ''\n }\n }))\n }\n }\n return (<option key={ i } value={ trans.protocol }>{ trans.protocol }</option>)\n })\n }\n\n return (\n <div className='col-md-6'>\n <Nav tabs>\n <NavItem>\n <NavLink className={classnames({ active: this.state.active_tab === 'creator' })} onClick={() => this.toggleTab('creator') }>Creator</NavLink>\n </NavItem>\n <NavItem>\n <NavLink className={classnames({ active: this.state.active_tab === 'message' })} onClick={() => this.toggleTab('message') }>Message</NavLink>\n </NavItem>\n <NavItem>\n <NavLink className={classnames({ active: this.state.active_tab === 'warning' })} onClick={() => this.toggleTab('warning') }>Warnings <span className={ \"badge badge-\" + ( this.state.message_warnings.length > 0 ? \"warning\" : \"success\")}>{ this.state.message_warnings.length }</span></NavLink>\n </NavItem>\n </Nav>\n\n <TabContent activeTab={ this.state.active_tab }>\n <TabPane tabId='creator'>\n <div className='card col-12 p-0 mx-auto'>\n <div className='card-header'>\n <FormGroup className='col-md-6 p-0 m-0 float-left'>\n <Input type='select' className='form-control' value={ this.state.msg_record } onChange={e => { this.setState({'msg_record': e.target.value, message: {}}) }}>\n <option value=''>Message Type</option>\n <optgroup label=\"Exports\">\n { export_records }\n </optgroup>\n </Input>\n </FormGroup>\n <Button color='primary' className='float-right' onClick={ () => this.makeID() }>Generate ID</Button>\n </div>\n\n <Form id='command-fields' className='card-body' onSubmit={ () => { return false; } } ref={el => this.msg_form = el } style={{ height: maxHeight-30+'px', overflowY: 'scroll' }}>\n <div id=\"fieldDefs\">\n {\n this.state.msg_record == \"\" ?\n <FormText color=\"muted\">Message Fields will appear here after selecting a type</FormText>\n :\n Record_Def\n }\n </div>\n </Form>\n </div>\n </TabPane>\n\n <TabPane tabId='message'>\n <div className='card col-12 p-0 mx-auto'>\n <div className='card-header'>\n <ButtonGroup className='float-right col-2' vertical={ true }>\n <Button color='danger' onClick={ this.clearCommand } style={{ padding: \".1rem 0\" }}>Clear</Button>\n <Button color='primary' onClick={ this.sendCommand } style={{ padding: \".1rem 0\" }}>Send</Button>\n </ButtonGroup>\n <div className={ 'col-10 p-0 ' + (this.state.schema.type === 'actuator' ? 
'' : ' d-none') }>\n <FormGroup className='col-md-6 p-0 m-0 float-left'>\n <Input type='select' className='form-control' value={ this.state.channel.protocol } onChange={ (e) => { this.setState({ channel: { ...this.state.channel, protocol: e.target.value }}) }}>\n <option value=''>Protocol</option>\n { act_protos }\n </Input>\n </FormGroup>\n <FormGroup className='col-md-6 p-0 m-0 float-left'>\n <Input type='select' className='form-control' value={ this.state.channel.serialization } onChange={ (e) => { this.setState({ channel: { ...this.state.channel, serialization: e.target.value }}) }}>\n <option value=''>Serialization</option>\n { act_serials }\n </Input>\n </FormGroup>\n </div>\n </div>\n\n <div className='card-body p-1 position-relative' style={{ height: maxHeight-25+'px', overflowY: 'scroll' }}>\n <JSONPretty\n id='message'\n className='scroll-xl'\n style={{ minHeight: 2.5+'em' }}\n data={ this.state.message }\n theme={ this.theme.message }\n />\n </div>\n </div>\n </TabPane>\n\n <TabPane tabId='warning'>\n <div className='card col-12 p-0 mx-auto'>\n <div className='card-header h3'>\n Message Warnings\n </div>\n <div className='card-body p-2 position-relative' style={{ height: maxHeight-25+'px', overflowY: 'scroll' }}>\n {\n this.state.message_warnings.length == 0 ?\n <p>Warnings for the generated message will appear here if available</p>\n :\n this.state.message_warnings.map((err, i) => {\n return (\n <div key={ i } className=\"border border-warning mb-2 px-2 pt-2\">\n <p>Warning from message `{ err.dataPath || \".\" }`\n <FontAwesomeIcon\n icon={ faLongArrowAltRight }\n className=\"mx-2\"\n />\n \"{ err.keyword }\"\n </p>\n <p className=\"text-warning\">{ err.message }</p>\n </div>\n )\n })\n }\n </div>\n </div>\n </TabPane>\n </TabContent>\n </div>\n )\n }\n\n render() {\n let maxHeight = window.innerHeight - (parseInt(document.body.style.paddingTop, 10) || 0) - 260\n\n return (\n <div className='row mt-3'>\n { this.schema(maxHeight) }\n\n <div className='col-12 m-2 d-md-none' />\n\n { this.cmdCreator(maxHeight) }\n\n <div id='cmd-status' className='modal'>\n <div className='modal-dialog h-100 d-flex flex-column justify-content-center my-0' role='document'>\n <div className='modal-content'>\n <div className='modal-header'>\n <h5 className='modal-title'>Command: <span></span></h5>\n <button type='button' className='close' data-dismiss='modal' aria-label='Close'>\n <span aria-hidden='true'>&times;</span>\n </button>\n </div>\n\n <div className='modal-body'>\n <p className='cmd-details'><b>Details:</b> <span></span></p>\n <p className='mb-1'><b>Command:</b></p>\n <pre className='border code command' />\n <p className='mb-1'><b>Responses:</b></p>\n <div className='p-1 border border-primary responses' />\n </div>\n\n <div className='modal-footer'>\n <button type='button' className='btn btn-secondary' data-dismiss='modal'>Close</button>\n </div>\n </div>\n </div>\n </div>\n </div>\n )\n }\n}\n\nconst mapStateToProps = (state) => ({\n actuators: state.Generate.actuators || [],\n devices: state.Generate.devices || [],\n selected: state.Generate.selected || {},\n message: state.Generate.message,\n errors: state.Command.errors\n})\n\nconst mapDispatchToProps = (dispatch) => ({\n setSchema: (schema) => dispatch(GenerateActions.setSchema(schema)),\n actuatorInfo: () => dispatch(GenerateActions.actuatorInfo()),\n actuatorSelect: (act, t) => dispatch(GenerateActions.actuatorSelect(act, t)),\n deviceInfo: () => dispatch(GenerateActions.deviceInfo()),\n sendCommand: (cmd, act, chan) => 
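// Editorial aside: an illustrative sketch, not part of the original file.
// The optChange handler above validates the assembled message with Ajv; in
// isolation the pattern is (schema and message stand in for real values):
//   const Ajv = require('ajv');
//   const validator = new Ajv({ unknownFormats: 'ignore' });
//   if (!validator.validate(schema, message)) {
//     // each error carries dataPath, keyword and message, which the
//     // warnings tab above renders for the user
//     console.warn(validator.errors);
//   }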
dispatch(CommandActions.sendCommand(cmd, act, chan))\n})\n\nexport default connect(mapStateToProps, mapDispatchToProps)(GenerateCommands)\n" }, { "alpha_fraction": 0.6542553305625916, "alphanum_fraction": 0.6542553305625916, "avg_line_length": 16.090909957885742, "blob_id": "8b94db2dd573968807a6da6b27410ccf602a9313", "content_id": "a84a094d314f906e1d5e86a0a2cb44fc756754e4", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 188, "license_type": "permissive", "max_line_length": 49, "num_lines": 11, "path": "/orchestrator/core/orc_server/command/views/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from .actions import action_send\nfrom .viewsets import HistoryViewSet, SentHistory\n\n\n__all__ = [\n # Actions\n 'action_send',\n # Viewsets\n 'HistoryViewSet',\n 'SentHistory',\n]\n" }, { "alpha_fraction": 0.6511024832725525, "alphanum_fraction": 0.6511024832725525, "avg_line_length": 26.535715103149414, "blob_id": "22aec0b729ef0d1d183491b9d6bfc07462d394a2", "content_id": "4c4add9cbabc7880fc1eee5140198811a8ab356f", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 771, "license_type": "permissive", "max_line_length": 90, "num_lines": 28, "path": "/orchestrator/core/orc_server/account/urls.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.urls import include, path\nfrom rest_framework import routers\n\nfrom rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token, verify_jwt_token\n\nfrom . 
import views\n\nrouter = routers.DefaultRouter()\nrouter.register('', views.UserViewSet)\nrouter.register('(?P<username>[^/.]+)/history', views.UserHistoryViewSet)\n\nurlpatterns = [\n # JWT Tokens\n path('jwt/', include([\n path('', obtain_jwt_token),\n path('refresh/', refresh_jwt_token),\n path('verify/', verify_jwt_token),\n ])),\n\n # User Actions\n path('', include(router.urls)),\n\n # Actuator Access\n path('<str:username>/actuator/', include([\n path('', views.ActuatorAccess.as_view()),\n path('<str:actuator_id>/', views.actuatorDelete)\n ]))\n]\n" }, { "alpha_fraction": 0.570282518863678, "alphanum_fraction": 0.5744680762290955, "avg_line_length": 25.302751541137695, "blob_id": "779f00e4d82cbbdd819641428813b45959095f3e", "content_id": "7a2695238d7d7c2c1258aee75a757244b2ac1815", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2867, "license_type": "permissive", "max_line_length": 194, "num_lines": 109, "path": "/orchestrator/gui/client/src/components/admin/index.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\nimport { Helmet } from 'react-helmet-async'\n\nimport {\n Settings,\n Users\n} from './pages'\n\nimport {\n titleCase\n} from '../utils'\n\nconst str_fmt = require('string-format')\n\nclass Admin extends Component {\n constructor(props, context) {\n super(props, context)\n this.changePage = this.changePage.bind(this)\n\n this.meta = {\n title: str_fmt('{base} | {page}', {base: this.props.siteTitle, page: 'Admin'}),\n canonical: str_fmt('{origin}{path}', {origin: window.location.origin, path: window.location.pathname})\n }\n\n this.validPages = ['settings', 'users']\n\n let page = this.props.match.params.page || 'users'\n if (this.validPages.indexOf(page) === -1) {\n page = 'users'\n }\n\n this.state = {\n activeTab: page\n }\n }\n\n changePage(e) {\n e.preventDefault()\n this.toggleTab(e.target.dataset.page)\n }\n\n toggleTab(tab) {\n if (this.state.activeTab !== tab) {\n this.props.history.push({\n pathname: str_fmt('/admin/{tab}', {tab: tab})\n })\n this.setState({\n activeTab: tab\n })\n }\n }\n\n getContent() {\n let content = \"\"\n switch (this.state.activeTab) {\n case 'users':\n content = <Users />\n break;\n default:\n content = <Settings />\n break;\n }\n return (\n <div className=\"col-12\">\n { content }\n </div>\n )\n }\n\n render() {\n return (\n <div className=\"row mx-auto\">\n <Helmet>\n <title>{ this.meta.title }</title>\n <link rel=\"canonical\" href={ this.meta.canonical } />\n </Helmet>\n <nav className=\"navbar navbar-expand-lg navbar-dark bg-dark w-100\">\n <a className=\"navbar-brand\" href=\"#\" data-page=\"\" onClick={ this.changePage }>Admin</a>\n <button className=\"navbar-toggler\" type=\"button\" data-toggle=\"collapse\" data-target=\"#navbarColor02\" aria-controls=\"navbarColor02\" aria-expanded=\"false\" aria-label=\"Toggle navigation\">\n <span className=\"navbar-toggler-icon\"></span>\n </button>\n <div className=\"collapse navbar-collapse\" id=\"navbarColor02\">\n <ul className=\"navbar-nav mr-auto\">\n <li className=\"nav-item active\">\n <a className=\"nav-link\" href=\"#\" data-page=\"users\" onClick={ this.changePage } >Users</a>\n </li>\n <li className=\"nav-item active\">\n <a className=\"nav-link\" href=\"#\" 
data-page=\"settings\" onClick={ this.changePage } >Settings</a>\n </li>\n </ul>\n </div>\n </nav>\n\n { this.getContent() }\n\n </div>\n )\n }\n}\n\nconst mapStateToProps = (state) => ({\n siteTitle: state.Util.site_title,\n})\n\nconst mapDispatchToProps = (dispatch) => ({\n})\n\nexport default connect(mapStateToProps, mapDispatchToProps)(Admin)\n" }, { "alpha_fraction": 0.6198830604553223, "alphanum_fraction": 0.6198830604553223, "avg_line_length": 18, "blob_id": "4d5be3279d30897f8d2e9340b4f67c6c851f1e59", "content_id": "6f49b514dd32eeed5558584e3b234d4ceed8278f", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 171, "license_type": "permissive", "max_line_length": 74, "num_lines": 9, "path": "/orchestrator/core/orc_server/conformance/tests/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from .utils import get_tests, load_test_suite, tests_in_suite, TestResults\n\n\n__all__ = [\n 'get_tests',\n 'load_test_suite',\n 'tests_in_suite',\n 'TestResults'\n]\n" }, { "alpha_fraction": 0.7239819169044495, "alphanum_fraction": 0.7345399856567383, "avg_line_length": 26.58333396911621, "blob_id": "19b96c6db0d77458613f1fbdf69172e003dc3e9e", "content_id": "49f9c66a483f193de0c5c9cd1536da3c5603e5c9", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 663, "license_type": "permissive", "max_line_length": 78, "num_lines": 24, "path": "/orchestrator/transport/coap/Dockerfile", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "FROM g2inc/oif-python\nRUN apk upgrade --update && apk add --no-cache dos2unix && rm /var/cache/apk/*\n\nMAINTAINER Screaming_Bunny\n\nLABEL name=\"COAP Transport Module\" \\\nvendor=\"OpenC2\" \\\nlicense=\"BSD\" \\\nversion=\"2.0\" \\\ndescription=\"This is the Transport Module container\"\n\nADD requirements.txt /tmp/requirements.txt\nADD COAP/ /opt/transport/COAP\nADD docker_dev_start.sh /opt/transport/COAP/dev_start.sh\n\nRUN pip3 install -r /tmp/requirements.txt && \\\n dos2unix /opt/transport/COAP/dev_start.sh && \\\n chmod +x /opt/transport/COAP/dev_start.sh \n\n# Set working directory\nWORKDIR /opt/transport/COAP\n\n# Run command when container launches\nCMD [\"./dev_start.sh\"]\n\n" }, { "alpha_fraction": 0.6019802093505859, "alphanum_fraction": 0.6046204566955566, "avg_line_length": 24.25, "blob_id": "b3751f6ed15207cb53c4d271235084ff85f734da", "content_id": "e0bc63cee323c6f340ea45114a2adb5bcdd6a930", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1515, "license_type": "permissive", "max_line_length": 75, "num_lines": 60, "path": "/orchestrator/gui/client/src/reducers/socket.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import { WebSocketBridge } from 'django-channels'\nimport * as socket from '../actions/socket'\n\nconst initialState = {\n connected: false,\n connection: null,\n endpoint: 'ws://' + location.hostname + ':8080',\n queue: []\n}\n\nexport default (state=initialState, 
action=null) => {\n switch(action.type) {\n case socket.SOCKET_SETUP:\n console.log(\"WebSocket Setup\", action.payload)\n return {\n ...state,\n connection: action.payload.socket || state.socket,\n endpoint: action.payload.endpoint || state.endpoint,\n queue: action.payload.queue || state.queue\n }\n\n case socket.SOCKET_CONNECTED:\n console.log(\"WebSocket Connected\")\n return {\n ...state,\n connected: action.payload.connected || true\n }\n\n case socket.SOCKET_DISCONNECTED:\n console.log(\"WebSocket Disconnected\")\n return {\n ...state,\n connected: action.payload.connected || false\n }\n\n case socket.RECEIVED_SOCKET_DATA:\n console.log(\"WebSocket Data\")\n try {\n let act = JSON.parse(action.payload.data)\n console.log(act)\n action.asyncDispatch(act)\n } catch(err) {\n console.log(err)\n action.asyncDispatch(socket.createErrorAction(state.endpoint, err))\n }\n return {\n ...state,\n }\n\n case socket.SOCKET_ERROR:\n console.log(\"WebSocket Error\", action.payload)\n return {\n ...state,\n error: action.payload.error || ''\n }\n\n default:\n return state\n }\n}\n" }, { "alpha_fraction": 0.6648351550102234, "alphanum_fraction": 0.692307710647583, "avg_line_length": 19.22222137451172, "blob_id": "48468c99187fa204650560a91f208df55e493dfc", "content_id": "3ba4424ef3d317c8d295a6f0b177d3bf5fd0d911", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 182, "license_type": "permissive", "max_line_length": 67, "num_lines": 9, "path": "/orchestrator/gui/server/gui_server/webApp/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import sys\n\nver = sys.version_info\n\nif ver < (3, 6):\n print('PythonVersionError: Minimum version of v3.6+ not found')\n exit(1)\n\ndefault_app_config = 'webApp.apps.WebAppConfig'\n" }, { "alpha_fraction": 0.7352941036224365, "alphanum_fraction": 0.7352941036224365, "avg_line_length": 22.263158798217773, "blob_id": "97de644c192b1052e7c055301b36587e28ac7c7b", "content_id": "2af717ea7cd3a14deb261c8d86d965b9219d9a2b", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 442, "license_type": "permissive", "max_line_length": 64, "num_lines": 19, "path": "/orchestrator/core/orc_server/backup/utils/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from rest_framework_msgpack.parsers import MessagePackParser\nfrom rest_framework_msgpack.renderers import MessagePackRenderer\n\nfrom rest_framework_xml.parsers import XMLParser\nfrom rest_framework_xml.renderers import XMLRenderer\n\nfrom .xls import XLSParser, XLSRenderer\n\n__all__ = [\n # MsgPack\n 'MessagePackParser',\n 'MessagePackRenderer',\n # XLS\n 'XLSParser',\n 'XLSRenderer',\n # XML\n 'XMLParser',\n 'XMLRenderer'\n]\n" }, { "alpha_fraction": 0.6104651093482971, "alphanum_fraction": 0.6119186282157898, "avg_line_length": 26.520000457763672, "blob_id": "7a0689bf6b2129a1dfa5990fe8f69bccb22b8b68", "content_id": "5505991cc72be94d6ccb22e18bd53f72919a17fe", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, 
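An editorial aside, not part of the dataset record: a minimal sketch of the parse-and-forward pattern the socket reducer above applies to RECEIVED_SOCKET_DATA. The `asyncDispatch` helper and `createErrorAction` are assumed to be supplied by middleware and the socket actions module, as in the reducer.

```js
const forwardSocketData = (action, endpoint) => {
  try {
    // the server frame payload is itself a serialized redux action
    const inner = JSON.parse(action.payload.data);
    action.asyncDispatch(inner);
  } catch (err) {
    action.asyncDispatch(createErrorAction(endpoint, err));
  }
};
```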
"language": "Python", "length_bytes": 688, "license_type": "permissive", "max_line_length": 77, "num_lines": 25, "path": "/orchestrator/core/orc_server/orchestrator/management/commands/createsuperuser_default.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import sys\n\nfrom django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand\n\n\nclass Command(BaseCommand):\n \"\"\"\n Custom django command - createsuperuser_default\n Create a default superuser is one does not exist\n \"\"\"\n def handle(self, *args, **kwargs):\n \"\"\"\n Handle command execution\n :param args:\n :param kwargs:\n :return: None\n \"\"\"\n if User.objects.filter(username='admin', is_superuser=True).exists():\n print('Superuser Exists')\n else:\n print('Creating SuperUser')\n User.objects.create_superuser('admin', '[email protected]', 'password')\n\n sys.exit(0)\n" }, { "alpha_fraction": 0.6821971535682678, "alphanum_fraction": 0.6915154457092285, "avg_line_length": 33.559322357177734, "blob_id": "308efe7f0c3f8409c9b0d42b60d6e28ce2231cb4", "content_id": "8f6362d5b14ca96fe01eaee21e91088f52d27ac3", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2039, "license_type": "permissive", "max_line_length": 270, "num_lines": 59, "path": "/orchestrator/transport/https/ReadMe.md", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# OASIS TC Open: oif-orchestrator-transport-https\n## OpenC2 HTTPS Transport\n\n### About this Image\n- This is the OpenC2 HTTPS transfer container for use with the O.I.F.\n\n### How to use this image\n#### Transport Setup\n\n- The HTTPS Transport Module is configured to run from a docker container as a part of the OIF-Orchestrator docker stack. Use the [configure.py](../../../configure.py) script to build the images needed to run the entirety of this Transport as a part of the Orchestrator.\n\n#### Configuration\n\n##### Adding Own Certs\n\n- The user has the option of adding personal certs instead of self signed generated certs on startup in development mode.\n \n1. Generate certs to be used.\n \n2. Put certs into the certs folder.\n \n - Certs Folder:\n ```\n /HTTPS/certs\n ```\n - Rename the certs for the flask app:\n ```\n server.crt\n server.key\n ```\n \n3. Edit the transport file to use certs.\n\n - Edit line in https_transport.py\n ```\n http = urllib3.PoolManager(cert_reqs='CERT_NONE')\n to\n http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=/opt/transport/HTTPS/certs/CERTNAME)\n ```\n \n4. 
Edit Dockerfile\n\n - Remove self-signed certificate generation.\n ```\n RUN openssl genrsa -des3 -passout pass:x -out /opt/transport/HTTPS/certs/server.pass.key 2048 && \\\n openssl rsa -passin pass:x -in /opt/transport/HTTPS/certs/server.pass.key -out /opt/transport/HTTPS/certs/server.key && \\\n rm /opt/transport/HTTPS/certs/server.pass.key && \\\n openssl req -new -key /opt/transport/HTTPS/certs/server.key -out /opt/transport/HTTPS/certs/server.csr \\\n -subj \"/C=US/O=flask/OU=Screaming Bunny\" && \\\n openssl x509 -req -days 365 -in /opt/transport/HTTPS/certs/server.csr -signkey /opt/transport/HTTPS/certs/server.key -out /opt/transport/HTTPS/certs/server.crt\n ```\n\n#### Starting Container\n - To start the container\n\n - Use dev-compose.yaml, this will pull latest image from gitlab and start the service.\n ```\n docker-compose -f dev-compose.yaml up\n ```\n" }, { "alpha_fraction": 0.610779881477356, "alphanum_fraction": 0.6145957708358765, "avg_line_length": 24.723926544189453, "blob_id": "ca2ca0b8d24bf3d00133bdf613225e6dbb642a6b", "content_id": "168a237aaa858969d94950db6943c63d3ba822c6", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4193, "license_type": "permissive", "max_line_length": 100, "num_lines": 163, "path": "/base/modules/utils/root/sb_utils/message/helpers.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "\"\"\"\nSerialization encode/decode helper functions\n\"\"\"\nimport bencode\nimport collections\nimport json\nimport os\nimport sexpdata\nimport tempfile\nimport xmltodict\n\nfrom subprocess import Popen, PIPE\nfrom typing import (\n Union\n)\n\nfrom ..general import (\n check_values,\n default_encode,\n floatByte\n)\n\n\n# Message Conversion helpers for Bencode\ndef bencode_encode(msg: dict) -> bytes:\n \"\"\"\n Encode the given message to Bencode format\n :param msg: message to convert\n :return: Bencode formatted message\n \"\"\"\n return bencode.bencode(default_encode(msg, {float: floatByte}))\n\n\ndef bencode_decode(msg: bytes) -> dict:\n \"\"\"\n Decode the given message to Bencode format\n :param msg: message to convert\n :return: JSON formatted message\n \"\"\"\n return default_encode(bencode.bdecode(msg), {bytes: floatByte})\n\n\n# Message Conversion helpers for S-Expression\ndef _sp_decode(val):\n if isinstance(val, list) and isinstance(val[0], sexpdata.Symbol):\n rtn = {}\n for idx in range(0, len(val), 2):\n k = val[idx].value()\n k = k[1:] if k.startswith(\":\") else k\n rtn[k] = _sp_decode(val[idx + 1])\n return rtn\n return val\n\n\ndef sp_encode(msg: dict) -> str:\n \"\"\"\n Encode the given message to S-Expression format\n :param msg: message to convert\n :return: S-Expression formatted message\n \"\"\"\n return sexpdata.dumps(msg)\n\n\ndef sp_decode(msg: str) -> dict:\n \"\"\"\n Decode the given message to JSON format\n :param msg: message to convert\n :return: JSON formatted message\n \"\"\"\n rtn = sexpdata.loads(msg)\n return _sp_decode(rtn)\n\n\n# Message Conversion helpers for XML\ndef _xml_root(msg: dict) -> Union[dict, str]:\n \"\"\"\n Get the message or determine the root key\n :param msg: message to find the root\n :return: root of message\n \"\"\"\n if \"command\" in msg:\n return msg.get(\"command\", {})\n\n if \"response\" in msg:\n return msg.get(\"response\", {})\n\n if \"action\" in msg:\n return 
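# Editorial aside (an illustrative sketch, not part of the original record):
# step 3 of the HTTPS transport ReadMe above swaps the urllib3 pool over to
# certificate verification; written out, with the cert path following the
# ReadMe's convention rather than a verified value:
#   import urllib3
#   http = urllib3.PoolManager(
#       cert_reqs='CERT_REQUIRED',
#       ca_certs='/opt/transport/HTTPS/certs/server.crt'
#   )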
\"command\"\n\n if \"status\" in msg:\n return \"response\"\n\n return msg\n\n\ndef _xml_to_dict(xml: dict) -> dict:\n \"\"\"\n Convert XML data to a dict\n :param xml: XML data to convert\n :return: dict repr of given XML\n \"\"\"\n tmp = {}\n for k, v in xml.items():\n k = k[1:] if k.startswith((\"@\", \"#\")) else k\n if k in tmp:\n raise KeyError(f\"Duplicate key from `attr_prefix` or `cdata_key` - {k}\")\n tmp[k] = _xml_to_dict(v) if isinstance(v, collections.OrderedDict) else check_values(v)\n return tmp\n\n\ndef xml_encode(msg: dict) -> str:\n \"\"\"\n Encode the given message to XML format\n :param msg: message to convert\n :return: XML formatted message\n \"\"\"\n return xmltodict.unparse({_xml_root(msg): msg})\n\n\ndef xml_decode(msg: str) -> dict:\n \"\"\"\n Decode the given message to JSON format\n :param msg: message to convert\n :return: JSON formatted message\n \"\"\"\n return _xml_root(_xml_to_dict(xmltodict.parse(msg)))\n\n\n# Message Conversion helpers for VelocityPack (VPack)\ndef vpack_encode(msg: dict) -> bytes:\n rtn = b\"\"\n with tempfile.NamedTemporaryFile(delete=True) as msg_tmp:\n with open(msg_tmp.name, \"w\") as f:\n f.write(json.dumps(msg))\n os.chmod(msg_tmp.name, 0o0777)\n msg_tmp.file.close()\n\n with tempfile.NamedTemporaryFile(delete=True) as enc_tmp:\n process = Popen([\"json-to-vpack\", msg_tmp.name, enc_tmp.name], stdout=PIPE, stderr=PIPE)\n _, _ = process.communicate()\n\n with open(enc_tmp.name, \"rb\") as f:\n rtn = f.read()\n\n return rtn\n\n\ndef vpack_decode(msg: bytes) -> dict:\n rtn = {}\n with tempfile.NamedTemporaryFile(delete=True) as msg_tmp:\n with open(msg_tmp.name, \"wb\") as f:\n f.write(msg)\n os.chmod(msg_tmp.name, 0o0777)\n msg_tmp.file.close()\n\n with tempfile.NamedTemporaryFile(delete=True) as dec_tmp:\n process = Popen([\"vpack-to-json\", msg_tmp.name, dec_tmp.name], stdout=PIPE, stderr=PIPE)\n _, _ = process.communicate()\n\n with open(dec_tmp.name, \"rb\") as f:\n rtn = json.load(f)\n\n return rtn\n" }, { "alpha_fraction": 0.8695651888847351, "alphanum_fraction": 0.8913043737411499, "avg_line_length": 6.833333492279053, "blob_id": "6e48ec510b285ac89de2f06b8106b005261c6b42", "content_id": "01c95f6a4dfdbc053aa56d0ec6e6663854c25dc6", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 46, "license_type": "permissive", "max_line_length": 9, "num_lines": 6, "path": "/orchestrator/transport/https/requirements.txt", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "kombu\ncbor2\nrequests\nflask\ndicttoxml\nxmltodict" }, { "alpha_fraction": 0.6951871514320374, "alphanum_fraction": 0.7219251394271851, "avg_line_length": 25.714284896850586, "blob_id": "c222a67457abae4003236e3d58417109aa1599fc", "content_id": "3eb5f0812e18b58b2a5eb78d40cdede7b9fa31d0", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 187, "license_type": "permissive", "max_line_length": 67, "num_lines": 7, "path": "/orchestrator/core/orc_server/orchestrator/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import sys\n\nif sys.version_info < (3, 6):\n print('PythonVersionError: Minimum version of v3.6+ not found')\n 
sys.exit(1)\n\ndefault_app_config = 'orchestrator.apps.OrchestratorConfig'\n" }, { "alpha_fraction": 0.5754263401031494, "alphanum_fraction": 0.5771753191947937, "avg_line_length": 30.76388931274414, "blob_id": "fb642756b879752dcc36fa083dea8b8a9a926d72", "content_id": "225a04a6e3eb66f2eb8cf3085fcfc4fc6eaf6867", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2287, "license_type": "permissive", "max_line_length": 152, "num_lines": 72, "path": "/orchestrator/gui/client/src/components/command/pages/generate/lib/json_field/enumerated.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\n\nimport {\n FormGroup,\n FormText,\n Label,\n Input\n} from 'reactstrap'\n\nimport * as GenActions from '../../../../../../actions/generate'\n\nclass EnumeratedField extends Component {\n change(val) {\n let def_type = this.props.def.type\n switch(def_type) {\n case \"integer\":\n val = parseInt(val, 10) || null\n break;\n case \"number\":\n val = parseFloat(val.replace(\",\", \".\")) || null\n break;\n }\n this.props.optChange(this.parent, val)\n }\n\n render() {\n this.parent = \"\"\n if (this.props.parent) {\n this.parent = [this.props.parent, this.props.name].join('.')\n } else if (this.props.name.match(/^[a-z]/)) {\n this.parent = this.props.name\n }\n\n let def_opts = []\n\n if (this.props.def.hasOwnProperty(\"enum\")) {\n if (this.props.def.hasOwnProperty(\"options\")) {\n def_opts = this.props.def.options.map((opt, i) => <option key={ i } value={ opt.value } data-subtext={ opt.description }>{ opt.label }</option>)\n } else {\n def_opts = this.props.def.enum.map(opt => <option key={ opt } value={ opt } data-subtext={ opt }>{ opt }</option>)\n }\n } else if (this.props.def.hasOwnProperty(\"oneOf\")) {\n def_opts = this.props.def.oneOf.map((opt, i) => <option key={ i } value={ opt.const } data-subtext={ opt.description }>{ opt.const }</option>)\n } else {\n def_opts = [<option key={ 0 } value=\"\">Unknown Enumerated format</option>]\n }\n\n return (\n <FormGroup tag=\"fieldset\" className=\"border border-dark p-2\">\n <legend>{ (this.props.required ? '*' : '') } { this.props.name }</legend>\n { this.props.def.description ? 
<FormText color=\"muted\">{ this.props.def.description }</FormText> : '' }\n <Input\n type=\"select\"\n name={ name }\n title={ name }\n className=\"selectpicker\"\n onChange={ e => this.change(e.target.value) }\n >\n <option data-subtext={ name + ' options' } value='' >{ name + ' options' }</option>\n { def_opts }\n </Input>\n </FormGroup>\n )\n }\n}\n\nconst mapStateToProps = (state) => ({\n schema: state.Generate.selectedSchema\n})\n\nexport default connect(mapStateToProps)(EnumeratedField)\n" }, { "alpha_fraction": 0.7214022278785706, "alphanum_fraction": 0.7214022278785706, "avg_line_length": 26.794872283935547, "blob_id": "b185a649662a1fa492d1b89a267b9d1acf988dc9", "content_id": "17281b3cd69b5e5f2b3e1bc8a6f5e10a8b5c949b", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1084, "license_type": "permissive", "max_line_length": 98, "num_lines": 39, "path": "/orchestrator/gui/client/src/index.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React from 'react'\nimport ReactDOM from 'react-dom'\nimport { Provider } from 'react-redux'\nimport { HelmetProvider } from 'react-helmet-async'\n\nimport registerServiceWorker from './registerServiceWorker'\n\n// Styles\nimport { ThemeSwitcher } from './components/utils'\nimport 'bootstrap'\nimport 'react-toastify/dist/ReactToastify.css'\nimport './components/dependencies/css/styles.less'\n\n// Orchestrator Application\nimport App from './app'\n\n// Config\nimport { createBrowserHistory } from 'history'\nimport configureStore from './store'\n\nconst history = createBrowserHistory()\nconst store = configureStore(history)\n\n// Theme Options\nconst validThemes = ['cyborg', 'darkly', 'flatly', 'litera', 'lumen', 'slate', 'spacelab', 'yeti']\n\nconst Root = () => (\n <Provider store={ store } >\n <HelmetProvider>\n <ThemeSwitcher storeThemeKey=\"theme\" defaultTheme=\"lumen\" themeOptions={ validThemes }>\n <App history={ history } />\n </ThemeSwitcher>\n </HelmetProvider>\n </Provider>\n)\n\nReactDOM.render(<Root />, document.getElementById('root'));\n\nregisterServiceWorker();\n" }, { "alpha_fraction": 0.7185184955596924, "alphanum_fraction": 0.7185184955596924, "avg_line_length": 23.10714340209961, "blob_id": "a3a330fb6526785933c89175113f1e39a97862ac", "content_id": "cddb6ea0b89044cb61cd1958971eade8577380a3", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 675, "license_type": "permissive", "max_line_length": 54, "num_lines": 28, "path": "/orchestrator/core/orc_server/orchestrator/admin.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom rest_framework.authtoken.admin import TokenAdmin\n\nfrom .models import Protocol, Serialization\n\n\nclass ProtocolAdmin(admin.ModelAdmin):\n \"\"\"\n Protocol model admin\n \"\"\"\n list_display = ('name', 'pub_sub', 'port')\n\n\nclass SerializationAdmin(admin.ModelAdmin):\n \"\"\"\n Serialization model admin\n \"\"\"\n list_display = ('name', )\n\n\n# Register Models\nadmin.site.register(Protocol, ProtocolAdmin)\nadmin.site.register(Serialization, SerializationAdmin)\n\n# Update TokenAdmin Model\nTokenAdmin.list_display = 
('key', 'user', 'created')\nTokenAdmin.readonly_fields = ('key', 'created')\nTokenAdmin.list_select_related = ('user', )\n" }, { "alpha_fraction": 0.6737499833106995, "alphanum_fraction": 0.6737499833106995, "avg_line_length": 22.52941131591797, "blob_id": "9b7ac82693895b3db6f98078d8371eb445ec6fc7", "content_id": "f31bd289457a09beedfcf43e7aa41a5395d0a30a", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 800, "license_type": "permissive", "max_line_length": 65, "num_lines": 34, "path": "/orchestrator/core/orc_server/device/admin.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom .models import Device, DeviceGroup, Transport\n\n\nclass TransportAdmin(admin.ModelAdmin):\n    \"\"\"\n    Transport model admin\n    \"\"\"\n    list_display = ('transport_id', 'host', 'port', 'protocol', )\n    filter_horizontal = ('serialization', )\n\n\nclass DeviceAdmin(admin.ModelAdmin):\n    \"\"\"\n    Device model admin\n    \"\"\"\n    readonly_fields = ('device_id', )\n    list_display = ('device_id', 'name', )\n    filter_horizontal = ('transport',)\n\n\nclass DeviceGroupAdmin(admin.ModelAdmin):\n    \"\"\"\n    Device Group model admin\n    \"\"\"\n    list_display = ('name', )\n    filter_horizontal = ('users', 'devices')\n\n\n# Register models\nadmin.site.register(Device, DeviceAdmin)\nadmin.site.register(DeviceGroup, DeviceGroupAdmin)\nadmin.site.register(Transport, TransportAdmin)\n" }, { "alpha_fraction": 0.6764305233955383, "alphanum_fraction": 0.6914169192314148, "avg_line_length": 23.69354820251465, "blob_id": "e473b12e8547be27509e2161f1d280fe3e64473a", "content_id": "d7a00480cf26fab2d5e5421d55d7d7c76b11af57", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 1468, "license_type": "permissive", "max_line_length": 78, "num_lines": 62, "path": "/orchestrator/gui/server/Dockerfile", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "FROM g2inc/oif-python\nRUN apk upgrade --update && apk add --no-cache dos2unix && rm /var/cache/apk/*\n\nMAINTAINER Screaming_Bunny\n\nLABEL name=\"Orchestrator GUI Server\" \\\nvendor=\"OpenC2\" \\\nlicense=\"BSD\" \\\nversion=\"2.0\" \\\ndescription=\"This is the Orchestrator GUI Server container\"\n\nADD requirements.txt /tmp/requirements.txt\nADD gui_server /opt/gui_server\nADD docker-entrypoint.sh /docker-entrypoint.sh\n\n# Set environment vars\n# DJANGO Settings\nENV DJANGO_SETTINGS_MODULE=webApp.settings \\\n    DJANGO_ENV=\"prod\"\n\n# Requirements install\n# System packages\nRUN apk add --no-cache --virtual .build-deps \\\n    gcc \\\n    make \\\n    libc-dev \\\n    musl-dev \\\n    python3-dev \\\n    pcre-dev \\\n    openssl-dev \\\n    linux-headers \\\n    pcre-dev && \\\napk add --no-cache pcre mailcap && \\\n# python/pip packages\npip3 install -r /tmp/requirements.txt && \\\n#\n# Collect Static Files\ncd /opt/gui_server && \\\npython3 manage.py collectstatic --noinput && \\\ncd && \\\n#\n# Mod Entrypoint Script\nchmod +x /docker-entrypoint.sh && \\\nchmod +x /opt/gui_server/dev_start.sh && \\\ndos2unix /docker-entrypoint.sh && \\\ndos2unix /opt/gui_server/dev_start.sh && \\\n#\n# Cleanup\napk del .build-deps && \\\nrm -rf /var/cache/apk/* 
*.tar.gz* /usr/src /root/.gnupg /tmp/*\n\n# Ports\nEXPOSE 8080/tcp\n\n# Orchestrator GUI Server Working Directory\nWORKDIR /opt/gui_server\n\n# entrypoint Command\nENTRYPOINT [\"/docker-entrypoint.sh\"]\n\n# Startup Command\nCMD [\"daphne\", \"-b\", \"0.0.0.0\", \"-p\", \"8080\", \"webApp.asgi:application\"]" }, { "alpha_fraction": 0.7049505114555359, "alphanum_fraction": 0.7168316841125488, "avg_line_length": 35.92683029174805, "blob_id": "30ec27449550d096c4bb3a5698039a01f5f85754", "content_id": "54126205011fdd54b9d3123dee438b31d3ede13c", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 523, "license_type": "permissive", "max_line_length": 85, "num_lines": 21, "path": "/orchestrator/gui/server/gui_server/orchestrator/views/api.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom rest_framework import permissions\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\n\nfrom ..models import Orchestrator\n\n\n@api_view(['GET'])\n@permission_classes((permissions.IsAuthenticated,))\ndef api_root(request):\n    \"\"\"\n    Orchestrator api root\n    \"\"\"\n    rtn = dict(\n        registered={orc.name: str(orc.orc_id) for orc in Orchestrator.objects.all()},\n    )\n\n    return Response(rtn)\n" }, { "alpha_fraction": 0.636304497718811, "alphanum_fraction": 0.6467143893241882, "avg_line_length": 25.05084800720215, "blob_id": "4b368436a4f2107acd54951b63cd742743627b54", "content_id": "e9155440dd07fa567257878fb105b1c00e10f349", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1537, "license_type": "permissive", "max_line_length": 71, "num_lines": 59, "path": "/orchestrator/core/orc_server/orchestrator/models.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.conf import settings\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom rest_framework.authtoken.models import Token\n\n\nclass Protocol(models.Model):\n    \"\"\"\n    OpenC2 Protocols\n    \"\"\"\n    name = models.CharField(\n        help_text=\"Name of the Protocol\",\n        max_length=30\n    )\n    pub_sub = models.BooleanField(\n        blank=False,\n        default=False,\n        help_text=\"Protocol is Pub/Sub\"\n    )\n    port = models.IntegerField(\n        default=8080,\n        help_text=\"Port of the transport\",\n        validators=[\n            MinValueValidator(1),\n            MaxValueValidator(65535)\n        ]\n    )\n\n    def __str__(self):\n        return f'Protocol - {self.name}'\n\n\nclass Serialization(models.Model):\n    \"\"\"\n    OpenC2 Serializations\n    \"\"\"\n    name = models.CharField(\n        max_length=30,\n        help_text=\"Name of the Serialization\"\n    )\n\n    def __str__(self):\n        return f'Serialization - {self.name}'\n\n\n@receiver(post_save, sender=settings.AUTH_USER_MODEL)\ndef create_auth_token(sender, instance=None, created=False, **kwargs):\n    \"\"\"\n    Create an auth token when a user is created\n    :param sender: model 'sending' the action - USER_MODEL\n    :param instance: SENDER instance\n    :param created: bool - instance created or updated\n    
:param kwargs: key/value args\n :return: None\n \"\"\"\n if created:\n Token.objects.create(user=instance)\n" }, { "alpha_fraction": 0.7379679083824158, "alphanum_fraction": 0.7379679083824158, "avg_line_length": 22.375, "blob_id": "8939c2eae5855078577711c022f85cbfdf832b72", "content_id": "282c1a308a55335f4c892a2bd7888bb1f74ca9a3", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 374, "license_type": "permissive", "max_line_length": 59, "num_lines": 16, "path": "/orchestrator/gui/server/gui_server/orchestrator/urls/api.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.urls import include, path\n\nfrom rest_framework import routers\n\nfrom .. import views\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'registered', views.OrchestratorViewSet)\nrouter.register(r'auth', views.OrchestratorAuthViewSet)\n\nurlpatterns = [\n # Routers\n path('', views.api_root, name='orchestrator.api_root'),\n path('', include(router.urls)),\n]\n" }, { "alpha_fraction": 0.43802815675735474, "alphanum_fraction": 0.4426056444644928, "avg_line_length": 21.0155029296875, "blob_id": "dfce7d5c981d6c3b65edb394a4be02403bfa6f85", "content_id": "901fce332a241e63bd53b7b8fc558cef12d18cd5", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2840, "license_type": "permissive", "max_line_length": 64, "num_lines": 129, "path": "/logger/gui/config/base.config.babel.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import webpack from 'webpack';\nimport path from 'path';\n\nimport HtmlWebpackPlugin from 'html-webpack-plugin';\n\nconst env = 'production';\n\nconst ROOT_DIR = path.join(__dirname, '..');\nconst BUILD_DIR = path.join(ROOT_DIR, 'build');\nconst COMPONENTS_DIR = path.join(ROOT_DIR, 'src', 'components');\nconst DEPEND_DIR = path.join(COMPONENTS_DIR, 'dependencies');\n\nexport default {\n mode: env,\n devtool: 'inline-source-map',\n entry: {\n main: path.join(ROOT_DIR, 'src', 'index.js')\n },\n output: {\n path: BUILD_DIR,\n publicPath: '/',\n filename: 'js/[name].bundle.min.js'\n },\n context: ROOT_DIR,\n resolve: {\n extensions: ['.js', '.jsx', '.json'],\n modules: ['node_modules', path.join(ROOT_DIR, 'src')]\n },\n plugins: [\n new webpack.DefinePlugin({\n NODE_ENV: env\n }),\n new webpack.NamedModulesPlugin(),\n new HtmlWebpackPlugin({\n filename: 'index.html',\n template: path.join(DEPEND_DIR, 'index.html')\n })\n ],\n optimization: {\n mergeDuplicateChunks: true,\n runtimeChunk: false,\n splitChunks: {\n automaticNameDelimiter: '_',\n cacheGroups: {\n vendors: {\n test: /[\\\\/]node_modules[\\\\/]/,\n name: 'vendors',\n chunks: 'all'\n },\n utils: {\n test: /components\\/utils[\\\\/]/,\n name: 'utils',\n chunks: 'all'\n }\n }\n }\n },\n target: 'web',\n module: {\n rules: [\n {\n test: /\\.jsx?$/,\n exclude: /(node_modules|bower_components)/,\n use: {\n loader: 'babel-loader',\n options: {\n cacheDirectory: true\n }\n }\n },\n {\n test: /\\.(c|le)ss$/,\n use: [\n 'style-loader',\n {\n loader: 'css-loader',\n options: {\n url: false\n }\n },\n {\n loader: 'less-loader',\n options: {\n strictMath: true\n }\n }\n ]\n },\n {\n test: /\\.svg$/,\n loader: 
'svg-url-loader',\n options: {\n limit: 10 * 1024,\n noquotes: true,\n fallback: {\n loader: 'file-loader',\n options: {\n name: 'assets/img/[name].[ext]'\n }\n }\n }\n },\n {\n test: /\\.(jpe?g|gif|bmp|tiff|png|ico)$/,\n use: [{\n loader: 'url-loader',\n options: {\n limit: 10 * 1024,\n fallback: {\n loader: 'file-loader',\n options: {\n name: 'assets/img/[name].[ext]'\n }\n }\n }\n }]\n },\n {\n test: /\\.(ttf|eot|woff|woff2)$/,\n use: [{\n loader: 'file-loader',\n options: {\n name: 'css/fonts/[name].[ext]'\n }\n }]\n }\n ]\n }\n};\n" }, { "alpha_fraction": 0.687960684299469, "alphanum_fraction": 0.6949222087860107, "avg_line_length": 26.75, "blob_id": "8a2e15fab00b6ee0a41436eb656c5c30afa8f05f", "content_id": "55acaa40b3f36c82286c11a97bc4cabbe20ae971", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2442, "license_type": "permissive", "max_line_length": 94, "num_lines": 88, "path": "/orchestrator/core/orc_server/orchestrator/urls.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.conf import settings\nfrom django.contrib import admin\nfrom django.urls import include, path\n\n# from rest_framework import routers\nfrom rest_framework.schemas import get_schema_view\nfrom rest_framework_swagger.views import get_swagger_view\n\n# from dynamic_preferences.api.viewsets import GlobalPreferencesViewSet\n# GlobalPreferencesViewSet.pagination_class = None\n# from dynamic_preferences.users.viewsets import UserPreferencesViewSet\n\nfrom . import views\n\nadmin.site.site_title = 'OpenC2 Orchestrator Administration'\nadmin.site.site_header = 'OpenC2 Orchestrator Admin'\nadmin.site.index_title = 'OpenC2 Orchestrator'\n\n# Catch all URL\nhandler400 = views.bad_request\nhandler403 = views.permission_denied\nhandler404 = views.page_not_found\nhandler500 = views.server_error\n\n# preferenceRouter = routers.SimpleRouter()\n# preferenceRouter.register('', GlobalPreferencesViewSet, base_name='global')\n\napi_patterns = [\n # Root Info\n path('', views.api_root, name='api.root'),\n\n # Account App\n path('account/', include('account.urls')),\n\n # Actuator App\n path('actuator/', include('actuator.urls')),\n\n # Backup App\n path('backup/', include('backup.urls')),\n\n # Command App\n path('command/', include('command.urls')),\n\n # Conformance App\n path('conformance/', include('conformance.urls')),\n\n # Device App\n path('device/', include('device.urls')),\n\n # Preferences Routers\n # path('preferences/', include(preferenceRouter.urls)),\n\n # Groups Routers\n path('log/', include('tracking.urls.api')),\n\n # Schema\n path('schema/', include([\n path('', get_schema_view(title='OpenC2 Orchestrator API'), name='api.schema'),\n path('swagger/', get_swagger_view(title='OpenC2 Orchestrator API'), name='api.schema')\n ])),\n]\n\ngui_patterns = [\n # No GUI - Redirect to API\n path('', views.gui_redirect),\n\n # Account URLs - Needed for schema views by user permissions\n path('account/', include('django.contrib.auth.urls')),\n]\n\nif settings.ADMIN_GUI is True:\n # Admin GUI URLs\n gui_patterns.append(path('admin/', admin.site.urls))\nelse:\n # Admin GUI Redirect\n gui_patterns.append(path('admin/', views.gui_redirect))\n\n\nurlpatterns = [\n # API Patterns\n path('api/', include(api_patterns), name='api'),\n\n # GUI Patterns\n path('', include(gui_patterns), name='gui'),\n\n # 
Default favicon\n path('favicon.ico', views.api_favicon, name='api.favicon')\n]\n" }, { "alpha_fraction": 0.7049505114555359, "alphanum_fraction": 0.7168316841125488, "avg_line_length": 35.92683029174805, "blob_id": "30ec27449550d096c4bb3a5698039a01f5f85754", "content_id": "54126205011fdd54b9d3123dee438b31d3ede13c", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 1515, "license_type": "permissive", "max_line_length": 163, "num_lines": 41, "path": "/orchestrator/transport/https/Dockerfile", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "FROM g2inc/oif-python\nRUN apk upgrade --update && apk add --no-cache dos2unix && rm /var/cache/apk/*\n\nMAINTAINER Screaming_Bunny\n\nLABEL name=\"HTTPS Transport Module\" \\\nvendor=\"OpenC2\" \\\nlicense=\"BSD\" \\\nversion=\"2.0\" \\\ndescription=\"This is the HTTPS Transport Module container\"\n\nADD requirements.txt /tmp/requirements.txt\nADD docker_dev_start.sh /opt/transport/dev_start.sh\n\n# Copy directories\nADD HTTPS/ /opt/transport/HTTPS\n\n# Set working directory\nWORKDIR /opt/transport\n\n# Requirements install\nRUN apk update\nRUN apk add --no-cache \\\n bash \\\n openssl && \\\n pip3 install -r /tmp/requirements.txt && \\\n rm -r /root/.cache && \\\n chmod +x /opt/transport/dev_start.sh && \\\n chmod +w /opt/transport/HTTPS/certs && \\\n dos2unix /opt/transport/dev_start.sh\n\n# Create certs for flask app, not needed if using own certs\nRUN openssl genrsa -des3 -passout pass:develop -out /opt/transport/HTTPS/certs/server.pass.key 2048 && \\\n openssl rsa -passin pass:develop -in /opt/transport/HTTPS/certs/server.pass.key -out /opt/transport/HTTPS/certs/server.key && \\\n rm /opt/transport/HTTPS/certs/server.pass.key && \\\n openssl req -new -key /opt/transport/HTTPS/certs/server.key -out /opt/transport/HTTPS/certs/server.csr \\\n -subj \"/C=US/O=flask/OU=Screaming Bunny\" && \\\n openssl x509 -req -days 365 -in /opt/transport/HTTPS/certs/server.csr -signkey /opt/transport/HTTPS/certs/server.key -out /opt/transport/HTTPS/certs/server.crt\n\n# Run command when container launches\nCMD [\"./dev_start.sh\"]\n\n" }, { "alpha_fraction": 0.557986855506897, "alphanum_fraction": 0.6061269044876099, "avg_line_length": 24.38888931274414, "blob_id": "a9d492c6f192566c5aa1e15e17f152dd163bc042", "content_id": "3a5c408c91a9a07d10242a4674a00dbe97e29136", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 457, "license_type": "permissive", "max_line_length": 129, "num_lines": 18, "path": "/orchestrator/core/orc_server/command/migrations/0002_senthistory__coap_id.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2 on 2019-05-02 17:42\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('command', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='senthistory',\n name='_coap_id',\n field=models.CharField(blank=True, help_text='Unique 16-bit hex ID for CoAP', max_length=10, null=True, unique=True),\n ),\n ]\n" }, { "alpha_fraction": 0.535315990447998, "alphanum_fraction": 0.535315990447998, "avg_line_length": 
30.647058486938477, "blob_id": "a5a31a902cf87120cca4d163832fd0e50fde08dc", "content_id": "689882e973cfd7e5143b5cb5268c55c0e90327d3", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1076, "license_type": "permissive", "max_line_length": 104, "num_lines": 34, "path": "/orchestrator/core/orc_server/es_mirror/document.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.db.models import Model\nfrom elasticsearch_dsl import Document as DSL_Document, InnerDoc, Nested, Object\n\nfrom .utils import es_dict, get_nestedFields\n\n\nclass Document(DSL_Document):\n @classmethod\n def model_init(cls, model: Model = None) -> 'Document':\n fields = dict(\n _id=model.pk\n )\n for f_name, f_type, _ in cls._ObjectBase__list_fields():\n prepare = getattr(cls, f'prepare_{f_name}', None)\n if prepare:\n val = prepare(cls, model)\n else:\n val = getattr(model, f_name, None)\n if isinstance(f_type, (Nested, Object)):\n nested_fields = get_nestedFields(list(f_type._doc_class._ObjectBase__list_fields()))\n\n if isinstance(f_type, Nested):\n val = [es_dict(v, nested_fields) for v in val.all()]\n else:\n val = es_dict(val, nested_fields)\n\n fields[f_name] = val\n return cls(**fields)\n\n\n__all__ = [\n 'Document',\n 'InnerDoc'\n]\n" }, { "alpha_fraction": 0.758474588394165, "alphanum_fraction": 0.758474588394165, "avg_line_length": 18.66666603088379, "blob_id": "164d2e938fc9b659da1a1f4420a58196eb377a18", "content_id": "7321646d97cefbe5a3a2c438c16e630de7afbf6a", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 236, "license_type": "permissive", "max_line_length": 41, "num_lines": 12, "path": "/orchestrator/core/orc_server/es_mirror/apps.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\nfrom .settings import SETTINGS\nfrom .utils import ElasticHooks\n\n\nES_Hooks = ElasticHooks(**SETTINGS)\n\n\nclass EsMirrorConfig(AppConfig):\n name = 'es_mirror'\n verbose_name = 'Elasticsearch Mirror'\n" }, { "alpha_fraction": 0.7241379022598267, "alphanum_fraction": 0.7241379022598267, "avg_line_length": 15.222222328186035, "blob_id": "d9fdfd3df7971cf9082707b94212a3727fdd6e16", "content_id": "d5f2043fd64b2ba0dc11f8b1e3dfc131de8e3f47", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 145, "license_type": "permissive", "max_line_length": 41, "num_lines": 9, "path": "/orchestrator/gui/client/src/components/auth/index.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import Login from './login'\nimport Logout from './logout'\nimport PrivateRoute from './privateRoute'\n\nexport {\n Login,\n Logout,\n PrivateRoute\n}" }, { "alpha_fraction": 0.6514851450920105, "alphanum_fraction": 0.7762376070022583, "avg_line_length": 25.578947067260742, "blob_id": "28499bd660e3ac554c42fe8d3ded212abb70cb9e", "content_id": "a09514aa459b7a3115cc5376c7cbf692c462b1de", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", 
"LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 505, "license_type": "permissive", "max_line_length": 38, "num_lines": 19, "path": "/orchestrator/core/requirements.txt", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "bleach==3.1.4\ndjango==2.2.10\ndjango-cors-headers==3.2.1\ndjango-dynamic-preferences==1.8.1\ndjango-rest-swagger==2.2.0\ndjangorestframework==3.11.0\ndjangorestframework-datatables==0.5.1\ndjangorestframework-files==1.1.0\ndjangorestframework-jwt==1.11.0\ndjangorestframework-msgpack==1.0.2\ndjangorestframework-queryfields==1.0.0\ndjangorestframework-xml==1.4.0\ndrf-writable-nested==0.5.4\nelasticsearch-dsl==7.1.0\njsonfield2==4.0.0\nmysql-connector-python==8.0.19\npyexcel-xls==0.5.8\nuwsgi==2.0.18\nwhitenoise==5.0.1\n" }, { "alpha_fraction": 0.6347002983093262, "alphanum_fraction": 0.6397476196289062, "avg_line_length": 24.158729553222656, "blob_id": "c304cb3185f725c4da8bdccfae2deb583b5dd48d", "content_id": "cac70d060e307998b17543e1b472901528cb21f9", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1585, "license_type": "permissive", "max_line_length": 77, "num_lines": 63, "path": "/orchestrator/gui/server/gui_server/utils/api_test.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from orchestrator_api import OrchestratorAPI\n\norc_api = OrchestratorAPI('http://localhost:8080/api/', ws=False)\n# orc_api = OrchestratorAPI('http://localhost:8080/', ws=True)\n\njwt = orc_api.account.jwt(body=dict(username=\"admin\", password=\"password\"))\ntoken = jwt.body.get('token', '')\nopts = dict(\n headers=dict(\n Authorization=f\"JWT {token}\"\n )\n)\n\nprint('Orchestrator Root')\nrsp = orc_api.root.info(**opts)\nprint(f\"{rsp.url} - {rsp.status_code} - {rsp.body}\")\n\nrsp = orc_api.root.api(**opts, params={\"format\": \"corejson\"})\nprint(f\"{rsp.status_code} - {rsp.body}\")\nprint('')\n\nprint('Account Endpoints')\nrsp = orc_api.account.info(**opts)\nprint(f\"{rsp.url} - {rsp.status_code} - {rsp.body}\")\n\nrsp = orc_api.account.retrieve(\"admin\", **opts)\nprint(f\"{rsp.url} - {rsp.status_code} - {rsp.body}\")\n\n'''\nuser = {\n \"username\": \"test_user\",\n \"password\": \"password\",\n \"email\": \"\",\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"is_active\": True,\n \"is_staff\": False\n}\nrsp = orc_api.account.create(**opts, body=user)\nprint(f\"{rsp.status_code} - {rsp.body}\")\n\nrsp = orc_api.account.destroy(\"test_user\", **opts)\nprint(f\"{rsp.status_code} - {rsp.body}\")\n'''\nprint('')\n\nprint('Actuator Endpoints')\nrsp = orc_api.actuator.info(**opts)\nprint(f\"{rsp.url} - {rsp.status_code} - {rsp.body}\")\nprint('')\n\nprint('Command Endpoints')\nrsp = orc_api.command.info(**opts)\nprint(f\"{rsp.url} - {rsp.status_code} - {rsp.body}\")\nprint('')\n\nprint('Device Endpoints')\nrsp = orc_api.actuator.info(**opts)\nprint(f\"{rsp.url} - {rsp.status_code} - {rsp.body}\")\nprint('')\n\nprint('Log Endpoints')\nprint('')\n" }, { "alpha_fraction": 0.7178841233253479, "alphanum_fraction": 0.7178841233253479, "avg_line_length": 25.46666717529297, "blob_id": "623561aff1faabe1eb43f11b0eef10db7deff684", "content_id": "39e84df701eb2a0af213a8c2b98d182d4caa9f99", "detected_licenses": [ "Apache-2.0", 
"LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 397, "license_type": "permissive", "max_line_length": 94, "num_lines": 15, "path": "/orchestrator/core/orc_server/conformance/admin.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom .models import ConformanceTest\n\n\nclass ConformanceTestAdmin(admin.ModelAdmin):\n \"\"\"\n ConformanceTest admin\n \"\"\"\n list_display = ('test_id', 'actuator_tested', 'test_time')\n readonly_fields = ('test_id', 'actuator_tested', 'test_time', 'tests_run', 'test_results')\n\n\n# Register models\nadmin.site.register(ConformanceTest, ConformanceTestAdmin)\n" }, { "alpha_fraction": 0.4914896488189697, "alphanum_fraction": 0.5062976479530334, "avg_line_length": 34.138755798339844, "blob_id": "6d72c1665ffe1734236ab8aafe5422eb8c8edf26", "content_id": "37c51f440a8e4141f36a6744a42b825f29ec9f33", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29376, "license_type": "permissive", "max_line_length": 177, "num_lines": 836, "path": "/base/modules/utils/root/sb_utils/message/pysmile/encode.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\"\"\"\nSMILE Encode\n\"\"\"\nimport copy\nimport decimal\nimport logging\nimport struct\n\nfrom typing import (\n Callable,\n Dict,\n Type,\n Union\n)\n\nfrom . import util\nfrom .constants import (\n BYTE_MARKER_END_OF_CONTENT,\n BYTE_MARKER_END_OF_STRING,\n HEADER_BIT_HAS_RAW_BINARY,\n HEADER_BIT_HAS_SHARED_NAMES,\n HEADER_BIT_HAS_SHARED_STRING_VALUES,\n HEADER_BYTE_1,\n HEADER_BYTE_2,\n HEADER_BYTE_3,\n HEADER_BYTE_4,\n MAX_SHARED_NAMES,\n MAX_SHARED_STRING_LENGTH_BYTES,\n MAX_SHARED_STRING_VALUES,\n MAX_SHORT_NAME_ASCII_BYTES,\n MAX_SHORT_NAME_UNICODE_BYTES,\n MAX_SHORT_VALUE_STRING_BYTES,\n TOKEN_BYTE_BIG_DECIMAL,\n TOKEN_BYTE_BIG_INTEGER,\n TOKEN_BYTE_FLOAT_32,\n TOKEN_BYTE_FLOAT_64,\n TOKEN_BYTE_INT_32,\n TOKEN_BYTE_INT_64,\n TOKEN_BYTE_LONG_STRING_ASCII,\n TOKEN_KEY_EMPTY_STRING,\n TOKEN_KEY_LONG_STRING,\n TOKEN_LITERAL_EMPTY_STRING,\n TOKEN_LITERAL_END_ARRAY,\n TOKEN_LITERAL_END_OBJECT,\n TOKEN_LITERAL_FALSE,\n TOKEN_LITERAL_NULL,\n TOKEN_LITERAL_START_ARRAY,\n TOKEN_LITERAL_START_OBJECT,\n TOKEN_LITERAL_TRUE,\n TOKEN_MISC_BINARY_7BIT,\n TOKEN_MISC_BINARY_RAW,\n TOKEN_MISC_LONG_TEXT_ASCII,\n TOKEN_PREFIX_KEY_ASCII,\n TOKEN_PREFIX_KEY_SHARED_LONG,\n TOKEN_PREFIX_KEY_SHARED_SHORT,\n TOKEN_PREFIX_SHARED_STRING_LONG,\n TOKEN_PREFIX_SHARED_STRING_SHORT,\n TOKEN_PREFIX_SMALL_INT,\n TOKEN_PREFIX_TINY_ASCII\n)\n\nlog = logging.getLogger()\nif not log.handlers:\n log.addHandler(logging.NullHandler())\n\n\ndef _utf_8_encode(s):\n try:\n return s.encode(\"UTF-8\")\n except UnicodeEncodeError:\n return s\n\n\nclass SMILEEncodeError(Exception):\n pass\n\n\nclass SharedStringNode:\n \"\"\"\n Helper class used for keeping track of possibly shareable String references (for field names\n and/or short String values)\n \"\"\"\n def __init__(self, value, index, nxt):\n self.value = value\n self.index = index\n self.next = nxt\n\n\nclass SmileEncoder:\n \"\"\"\n To simplify certain operations, we require output buffer length\n to allow outputting of contiguous 256 character 
UTF-8 encoded String\n    value. Length of the longest UTF-8 code point (from Java char) is 3 bytes,\n    and we need both initial token byte and single-byte end marker\n    so we get following value.\n\n    Note: actually we could live with shorter one; absolute minimum would be for encoding\n    64-character Strings.\n    \"\"\"\n    _encoders: Dict[Type, Callable]\n\n    def __init__(self, shared_keys: bool = True, shared_values: bool = True, encode_as_7bit: bool = True):\n        \"\"\"\n        SmileEncoder Initializer\n        :param encode_as_7bit: (optional - Default: `True`) Encode raw data as 7-bit\n        :param shared_keys: (optional - Default: `True`) Shared Key String References\n        :param shared_values: (optional - Default: `True`) Shared Value String References\n        \"\"\"\n        # Encoded data\n        self.output = bytearray()\n\n        # Shared Key Strings\n        self.shared_keys = []\n\n        # Shared Value Strings\n        self.shared_values = []\n\n        self.share_keys = bool(shared_keys)\n        self.share_values = bool(shared_values)\n        self.encode_as_7bit = bool(encode_as_7bit)\n\n        # Encoder Switch\n        self._encoders = {\n            bool: lambda b: self.write_true() if b else self.write_false(),\n            dict: self._encode_dict,\n            float: self.write_number,\n            int: self.write_number,\n            list: self._encode_array,\n            str: self.write_string,\n            tuple: self._encode_array,\n            set: self._encode_array,\n            None: self.write_null\n        }\n\n    def write_header(self) -> None:\n        \"\"\"\n        Method that can be called to explicitly write Smile document header.\n        Note that usually you do not need to call this for first document to output,\n        but rather only if you intend to write multiple root-level documents\n        with same generator (and even in that case this is optional thing to do).\n        As a result usually only {@link SmileFactory} calls this method.\n        \"\"\"\n        last = HEADER_BYTE_4\n        if self.share_keys:\n            last |= HEADER_BIT_HAS_SHARED_NAMES\n        if self.share_values:\n            last |= HEADER_BIT_HAS_SHARED_STRING_VALUES\n        if not self.encode_as_7bit:\n            last |= HEADER_BIT_HAS_RAW_BINARY\n        self.write_bytes(HEADER_BYTE_1, HEADER_BYTE_2, HEADER_BYTE_3, int(last))\n\n    def write_ender(self) -> None:\n        \"\"\"\n        Write optional end marker (BYTE_MARKER_END_OF_CONTENT - 0xFF)\n        \"\"\"\n        self.write_byte(BYTE_MARKER_END_OF_CONTENT)\n\n    # Encoding writers\n    def write_7bit_binary(self, data: Union[bytes, str], offset: int = 0) -> None:\n        l = len(data)\n        self.write_positive_vint(l)\n        while l >= 7:\n            i = data[offset]\n            offset += 1\n            for x in range(1, 7):\n                self.write_byte(int((i >> x) & 0x7F))\n                # consume the next input byte in sequence\n                i = (i << 8) | (data[offset] & 0xFF)\n                offset += 1\n            self.write_bytes(int((i >> 7) & 0x7F), int(i & 0x7F))\n            l -= 7\n        # and then partial piece, if any\n        if l > 0:\n            i = data[offset]\n            offset += 1\n            self.write_byte(int(i >> 1) & 0x7F)\n            if l > 1:\n                i = ((i & 0x01) << 8) | (data[offset] & 0xFF)\n                offset += 1\n\n                # 2nd\n                self.write_byte(int(i >> 2) & 0x7F)\n                if l > 2:\n                    i = ((i & 0x03) << 8) | (data[offset] & 0xFF)\n                    offset += 1\n                    # 3rd\n                    self.write_byte(int(i >> 3) & 0x7F)\n                    if l > 3:\n                        i = ((i & 0x07) << 8) | (data[offset] & 0xFF)\n                        offset += 1\n                        # 4th\n                        self.write_byte(int(i >> 4) & 0x7F)\n                        if l > 4:\n                            i = ((i & 0x0F) << 8) | (data[offset] & 0xFF)\n                            offset += 1\n                            # 5th\n                            self.write_byte(int(i >> 5) & 0x7F)\n                            if l > 5:\n                                i = ((i & 0x1F) << 8) | (data[offset] & 0xFF)\n                                offset += 1\n                                # 6th\n                                self.write_byte(int(i >> 6) & 0x7F)\n                                self.write_byte(int(i & 0x3F))\n                                # last 6 bits\n                            else:\n                                self.write_byte(int(i & 0x1F))\n                                # last 5 bits\n                        else:\n                            self.write_byte(int(i & 0x0F))\n                            # last 4 bits\n                    else:\n                        self.write_byte(int(i & 0x07))\n                        # last 3 bits\n                else:\n                    
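# only two data bits remain at this depth of the cascade\n                    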
self.write_byte(int(i & 0x03))\n # last 2 bits\n else:\n self.write_byte(int(i & 0x01))\n # last bit\n\n def write_big_number(self, i: str) -> None:\n \"\"\"\n Write Big Number\n :param i: Big Number\n \"\"\"\n if i is None:\n self.write_null()\n else:\n self.write_byte(TOKEN_BYTE_BIG_INTEGER)\n self.write_7bit_binary(bytearray(str(i)))\n\n def write_binary(self, data: bytes) -> None:\n \"\"\"\n Write Data\n :param data: Data\n \"\"\"\n if data is None:\n self.write_null()\n return\n if self.encode_as_7bit:\n self.write_byte(TOKEN_MISC_BINARY_7BIT)\n self.write_7bit_binary(data)\n else:\n self.write_byte(TOKEN_MISC_BINARY_RAW)\n self.write_positive_vint(len(data))\n self.write_bytes(data)\n\n def write_boolean(self, state: bool) -> None:\n \"\"\"\n Write Boolean\n :param state: Bool state\n \"\"\"\n self.write_byte(state and TOKEN_LITERAL_TRUE or TOKEN_LITERAL_FALSE)\n\n def write_byte(self, c: Union[bytes, int, str]) -> None:\n \"\"\"\n Write byte\n :param c: byte\n \"\"\"\n if isinstance(c, (bytearray, bytes)):\n pass\n elif isinstance(c, str):\n c = c.encode(\"utf-8\")\n elif isinstance(c, float):\n c = str(c)\n elif isinstance(c, int):\n c = struct.pack(\"B\", c)\n else:\n raise ValueError(f\"Invalid type for param 'c' - {type(c)}!\")\n self.output.extend(c)\n\n def write_bytes(self, *args: Union[bytes, int, str]) -> None:\n \"\"\"\n Write bytes\n :param args: args\n \"\"\"\n for arg in args:\n self.write_byte(arg)\n\n def write_decimal_number(self, num: str) -> None:\n \"\"\"\n Write decimal\n :param num: String of a decimal number\n \"\"\"\n if num is None:\n self.write_null()\n else:\n self.write_number(decimal.Decimal(num))\n\n def write_end_array(self) -> None:\n \"\"\"\n Write end array token\n \"\"\"\n self.write_byte(TOKEN_LITERAL_END_ARRAY)\n\n def write_end_object(self) -> None:\n \"\"\"\n Write end object token\n \"\"\"\n self.write_byte(TOKEN_LITERAL_END_OBJECT)\n\n def write_false(self) -> None:\n \"\"\"\n Write True Value\n \"\"\"\n self.write_byte(TOKEN_LITERAL_FALSE)\n\n def write_field_name(self, name: Union[bytes, str]) -> None:\n \"\"\"\n Write Field Name\n :param name: Name\n \"\"\"\n str_len = len(name)\n if not name:\n self.write_byte(TOKEN_KEY_EMPTY_STRING)\n return\n\n # First: is it something we can share?\n if self.share_keys:\n ix = self._find_seen_name(name)\n if ix >= 0:\n self.write_shared_name_reference(ix)\n return\n\n if str_len > MAX_SHORT_NAME_UNICODE_BYTES:\n # can not be a 'short' String; off-line (rare case)\n self.write_non_short_field_name(name)\n return\n\n if str_len <= MAX_SHORT_NAME_ASCII_BYTES:\n self.write_bytes(int((TOKEN_PREFIX_KEY_ASCII - 1) + str_len), name)\n else:\n self.write_bytes(TOKEN_KEY_LONG_STRING, name, BYTE_MARKER_END_OF_STRING)\n\n if self.share_keys:\n self._add_seen_name(name)\n\n def write_integral_number(self, num: str, neg: bool = False) -> None:\n \"\"\"\n Write Int\n :param num: String of an integral number\n :param neg: Is the value negative\n \"\"\"\n if num is None:\n self.write_null()\n else:\n num_len = len(num)\n if neg:\n num_len -= 1\n # try:\n if num_len <= 18:\n self.write_number(int(num))\n else:\n self.write_big_number(num)\n\n def write_non_shared_string(self, text: Union[bytes, str]) -> None:\n \"\"\"\n Helper method called to handle cases where String value to write is known to be long\n enough not to be shareable\n :param text: Text\n \"\"\"\n if len(text) <= MAX_SHORT_VALUE_STRING_BYTES:\n self.write_bytes(int(TOKEN_PREFIX_TINY_ASCII - 1) + len(text), text)\n else:\n 
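# too long for a tiny-ASCII token: emit the type byte, the raw text, then an end-of-string marker\n            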
self.write_bytes(TOKEN_MISC_LONG_TEXT_ASCII, text, BYTE_MARKER_END_OF_STRING)\n\n def write_non_short_field_name(self, name: str) -> None:\n \"\"\"\n Write nonshort field name\n :param name: Name\n \"\"\"\n self.write_byte(TOKEN_KEY_LONG_STRING)\n try:\n utf_8_name = name.encode(\"utf-8\")\n except UnicodeEncodeError:\n utf_8_name = name\n self.write_bytes(utf_8_name)\n if self.share_keys:\n self._add_seen_name(name)\n self.write_byte(BYTE_MARKER_END_OF_STRING)\n\n def write_null(self) -> None:\n \"\"\"\n Generated source for method writeNull\n \"\"\"\n self.write_byte(TOKEN_LITERAL_NULL)\n\n def write_number(self, num: Union[int, float, str, decimal.Decimal]) -> None:\n \"\"\"\n Write Number\n :param num: number\n \"\"\"\n def w_decimal(d: Union[float, decimal.Decimal]) -> None:\n if isinstance(d, decimal.Decimal):\n self.write_byte(TOKEN_BYTE_BIG_DECIMAL)\n scale = d.as_tuple().exponent\n self.write_signed_vint(scale)\n self.write_7bit_binary(bytearray(str(d.to_integral_value())))\n else:\n try:\n d = util.float_to_bits(d)\n self.write_bytes(\n TOKEN_BYTE_FLOAT_32,\n int(d & 0x7F),\n *[(d >> 7*i) & 0x7F for i in range(1, 5)]\n )\n except struct.error:\n d = util.float_to_raw_long_bits(d)\n self.write_bytes(\n TOKEN_BYTE_FLOAT_64,\n int(d & 0x7F),\n *[(d >> 7*i) & 0x7F for i in range(1, 10)]\n )\n\n def w_int(i: int) -> None:\n # First things first: let's zigzag encode number\n i = util.zigzag_encode(i)\n if util.is_int32(i):\n # tiny (single byte) or small (type + 6-bit value) number?\n if 0x3F >= i >= 0:\n if i <= 0x1F:\n self.write_byte(int(TOKEN_PREFIX_SMALL_INT + i))\n else:\n # nope, just small, 2 bytes (type, 1-byte zigzag value) for 6 bit value\n self.write_bytes(TOKEN_BYTE_INT_32, int(0x80 + i))\n return\n # Ok: let's find minimal representation then\n b0 = int(0x80 + (i & 0x3F))\n i >>= 6\n if i <= 0x7F:\n # 13 bits is enough (== 3 byte total encoding)\n self.write_bytes(TOKEN_BYTE_INT_32, int(i), b0)\n return\n b1 = int(i & 0x7F)\n i >>= 7\n if i <= 0x7F:\n self.write_bytes(TOKEN_BYTE_INT_32, int(i), b1, b0)\n return\n b2 = int(i & 0x7F)\n i >>= 7\n if i <= 0x7F:\n self.write_bytes(TOKEN_BYTE_INT_32, int(i), b2, b1, b0)\n return\n # no, need all 5 bytes\n b3 = int(i & 0x7F)\n self.write_bytes(TOKEN_BYTE_INT_32, int(i >> 7), b3, b2, b1, b0)\n else:\n # 4 can be extracted from lower int\n b0 = int(0x80 + (i & 0x3F))\n # sign bit set in the last byte\n b1 = int((i >> 6) & 0x7F)\n b2 = int((i >> 13) & 0x7F)\n b3 = int((i >> 20) & 0x7F)\n # fifth one is split between ints:\n i = util.bsr(i, 27)\n b4 = int(i & 0x7F)\n # which may be enough?\n i = int(i >> 7)\n if i == 0:\n self.write_bytes(TOKEN_BYTE_INT_64, b4, b3, b2, b1, b0)\n return\n if i <= 0x7F:\n self.write_bytes(TOKEN_BYTE_INT_64, int(i), b4, b3, b2, b1, b0)\n return\n b5 = int(i & 0x7F)\n i >>= 7\n if i <= 0x7F:\n self.write_bytes(TOKEN_BYTE_INT_64, int(i), b5, b4, b3, b2, b1, b0)\n return\n b6 = int(i & 0x7F)\n i >>= 7\n if i <= 0x7F:\n self.write_bytes(TOKEN_BYTE_INT_64, int(i), b6, b5, b4, b3, b2, b1, b0)\n return\n b7 = int((i & 0x7F))\n i >>= 7\n if i <= 0x7F:\n self.write_bytes(TOKEN_BYTE_INT_64, int(i), b7, b6, b5, b4, b3, b2, b1, b0)\n return\n b8 = int(i & 0x7F)\n i >>= 7\n # must be done, with 10 bytes! 
(9 * 7 + 6 == 69 bits; only need 63)\n            self.write_bytes(TOKEN_BYTE_INT_64, int(i), b8, b7, b6, b5, b4, b3, b2, b1, b0)\n\n        def w_str(s: str) -> None:\n            if not s:\n                self.write_null()\n            else:\n                # note the sign before stripping it, so negative values keep their sign\n                neg = s.startswith(\"-\")\n                digits = s.lstrip(\"-\")\n                if digits.isdigit():\n                    self.write_integral_number(s, neg)\n                else:\n                    self.write_decimal_number(s)\n\n        writer = {\n            float: w_decimal,\n            int: w_int,\n            str: w_str,\n            decimal.Decimal: w_decimal\n\n        }.get(type(num), None)\n        if writer:\n            writer(num)\n\n    def write_positive_vint(self, i: int) -> None:\n        \"\"\"\n        Helper method for writing a 32-bit positive (really 31-bit then) value\n        Value is NOT zigzag encoded (since there is no sign bit to worry about)\n        :param i: Int\n        \"\"\"\n        # At most 5 bytes (4 * 7 + 6 bits == 34 bits)\n        b0 = int(0x80 + (i & 0x3F))\n        i >>= 6\n        if i <= 0x7F:\n            # 6 or 13 bits is enough (== 2 or 3 byte total encoding)\n            if i > 0:\n                self.write_byte(int(i))\n            self.write_byte(b0)\n            return\n        b1 = int(i & 0x7F)\n        i >>= 7\n        if i <= 0x7F:\n            self.write_bytes(int(i), b1, b0)\n        else:\n            b2 = int(i & 0x7F)\n            i >>= 7\n            if i <= 0x7F:\n                self.write_bytes(int(i), b2, b1, b0)\n            else:\n                b3 = int(i & 0x7F)\n                self.write_bytes(int(i >> 7), b3, b2, b1, b0)\n\n    def write_start_array(self) -> None:\n        \"\"\"\n        Write start array token\n        \"\"\"\n        self.write_byte(TOKEN_LITERAL_START_ARRAY)\n\n    def write_start_object(self) -> None:\n        \"\"\"\n        Write start object token\n        \"\"\"\n        self.write_byte(TOKEN_LITERAL_START_OBJECT)\n\n    def write_shared_name_reference(self, ix: int) -> None:\n        \"\"\"\n        Write Shared Name Ref\n        :param ix: Index\n        \"\"\"\n        if ix >= len(self.shared_keys) - 1:\n            raise ValueError(f\"Trying to write shared name with index {ix} but have only seen {len(self.shared_keys)}!\")\n        if ix < 64:\n            self.write_byte(int(TOKEN_PREFIX_KEY_SHARED_SHORT + ix))\n        else:\n            self.write_bytes(int(TOKEN_PREFIX_KEY_SHARED_LONG + (ix >> 8)), int(ix))\n\n    def write_shared_string_value_reference(self, ix: int) -> None:\n        \"\"\"\n        Write shared string\n        :param int ix: Index\n        \"\"\"\n        if ix > len(self.shared_values) - 1:\n            raise ValueError(f\"Internal error: trying to write shared String value with index {ix}; but have only seen {len(self.shared_values)} so far!\")\n        if ix < 31:\n            # add 1, as byte 0 is omitted\n            self.write_byte(TOKEN_PREFIX_SHARED_STRING_SHORT + 1 + ix)\n        else:\n            self.write_bytes(TOKEN_PREFIX_SHARED_STRING_LONG + (ix >> 8), int(ix))\n\n    def write_signed_vint(self, i: int) -> None:\n        \"\"\"\n        Helper method for writing 32-bit signed value, using\n        \"zig zag encoding\" (see protocol buffers for explanation -- basically,\n        sign bit is moved as LSB, rest of value shifted left by one)\n        coupled with basic variable length encoding\n        :param i: Signed int\n        \"\"\"\n        self.write_positive_vint(util.zigzag_encode(i))\n\n    def write_string(self, text: str) -> None:\n        \"\"\"\n        Write String\n        :param text: String text\n        \"\"\"\n        if text is None:\n            self.write_null()\n            return\n        if not text:\n            self.write_byte(TOKEN_LITERAL_EMPTY_STRING)\n            return\n        # Longer string handling off-lined\n        if len(text) > MAX_SHARED_STRING_LENGTH_BYTES:\n            self.write_non_shared_string(text)\n            return\n\n        # Then: is it something we can share?\n        if self.share_values:\n            ix = self._find_seen_string_value(text)\n            if ix >= 0:\n                self.write_shared_string_value_reference(ix)\n                return\n\n        if len(text) <= MAX_SHORT_VALUE_STRING_BYTES:\n            if self.share_values:\n                self._add_seen_string_value(text)\n            self.write_bytes(int(TOKEN_PREFIX_TINY_ASCII - 1) + len(text), text)\n        else:\n            self.write_bytes(TOKEN_BYTE_LONG_STRING_ASCII, text, 
BYTE_MARKER_END_OF_STRING)\n\n    def write_string_field(self, name: str, value: str) -> None:\n        \"\"\"\n        Write String Field\n        :param name: Name\n        :param value: Value\n        \"\"\"\n        self.write_field_name(name)\n        self.write_string(value)\n\n    def write_true(self) -> None:\n        \"\"\"\n        Write True Value\n        \"\"\"\n        self.write_byte(TOKEN_LITERAL_TRUE)\n\n    # Helper methods\n    def _add_seen_name(self, name: Union[bytes, str]) -> None:\n        # names live in a fixed-size hash-bucket table; the counter of names\n        # seen so far is created on first use\n        ref = getattr(self, 'seen_name_count', 0)\n        if not self.shared_keys:\n            self.shared_keys = [None] * MAX_SHARED_NAMES\n        elif ref == MAX_SHARED_NAMES:\n            # table is full; reset the shared-name state\n            ref = 0\n            self.shared_keys = [None] * MAX_SHARED_NAMES\n        if _is_valid_back_ref(ref):\n            ix = util.hash_string(name) & (len(self.shared_keys) - 1)\n            self.shared_keys[ix] = SharedStringNode(name, ref, self.shared_keys[ix])\n        self.seen_name_count = ref + 1\n\n    def _add_seen_string_value(self, text: str) -> None:\n        # same scheme as _add_seen_name, applied to shared String values\n        ref = getattr(self, 'seen_string_count', 0)\n        if not self.shared_values:\n            self.shared_values = [None] * MAX_SHARED_STRING_VALUES\n        elif ref == MAX_SHARED_STRING_VALUES:\n            ref = 0\n            self.shared_values = [None] * MAX_SHARED_STRING_VALUES\n        if _is_valid_back_ref(ref):\n            ix = util.hash_string(text) & (len(self.shared_values) - 1)\n            self.shared_values[ix] = SharedStringNode(text, ref, self.shared_values[ix])\n        self.seen_string_count = ref + 1\n\n    def _find_seen_name(self, name: Union[bytes, str]) -> int:\n        n_hash = util.hash_string(name)\n        try:\n            head = self.shared_keys[n_hash & (len(self.shared_keys) - 1)]\n        except IndexError:\n            return -1\n        if head is None:\n            return -1\n\n        if head.value is name:\n            return head.index\n\n        node = head\n        while node:\n            if node.value is name:\n                return node.index\n            node = node.next\n        node = head\n        while node:\n            if node.value == name and util.hash_string(node.value) == n_hash:\n                return node.index\n            node = node.next\n        return -1\n\n    def _find_seen_string_value(self, text: str) -> int:\n        hash_ = util.hash_string(text)\n        try:\n            head = self.shared_values[hash_ & (len(self.shared_values) - 1)]\n        except IndexError:\n            return -1\n        if head is None:\n            return -1\n        node = head\n        while node:\n            if node.value is text:\n                return node.index\n            node = node.next\n        node = head\n        while node:\n            if util.hash_string(node.value) == hash_ and node.value == text:\n                return node.index\n            node = node.next\n        return -1\n\n    # Actual encoding\n    def _encode_array(self, arr: Union[list, tuple, set]) -> None:\n        self.write_start_array()\n        for idx in arr:\n            self._iter_encode(idx)\n        self.write_end_array()\n\n    def _encode_dict(self, d: dict) -> None:\n        self.write_start_object()\n        for k, v in d.items():\n            if k is None:\n                k = \"null\"\n            elif isinstance(k, bool):\n                k = \"true\" if k else \"false\"\n            elif isinstance(k, int):\n                k = str(k)\n            elif isinstance(k, float):\n                k = 
k = self._floatstr(k)\n elif not isinstance(k, str):\n raise TypeError(f\"Key {k} is not a string\")\n self.write_field_name(k)\n self._iter_encode(v)\n self.write_end_object()\n\n def _floatstr(self, flt: float) -> str:\n \"\"\"\n Convert a Python float into a JSON float string\n :param float flt: Floating point number\n :returns: JSON String representation of the float\n :rtype: str\n \"\"\"\n _inf = float(\"inf\")\n if flt != flt:\n text = \"NaN\"\n elif flt == _inf:\n text = \"Infinity\"\n elif flt == -_inf:\n text = \"-Infinity\"\n else:\n return repr(flt)\n return text\n\n def _iter_encode(self, obj: Union[dict, list, set, tuple]) -> None:\n encoder = self._encoders.get(type(obj), None)\n if encoder:\n encoder(obj)\n else:\n self._iter_encode(obj)\n\n def encode(self, py_obj: Union[dict, list, set, tuple], header: bool = True, ender: bool = False) -> bytes:\n \"\"\"\n SMILE Encode object\n :param dict|list|set|tuple py_obj: The object to be encoded\n :param bool header: (optional - Default: `True`)\n :param bool ender: (optional - Default: `False`)\n :returns: SMILE encoded data\n \"\"\"\n if isinstance(py_obj, (set, tuple)):\n py_obj = list(py_obj)\n elif not isinstance(py_obj, (dict, list)):\n raise ValueError(f\"Invalid type for 'obj' paramater. Must be one of dict, list, set, or tuple; given {type(py_obj)}\")\n\n if header:\n self.write_header()\n\n self._iter_encode(py_obj)\n\n if ender:\n self.write_ender()\n return bytes(self.output)\n\n @classmethod\n def encode_obj(cls, py_obj: Union[list, dict], header: bool = True, ender: bool = False, shared_keys: bool = True, shared_vals: bool = True, bin_7bit: bool = True) -> bytes:\n \"\"\"\n SMILE Encode object\n :param list|dict py_obj: The object to be encoded\n :param bool header: (optional - Default: `True`)\n :param bool ender: (optional - Default: `False`)\n :param bool bin_7bit: (optional - Default: `True`) Encode raw data as 7-bit\n :param bool shared_keys: (optional - Default: `True`) Shared Key String References\n :param bool shared_vals: (optional - Default: `True`) Shared Value String References\n :returns: SMILE encoded data\n \"\"\"\n if isinstance(py_obj, (tuple, set)):\n py_obj = list(py_obj)\n elif not isinstance(py_obj, (list, dict)):\n raise ValueError(\"Invalid type for \\\"obj\\\" paramater. 
Must be one of dict, list, set, or tuple\")\n\n enc_obj = cls(shared_keys, shared_vals, bin_7bit)\n return enc_obj.encode(py_obj, header, ender)\n\n\ndef _is_valid_back_ref(index):\n \"\"\"\n Helper method used to ensure that we do not use back-reference values\n that would produce illegal byte sequences (ones with byte 0xFE or 0xFF).\n Note that we do not try to avoid null byte (0x00) by default, although\n it would be technically possible as well.\n :param int index: Index\n :returns: Valid back ref\n :rtype: bool\n \"\"\"\n return (index & 0xFF) < 0xFE\n\n\ndef encode(py_obj: Union[list, dict], header: bool = True, ender: bool = False, shared_keys: bool = True, shared_vals: bool = True, bin_7bit: bool = True) -> bytes:\n \"\"\"\n SMILE Encode object\n :param dict|list|set|tuple py_obj: The object to be encoded\n :param bool header: (optional - Default: `True`)\n :param bool ender: (optional - Default: `False`)\n :param bool bin_7bit: (optional - Default: `True`) Encode raw data as 7-bit\n :param bool shared_keys: (optional - Default: `True`) Shared Key String References\n :param bool shared_vals: (optional - Default: `True`) Shared Value String References\n :returns: SMILE encoded data\n \"\"\"\n return SmileEncoder.encode_obj(py_obj, header, ender, shared_keys, shared_vals, bin_7bit)\n" }, { "alpha_fraction": 0.6028955578804016, "alphanum_fraction": 0.6028955578804016, "avg_line_length": 25.86111068725586, "blob_id": "cd79107eef4d8a5367b5d07f40e387e2350de366", "content_id": "1f3df1255988c6ba9f140ff284aa6cdf48c08acf", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1934, "license_type": "permissive", "max_line_length": 84, "num_lines": 72, "path": "/orchestrator/gui/client/config/prod.config.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "const generalConfig = require('./general.config')\nconst webpack = require('webpack')\nconst merge = require('webpack-merge')\nconst path = require('path')\nconst { CleanWebpackPlugin } = require('clean-webpack-plugin')\nconst TerserPlugin = require('terser-webpack-plugin')\nconst OptimizeCSSAssetsPlugin = require('optimize-css-assets-webpack-plugin')\nconst LodashModuleReplacementPlugin = require('lodash-webpack-plugin')\nconst BundleAnalyzerPlugin = require('webpack-bundle-analyzer').BundleAnalyzerPlugin\n\nenv = 'production'\nconsole.log('NODE_ENV: ' + env)\n\nconst ROOT_DIR = path.join(__dirname, '..')\nconst BUILD_DIR = path.join(ROOT_DIR, 'build')\n\nmodule.exports = merge.smart(generalConfig, {\n mode: env,\n devtool: 'source-map',\n cache: false,\n resolve: {\n alias: {\n 'jquery': 'jquery-min', // TODO: Verify the jquery-min version\n \"react\": \"preact/compat\",\n \"react-dom/test-utils\": \"preact/test-utils\",\n \"react-dom\": \"preact/compat\",\n }\n },\n plugins: [\n new webpack.DefinePlugin({\n NODE_ENV: env\n }),\n new CleanWebpackPlugin({\n dry: false\n }),\n new BundleAnalyzerPlugin({\n analyzerMode: 'static',\n generateStatsFile: true,\n openAnalyzer: false,\n statsFilename: path.join(ROOT_DIR, 'analyzer.stats.json'),\n reportFilename: path.join(ROOT_DIR, 'analyzer.stats.html'),\n }),\n new LodashModuleReplacementPlugin()\n ],\n optimization: {\n minimizer: [\n new TerserPlugin({\n cache: true,\n sourceMap: false,\n parallel: true,\n terserOptions: {\n output: {\n comments: false\n }\n }\n }),\n new 
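/* second minimizer: optimizes emitted CSS assets; comment stripping is configured just below */ 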
OptimizeCSSAssetsPlugin({\n cssProcessorPluginOptions: {\n preset: [\n 'default',\n {\n discardComments: {\n removeAll: true\n }\n }\n ]\n },\n canPrint: true\n })\n ]\n }\n});\n" }, { "alpha_fraction": 0.6679999828338623, "alphanum_fraction": 0.6679999828338623, "avg_line_length": 16.85714340209961, "blob_id": "ffb508ae766e2e436d90855d3ee1ecde7ef4e018", "content_id": "607d3a7db94ea51e97a1fd309df3f6059dcde85b", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 250, "license_type": "permissive", "max_line_length": 52, "num_lines": 14, "path": "/base/modules/utils/twisted/sb_utils/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "\"\"\"\nScreaming Bunny Utils\nTwisted namespace\n\"\"\"\nfrom pkgutil import extend_path\n__path__ = extend_path(__path__, __name__)\n\nfrom .twisted_tools import PikaFactory, PikaProtocol\n\n__all__ = [\n # Twisted Tools\n 'PikaFactory',\n 'PikaProtocol'\n]\n" }, { "alpha_fraction": 0.7693474888801575, "alphanum_fraction": 0.7754173278808594, "avg_line_length": 49.769229888916016, "blob_id": "1d9e0427ad082aface050ca9e787b167255f7c06", "content_id": "12f16a146975fcfe8152c2286601b3002c65c404", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 659, "license_type": "permissive", "max_line_length": 127, "num_lines": 13, "path": "/docs/Compose.md", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# Docker Compose\n- Compose is a tool for defining and running multi-container Docker applications\n- For more information that available in this readme see the [Docker Compose Docs](https://docs.docker.com/compose/)\n\n## Overview\n- Three basic steps:\n\t1. Create or modify available containers so that they are easily repoduced\n\t\t- All available conainers are on the [Docker Hub](https://hub.docker.com/explore/)\n\t2. Define the services (containers from previous step) in the compose file (orchestrator-compose.yaml and device-compose.yaml)\n\t3. 
Start the compose with `docker-compose up` to start and run the app's containers\n\n## Compose v3 File Reference\n- TBD" }, { "alpha_fraction": 0.5784735679626465, "alphanum_fraction": 0.5827788710594177, "avg_line_length": 25.340206146240234, "blob_id": "c25442ad0674ce7b8e1def09b80067bd0a8dcc1a", "content_id": "575125ae78d53e7a113567473c9540491df14bf9", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2555, "license_type": "permissive", "max_line_length": 131, "num_lines": 97, "path": "/orchestrator/gui/client/src/components/command/pages/generate/lib/json_field/choice.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\n\nimport {\n Button,\n Form,\n FormGroup,\n FormText,\n Input,\n Label,\n} from 'reactstrap'\n\nimport {\n isOptional_json,\n Field\n} from './'\n\nimport * as GenActions from '../../../../../../actions/generate'\n\n\nclass ChoiceField extends Component {\n constructor(props, context) {\n super(props, context)\n\n this.handleChange = this.handleChange.bind(this)\n\n this.state = {\n selected: \"\"\n }\n }\n\n handleChange(e) {\n this.setState({\n selected: e.target.value\n }, () => {\n if (this.state.selected == -1) {\n this.props.optChange(this.props.def[1], undefined)\n }\n })\n }\n\n render() {\n parent = \"\"\n if (this.props.parent) {\n parent = [this.props.parent, this.props.name].join('.')\n } else if (this.props.name.match(/^[a-z]/)) {\n parent = this.props.name\n }\n\n let def_opts = []\n if (this.props.def.hasOwnProperty(\"properties\")) {\n Object.keys(this.props.def.properties).forEach((field, i) => {\n let def = this.props.def.properties[field]\n def_opts.push(<option key={ i } data-subtext={ def.desc || \"\" } value={ field }>{ field }</option>)\n })\n }\n\n if (this.props.def.hasOwnProperty(\"patternProperties\")) {\n // TODO: Pattern Properties\n console.log(\"Choice Pattern Props\", this.props.def.patternProperties)\n }\n\n let selectedDef = \"\"\n if (this.state.selected) {\n selectedDef = <Field\n name={ this.state.selected }\n parent={ parent }\n def={ this.props.def.properties[this.state.selected] || {} }\n required\n optChange={ this.props.optChange }\n />\n }\n\n return (\n <FormGroup tag=\"fieldset\" className=\"border border-dark p-2\">\n <legend>{ (isOptional_json(this.props.def) ? '' : '*') + this.props.name }</legend>\n { this.props.def.description ? 
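/* show the schema-supplied description, when present, as muted helper text */ 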
<FormText color=\"muted\">{ this.props.def.description }</FormText> : '' }\n <div className=\"col-12 my-1 px-0\">\n <Input type=\"select\" name={ name } title={ name } className=\"selectpicker\" onChange={ this.handleChange } default={ -1 }>\n <option data-subtext={ name + ' options' } value={ \"\" }>{ name } options</option>\n { def_opts }\n </Input>\n\n <div className=\"col-12 py-2\">\n { selectedDef }\n </div>\n </div>\n </FormGroup>\n )\n }\n}\n\nconst mapStateToProps = (state) => ({\n schema: state.Generate.selectedSchema\n})\n\nexport default connect(mapStateToProps)(ChoiceField)\n" }, { "alpha_fraction": 0.554817259311676, "alphanum_fraction": 0.554817259311676, "avg_line_length": 11.583333015441895, "blob_id": "1286d10c3390afe0e3354aed88f0c28f46e684f3", "content_id": "f90c73e1fb4912aa6c72e3bfbc75978e2aafa76b", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 301, "license_type": "permissive", "max_line_length": 26, "num_lines": 24, "path": "/orchestrator/gui/client/src/components/utils/theme-switcher/themes.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "let validThemes = [\n\t'cerulean',\n\t 'cosmo',\n\t 'cyborg',\n\t 'darkly',\n\t 'flatly',\n\t 'journal',\n\t 'litera',\n\t 'lumen',\n\t 'lux',\n\t 'materia',\n\t 'minty',\n\t 'pulse',\n\t 'sandstone',\n\t 'simplex',\n\t 'sketchy',\n\t 'slate',\n\t 'solar',\n\t 'spacelab',\n\t 'superhero',\n\t 'united',\n\t 'yeti'\n]\nexport default validThemes" }, { "alpha_fraction": 0.5409286618232727, "alphanum_fraction": 0.5619913935661316, "avg_line_length": 31.13846206665039, "blob_id": "d8f0560cc9e0571e9e60e854571f6066fcdb8979", "content_id": "0a3e09a34329ee22fa80972bd2abf8802cacc9d7", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2089, "license_type": "permissive", "max_line_length": 121, "num_lines": 65, "path": "/orchestrator/transport/https/HTTPS/main.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import re\n\nfrom datetime import datetime\nfrom flask import Flask, request, make_response\nfrom sb_utils import Producer, decode_msg, encode_msg, default_encode, safe_json\n\napp = Flask(__name__)\n\n\[email protected](\"/\", methods=[\"POST\"])\ndef result():\n encode = re.search(r\"(?<=\\+)(.*?)(?=\\;)\", request.headers[\"Content-type\"]).group(1) # message encoding\n corr_id = request.headers[\"X-Request-ID\"] # correlation ID\n status = request.headers['Status']\n\n profile, device_socket = request.headers[\"From\"].rsplit(\"@\", 1)\n # profile used, device IP:port\n\n data = safe_json({\n \"headers\": dict(request.headers),\n \"content\": safe_json(request.data.decode('utf-8'))\n })\n print(f\"Received {status} response from {profile}@{device_socket} - {data}\")\n print(\"Writing to buffer.\")\n producer = Producer()\n producer.publish(\n message=decode_msg(request.data, encode), # message being decoded\n headers={\n \"socket\": device_socket,\n \"correlationID\": corr_id,\n \"profile\": profile,\n \"encoding\": encode,\n \"transport\": \"https\"\n },\n exchange=\"orchestrator\",\n routing_key=\"response\"\n )\n\n return make_response(\n # Body\n encode_msg({\n \"status\": 200,\n 
\"status_text\": \"received\"\n }, encode),\n # Status Code\n 200,\n # Headers\n {\n \"Content-type\": f\"application/openc2-rsp+{encode};version=1.0\",\n \"Status\": 200, # Numeric status code supplied by Actuator's OpenC2-Response\n \"X-Request-ID\": corr_id,\n \"Date\": f\"{datetime.utcnow():%a, %d %b %Y %H:%M:%S GMT}\", # RFC7231-7.1.1.1 -> Sun, 06 Nov 1994 08:49:37 GMT\n # \"From\": f\"{profile}@{device_socket}\",\n # \"Host\": f\"{orc_id}@{orc_socket}\",\n }\n )\n\n\nif __name__ == \"__main__\":\n ssl = (\n \"/opt/transport/HTTPS/certs/server.crt\", # Cert Path\n \"/opt/transport/HTTPS/certs/server.key\" # Key Path\n )\n\n app.run(ssl_context=ssl, host=\"0.0.0.0\", port=5000, debug=False)\n" }, { "alpha_fraction": 0.558474063873291, "alphanum_fraction": 0.560350239276886, "avg_line_length": 28.429447174072266, "blob_id": "50fce5d706848e5778ecb5c7da9fe68488848afe", "content_id": "af9bd1cf580efdf858801f1e33749ed177d0c117", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4797, "license_type": "permissive", "max_line_length": 145, "num_lines": 163, "path": "/orchestrator/gui/client/src/components/command/pages/generate/lib/json_field/array.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\nimport { toast } from 'react-toastify'\n\nimport {\n Button,\n Form,\n FormGroup,\n FormText,\n Input,\n Label,\n} from 'reactstrap'\n\nimport { FontAwesomeIcon } from '@fortawesome/react-fontawesome'\nimport { faMinusSquare, faPlusSquare } from '@fortawesome/free-solid-svg-icons'\n\nimport {\n isOptional_json,\n Field\n} from './'\n\nimport { safeGet } from '../../../../../utils'\n\nimport * as GenActions from '../../../../../../actions/generate'\n\n\nclass ArrayField extends Component {\n constructor(props, context) {\n super(props, context)\n this.parent = this.props.name\n if (this.props.parent) {\n this.parent = [this.props.parent, this.props.name].join('.')\n } else if (this.props.name.match(/^[a-z]/)) {\n this.parent = this.props.name\n }\n\n this.msgName = (this.props.parent ? [this.props.parent, this.props.name] : [this.props.name]).join('.')\n\n this.opts = {\n min: this.props.def.minItems || 0,\n max: this.props.def.maxItems || 100\n }\n\n this.state = {\n min: false,\n max: false,\n count: 1,\n opts: {}\n }\n }\n\n addOpt(e) {\n e.preventDefault()\n let max = this.opts.max\n\n this.setState((prevState) => {\n let max_bool = prevState.count < max\n return {\n count: max_bool ? ++prevState.count : prevState.count,\n max: !max_bool\n }\n }, () => {\n this.props.optChange(this.parent, Array.from(new Set(Object.values(this.state.opts))))\n if (this.state.max) {\n toast(<div><p>Warning:</p><p>Cannot have more than { this.opts.max } items for { this.props.name }</p></div>, {type: toast.TYPE.WARNING})\n }\n })\n }\n\n removeOpt(e) {\n e.preventDefault()\n let min = this.opts.min\n\n this.setState((prevState) => {\n let min_bool = prevState.count > min\n let opts = prevState.opts\n if (min_bool) {\n delete opts[Math.max.apply(Math, Object.keys(opts))]\n }\n\n return {\n count: min_bool ? 
--prevState.count : prevState.count,\n min: !min_bool,\n opts: opts\n }\n }, () => {\n this.props.optChange(this.parent, Array.from(new Set(Object.values(this.state.opts))))\n if (this.state.min) {\n toast(<div><p>Warning:</p><p>Cannot have less than { this.opts.min } items for { this.props.name }</p></div>, {type: toast.TYPE.WARNING})\n }\n })\n }\n\n optChange(k, v, i) {\n this.setState((prevState) => {\n return {\n opts: {\n ...prevState.opts,\n [i]: v\n }\n }\n }, () => {\n this.props.optChange(this.parent, Array.from(new Set(Object.values(this.state.opts))))\n })\n }\n\n render() {\n this.desc = safeGet(this.props.def, \"description\", \"\")\n\n let fields = []\n for (let i=0; i < this.state.count; ++i) {\n if (Array.isArray(this.props.def.items)) {\n fields.push(this.props.def.items.map(field => {\n let name = field.hasOwnProperty(\"$ref\") ? field[\"$ref\"].replace(/^#\\/definitions\\//, \"\") : \"\"\n return <Field key={ i } name={ name } parent={ this.parent } def={ field } optChange={ this.optChange.bind(this) } idx={ i } />\n }))\n } else {\n let name = \"Field\"\n let ref = {}\n\n if (this.props.def.items.hasOwnProperty(\"$ref\")) {\n name = this.props.def.items[\"$ref\"].replace(\"#/definitions/\", \"\")\n ref = safeGet(safeGet(this.props.schema, \"definitions\", {}), name, {})\n } else if (this.props.def.items.hasOwnProperty(\"type\")) {\n ref = { ...this.props.def.items }\n }\n fields.push(<Field key={ i } name={ name } parent={ this.parent } def={ ref } optChange={ this.optChange.bind(this) } idx={ i } />)\n }\n }\n\n return (\n <FormGroup tag=\"fieldset\" className=\"border border-dark p-2\">\n <legend>\n { (isOptional_json(this.props.def.req, this.props.name) ? '' : '*') + this.props.name }\n <Button\n color=\"danger\"\n className={ 'float-right p-1' + (this.state.min ? ' disabled' : '') }\n onClick={ this.removeOpt.bind(this) }\n >\n <FontAwesomeIcon icon={ faMinusSquare } size=\"lg\"/>\n </Button>\n <Button\n color=\"primary\"\n className={ 'float-right p-1' + (this.state.max ? ' disabled' : '') }\n onClick={ this.addOpt.bind(this) }\n >\n <FontAwesomeIcon icon={ faPlusSquare } size=\"lg\"/>\n </Button>\n </legend>\n { this.desc ? 
<FormText color=\"muted\">{ this.desc }</FormText> : '' }\n { fields }\n </FormGroup>\n )\n }\n}\n\nconst mapStateToProps = (state) => ({\n schema: state.Generate.selectedSchema,\n baseTypes: state.Generate.types.base\n})\n\n\nexport default connect(mapStateToProps)(ArrayField)\n" }, { "alpha_fraction": 0.6175861954689026, "alphanum_fraction": 0.6210345029830933, "avg_line_length": 35.25, "blob_id": "db4e3e1d8a71e4cb48c292a988213d67c6e22613", "content_id": "27f4f50d60e100fea038fd1eba02875a40f9a78a", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2900, "license_type": "permissive", "max_line_length": 116, "num_lines": 80, "path": "/orchestrator/core/orc_server/utils/messageQueue.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "\"\"\"\nCombination of AMQP Consumer/Producer as class for easier access within the Orchestrator code\n\"\"\"\nfrom sb_utils import safe_cast, Consumer, FrozenDict, Producer\n\n\nclass MessageQueue:\n _auth = FrozenDict({\n 'username': 'guest',\n 'password': 'guest'\n })\n _exchange = 'orchestrator'\n _consumerKey = 'response'\n _producerExchange = 'transport'\n\n def __init__(self, hostname='127.0.0.1', port=5672, auth=_auth, exchange=_exchange,\n consumer_key=_consumerKey, producer_exchange=_producerExchange, callbacks=None):\n \"\"\"\n Message Queue - holds a consumer class and producer class for ease of use\n :param hostname: server ip/hostname to connect\n :param port: port the AMQP Queue is listening\n :param exchange: name of the default exchange\n :param consumer_key: key to consumer\n :param producer_exchange: ...\n :param callbacks: list of functions to call on message receive\n \"\"\"\n self._exchange = exchange if isinstance(exchange, str) else self._exchange\n self._consumerKey = consumer_key if isinstance(consumer_key, str) else self._consumerKey\n self._producerExchange = producer_exchange if isinstance(producer_exchange, str) else self._producerExchange\n\n self._publish_opts = dict(\n host=hostname,\n port=safe_cast(port, int)\n )\n\n self._consume_opts = dict(\n host=hostname,\n port=safe_cast(port, int),\n exchange=self._exchange,\n routing_key=self._consumerKey,\n callbacks=callbacks\n )\n\n self.producer = Producer(**self._publish_opts)\n self.consumer = Consumer(**self._consume_opts)\n\n def send(self, msg, headers, exchange=_producerExchange, routing_key=None):\n \"\"\"\n Publish a message to the specified que and transport\n :param msg: message to be published\n :param headers: header information for the message being sent\n :param exchange: exchange name\n :param routing_key: routing key name\n :return: None\n \"\"\"\n headers = headers or {}\n exchange = exchange if exchange == self._producerExchange else self._producerExchange\n if routing_key is None:\n raise ValueError('Routing Key cannot be None')\n self.producer.publish(\n message=msg,\n headers=headers,\n exchange=exchange,\n routing_key=routing_key\n )\n\n def register_callback(self, fun):\n \"\"\"\n Register a function for when a message is received from the message queue\n :param fun: function to register\n :return: None\n \"\"\"\n self.consumer.add_callback(fun)\n\n def shutdown(self):\n \"\"\"\n Shutdown the connection to the queue\n \"\"\"\n self.consumer.shutdown()\n self.consumer.join()\n" }, { "alpha_fraction": 0.7069892287254333, "alphanum_fraction": 
0.7069892287254333, "avg_line_length": 22.25, "blob_id": "b909f2bd645821893dfdc572a2eec4d379a480ce", "content_id": "03ff3c45d9bcaad42c3907e937f7886ca740951d", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 372, "license_type": "permissive", "max_line_length": 58, "num_lines": 16, "path": "/orchestrator/gui/server/gui_server/webApp/routing.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from channels.auth import AuthMiddlewareStack\nfrom channels.routing import ProtocolTypeRouter, URLRouter\n\nfrom django.urls import path\n\nfrom .sockets import SocketConsumer\n\n\napplication = ProtocolTypeRouter({\n # (http->django views is added by default)\n 'websocket': AuthMiddlewareStack(\n URLRouter([\n path('', SocketConsumer),\n ])\n )\n})\n" }, { "alpha_fraction": 0.698924720287323, "alphanum_fraction": 0.698924720287323, "avg_line_length": 12.428571701049805, "blob_id": "9f9a2dbac4ceb3bcbce2e8c5393013e6a3d36079", "content_id": "d7fbea6b69c09f93172bec6ffb42eec242618806", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 93, "license_type": "permissive", "max_line_length": 33, "num_lines": 7, "path": "/orchestrator/gui/client/src/components/admin/pages/index.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import Settings from './settings'\nimport Users from './users'\n\nexport {\n Settings,\n Users\n}" }, { "alpha_fraction": 0.6268221735954285, "alphanum_fraction": 0.6268221735954285, "avg_line_length": 20.4375, "blob_id": "cf5198f536db9f7bdda1623da0935544637cb289", "content_id": "826d368f0a514e50c408f623f5d274e21eee0ee2", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 343, "license_type": "permissive", "max_line_length": 82, "num_lines": 16, "path": "/orchestrator/core/orc_server/orchestrator/views/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from .api import api_favicon, api_root\nfrom .gui import gui_redirect\nfrom .handlers import bad_request, page_not_found, permission_denied, server_error\n\n__all__ = [\n # API\n 'api_favicon',\n 'api_root',\n # GUI\n 'gui_redirect',\n # Handlers\n 'bad_request',\n 'page_not_found',\n 'permission_denied',\n 'server_error',\n]\n" }, { "alpha_fraction": 0.738095223903656, "alphanum_fraction": 0.7827380895614624, "avg_line_length": 23, "blob_id": "037ce6c82787dd5e4f72533bba335f247a019a92", "content_id": "2d481a0c5195bc66f3f011314b5b345db3f15310", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 336, "license_type": "permissive", "max_line_length": 101, "num_lines": 14, "path": "/orchestrator/gui/server/gui_server/dev_start.sh", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\ndockerize -wait tcp://$DATABASE_HOST:$DATABASE_PORT 
-wait tcp://$QUEUE_HOST:$QUEUE_PORT -timeout 30s\n\necho 'Initializing Database'\n\npython3 manage.py makemigrations_apps\n\npython3 manage.py migrate\n\npython3 manage.py loaddata_apps\n\npython3 manage.py createsuperuser_default\n\npython3 manage.py runserver 0.0.0.0:8081\n" }, { "alpha_fraction": 0.7141139507293701, "alphanum_fraction": 0.7424861788749695, "avg_line_length": 55.17567443847656, "blob_id": "aae77b7ab59293d34ad4576828d06a44722d58d7", "content_id": "08d41e083ab1def207fc91d99b922ad91a50e68a", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4159, "license_type": "permissive", "max_line_length": 306, "num_lines": 74, "path": "/orchestrator/transport/mqtt/ReadMe.md", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# OpenC2 MQTT Transport\nImplements MQTT utilizing [Paho MQTT](https://www.eclipse.org/paho/clients/python/docs/).\n\n## Running Transport\nThe MQTT Transport Module is configured to run from a docker container as a part of the OIF-Orchestrator docker stack. Use the [configure.py](../../../configure.py) script to build the images needed to run the entirety of this Transport as a part of the Orchestrator.\n\n## MQTT and OpenC2 Headers\n\nAt the time of writing this the OpenC2 MQTT Transport spec has not been finalized. The headers are meant to line up as closely with OpenC2 guidelines as closely as MQTT v3.11 can.\n\nThe payload of the message in MQTT is split into two parts, the header and the OpenC2 command itself. Here is an example of what that looks like:\n\n```json\n\"payload\": {\n \"header\": {\n \"to\":\"[email protected]:1883\",\n \"from\":\"[email protected]:1883\",\n \"content_type\":\"application/openc2-cmd+json;version=1.0\",\n \"correlationID\":\"a6b10d16-5537-41c9-9773-f69d17920600\",\n \"created\":\"Wed, 22 May 2019 16:12:23 UTC\",\n },\n \"body\": {\n \"action\": \"locate\",\n \"target\": {\n \"isr\": {\n \"signal\": {\n \"frequency\": \"92.3\"\n }\n }\n }\n }\n}\n```\n\nHeader descriptions:\n\n* `to`: Actuator profile name + the location of the MQTT broker. The transport on the device side uses this to route the message to the proper actuator\n* `from`: orchestratorID + the location of the MQTT broker for return sending. The Orchestrator-side transport is listening on a topic using the orchestratorID for responses.\n* `content_type`: The content_type of the message, contains the encoding type.\n* `correlationID`: Identifier for this specific command being sent. Needed for orchestrator to relate repsonse with original command.\n* `created`: Timestamp for when the message was initially created by the orchestrator.\n\n\nThe body is the content of the OpenC2 Command/Response.\n\n## MQTT Topics\n\nThe MQTT transport is subscribed to a [topic](https://www.hivemq.com/blog/mqtt-essentials-part-5-mqtt-topics-best-practices) that is related to the actuator that the OpenC2 message should be routed to. The current convention is topic=actuatorProfileName (eg. openc2_isr_actuator_profile).\n\nThe environment variable `MQTT_TOPICS` is a string of comma-separated topics (lists are unsupported) that can be appended to when new actuators are added. 
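As a rough sketch only (Paho, `MQTT_HOST`, `MQTT_PORT`, and `MQTT_TOPICS` are named in this README, but the splitting logic and defaults below are assumptions, not the transport's actual source), consuming the variable might look like:\n\n```python\n# illustrative sketch -- not the transport's real code\nimport os\n\nimport paho.mqtt.client as mqtt\n\nclient = mqtt.Client()\nclient.connect(os.environ.get('MQTT_HOST', 'localhost'), int(os.environ.get('MQTT_PORT', '1883')))\n\n# MQTT_TOPICS is one comma-separated string, not a list\nfor topic in (t.strip() for t in os.environ.get('MQTT_TOPICS', '').split(',')):\n    if topic:\n        client.subscribe(topic)\n\nclient.loop_forever()\n```\n\n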
The `MQTT_TOPICS` variable is preset to contain the topics relating to the included default actuator(s).\n\n## Broker Location\n\nIf running the OIF as the Orchestrator and Device on a single machine, the RabbitMQ MQTT Broker will build as a part of the device stack. Otherwise, the RabbitMQ MQTT Broker host should be specified as the environment variable `MQTT_HOST` which should contain the ip or hostname of the desired MQTT Broker.\n\n## Ports\n\nDefault port for [RabbitMQ MQTT](https://www.rabbitmq.com/mqtt.html) Broker is `1883` or `8883` if TLS is activated for RabbitMQ MQTT. This can be modified through the `MQTT_PORT` environment variable (default 1883)\n\nRead/Writes to an internal RabbitMQ AMQP Broker at default port `5672`. Note that the internal buffer can not be accessed outside of the docker network created during docker-compose. \n\nAll ports can be edited under the Docker Compose file under the queue port options.\n\n## Adding certificates for TLS\n\nTo enable TLS set the environment variable `MQTT_TLS_ENABLE` to `1`\n\nTo indicate the use of self-signed certificates (not for production use) set the environment variable `MQTT_TLS_SELF_SIGNED` to `1`. For self-signed certificates, RabbitMQ recommends [tls-gen](https://github.com/michaelklishin/tls-gen).\n\nThe cert files needed to activate TLS are specified as environment variables: `MQTT_CAFILE`, `MQTT_CLIENT_CERT`, `MQTT_CLIENT_KEY`\n\nIf your broker is configured to require a username and password, use environment variables: `MQTT_DEFAULT_USERNAME`, `MQTT_DEFAULT_PASS`\n\nTo add the certificates uncomment the line in the Dockerfile `ADD certs /opt/transport/MQTT/certs` where `ADD <source> <dest>`\n\n\n" }, { "alpha_fraction": 0.5812527537345886, "alphanum_fraction": 0.5830048322677612, "avg_line_length": 30.27397346496582, "blob_id": "dc5b4f303e2839c883f6209bc6f64f0ed8d69095", "content_id": "3c9b098d4b66af02f4aae09e32911c708a718d8c", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2283, "license_type": "permissive", "max_line_length": 109, "num_lines": 73, "path": "/orchestrator/core/orc_server/account/views/apiviews.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import bleach\nimport coreapi\nimport coreschema\n\nfrom django.contrib.auth.models import Group, User\nfrom rest_framework import permissions\nfrom rest_framework.exceptions import ParseError\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\n# Local imports\nimport utils\nfrom actuator.models import ActuatorGroup\n\n\nclass ActuatorAccess(APIView):\n \"\"\"\n API endpoint that allows users actuator access to be viewed or edited.\n \"\"\"\n permission_classes = (permissions.IsAdminUser, )\n\n schema = utils.OrcSchema(\n manual_fields=[\n coreapi.Field(\n \"username\",\n required=True,\n location=\"path\",\n schema=coreschema.String(\n description='Username to list the accessible actuators'\n )\n )\n ],\n put_fields=[\n coreapi.Field(\n \"actuators\",\n required=True,\n location=\"body\",\n schema=coreschema.Array(\n items=coreschema.String(),\n min_items=1,\n unique_items=True\n )\n )\n ]\n )\n\n def get(self, request, username, *args, **kwargs): # pylint: disable=unused-argument\n \"\"\"\n API endpoint that lists the actuators a users can access\n \"\"\"\n username = bleach.clean(username)\n rtn = 
[g.name for g in ActuatorGroup.objects.filter(users__in=[User.objects.get(username=username)])]\n return Response(rtn)\n\n def put(self, request, username, *args, **kwargs): # pylint: disable=unused-argument\n \"\"\"\n API endpoint that adds actuators to a users access\n \"\"\"\n username = bleach.clean(username)\n user = User.objects.get(username=username)\n if user is None:\n return ParseError(detail='User cannot be found', code=404)\n\n rtn = []\n for actuator in request.data.get('actuators', []):\n actuator = bleach.clean(actuator)\n\n group = Group.objects.exclude(actuatorgroup__isnull=True).filter(name=actuator).first()\n if group:\n rtn.append(group.name)\n user.groups.add(group)\n\n return Response(rtn)\n" }, { "alpha_fraction": 0.5911994576454163, "alphanum_fraction": 0.6139105558395386, "avg_line_length": 22.11475372314453, "blob_id": "5336f19054dc61062a78f0ae1777d1dcce4f2db8", "content_id": "e6501d353d8c595a305c3758e0398bbbba47225c", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1409, "license_type": "permissive", "max_line_length": 124, "num_lines": 61, "path": "/orchestrator/gui/server/docker-entrypoint.sh", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\nset -e\n\ndockerize -wait tcp://$DATABASE_HOST:$DATABASE_PORT -wait tcp://$QUEUE_HOST:$QUEUE_PORT -timeout 30s\n\nmigrate() {\n init=0\n case $1 in\n -i) init=1; shift;;\n --init) init=1; shift;;\n esac\n\n echo 'Initializing Database'\n\n python3 manage.py makemigrations --noinput\n\n python3 manage.py makemigrations_apps\n\n python3 manage.py migrate --noinput\n\n if [[ $init -eq 1 ]]; then\n python3 manage.py loaddata_apps\n\n python3 manage.py createsuperuser_default\n fi\n\n echo $(python3 -c \"from email.utils import formatdate; print(formatdate());\") > orc_server/migration_complete\n}\n\ndate2unix() {\n echo $(python3 -c \"from datetime import datetime; print(f\\\"{datetime.strptime('$1', '%a, %d %b %Y %H:%M:%S %z'):%s}\\\")\")\n}\n\n\ndate_diff() {\n d=$(date2unix \"$1\")\n echo $(python3 -c \"from datetime import datetime; print(int(f\\\"{datetime.utcnow():%s}\\\") - $d);\")\n}\n\n\nif [[ -f /opt/orc_server/migration_complete ]]; then\n # seconds in a week - 604800\n if [[ 604800 > $(date_diff \"$(cat /opt/orc_server/migration_complete)\") ]]; then\n echo \"Less than week, not checking migration\"\n else\n migrate\n fi\n\nelse\n migrate --init\nfi\n\nif [[ \"x$DJANGO_MIGRATE\" = 'xon' ]]; then\n migrate --init\nfi\n\nif [[ \"x$DJANGO_COLLECTSTATIC\" = 'xon' ]]; then\n python3 manage.py collectstatic --noinput\nfi\n\nexec \"$@\"" }, { "alpha_fraction": 0.554347813129425, "alphanum_fraction": 0.5652173757553101, "avg_line_length": 36.375, "blob_id": "ede3b63e13fe7311018c89f117f881bdfbdefacf", "content_id": "e5804ec669634afcca07fc4ae9bcfab6a64149be", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1196, "license_type": "permissive", "max_line_length": 118, "num_lines": 32, "path": "/orchestrator/gui/client/src/components/utils/jadn.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "const indent = 2\nconst fmt = require('string-format')\nconst 
vkbeautify = require('vkbeautify')\n\n/* Schema Utils */\nexport const FormatJADN = (schema, indent=2, _level=0) => {\n let _indent = (indent % 2 == 1 ? indent - 1 : indent) + (_level * 2)\n let ind = ' '.repeat(_indent)\n let ind_e = ' '.repeat(_indent - 2)\n\n if (Array.isArray(schema)) {\n let nested = schema && Array.isArray(schema[0])\n let lvl = (nested && Array.isArray(schema[schema.length - 1])) ? _level + 1: _level\n let lines = schema.map(val => FormatJADN(val, indent, lvl))\n\n if (nested) {\n return fmt(\"[\\n{ind}\", {ind: ind}) + lines.join(fmt(\",\\n{ind}\", {ind: ind})) + fmt(\"\\n{ind_e}]\", {ind_e: ind_e})\n }\n return fmt(\"[{lines}]\", {lines: lines.join(', ')})\n\n } else if (typeof(schema) == \"object\") {\n let lines = Object.keys(schema).map(key => {\n let val = schema[key]\n return fmt(\"{ind}\\\"{k}\\\": {v}\", {ind: ind, k: key, v: FormatJADN(val, indent, _level+1)})\n }).join(\",\\n\")\n return fmt(\"{{\\n{lines}\\n{ind_e}}}\", {lines: lines, ind_e: ind_e})\n } else if (['string', 'number'].indexOf(typeof(schema)) >= 0) {\n return JSON.stringify(schema)\n } else {\n return \"\\\"N/A\\\"\"\n }\n}\n" }, { "alpha_fraction": 0.6147368550300598, "alphanum_fraction": 0.6176841855049133, "avg_line_length": 29.84415626525879, "blob_id": "a19d81186de113edc0d15898493b7d841143a08b", "content_id": "934cd2999e5268dc8d98ee71d6f61f9457bb8acc", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2375, "license_type": "permissive", "max_line_length": 95, "num_lines": 77, "path": "/orchestrator/gui/client/src/app.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport ReactDOM from 'react-dom'\nimport { connect } from 'react-redux'\nimport { ConnectedRouter } from 'connected-react-router'\nimport { Route, Switch } from 'react-router'\nimport { toast, ToastContainer } from 'react-toastify'\n\nimport qs from 'query-string'\n\nimport {\n Breadcrumbs,\n Error,\n Home,\n Nav\n} from './components/static'\n\nimport {\n Login,\n Logout,\n PrivateRoute\n} from './components/auth'\n\n// import Admin from './components/admin'\nimport Account from './components/account'\nimport Device from './components/device'\nimport Actuator from './components/actuator'\nimport Command from './components/command'\n\nimport * as AuthActions from './actions/auth'\nimport * as UtilActions from './actions/util'\n\nclass App extends Component {\n constructor(props, context) {\n super(props, context)\n this.props.info()\n }\n\n render() {\n return (\n <div id='contents' className=\"container-fluid mt-3\" >\n <Nav history={ this.props.history } />\n\n <ConnectedRouter history={ this.props.history }>\n <div className=\"row mx-auto\">\n <div className=\"col-12\">\n <Breadcrumbs navigate={ (path) => this.props.history.push(path) } />\n\n <Switch>\n <Route path=\"/:prefix*/login/\" component={ Login } />\n <Route path=\"/:prefix*/logout/\" component={ Logout } />\n <PrivateRoute exact path=\"/\" component={ Home } />\n <PrivateRoute path=\"/account/:page?\" component={ Account } />\n {/* <PrivateRoute path=\"/admin/:page?\" adminRequired component={ Admin } /> */}\n <PrivateRoute path=\"/device/\" component={ Device } />\n <PrivateRoute path=\"/actuator/\" component={ Actuator } />\n <PrivateRoute path=\"/command/:page?/:command?\" component={ 
Command } />\n <PrivateRoute component={ Error } /> // This should always be last route\n </Switch>\n </div>\n </div>\n </ConnectedRouter>\n\n <ToastContainer position={ toast.POSITION.BOTTOM_CENTER } autoClose={ 5000 } />\n </div>\n )\n }\n}\n\nconst mapStateToProps = state => ({\n errors: state.Auth.errors\n})\n\nconst mapDispatchToProps = dispatch => ({\n info: () => dispatch(UtilActions.info())\n})\n\nexport default connect(mapStateToProps, mapDispatchToProps)(App)\n" }, { "alpha_fraction": 0.5678271055221558, "alphanum_fraction": 0.5684273838996887, "avg_line_length": 21.513513565063477, "blob_id": "16d9a7cb70e2a24d18f0bd462ec5442ebbae22d5", "content_id": "8841955ee2c911ecab01df27360797d7ea15acc3", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1666, "license_type": "permissive", "max_line_length": 109, "num_lines": 74, "path": "/orchestrator/gui/client/src/components/command/pages/table.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\n\nimport {\n RemotePageTable\n} from '../../utils'\n\nimport * as CommandActions from '../../../actions/command'\n\nconst str_fmt = require('string-format')\n\nclass CommandTable extends Component {\n constructor(props, context) {\n super(props, context)\n\n this.tableColumns = [\n {\n text: 'Command ID',\n dataField: 'command_id',\n sort: true\n },{\n text: 'Received',\n dataField: 'received_on',\n sort: true\n },{\n text: 'Status',\n dataField: 'status',\n sort: true\n },{\n text: 'Command',\n dataField: 'command',\n formatter: (cell, row) => <span>{ cell.action } - { Object.keys(cell.target || {})[0] || '' }</span>\n }\n ]\n\n this.editOptions = {\n info: this.props.cmdInfo\n }\n\n }\n\n render() {\n return (\n <RemotePageTable\n keyField='command_id'\n dataKey='Command.commands'\n dataGet={ this.props.getCommands }\n columns={ this.tableColumns }\n defaultSort={[\n {\n dataField: 'received_on',\n order: 'asc'\n }\n ]}\n editRows\n editOptions={ this.editOptions }\n />\n )\n }\n}\n\nconst mapStateToProps = (state) => ({\n siteTitle: state.Util.site_title,\n orchestrator: {\n name: state.Util.name || 'N/A'\n },\n admin: state.Auth.access.admin\n})\n\nconst mapDispatchToProps = (dispatch) => ({\n getCommands: (page, sizePerPage, sort) => dispatch(CommandActions.getCommands(page, sizePerPage, sort)),\n})\n\nexport default connect(mapStateToProps, mapDispatchToProps)(CommandTable)\n" }, { "alpha_fraction": 0.6339402794837952, "alphanum_fraction": 0.6373193264007568, "avg_line_length": 37.3237419128418, "blob_id": "c7a31cdccd28bd8fa780274bb444d1e3b764f096", "content_id": "0b7c02676cbd13aff95765f97486702344c15546", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5327, "license_type": "permissive", "max_line_length": 106, "num_lines": 139, "path": "/orchestrator/core/orc_server/device/views/viewsets.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.db.utils import IntegrityError\nfrom django.contrib.auth.models import User\n\nfrom rest_framework import filters, status, viewsets\nfrom rest_framework.decorators 
import action\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.permissions import IsAdminUser, IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom ..models import Device, DeviceSerializer\n\n\nclass DeviceViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows Actuators to be viewed or edited.\n \"\"\"\n permission_classes = (IsAuthenticated,)\n serializer_class = DeviceSerializer\n lookup_field = 'device_id'\n\n queryset = Device.objects.order_by('name')\n filter_backends = (filters.OrderingFilter,)\n ordering_fields = ('name', 'host', 'port', 'protocol', 'serialization', 'type')\n\n permissions = {\n 'create': (IsAdminUser,),\n 'destroy': (IsAdminUser,),\n 'partial_update': (IsAdminUser,),\n 'update': (IsAdminUser,),\n }\n\n def get_permissions(self):\n \"\"\"\n Instantiates and returns the list of permissions that this view requires.\n \"\"\"\n return [permission() for permission in self.permissions.get(self.action, self.permission_classes)]\n\n def list(self, request, *args, **kwargs):\n \"\"\"\n Return a list of all actuators that the user has permissions for\n \"\"\"\n self.pagination_class.page_size_query_param = 'length'\n self.pagination_class.max_page_size = 100\n queryset = self.filter_queryset(self.get_queryset())\n\n # TODO: set permissions\n '''\n if not request.user.is_staff: # Standard User\n user_devices = DeviceGroup.objects.filter(users__in=[request.user])\n user_devices = list(g.devices.values_list('name', flat=True) for g in user_devices)\n queryset = queryset.filter(name__in=user_devices) # | queryset.exclude(name__in=user_devices)\n ''' # pylint: disable=pointless-string-statement\n\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n\n def retrieve(self, request, *args, **kwargs):\n \"\"\"\n Return a specific actuators that the user has permissions for\n \"\"\"\n device = self.get_object()\n\n # TODO: set permissions\n '''\n if not request.user.is_staff: # Standard User\n user_devices = DeviceGroup.objects.filter(users__in=[request.user])\n user_devices = list(g.devices.values_list('name', flat=True) for g in user_devices)\n if device is not None and device.name not in user_devices:\n raise PermissionDenied(detail='User not authorised to access device', code=401)\n ''' # pylint: disable=pointless-string-statement\n\n serializer = self.get_serializer(device)\n return Response(serializer.data)\n\n def create(self, request, *args, **kwargs):\n \"\"\"\n Create a device if the user has permission\n \"\"\"\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n try:\n self.perform_create(serializer)\n except IntegrityError as e:\n response = dict(\n IntegrityError=str(e).replace('key', 'field')\n )\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n headers = self.get_success_headers(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n\n def update(self, request, *args, **kwargs):\n \"\"\"\n Update a specific device that a user has permission for\n \"\"\"\n partial = kwargs.pop('partial', False)\n device = self.get_object()\n serializer = self.get_serializer(device, data=request.data, partial=partial)\n serializer.is_valid(raise_exception=True)\n\n try:\n self.perform_update(serializer)\n except 
IntegrityError as e:\n response = dict(\n IntegrityError=str(e).replace('key', 'field')\n )\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n if getattr(device, '_prefetched_objects_cache', None):\n # If 'prefetch_related' has been applied to a queryset,\n # need to forcibly invalidate the prefetch cache on the instance\n device._prefetched_objects_cache = {}\n\n return Response(serializer.data)\n\n @action(methods=['GET'], detail=False)\n def users(self, request, *args, **kwargs):\n \"\"\"\n API endpoint that allows for Device user retrieval\n \"\"\"\n device = self.get_object()\n\n if not request.user.is_staff:\n device_groups = list(g.name for g in request.user.groups.exclude(devicegroup__isnull=True))\n\n if device.name not in device_groups:\n raise PermissionDenied(detail='User not authorised to access device', code=401)\n\n rtn = dict(\n users=list(u.username for u in User.objects.filter(groups__name=f'Device: {device.name}'))\n )\n\n return Response(rtn)\n" }, { "alpha_fraction": 0.6934523582458496, "alphanum_fraction": 0.711309552192688, "avg_line_length": 14.318181991577148, "blob_id": "0a59f5db6b2ef72093623c9f79d8f53f3ca65b80", "content_id": "4cc2c078650b15cea1272003c8c6ae4f26345a6c", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 336, "license_type": "permissive", "max_line_length": 41, "num_lines": 22, "path": "/orchestrator/core/orc_server/uwsgi.ini", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "[uwsgi]\nbase = /opt\nproject_dir = orc_server\nproject_name = orchestrator\n\n# Project\nchdir = %(base)/%(project_dir)\nmodule = %(project_name).wsgi:application\n\n# HTTP Port\nhttp-socket = :8080\n\n# Threads and processes\nmaster = true\nthreads = 4\nprocesses = 4\n\nlazy-apps = true\nhttp-keepalive = true\n\n# Mime Types\nmime-file = /etc/mime.types" }, { "alpha_fraction": 0.5998663902282715, "alphanum_fraction": 0.6052104234695435, "avg_line_length": 23.540983200073242, "blob_id": "d53ba192a7b74a002a49e772995b11444c0b5543", "content_id": "ee239381b34276c9044045592b15c92a2c6c6945", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1497, "license_type": "permissive", "max_line_length": 70, "num_lines": 61, "path": "/logger/gui/config/dev.config.babel.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import webpack from 'webpack';\nimport merge from 'webpack-merge';\nimport path from 'path';\n\nimport DeadCodePlugin from 'webpack-deadcode-plugin';\n// import CircularDependencyPlugin from 'circular-dependency-plugin';\n\nimport baseConfig from './base.config.babel';\n\nconst env = 'development';\n\nconst ROOT_DIR = path.join(__dirname, '..');\nconst BUILD_DIR = path.join(ROOT_DIR, 'build');\nconst COMPONENTS_DIR = path.join(ROOT_DIR, 'src', 'components');\nconst DEPEND_DIR = path.join(COMPONENTS_DIR, 'dependencies');\n\nexport default merge.smart(baseConfig, {\n mode: env,\n devtool: 'eval',\n plugins: [\n new webpack.DefinePlugin({\n NODE_ENV: env\n }),\n new DeadCodePlugin({\n patterns: [\n 'src/**/*.(js|jsx|css|less)'\n ],\n exclude: [\n '**/*.(stories|spec).(js|jsx)$',\n DEPEND_DIR,\n 
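// the entries below keep generated theme-switcher assets out of the dead-code report\n 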
'**/theme-switcher/download_themes.js',\n path.join(COMPONENTS_DIR, 'utils', 'theme-switcher', 'assets')\n ]\n }),/*\n new CircularDependencyPlugin({\n exclude: /node_modules/,\n failOnError: false,\n allowAsyncCycles: false,\n cwd: ROOT_DIR\n }),*/\n new webpack.NoEmitOnErrorsPlugin()\n ],\n devServer: {\n contentBase: BUILD_DIR,\n compress: true,\n port: 3000,\n hot: true,\n open: false,\n historyApiFallback: true,\n proxy: {\n '/api': {\n target: 'http://localhost:9200',\n pathRewrite: {\"^/api/\" : \"\"},\n secure: false\n }\n }\n },\n optimization: {\n usedExports: true\n }\n});\n" }, { "alpha_fraction": 0.6219007968902588, "alphanum_fraction": 0.6239669322967529, "avg_line_length": 22.047618865966797, "blob_id": "d340717a50b48b93daf5b92ef59873584a404ac2", "content_id": "95b8cdccfbd06092158b031aa30ce6336121dd9c", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 968, "license_type": "permissive", "max_line_length": 59, "num_lines": 42, "path": "/orchestrator/gui/server/gui_server/webApp/templatetags/var_dump.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django import template\nfrom django.template.defaultfilters import linebreaksbr\nfrom django.utils.html import escape\nfrom django.utils.safestring import mark_safe\n\nfrom pprint import pformat\n\nregister = template.Library()\n\n\[email protected]\ndef var_dump(var):\n \"\"\"\n Dumps the value of the given object\n :param var: var to dump its value\n :return: dumped value of the given var\n \"\"\"\n if hasattr(var, '__dict__'):\n d = dict(\n __str__=str(var),\n __unicode__=str(var).encode('utf-8', 'strict'),\n __repr__=repr(var)\n )\n\n d.update(var.__dict__)\n var = d\n\n output = f\"{pformat(var)}\\n\"\n return output\n\n\[email protected]\ndef dump(var):\n \"\"\"\n Wrapper function for var_dump\n :param var: var object to dump\n :return: dumped value of the given var\n \"\"\"\n return mark_safe(linebreaksbr(escape(var_dump(var))))\n" }, { "alpha_fraction": 0.7045000195503235, "alphanum_fraction": 0.7045000195503235, "avg_line_length": 33.482757568359375, "blob_id": "88cb61956db22ae30a8c875e2634939711b558d9", "content_id": "6308df5ecf16da5a5f626b1cd45fa139e6663931", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2000, "license_type": "permissive", "max_line_length": 108, "num_lines": 58, "path": "/orchestrator/core/orc_server/orchestrator/views/api.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import importlib\nimport os\n\nfrom django.conf import settings\nfrom django.http import FileResponse\nfrom dynamic_preferences.registries import global_preferences_registry\nfrom inspect import isfunction\nfrom rest_framework import permissions\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\n\n# Local imports\nfrom orchestrator.models import Serialization, Protocol\nfrom command.models import SentHistory, ResponseHistory\n\nglobal_preferences = global_preferences_registry.manager()\n\n\ndef get_stats():\n \"\"\"\n Gather stats from each installed app 
if the view has a function name defined as `settings.STATS_FUN`\n \"\"\"\n stats_results = {}\n for installed_app in settings.INSTALLED_APPS:\n app_views = getattr(importlib.import_module(installed_app), 'views', None)\n stats_view = getattr(app_views, settings.STATS_FUN, None)\n if stats_view and isfunction(stats_view):\n stats_results[installed_app] = stats_view()\n return stats_results\n\n\n@api_view([\"GET\"])\n@permission_classes((permissions.AllowAny,))\ndef api_favicon(request):\n favicon = os.path.join(settings.STATIC_ROOT, 'favicon.ico')\n return FileResponse(favicon, 'rb')\n\n\n@api_view(['GET'])\n@permission_classes((permissions.AllowAny,))\ndef api_root(request):\n \"\"\"\n Orchestrator basic information\n \"\"\"\n rtn = dict(\n message=\"Hello, {}. You're at the orchestrator api index.\".format(request.user.username or 'guest'),\n commands=dict(\n sent=SentHistory.objects.count(),\n responses=ResponseHistory.objects.count()\n ),\n name=global_preferences.get('orchestrator__name', 'N/A'),\n id=global_preferences.get('orchestrator__id', 'N/A'),\n protocols={k: bool(v) for k, v in Protocol.objects.values_list('name', 'pub_sub', named=True)},\n serializations=Serialization.objects.values_list('name', flat=True),\n # app_stats=get_stats()\n )\n\n return Response(rtn)\n" }, { "alpha_fraction": 0.6557823419570923, "alphanum_fraction": 0.6557823419570923, "avg_line_length": 21.272727966308594, "blob_id": "f1d9d7cf465b2b076d52b8c30c0c664e6842f324", "content_id": "eec934bd70b1a4e1f516d0a82634f42dccd5dd9d", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 735, "license_type": "permissive", "max_line_length": 92, "num_lines": 33, "path": "/base/modules/utils/root/sb_utils/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "\"\"\"\nScreaming Bunny Utils\nRoot Namespace\n\"\"\"\nfrom pkgutil import extend_path\n__path__ = extend_path(__path__, __name__)\n\nfrom .amqp_tools import Consumer, Producer\nfrom .general import prefixUUID, default_decode, default_encode, safe_cast, safe_json, toStr\nfrom .ext_dicts import FrozenDict, ObjectDict, QueryDict\nfrom .message import decode_msg, encode_msg\nfrom .message_obj import Message\n\n__all__ = [\n # AMQP Tools\n 'Consumer',\n 'Producer',\n # General Utils\n 'default_decode',\n 'default_encode',\n 'prefixUUID',\n 'safe_cast',\n 'safe_json',\n 'toStr',\n # Extended Dictionaries\n 'FrozenDict',\n 'ObjectDict',\n 'QueryDict',\n # Message Utils\n 'Message',\n 'decode_msg',\n 'encode_msg'\n]\n" }, { "alpha_fraction": 0.6828829050064087, "alphanum_fraction": 0.6828829050064087, "avg_line_length": 22.125, "blob_id": "8cbc8d049c575b16ef7ee3642bb15f7485995810", "content_id": "75e29527395b3fad5fa8ff9d287ca08771fa639d", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 555, "license_type": "permissive", "max_line_length": 66, "num_lines": 24, "path": "/orchestrator/core/orc_server/utils/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from sb_utils import decode_msg, encode_msg, FrozenDict, safe_cast\n\n# Local imports\nfrom .general import isHex, prefixUUID, randBytes, to_str\nfrom .messageQueue 
import MessageQueue\nfrom .model import get_or_none, ReadOnlyModelAdmin\nfrom .permissions import IsAdminOrIsSelf\nfrom .schema import OrcSchema\n\n__all__ = [\n \"decode_msg\",\n \"encode_msg\",\n \"get_or_none\",\n \"isHex\",\n \"randBytes\",\n \"prefixUUID\",\n \"safe_cast\",\n \"to_str\",\n \"FrozenDict\",\n \"IsAdminOrIsSelf\",\n \"OrcSchema\",\n \"MessageQueue\",\n \"ReadOnlyModelAdmin\"\n]\n" }, { "alpha_fraction": 0.5115941762924194, "alphanum_fraction": 0.5157970786094666, "avg_line_length": 34.20408248901367, "blob_id": "c828a714ce0857422039191acab889d442e65e7f", "content_id": "557ad15c36988f727d6075e03226edd9f5c517c6", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6902, "license_type": "permissive", "max_line_length": 107, "num_lines": 196, "path": "/orchestrator/gui/server/gui_server/webApp/sockets.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import json\n\nfrom channels.generic.websocket import JsonWebsocketConsumer\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.http import HttpRequest, JsonResponse, QueryDict\nfrom django.urls import resolve\nfrom rest_framework.request import Request\nfrom urllib.parse import parse_qsl, urlparse\n\nfrom .status_codes import Socket_Close_Codes\n\nfrom utils import FrozenDict, to_str\n\n\nclass SocketConsumer(JsonWebsocketConsumer):\n def connect(self):\n self.accept()\n # pretty_print(self.scope, \"-<>-\")\n\n self.send(text_data=json.dumps({\n \"message\": \"connected\"\n }))\n\n def disconnect(self, close_code):\n close_status = Socket_Close_Codes.get(close_code, (\"\", \"\"))\n print(f\"Socket Disconnect: {close_code}\\n--> Name: {close_status[0]}\\n--> Desc: {close_status[1]}\")\n\n def receive(self, text_data=None):\n payload = json.loads(text_data)\n # print(f\"Request for: {payload.get(\"method\", \"GET\")} -> {payload.get(\"endpoint\", \"/\")}\")\n\n request = self.create_dja_request(payload)\n view_func, args, kwargs = request.resolver_match\n\n try:\n view_rtn = view_func(request)\n except Exception as e:\n print(e)\n try:\n request = self.create_drf_request(request)\n view_rtn = view_func(request)\n except Exception as e:\n print(e)\n view_rtn = FrozenDict(\n status_code=500,\n data=dict()\n )\n\n rtn_type = \"success\" if view_rtn.status_code in [200, 201, 204, 304] else \"failure\"\n\n rtn_data = view_rtn.data if rtn_type == \"success\" else dict(response=view_rtn.data)\n rtn_data = json.loads(JsonResponse(rtn_data).content)\n\n rtn_type = payload.get(\"types\", {}).get(rtn_type, \"oops...\")\n rtn_state = rtn_type[\"type\"] if type(rtn_type) is dict else rtn_type\n\n meta = rtn_type.get(\"meta\", {}) if type(rtn_state) is dict else {}\n meta.update(\n status_code=view_rtn.status_code\n )\n\n try:\n self.send_json(dict(\n type=rtn_state,\n payload=rtn_data,\n meta=meta\n ))\n except Exception as e:\n print(e)\n self.send_json(dict(\n type=rtn_state,\n payload={},\n meta=meta\n ))\n\n def create_dja_request(self, data={}):\n print(\"Create Django Request\")\n request = HttpRequest()\n\n headers = {to_str(kv[0]): to_str(kv[1]) for kv in self.scope.get(\"headers\", {})}\n host, port = headers.get(\"host\", \"\").split(\":\")\n url = urlparse(data.get(\"endpoint\", \"\"))\n\n try:\n resolver = resolve(url.path + (\"\" if url.path.endswith(\"/\") else \"/\"))\n except Exception as 
e:\n print(f\"URL Resolve failed: {url.path}\")\n resolver = (\n lambda r: FrozenDict(\n status_code=404,\n data=dict(\n message=\"page not found\"\n ),\n ), (), {}\n )\n\n params = dict(\n # accepted_renderer=\"rest_framework.renderers.JSONRenderer\",\n body=b\"\", # bytes(request_body, \"utf-8\"),\n content_type=\"application/json\",\n content_params={},\n encoding=None,\n # FILES - MultiValueDict\n method=data.get(\"method\", \"GET\"),\n path=url.path,\n path_info=url.path,\n resolver_match=resolver,\n scheme=\"http\",\n session=self.scope.get(\"session\", None),\n # site=shortcuts.get_current_site(request),\n user=self.scope.get(\"user\", AnonymousUser), # TODO: Verify this is valid in this instance\n url_conf=None,\n\n GET=QueryDict(mutable=True),\n META=dict(\n CONTENT_LENGTH=0,\n CONTENT_TYPE=\"application/json\",\n HTTP_ACCEPT=\"application/json\",\n HTTP_ACCEPT_ENCODING=headers.get(\"accept-encoding\", \"\"),\n HTTP_ACCEPT_LANGUAGE=headers.get(\"accept-language\", \"\"),\n HTTP_HOST=host,\n # HTTP_REFERER – The referring page, if any.\n HTTP_USER_AGENT=headers.get(\"user-agent\", \"\"),\n HTTP_AUTHORIZATION=f\"JWT {data.get('jwt', '')}\",\n REMOTE_ADDR=host,\n REMOTE_HOST=host,\n REMOTE_USER=self.scope.get(\"user\", AnonymousUser),\n REQUEST_METHOD=data.get(\"method\", \"GET\"),\n SERVER_NAME=self.scope.get(\"server\", [\"\", \"\"])[0],\n SERVER_PORT=self.scope.get(\"server\", [\"\", \"\"])[1],\n QUERY_STRING=url.query,\n ),\n POST=QueryDict(mutable=True),\n COOKIES=self.scope.get(\"session\", {}),\n )\n\n if params[\"method\"].lower() == \"get\" and url.query:\n request_query = dict(parse_qsl(url.query))\n request.GET.update(request_query)\n\n request_data = data.get(\"data\", {})\n if len(request_data.keys()) >= 1:\n request_body = json.dumps(request_data)\n update_request = dict(\n _body=bytes(request_body, \"utf-8\"),\n body=bytes(request_body, \"utf-8\"),\n data=request_data\n )\n\n if params[\"method\"].lower() in [\"post\", \"put\"]:\n tmp_qrydict = QueryDict(mutable=True)\n\n if params[\"method\"].lower() == \"post\":\n tmp_qrydict.update(request_data)\n update_request.update(dict(\n raw_post_data=request.POST.urlencode(),\n ))\n\n if params[\"method\"].lower() == \"put\":\n tmp_qrydict.update(request_data)\n\n update_request.update({\n params[\"method\"]: tmp_qrydict\n })\n\n params.update(update_request)\n\n for key, val in params.items():\n try:\n setattr(request, key, val)\n except Exception as e:\n # print(f\"--- {e} - {key}: {val}\")\n pass\n\n request.META[\"CONTENT_LENGTH\"] = len(to_str(params.get(\"body\", \"\")))\n\n return request\n\n def create_drf_request(self, dja_request: HttpRequest):\n print(\"Create Django Rest Request\")\n request = Request(dja_request)\n params = dict()\n\n for key, val in params.items():\n try:\n setattr(request, key, val)\n except Exception as e:\n # print(f\"--- {e} - {key}: {val}\")\n pass\n\n return request\n\n def send_all_json(self, json_data={}):\n print(f'Send to all {len(self._clients)} Clients')\n for client in self._clients:\n client.send_json(json_data)\n" }, { "alpha_fraction": 0.610909104347229, "alphanum_fraction": 0.6618182063102722, "avg_line_length": 27.947368621826172, "blob_id": "199bd2a9ea174fb41cb6198e1b4b418d51b38f64", "content_id": "5ad436d0e44e8ee4c019e148288518e9ae44dd69", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
550, "license_type": "permissive", "max_line_length": 194, "num_lines": 19, "path": "/orchestrator/core/orc_server/orchestrator/migrations/0002_protocol_port.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2 on 2019-05-07 14:52\n\nimport django.core.validators\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('orchestrator', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='protocol',\n name='port',\n field=models.IntegerField(default=8080, help_text='Port of the transport', validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(65535)]),\n ),\n ]\n" }, { "alpha_fraction": 0.6688888669013977, "alphanum_fraction": 0.6866666674613953, "avg_line_length": 26, "blob_id": "4addc269850eef6f720b43fb916337b06e683433", "content_id": "e7d264cb287100232a44030a3632b716dc39352b", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 1350, "license_type": "permissive", "max_line_length": 94, "num_lines": 50, "path": "/orchestrator/gui/client/Dockerfile", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# GUI Builder\nFROM alpine as builder\nRUN apk upgrade --update && apk add --no-cache dos2unix && rm /var/cache/apk/*\n\nADD . /tmp/gui\n\nRUN apk add --update nodejs nodejs-npm && \\\ncd /tmp/gui && \\\nnpm install && \\\nnpm run init && \\\nnpm run build && \\\nmkdir -p /opt/orchestrator/gui && \\\ncp -r build/* /opt/orchestrator/gui\n\n\n# GUI Image\nFROM alpine\nRUN apk upgrade --update && apk add --no-cache dos2unix && rm /var/cache/apk/*\n\nMAINTAINER Screaming_Bunny\n\nLABEL name=\"Orchestrator GUI\" \\\nvendor=\"OpenC2\" \\\nlicense=\"BSD\" \\\nversion=\"2.0\" \\\ndescription=\"This is the Orchestrator GUI container\"\n\nADD httpd/httpd.conf /etc/apache2/httpd.conf\nCOPY --from=builder /opt/orchestrator/gui /opt/orchestrator/gui\n\n# Package installation\n# Packages - https://pkgs.alpinelinux.org/packages\n#\n# HTTPD Install & Config\nRUN apk add --no-cache apache2 apache2-utils apache2-ssl apache2-proxy apache2-proxy-html && \\\nmkdir -p /run/apache2 && \\\n# addgroup -g 82 -S www-data && \\\nadduser -u 82 -D -S -G www-data www-data && \\\nln -s /usr/lib/apache2 /etc/apache2/modules && \\\n# GUI Config\n# mkdir -p /opt/syslog/gui && \\\nchown -R www-data:www-data /opt/orchestrator/gui && \\\n# Cleanup\nrm -rf /var/cache/apk/* *.tar.gz* /usr/src /root/.gnupg /tmp/*\n\n# Ports\nEXPOSE 80/tcp 443/tcp\n\n# Startup Command\nCMD [\"/usr/sbin/httpd\", \"-DFOREGROUND\", \"-f\", \"/etc/apache2/httpd.conf\"]\n" }, { "alpha_fraction": 0.5316358208656311, "alphanum_fraction": 0.533178985118866, "avg_line_length": 13.727272987365723, "blob_id": "2134f3cf0230902e5e900effca2ffc960f5de34d", "content_id": "1c544f464009fa98108855ee4a10af5aa6326431", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1296, "license_type": "permissive", "max_line_length": 65, "num_lines": 88, "path": "/orchestrator/core/orc_server/es_mirror/field.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import 
uuid\n\nfrom elasticsearch_dsl.field import (\n    Boolean,\n    Binary,\n    Byte,\n    Completion,\n    CustomField,\n    Date,\n    DateRange,\n    Double,\n    DoubleRange,\n    Field,\n    Float,\n    FloatRange,\n    GeoPoint,\n    GeoShape,\n    HalfFloat,\n    Integer,\n    IntegerRange,\n    Ip,\n    IpRange,\n    Join,\n    Keyword,\n    Long,\n    LongRange,\n    Murmur3,\n    Nested,\n    Object,\n    Percolator,\n    RangeField,\n    RankFeature,\n    ScaledFloat,\n    Short,\n    Text,\n    TokenCount\n)\n\n\nclass UUID(Text):\n    name = 'uuid'\n    _coerce = True\n\n    def _deserialize(self, data):\n        if data is None:\n            return None\n        return uuid.UUID(data) if isinstance(data, str) else data\n\n    def _serialize(self, data):\n        if data is None:\n            return None\n        return str(data)\n\n\n__all__ = [\n    'Boolean',\n    'Binary',\n    'Byte',\n    'Completion',\n    'CustomField',\n    'Date',\n    'DateRange',\n    'Double',\n    'DoubleRange',\n    'Field',\n    'Float',\n    'FloatRange',\n    'GeoPoint',\n    'GeoShape',\n    'HalfFloat',\n    'Integer',\n    'IntegerRange',\n    'Ip',\n    'IpRange',\n    'Join',\n    'Keyword',\n    'Long',\n    'LongRange',\n    'Murmur3',\n    'Nested',\n    'Object',\n    'Percolator',\n    'RangeField',\n    'RankFeature',\n    'ScaledFloat',\n    'Short',\n    'Text',\n    'TokenCount',\n    'UUID'\n]\n" }, { "alpha_fraction": 0.5386533737182617, "alphanum_fraction": 0.5386533737182617, "avg_line_length": 21.27777862548828, "blob_id": "c3e8397010d6735db75b10d56d69bd277139c497", "content_id": "ab79e22bd339ece175f56cd20aa88aa41a6afe2f", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 401, "license_type": "permissive", "max_line_length": 65, "num_lines": 18, "path": "/logger/gui/src/components/utils/theme-switcher/theme-actions.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "// Actions for Theme endpoints\nconst baseAPI = '/assets/css/'; // ${theme}.css`\n\n// API Calls\n// GET - '/assets/css/{theme}.css'\nexport const loadTheme = async theme => {\n  return fetch(`${window.location.origin}${baseAPI}${theme}.css`)\n    .then(rsp => rsp.text())\n    .then(data => ({\n      theme,\n      styles: data\n    }))\n    .catch(err => ({\n      err,\n      theme,\n      styles: ''\n    }));\n};\n" }, { "alpha_fraction": 0.7012711763381958, "alphanum_fraction": 0.7012711763381958, "avg_line_length": 22.600000381469727, "blob_id": "1c732ab53dc14d82ecd2f88a67bb4972c1a193c1", "content_id": "ca1d7c8dbfb6a618d07fc475c13147c7922ce1fe", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 472, "license_type": "permissive", "max_line_length": 50, "num_lines": 20, "path": "/orchestrator/gui/server/gui_server/utils/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from .general import prefixUUID, safe_cast, to_str\nfrom .model import get_or_none, ReadOnlyModelAdmin\nfrom .orchestrator_api import OrchestratorAPI\nfrom .permissions import IsAdminOrIsSelf\nfrom .schema import OrcSchema\n\nfrom sb_utils import FrozenDict, safe_cast\n\n__all__ = [\n    'FrozenDict',\n    'get_or_none',\n    'IsAdminOrIsSelf',\n    'OrchestratorAPI',\n    'OrcSchema',\n    'prefixUUID',\n    'ReadOnlyModelAdmin',\n    'safe_cast',\n    'to_str'\n]\n" }, { "alpha_fraction": 0.5823540091514587, "alphanum_fraction": 0.5877634882926941, "avg_line_length": 26.63401985168457, "blob_id": 
"3a585d59df2c295de8e61efdb319c434d39f24f1", "content_id": "aedc6077538db01f050cc018088fcf60cb6461fe", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5361, "license_type": "permissive", "max_line_length": 199, "num_lines": 194, "path": "/orchestrator/gui/client/src/components/device/lib/transport.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport PropTypes from 'prop-types'\nimport { connect } from 'react-redux'\nimport { toast } from 'react-toastify'\n\nimport {\n Button,\n Modal,\n ModalBody,\n ModalFooter,\n ModalHeader\n} from 'reactstrap'\n\nimport { FontAwesomeIcon } from '@fortawesome/react-fontawesome'\nimport { faTimes } from '@fortawesome/free-solid-svg-icons'\n\nimport * as DeviceActions from '../../../actions/device'\nimport { withGUIAuth } from '../../../actions/util'\n\nclass Transport extends Component {\n constructor(props, context) {\n super(props, context)\n this.checkboxChange = this.checkboxChange.bind(this)\n this.transportRemove = this.transportRemove.bind(this)\n this.transportChange = this.transportChange.bind(this)\n\n\n this.state = {\n host: '127.0.0.1',\n port: 8080,\n protocol: 'HTTPS',\n serialization: ['JSON'],\n ...this.props.data\n }\n }\n\n componentDidMount() {\n this.mounted = true;\n }\n\n componentWillUnmount() {\n this.mounted = false;\n }\n\n shouldComponentUpdate(nextProps, nextState) {\n let props_update = this.props != nextProps\n let state_update = this.state != nextState\n\n if (props_update && this.mounted) {\n setTimeout(() => this.setState(this.props.data), 10)\n }\n\n return props_update || state_update\n }\n\n checkboxChange(e) {\n const name = e.target.name\n const item = e.target.id.replace(/^checkbox_\\d+_/, '')\n\n let tmpVal = this.state[name]\n let index = tmpVal.indexOf(item)\n\n if (e.target.checked) {\n if (index === -1) tmpVal.push(item);\n } else {\n if (index >= 0 && tmpVal.length > 1) tmpVal.splice(index, 1);\n }\n\n this.setState(prevState => ({\n [name]: tmpVal\n }), () => {\n this.props.change(this.state, this.props.index)\n })\n }\n\n transportRemove(e) {\n e.preventDefault()\n this.props.remove(this.props.index)\n }\n\n transportChange(e, reset=false) {\n let tmpState = {}\n if (reset) {\n tmpState = e\n } else {\n tmpState[e.target.name] = e.target.value\n }\n\n this.setState(\n tmpState,\n () => {\n this.props.change(this.state, this.props.index)\n }\n )\n }\n\n transportPubSub() {\n let protocols = Object.keys(this.props.orchestrator.protocols).map((p, i) => <option key={ i } value={ p }>{ p }</option> )\n let pub_sub = this.props.orchestrator.protocols[this.state.protocol]\n let chan_top = ''\n let columns = 'col-6'\n\n if (pub_sub) {\n columns = 'col-md-4 col-sm-12'\n chan_top = [(\n <div key={ 0 } className={ \"form-group \" + columns }>\n <label htmlFor=\"topic\">Topic</label>\n <input type=\"text\" className=\"form-control\" name='topic' value={ this.state.topic } onChange={ this.transportChange } />\n </div>), (\n <div key={ 1 } className={ \"form-group \" + columns }>\n <label htmlFor=\"channel\">Channel</label>\n <input type=\"text\" className=\"form-control\" name='channel' value={ this.state.channel } onChange={ this.transportChange } />\n </div>\n )]\n }\n\n return (\n <div className=\"form-row\">\n <div className={ \"form-group \" + 
columns }>\n <label htmlFor=\"protocol\">Protocol</label>\n <select className=\"form-control\" name='protocol' value={ this.state.protocol } onChange={ this.transportChange } >\n { protocols }\n </select>\n </div>\n { chan_top }\n </div>\n )\n }\n\n render() {\n let serializations = this.props.orchestrator.serializations.map((s, i) => (\n <div key={ i } className=\"form-check-inline\">\n <label className=\"form-check-label\">\n <input id={ `checkbox_${i}_${s}` } className=\"form-check-input\" name='serialization' type=\"checkbox\" checked={ this.state.serialization.indexOf(s) >= 0 } onChange={ this.checkboxChange } />\n { s }\n </label>\n </div>\n ))\n\n return (\n <div className='border mb-2 p-2'>\n <Button color=\"danger\" size='sm' className='float-right' onClick={ this.transportRemove } >\n <FontAwesomeIcon\n icon={ faTimes }\n />\n </Button>\n <div className=\"form-row\">\n <div className=\"form-group col-lg-6\">\n <label htmlFor=\"host\">Host</label>\n <input type=\"text\" className=\"form-control\" name='host' value={ this.state.host } onChange={ this.transportChange } />\n </div>\n\n <div className=\"form-group col-lg-6\">\n <label htmlFor=\"port\">Port</label>\n <input type=\"text\" className=\"form-control\" name='port' value={ this.state.port } onChange={ this.transportChange } />\n </div>\n </div>\n\n { this.transportPubSub() }\n\n <div className=\"form-row\">\n <div className=\"form-group col-12\">\n <div>\n <p>Serializations</p>\n </div>\n { serializations }\n </div>\n </div>\n </div>\n )\n }\n}\n\nTransport.propTypes = {\n data: PropTypes.object,\n change: PropTypes.func,\n remove: PropTypes.func,\n};\n\nTransport.defaultProps = {\n data: {},\n change: (d, i) => {},\n remove: (i) => {}\n};\n\nconst mapStateToProps = (state) => ({\n orchestrator: {\n // ...state.Orcs.selected,\n protocols: state.Util.protocols,\n serializations: state.Util.serializations,\n }\n})\n\nexport default connect(mapStateToProps)(Transport)\n" }, { "alpha_fraction": 0.686450183391571, "alphanum_fraction": 0.6886898279190063, "avg_line_length": 23.135135650634766, "blob_id": "900b0ef7d5c5be44de1a02ef599a71ceb36911e9", "content_id": "8bce4fcf9571fd86272d1dfe25c4c426a2ad1d1c", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 893, "license_type": "permissive", "max_line_length": 49, "num_lines": 37, "path": "/orchestrator/gui/server/gui_server/webApp/templatetags/jsonify.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport json\n\nfrom django.core.serializers import serialize\nfrom django.db.models.query import QuerySet\nfrom django.utils.safestring import mark_safe\nfrom django.template import Library\n\nregister = Library()\n\n\[email protected](is_safe=True)\ndef jsonify(val):\n \"\"\"\n JSON stringify the given value\n :param val: object to JSON stringify\n :return: stringified JSON\n \"\"\"\n if isinstance(val, QuerySet):\n return mark_safe(serialize('json', val))\n\n return mark_safe(json.dumps(val))\n\n\[email protected]\ndef pretty_json(val, ind=2):\n \"\"\"\n Pretty format JSON data\n :param val: Key/Value object\n :param ind: spaces to use as indent\n :return: pretty formatted key/value object\n \"\"\"\n if not isinstance(val, dict):\n val = json.loads(val)\n return mark_safe(json.dumps(val, 
indent=ind))\n" }, { "alpha_fraction": 0.659649133682251, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 19.35714340209961, "blob_id": "0a64fb6d1f1b139eb7ecf8de993f30da34a900ea", "content_id": "f07ab16c2f93e1615ceba4fab0cceba930b73ead", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 285, "license_type": "permissive", "max_line_length": 60, "num_lines": 14, "path": "/orchestrator/core/orc_server/conformance/tests/slpf_tests.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "\"\"\"\nOpenC2 Stateless Packet Filtering Profile (SLPF) Conformance\n\"\"\"\nfrom test_setup import SetupTestCase\n\n\nclass SLPF_UnitTests(SetupTestCase):\n \"\"\"\n SLPF OpenC2 Conformance Tests\n \"\"\"\n profile = \"SLPF\"\n\n def test_allow_ip(self):\n print(\"Test SLPF Allow:IP...\")\n" }, { "alpha_fraction": 0.7647058963775635, "alphanum_fraction": 0.7647058963775635, "avg_line_length": 16.14285659790039, "blob_id": "ae72f6e102bedc56472a5f7daca2f21951a774f9", "content_id": "4811da21a14798ff3aa3bbae76a114c555e1324b", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 119, "license_type": "permissive", "max_line_length": 38, "num_lines": 7, "path": "/orchestrator/gui/client/src/components/utils/theme-switcher/index.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import ThemeSwitcher from './switcher'\nimport ThemeChooser from './chooser'\n\nexport {\n ThemeChooser,\n ThemeSwitcher\n}" }, { "alpha_fraction": 0.5179772973060608, "alphanum_fraction": 0.5214465856552124, "avg_line_length": 32.14285659790039, "blob_id": "acc9e297a9274d461a9087bb6449316392faa92b", "content_id": "f0846c89a761486773c59ab7e7c1210e596e7dcb", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9512, "license_type": "permissive", "max_line_length": 113, "num_lines": 287, "path": "/base/modules/utils/root/sb_utils/ext_dicts.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import copy\n\nfrom typing import (\n Any,\n List,\n Sequence\n)\n\nfrom .general import safe_cast\n\n\nclass ObjectDict(dict):\n \"\"\"\n Dictionary that acts like a object\n d = ObjectDict()\n\n d['key'] = 'value'\n SAME AS\n d.key = 'value'\n \"\"\"\n\n def __getattr__(self, key: Any) -> Any:\n \"\"\"\n Get an key as if an attribute - ObjectDict.key - SAME AS - ObjectDict['key']\n :param key: key to get value of\n :return: value of given key\n \"\"\"\n if key in self:\n return self[key]\n raise AttributeError(f\"No such attribute {key}\")\n\n def __setattr__(self, key: Any, val: Any) -> None:\n \"\"\"\n Set an key as if an attribute - d.key = 'value' - SAME AS - d['key'] = 'value'\n :param key: key to create/override\n :param val: value to set\n :return: None\n \"\"\"\n self[key] = self.__class__(val) if isinstance(val, dict) else val\n\n def __delattr__(self, key: Any) -> None:\n \"\"\"\n Remove a key as if an attribute - del d.key - SAME AS - del d['key']\n :param key: key to 
remove/delete\n        :return: None\n        \"\"\"\n        if key in self:\n            del self[key]\n        else:\n            raise AttributeError(f\"No such attribute: {key}\")\n\n\nclass FrozenDict(ObjectDict):\n    \"\"\"\n    Immutable/Frozen dictionary\n    \"\"\"\n    _hash: int = None\n\n    def __hash__(self) -> int:\n        \"\"\"\n        Create a hash for the FrozenDict\n        :return: object hash\n        \"\"\"\n        if self._hash is None:\n            object.__setattr__(self, '_hash', hash(tuple(sorted(self.items()))))\n        return self._hash\n\n    def _immutable(self, *args, **kwargs) -> None:\n        \"\"\"\n        Raise an error for an attempt to alter the FrozenDict\n        :param args: positional args\n        :param kwargs: key/value args\n        :return: None\n        :raise TypeError\n        \"\"\"\n        raise TypeError('cannot change object - object is immutable')\n\n    __setitem__ = _immutable\n    __delitem__ = _immutable\n    pop = _immutable\n    popitem = _immutable\n    clear = _immutable\n    update = _immutable\n    setdefault = _immutable\n\n\nclass QueryDict(ObjectDict):\n    \"\"\"\n    Nested Key traversal dictionary\n    d = QueryDict()\n\n    d['192']['168']['1']['100'] = 'test.domain.local'\n    SAME AS\n    d['192.168.1.100'] = 'test.domain.local'\n    \"\"\"\n    separator: str = \".\"\n\n    def __init__(self, seq: Sequence = None, **kwargs) -> None:\n        \"\"\"\n        Initialize a QueryDict\n        :param seq: initial Sequence data\n        :param kwargs: key/value parameters\n        \"\"\"\n        if seq:\n            ObjectDict.__init__(self, seq, **kwargs)\n        else:\n            ObjectDict.__init__(self, **kwargs)\n\n        for k, v in self.items():\n            if isinstance(v, dict) and not isinstance(v, self.__class__):\n                ObjectDict.__setitem__(self, k, QueryDict(v))\n            elif isinstance(v, (list, tuple)):\n                ObjectDict.__setitem__(self, k, type(v)(QueryDict(i) if isinstance(i, dict) else i for i in v))\n\n    # Override Functions\n    def get(self, path: str, default: Any = None, sep: str = None) -> Any:\n        \"\"\"\n        Get a key/path from the QueryDict\n        :param path: key(s) to get the value of separated by the separator character\n        :param default: default value if the key/path is not found\n        :param sep: separator character to use, default - '.'\n        :return: value of key/path or default\n        \"\"\"\n        sep = sep if sep else self.separator\n        path = self._pathSplit(str(path), sep)\n\n        if any(k.startswith(sep.join(path)) for k in self.compositeKeys()):\n            rtn = self\n            for key in path:\n                if isinstance(rtn, ObjectDict):\n                    rtn = ObjectDict.__getitem__(rtn, key)\n                elif isinstance(rtn, (list, tuple)) and len(rtn) > safe_cast(key, int, key):\n                    rtn = rtn[safe_cast(key, int, key)]\n                else:\n                    raise AttributeError(f\"Unknown type {type(rtn)}, cannot get value\")\n            return rtn\n        return default\n\n    def set(self, path: str, val: Any, sep: str = None) -> None:\n        \"\"\"\n        Set a key/path in the QueryDict object\n        :param path: key/path to set\n        :param val: value to set\n        :param sep: separator character to use, default - '.'\n        :return: None\n        \"\"\"\n        sep = sep if sep else self.separator\n        keys = self._pathSplit(str(path), sep)\n\n        if isinstance(val, dict):\n            val = QueryDict(val)\n        elif isinstance(val, (list, tuple)):\n            val = type(val)(QueryDict(i) if isinstance(i, dict) else i for i in val)\n\n        obj = self\n        for idx, key in enumerate(keys):\n            key = safe_cast(key, int, key)\n            next_key = safe_cast(keys[idx + 1], int, keys[idx + 1]) if len(keys) > idx + 1 else \"\"\n            end = len(keys) == idx + 1\n\n            if end:\n                if isinstance(obj, list) and isinstance(key, int):\n                    if len(obj) <= key:\n                        obj.append(val)\n                    else:\n                        obj[key] = val\n                elif isinstance(obj, ObjectDict):\n                    ObjectDict.__setitem__(obj, key, val)\n                else:\n                    print(f\"Other - {type(obj)}\")\n\n            elif key in obj:\n                obj = obj[key]\n            elif 
isinstance(obj, list) and isinstance(key, int):\n if len(obj) <= key:\n obj.append([] if isinstance(next_key, int) else ObjectDict())\n obj = obj[-1]\n else:\n obj = obj[key]\n elif isinstance(obj, ObjectDict):\n obj = obj.setdefault(key, [] if isinstance(next_key, int) else ObjectDict())\n else:\n obj = obj.setdefault(key, [] if isinstance(next_key, int) else ObjectDict())\n\n def delete(self, path: str, sep: str = None) -> None:\n \"\"\"\n Delete a key/path in the QueryDict object\n :param path: key/path to delete\n :param sep: separator character to use, default - '.'\n :return: None\n \"\"\"\n sep = sep if sep else self.separator\n path = self._pathSplit(path, sep)\n\n if any(k.startswith(sep.join(path)) for k in self.compositeKeys()):\n ref = self\n for idx, key in enumerate(path):\n key = safe_cast(key, int, key)\n end = len(path) == idx + 1\n\n if end:\n if isinstance(ref, list) and isinstance(key, int):\n if len(ref) > key:\n ref.remove(ref[key])\n elif isinstance(ref, ObjectDict):\n ObjectDict.__delitem__(ref, key)\n else:\n print(f\"Other - {type(ref)}\")\n\n elif key in ref:\n ref = ref[key]\n elif isinstance(ref, list) and isinstance(key, int):\n if len(ref) > key:\n ref = ref[key]\n else:\n raise KeyError(f\"{sep.join(path[:idx])} does not exist\")\n\n else:\n print(f\"Other - {type(ref)}\")\n\n def __contains__(self, path: str) -> bool:\n \"\"\"\n Verify if a key is in the MultiKeyDict - 'key0' in d and 'key1' in d['key0'] - SAME AS - 'key0.key1' in d\n :param path: path to verify if contained\n :return: if MultiKeyDict contains the given key\n \"\"\"\n keys = self._pathSplit(path)\n return path in self._compositeKeys(self) if len(keys) > 1 else ObjectDict.__contains__(self, path)\n\n def __deepcopy__(self, memo):\n \"\"\"\n Copy the QueryDict without referencing the original data\n :param memo: ...\n :return: copy of QueryDict\n \"\"\"\n return QueryDict(copy.deepcopy(dict(self), memo))\n\n __getattr__ = get\n __getitem__ = get\n\n __setattr__ = set\n __setitem__ = set\n\n __delattr__ = delete\n __delitem__ = delete\n\n # Custom Functions\n def compositeKeys(self, sep: str = None) -> List[str]:\n \"\"\"\n Compiled list of keys\n :param sep: key separator character\n :return: list of composite keys\n \"\"\"\n sep = sep if sep else self.separator\n return self._compositeKeys(self, sep)\n\n # Helper Functions\n def _compositeKeys(self, obj: Any, sep: str = None) -> List[str]:\n \"\"\"\n Determine the composite keys of the given object\n :param obj: object to get the composite keys\n :param sep: path separator character\n :return: list of keys\n \"\"\"\n sep = sep if sep else self.separator\n rtn = []\n key_vals = {}\n if isinstance(obj, self.__class__):\n key_vals = obj.items()\n elif isinstance(obj, (list, tuple)):\n key_vals = enumerate(obj)\n\n for key, val in key_vals:\n val_keys = self._compositeKeys(val, sep)\n rtn.extend([f\"{key}{sep}{k}\" for k in val_keys] if len(val_keys) > 0 else [key])\n\n return rtn\n\n def _pathSplit(self, path: str, sep: str = None) -> List[str]:\n \"\"\"\n Split the path based on the separator character\n :param path: path to split\n :param sep: separator character\n :return: list of separated keys\n \"\"\"\n sep = sep if sep else self.separator\n return list(filter(None, path.split(sep)))\n" }, { "alpha_fraction": 0.6171761155128479, "alphanum_fraction": 0.6171761155128479, "avg_line_length": 23.535715103149414, "blob_id": "364508ac27ed91ca186fa223bc73ebb2cffdb5be", "content_id": "58220df6e2c0d287f4c455f8da5e2b35ae9a4745", "detected_licenses": 
[ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 687, "license_type": "permissive", "max_line_length": 84, "num_lines": 28, "path": "/orchestrator/gui/client/src/actions/util.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "// Actions for utility endpoints\nimport { RSAA } from 'redux-api-middleware';\n\nconst str_fmt = require('string-format')\n\n// Helper Functions\nexport const withGUIAuth = (headers={}) => {\n return (state) => ({\n ...headers,\n 'Authorization': str_fmt('JWT {token}', {token: state.Auth.access.token || ''}),\n 'Content-Type': 'application/json'\n })\n}\n\n// API Calls\n// GET - /api/\nconst INFO_REQUEST = '@@util/INFO_REQUEST';\nexport const INFO_SUCCESS = '@@util/INFO_SUCCESS';\nexport const INFO_FAILURE = '@@util/INFO_FAILURE';\nexport const info = () => ({\n [RSAA]: {\n endpoint: '/api/',\n method: 'GET',\n types: [\n INFO_REQUEST, INFO_SUCCESS, INFO_FAILURE\n ]\n }\n})\n" }, { "alpha_fraction": 0.5584415793418884, "alphanum_fraction": 0.6233766078948975, "avg_line_length": 24.66666603088379, "blob_id": "3176b1fe573c4d25fd022e2bde4878ec9997eb8e", "content_id": "4931c8842f56ca74d200eb0f804186282b1665e5", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 462, "license_type": "permissive", "max_line_length": 123, "num_lines": 18, "path": "/orchestrator/core/orc_server/device/migrations/0003_device_multi_actuator.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2 on 2019-05-17 14:40\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('device', '0002_auto_20190416_1225'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='device',\n name='multi_actuator',\n field=models.BooleanField(default=True, help_text='Device can have multiple actuators or is its own actuator'),\n ),\n ]\n" }, { "alpha_fraction": 0.7040673494338989, "alphanum_fraction": 0.7096773982048035, "avg_line_length": 12.471697807312012, "blob_id": "ff2870b299905b4f1a814af1b597ef7ad8dd66de", "content_id": "6b0978a95a8e30c1da09cae652947f8ebdc8a208", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 713, "license_type": "permissive", "max_line_length": 47, "num_lines": 53, "path": "/orchestrator/gui/client/src/components/utils/index.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import InputField from './inputField'\nimport RemotePageTable from './remotePageTable'\n\nimport {\n mergeByProperty,\n updateArray\n} from './array'\n\nimport {\n checkSchema,\n safeGet,\n sleep,\n titleCase\n} from './general'\n\nimport {\n FormatJADN\n} from './jadn'\n\nimport {\n delMultiKey,\n getMultiKey,\n setMultiKey\n} from './multiKey'\n\nimport {\n ThemeChooser,\n ThemeSwitcher\n} from './theme-switcher'\n\nimport {\n generateUUID4,\n validateUUID4\n} from './uuid'\n\nexport {\n checkSchema,\n delMultiKey,\n FormatJADN,\n generateUUID4,\n getMultiKey,\n InputField,\n 
mergeByProperty,\n RemotePageTable,\n safeGet,\n setMultiKey,\n sleep,\n ThemeChooser,\n ThemeSwitcher,\n titleCase,\n updateArray,\n validateUUID4\n}" }, { "alpha_fraction": 0.6235242486000061, "alphanum_fraction": 0.6440752148628235, "avg_line_length": 24.10988998413086, "blob_id": "c9d9c7e4415b23decea3a52e294871488fadd832", "content_id": "f648c12efd3c3c806be74cd227c1d2d692d2566d", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2287, "license_type": "permissive", "max_line_length": 116, "num_lines": 91, "path": "/orchestrator/gui/server/gui_server/webApp/views/handlers.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport json\n\nfrom django.http import HttpResponseBadRequest, HttpResponseForbidden, HttpResponseNotFound, HttpResponseServerError\nfrom django.template import loader\n\nfrom tracking import log\n\n_browser_search = (\n \"IE\",\n \"Firefox\",\n \"Chrome\",\n \"Opera\",\n \"Safari\"\n)\n\n\ndef is_browser(request):\n user_agent = request.META.get('HTTP_USER_AGENT', '')\n return any(s in user_agent for s in _browser_search)\n\n\ndef get_error_msg(exception):\n exception_repr = exception.__class__.__name__\n\n try:\n message = exception.args[0]\n except (AttributeError, IndexError):\n pass\n else:\n if isinstance(message, str):\n exception_repr = message\n\n return exception_repr\n\n\ndef exception_response(request, code=400, exception=None, *args, **kwargs):\n code = code if code in [400, 403, 404, 500] else 400\n\n exception_repr = get_error_msg(exception)\n log.error(usr=request.user, msg=f'{code} - {exception_repr}')\n\n context = dict(\n message='Error 400 - Bad Request',\n request_path=request.path,\n exception=exception_repr\n )\n\n if is_browser(request):\n template = loader.get_template(f'error/{code}.html')\n rtn = dict(\n content=template.render(context, request),\n content_type='text/html'\n )\n else:\n rtn = dict(\n content=json.dumps(context),\n content_type='application/json'\n )\n\n return rtn\n\n\ndef bad_request(request, exception, *args, **kwargs):\n \"\"\"\n Catch all 400 - Bad Request\n \"\"\"\n return HttpResponseBadRequest(**exception_response(request, 400, exception))\n\n\ndef permission_denied(request, exception, *args, **kwargs):\n \"\"\"\n Catch all 403 - Forbidden/Permission Denied\n \"\"\"\n return HttpResponseForbidden(**exception_response(request, 400, exception))\n\n\ndef page_not_found(request, exception, *args, **kwargs):\n \"\"\"\n Catch all 404 - Not Found\n \"\"\"\n return HttpResponseNotFound(**exception_response(request, 400, exception))\n\n\ndef server_error(request, *args, **kwargs):\n \"\"\"\n Catch all 500 - Server Error\n \"\"\"\n return HttpResponseServerError(**exception_response(request, 400, Exception('Server Error')))\n\n\n" }, { "alpha_fraction": 0.6864035129547119, "alphanum_fraction": 0.6864035129547119, "avg_line_length": 25.823530197143555, "blob_id": "d04268667fa675aa56108da35285c27e4312f32e", "content_id": "36fc829c1655fce06b5b106c804805055bd190ca", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 912, "license_type": "permissive", 
"max_line_length": 87, "num_lines": 34, "path": "/orchestrator/core/orc_server/command/admin.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom .models import SentHistory, ResponseHistory\n\n\nclass ResponseInline(admin.TabularInline):\n \"\"\"\n Command Response InLine admin\n \"\"\"\n model = ResponseHistory\n readonly_fields = ('command', 'received_on', 'actuator', 'response', 'received_on')\n\n\nclass SentHistoryAdmin(admin.ModelAdmin):\n \"\"\"\n Command Sent admin\n \"\"\"\n list_display = ('command_id', '_coap_id', 'user', 'received_on', 'command')\n filter_horizontal = ('actuators', )\n readonly_fields = ('received_on', 'actuators')\n inlines = [ResponseInline, ]\n\n\nclass ResponseHistoryAdmin(admin.ModelAdmin):\n \"\"\"\n Command Response admin\n \"\"\"\n list_display = ('command', 'received_on', 'actuator', 'response')\n readonly_fields = ('received_on', )\n\n\n# Register models\nadmin.site.register(SentHistory, SentHistoryAdmin)\nadmin.site.register(ResponseHistory, ResponseHistoryAdmin)\n" }, { "alpha_fraction": 0.6483516693115234, "alphanum_fraction": 0.6483516693115234, "avg_line_length": 14.166666984558105, "blob_id": "fe8d047781969e63161f16bc9783e483000f5d56", "content_id": "e02ad50efb7f7f3b88247451caedecb74cd8ae75", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 91, "license_type": "permissive", "max_line_length": 37, "num_lines": 6, "path": "/orchestrator/core/orc_server/actuator/views/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from .viewsets import ActuatorViewSet\n\n__all__ = [\n # Viewsets\n 'ActuatorViewSet',\n]\n" }, { "alpha_fraction": 0.5477325916290283, "alphanum_fraction": 0.5520567893981934, "avg_line_length": 36.89495849609375, "blob_id": "c776ff7f8c3e5b88490526e32dbedda3f31a1de7", "content_id": "5f7283d88bd1cb2f6cbffb06cd921e96ba925d68", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 9019, "license_type": "permissive", "max_line_length": 254, "num_lines": 238, "path": "/orchestrator/gui/client/src/components/admin/lib/userModal.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\nimport { toast } from 'react-toastify'\n\nimport {\n Button,\n Modal,\n ModalBody,\n ModalFooter,\n ModalHeader\n} from 'reactstrap'\n\nimport * as AccountActions from '../../../actions/account'\nimport { withGUIAuth } from '../../../actions/util'\n\nclass UserModal extends Component {\n constructor(props, context) {\n super(props, context)\n\n this.toggleModal = this.toggleModal.bind(this)\n this.registerAccount = this.registerAccount.bind(this)\n this.saveAccount = this.saveAccount.bind(this)\n\n this.register = this.props.register == true\n\n this.defaultState = {\n account: {\n first_name: \"\",\n last_name: \"\",\n username: \"\",\n email: \"\",\n is_active: false,\n is_staff: false,\n auth_groups: [],\n actuator_groups: [],\n device_groups: []\n },\n password: {\n pass_1: \"\",\n pass_2: \"\"\n }\n }\n\n this.state = {\n modal: false,\n 
...this.defaultState\n    }\n  }\n\n  componentDidMount() {\n    if (this.props.register) {\n      this.setState({\n        ...this.defaultState\n      })\n\n    } else if (this.props.data) {\n      this.setState({\n        ...this.defaultState,\n        account: {\n          first_name: this.props.data.first_name,\n          last_name: this.props.data.last_name,\n          username: this.props.data.username,\n          email: this.props.data.email,\n          is_active: this.props.data.is_active || false,\n          is_staff: this.props.data.is_staff || false,\n          auth_groups: this.props.data.auth_groups,\n          actuator_groups: this.props.data.actuator_groups,\n          device_groups: this.props.data.device_groups\n        }\n      })\n    }\n  }\n\n  toggleModal() {\n    this.setState(prevState => ({\n      modal: !prevState.modal,\n      account: {\n        ...this.defaultState.account,\n        ...(this.register ? {} : this.props.data)\n      }\n    }))\n  }\n\n  registerAccount() {\n    console.log(\"register Account\")\n    if (this.state.password.pass_1 == this.state.password.pass_2 && this.state.password.pass_1.length >= 8) {\n      let account = {\n        ...this.state.account,\n        password: this.state.password.pass_1\n      }\n\n      Promise.resolve(this.props.createAccount(account)).then(() => {\n        setTimeout(() => {\n          let errs = this.props.errors[AccountActions.CREATE_ACCOUNT_FAILURE] || {}\n          if (Object.keys(errs).length == 0) {\n            this.toggleModal()\n          } else {\n            if (errs.hasOwnProperty('non_field_errors')) {\n              Object.values(errs).forEach(err => {\n                toast(<p>Error: { err }</p>, {type: toast.TYPE.WARNING})\n              })\n            } else {\n              Object.keys(errs).forEach(err => {\n                toast(<div><p>Error { err }:</p><p>{ errs[err] }</p></div>, {type: toast.TYPE.WARNING})\n              })\n            }\n          }\n        }, 500)\n      })\n    } else {\n      toast(<div><p>Error:</p><p>Passwords do not match or are less than 8 characters</p></div>, {type: toast.TYPE.WARNING})\n    }\n  }\n\n  saveAccount() {\n    console.log(\"Save Account\")\n    Promise.resolve(this.props.updateAccount(this.state.account.username, this.state.account)).then(() => {\n      setTimeout(() => {\n        let errs = this.props.errors[AccountActions.UPDATE_ACCOUNT_FAILURE] || {}\n        if (Object.keys(errs).length == 0) {\n          this.toggleModal()\n        } else {\n          if (errs.hasOwnProperty('non_field_errors')) {\n            Object.values(errs).forEach(err => {\n              toast(<p>Error: { err }</p>, {type: toast.TYPE.WARNING})\n            })\n          } else {\n            Object.keys(errs).forEach(err => {\n              toast(<div><p>Error { err }:</p><p>{ errs[err] }</p></div>, {type: toast.TYPE.WARNING})\n            })\n          }\n        }\n      }, 500)\n    })\n  }\n\n  passwords() {\n    let match = (this.state.password.pass_1 == this.state.password.pass_2 && this.state.password.pass_1.length >= 8)\n\n    return (\n      <div className=\"form-row\">\n        <div className=\"form-group col-lg-6\">\n          <label htmlFor=\"password_1\">Password</label>\n          <input type=\"password\" className={ \"form-control \" + (match ? 
\"is-valid\" : \"is-invalid\") } id='password_2' value={ this.state.password.pass_2 } onChange={ (e) => this.setState({ password: {...this.state.password, pass_2: e.target.value }}) } />\n </div>\n\n <small className='form-text text-muted'>\n <ul>\n <li>Your password can't be too similar to your other personal information.</li>\n <li>Your password must contain at least 8 characters.</li>\n <li>Your password can't be a commonly used password.</li>\n <li>Your password can't be entirely numeric.</li>\n </ul>\n </small>\n </div>\n )\n }\n\n render() {\n return (\n <div className={ 'd-inline-block ' + this.props.className }>\n <Button color='primary' size='sm' onClick={ this.toggleModal } >{ this.register ? 'Register' : 'Edit' }</Button>\n\n <Modal isOpen={ this.state.modal } toggle={ this.toggleModal } size='lg' >\n <ModalHeader toggle={ this.toggleModal }>{ this.register ? 'Register' : 'Edit' } User</ModalHeader>\n <ModalBody>\n <form onSubmit={ () => false }>\n <div className=\"form-row\">\n <div className=\"form-group col-lg-6\">\n <label htmlFor=\"first_name\">First Name</label>\n <input type=\"text\" className=\"form-control\" id='first_name' value={ this.state.account.first_name } onChange={ (e) => this.setState({ account: {...this.state.account, first_name: e.target.value }}) } />\n </div>\n\n <div className=\"form-group col-lg-6\">\n <label htmlFor=\"last_name\">Last Name</label>\n <input type=\"text\" className=\"form-control\" id='last_name' value={ this.state.account.last_name } onChange={ (e) => this.setState({ account: {...this.state.account, last_name: e.target.value }}) } />\n </div>\n </div>\n\n <div className=\"form-row\">\n <div className=\"form-group col-lg-6\">\n <label htmlFor=\"username\">Username</label>\n <input type=\"text\" className=\"form-control\" id='username' value={ this.state.account.username } onChange={ (e) => this.setState({ account: {...this.state.account, username: e.target.value }}) } />\n </div>\n\n <div className=\"form-group col-lg-6\">\n <label htmlFor=\"email\">Email</label>\n <input type=\"email\" className=\"form-control\" id='email' value={ this.state.account.email } onChange={ (e) => this.setState({ account: {...this.state.account, email: e.target.value }}) } />\n </div>\n </div>\n\n <div className=\"form-row mb-2\">\n <div className=\"col-12\">\n <div className=\"form-check-inline no_indent col-lg-3\">\n <label className=\"form-check-label\">\n <input className=\"form-check-input\" type=\"checkbox\" checked={ this.state.account.is_active } onChange={ (e) => this.setState({ account: {...this.state.account, is_active: e.target.checked }}) } />\n Active\n </label>\n </div>\n\n <div className=\"form-check-inline no_indent col-lg-3\">\n <label className=\"form-check-label\">\n <input className=\"form-check-input\" type=\"checkbox\" checked={ this.state.account.is_staff } onChange={ (e) => this.setState({ account: {...this.state.account, is_staff: e.target.checked }}) } />\n Admin\n </label>\n </div>\n </div>\n </div>\n\n { this.props.register ? this.passwords() : \"\"}\n\n </form>\n </ModalBody>\n <ModalFooter>\n <Button color=\"primary\" onClick={ this.register ? this.registerAccount : this.saveAccount }>{ this.register ? 
'Register' : 'Save' }</Button>\n <Button color=\"danger\" onClick={ this.toggleModal }>Cancel</Button>\n </ModalFooter>\n </Modal>\n </div>\n )\n }\n}\n\nconst mapStateToProps = (state) => ({\n errors: state.Account.errors\n})\n\nconst mapDispatchToProps = (dispatch) => ({\n createAccount: (acnt) => dispatch(AccountActions.createAccount(acnt)),\n updateAccount: (uname, acnt) => dispatch(AccountActions.updateAccount(uname, acnt))\n})\n\nexport default connect(mapStateToProps, mapDispatchToProps)(UserModal)\n" }, { "alpha_fraction": 0.5953778624534607, "alphanum_fraction": 0.6052824854850769, "avg_line_length": 46.824562072753906, "blob_id": "8a991b0e845e44c1c80788f7ba25311eb71cc6dd", "content_id": "e838a310777eb38b2519e93883849f0c79886059", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2726, "license_type": "permissive", "max_line_length": 195, "num_lines": 57, "path": "/orchestrator/core/orc_server/actuator/migrations/0001_initial.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2 on 2019-04-04 18:39\n\nimport actuator.models\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport jsonfield.fields\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('device', '0001_initial'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Actuator',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('actuator_id', models.UUIDField(default=uuid.uuid4, help_text='Unique UUID of the actuator', unique=True)),\n ('name', models.CharField(default=actuator.models.defaultName, help_text='Unique display name of the actuator', max_length=30, unique=True)),\n ('schema', jsonfield.fields.JSONField(blank=True, help_text='Schema of the actuator', null=True)),\n ('profile', models.CharField(default='N/A', help_text='Profile of the actuator, set from the profile', max_length=60)),\n ('device', models.ForeignKey(blank=True, default=None, help_text='Device the actuator is located on', null=True, on_delete=django.db.models.deletion.CASCADE, to='device.Device')),\n ],\n ),\n migrations.CreateModel(\n name='ActuatorProfile',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(help_text='Unique name of the group', max_length=80, unique=True)),\n ('actuators', models.ManyToManyField(blank=True, help_text='Actuators of the groups profile', to='actuator.Actuator')),\n ],\n options={\n 'verbose_name': 'profile',\n 'verbose_name_plural': 'profiles',\n },\n ),\n migrations.CreateModel(\n name='ActuatorGroup',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(help_text='Unique name of the group', max_length=80, unique=True)),\n ('actuators', models.ManyToManyField(blank=True, help_text='Actuators available to users in the group', to='actuator.Actuator')),\n ('users', models.ManyToManyField(blank=True, help_text='Users in the group', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'group',\n 'verbose_name_plural': 'groups',\n },\n ),\n ]\n" }, { 
"alpha_fraction": 0.56977778673172, "alphanum_fraction": 0.5706666707992554, "avg_line_length": 24.011110305786133, "blob_id": "41ed6de5db937082973559f77b0adb2f2653f8bf", "content_id": "cae02678c5d92f6298738d14b694203499502a0d", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2250, "license_type": "permissive", "max_line_length": 116, "num_lines": 90, "path": "/logger/gui/src/components/utils/theme-switcher/chooser.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react';\nimport PropTypes from 'prop-types';\n\nconst capitalize = s => s.charAt(0).toUpperCase() + s.substring(1);\n\nclass ThemeChooser extends Component {\n constructor(props, context) {\n super(props, context);\n this.onSelect = this.onSelect.bind(this);\n\n // get themes from context and sort them for display\n this.themes = [ ...context.themes ];\n\n this.themes.sort();\n\n this.state = {\n currentTheme: this.context.currentTheme || '',\n defaultTheme: this.context.defaultTheme\n };\n }\n\n onSelect(e) {\n e.preventDefault();\n this.setState({\n currentTheme: e.target.getAttribute('data-theme')\n }, () => {\n // eslint-disable-next-line promise/catch-or-return\n this.context.themeSwitcher.load(this.state.currentTheme).then(() => {\n return this.props.change(this.state.currentTheme);\n });\n });\n }\n\n render() {\n const themes = this.themes.map(theme => {\n return (\n <li key={ theme }>\n <a\n href='#'\n className={ `dropdown-item ${theme === this.state.currentTheme ? ' active' : ''}` }\n data-theme={ theme }\n onClick={ this.onSelect }\n >\n { theme === this.state.defaultTheme ? '* ' : '' }{ capitalize(theme) }\n </a>\n </li>\n );\n });\n\n return (\n <div className='dropdown dropdown-menu-right' style={ this.props.style }>\n <button\n id='theme-menu'\n className={ `btn btn-default dropdown-toggle ${this.props.size === '' ? 
'' : `btn-${this.props.size}` }` }\n type='button'\n data-toggle='dropdown'\n aria-haspopup='true'\n aria-expanded='true'\n >\n Theme\n </button>\n\n <ul className='dropdown-menu'>\n { themes }\n </ul>\n </div>\n );\n }\n}\n\nThemeChooser.contextTypes = {\n defaultTheme: PropTypes.string,\n themeSwitcher: PropTypes.object,\n themes: PropTypes.array,\n currentTheme: PropTypes.string\n};\n\nThemeChooser.propTypes = {\n style: PropTypes.object,\n size: PropTypes.oneOf(['sm', 'lg', '']),\n change: PropTypes.func\n};\n\nThemeChooser.defaultProps = {\n style: {},\n size: '',\n change: () => {}\n};\n\nexport default ThemeChooser;" }, { "alpha_fraction": 0.587517261505127, "alphanum_fraction": 0.5909718871116638, "avg_line_length": 25.638036727905273, "blob_id": "5fb6f758c8f2dee58190353324008ac6af03348f", "content_id": "69d3e642affb1827fe7db7c1dc67dee8299b9309", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4342, "license_type": "permissive", "max_line_length": 89, "num_lines": 163, "path": "/orchestrator/gui/server/gui_server/tracking/models.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.conf import settings\nfrom django.utils import timezone\n\nfrom rest_framework import serializers\n\nfrom . import _DB_LEVELS\n\n\nclass RequestLog(models.Model):\n \"\"\"\n Logs Django requests\n \"\"\"\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.SET_NULL,\n blank=True,\n help_text=\"User that requested the page\",\n null=True\n )\n requested_at = models.DateTimeField(\n db_index=True,\n default=timezone.now,\n help_text=\"Time the initial request was received\"\n )\n response_ms = models.PositiveIntegerField(\n default=0,\n help_text=\"Time it took to process the request in milliseconds\"\n )\n path = models.CharField(\n db_index=True,\n help_text=\"URL path for the request\",\n max_length=200\n )\n view = models.CharField(\n db_index=True,\n blank=True,\n help_text=\"Method that was called to process the request\",\n max_length=200,\n null=True\n )\n view_method = models.CharField(\n db_index=True,\n blank=True,\n help_text=\"HTTP Method of the request\",\n max_length=30,\n null=True,\n )\n remote_addr = models.GenericIPAddressField(\n blank=True,\n help_text=\"Remote IP Address of the system that made the requested\",\n null=True,\n )\n host = models.URLField(\n blank=True,\n help_text=\"Host of the system that received the request\",\n null=True,\n )\n method = models.CharField(\n help_text=\"HTTP Method of the request\",\n max_length=10\n )\n query_params = models.TextField(\n blank=True,\n help_text=\"Data received in the URL as Query Parameters\",\n null=True\n )\n data = models.TextField(\n blank=True,\n help_text=\"Data received in the Body/JSON of the request\",\n null=True\n )\n response = models.TextField(\n blank=True,\n help_text=\"Data sent back to the remote system\",\n null=True\n )\n errors = models.TextField(\n blank=True,\n help_text=\"Errors raised in processing the request\",\n null=True\n )\n status_code = models.PositiveIntegerField(\n blank=True,\n help_text=\"HTTP response status code\",\n null=True\n )\n\n class Meta:\n verbose_name = 'Request Log'\n\n def __str__(self):\n return f'Request - {self.method} {self.path}'\n\n\nclass EventLog(models.Model):\n \"\"\"\n Logs Specified 
Events\n \"\"\"\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n blank=True,\n help_text=\"User that caused the event\",\n null=True,\n on_delete=models.SET_NULL\n )\n occurred_at = models.DateTimeField(\n default=timezone.now,\n help_text=\"Time the event occurred\"\n )\n level = models.CharField(\n choices=_DB_LEVELS,\n help_text=\"Level of severity the event\",\n max_length=1\n )\n message = models.TextField(\n blank=True,\n help_text=\"Event message\",\n null=True\n )\n\n class Meta:\n verbose_name = 'Event Log'\n\n def __str__(self):\n lvl = [l[1] for l in _DB_LEVELS if l[0] == self.level][0]\n return f'Event - {lvl} - {self.occurred_at}'\n\n\nclass RequestLogSerializer(serializers.ModelSerializer):\n \"\"\"\n Model Serializer for Logs\n \"\"\"\n user = serializers.SlugRelatedField(\n allow_null=True,\n read_only=True,\n slug_field='username'\n )\n\n requested_at = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S %z')\n remote_addr = serializers.IPAddressField()\n\n class Meta:\n model = RequestLog\n fields = ('id', 'user', 'requested_at', 'response_ms', 'path', 'status_code',\n 'view', 'view_method', 'remote_addr', 'host', 'method', 'query_params',\n 'data', 'response', 'errors', 'status_code')\n\n\nclass EventLogSerializer(serializers.ModelSerializer):\n \"\"\"\n Model Serializer for Events\n \"\"\"\n user = serializers.SlugRelatedField(\n allow_null=True,\n read_only=True,\n slug_field='username'\n )\n occurred_at = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S %z')\n\n class Meta:\n model = EventLog\n fields = ('id', 'user', 'occurred_at', 'level', 'message')\n" }, { "alpha_fraction": 0.6527777910232544, "alphanum_fraction": 0.6527777910232544, "avg_line_length": 13.399999618530273, "blob_id": "527fd95c280a8a81596735c53357034670f1878d", "content_id": "d1a0d4dba54c3631a2d0653e87e8bac250438421", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 72, "license_type": "permissive", "max_line_length": 35, "num_lines": 5, "path": "/orchestrator/core/orc_server/device/views/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from .viewsets import DeviceViewSet\n\n__all__ = [\n 'DeviceViewSet',\n]\n" }, { "alpha_fraction": 0.5379746556282043, "alphanum_fraction": 0.5393811464309692, "avg_line_length": 27.420000076293945, "blob_id": "8441f56eda9fd74102bd9834b7cce536f807064d", "content_id": "e3430a50b0319a8911941ebb9e109c3d73be72ef", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2844, "license_type": "permissive", "max_line_length": 121, "num_lines": 100, "path": "/orchestrator/core/orc_server/conformance/tests/test_setup.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "\"\"\"\nConformance Test Setup\n\"\"\"\nimport unittest\n\n# Local imports\nfrom sb_utils import FrozenDict\n\n\nclass SetupTestSuite(unittest.TestSuite):\n \"\"\"\n Basic OpenC2 TestSuite Class\n \"\"\"\n _testKwargs: dict\n\n def __init__(self, tests: tuple = (), **kwargs):\n super(SetupTestSuite, self).__init__(tests=tests)\n self._testKwargs = kwargs\n\n def run(self, result, debug=False):\n topLevel = False\n if getattr(result, '_testRunEntered', 
False) is False:\n result._testRunEntered = topLevel = True\n\n for index, test in enumerate(self):\n if result.shouldStop:\n break\n\n if unittest.suite._isnotsuite(test):\n self._tearDownPreviousClass(test, result)\n self._handleModuleFixture(test, result)\n self._handleClassSetUp(test, result)\n result._previousTestClass = test.__class__\n\n if (getattr(test.__class__, '_classSetupFailed', False) or getattr(result, '_moduleSetUpFailed', False)):\n continue\n\n if not debug:\n test(result, **self._testKwargs)\n else:\n test.debug(**self._testKwargs)\n\n if self._cleanup:\n self._removeTestAtIndex(index)\n\n if topLevel:\n self._tearDownPreviousClass(None, result)\n self._handleModuleTearDown(result)\n result._testRunEntered = False\n return result\n\n\nclass SetupTestCase(unittest.TestCase):\n \"\"\"\n OpenC2 TestCase setup class\n \"\"\"\n actuator: FrozenDict\n '''\n FrozenDict - immutable dictionary with object like attribute access\n Actuator: FrozenDict[\n actuator_id: UUIDv4\n name: str\n device: FrozenDict[\n device_id: UUIDv3\n name: str\n transport: Tuple[\n FrozenDict[\n transport_id: str,\n host: str\n port: int\n protocol: str\n serialization: List[str]\n topic: str\n channel: str\n pub_sub: bool\n ]\n ]\n nodes: str\n ] \n schema: FrozenDict\n schema_format: str -> OneOf(jadn, json)\n profile: str\n ]\n '''\n profile: str = None\n\n def __init__(self, methodName: str = 'runTest', **kwargs):\n super(SetupTestCase, self).__init__(methodName=methodName)\n self._setupKwargs(**kwargs)\n\n def _setupKwargs(self, **kwargs):\n self.actuator = kwargs.get('actuator', None)\n\n def debug(self, **kwargs):\n self._setupKwargs(**kwargs)\n super(SetupTestCase, self).debug()\n\n def __call__(self, *args, **kwargs):\n self._setupKwargs(**kwargs)\n return self.run(*args)\n\n\n" }, { "alpha_fraction": 0.6627907156944275, "alphanum_fraction": 0.6637334823608398, "avg_line_length": 47.9538459777832, "blob_id": "e8a5ed8281f265424b803adfd1ad28c1e769b398", "content_id": "5e17fffc341665aa32f67ae2a72e23bdb37e2340", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3182, "license_type": "permissive", "max_line_length": 128, "num_lines": 65, "path": "/orchestrator/core/orc_server/tracking/conf.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\nfrom django.conf import settings\n\nfrom . 
import LEVELS, REQUEST_LEVELS\n\n\nclass TrackingConfig(AppConfig):\n    name = 'tracking'\n\n    URL_PREFIXES = [\n        # \"^/(?!admin)\"  # Don't log /admin/*\n        \".*\"  # Log Everything\n    ]\n\n    EVENT_LEVELS = [l[0].upper() for l in LEVELS]\n\n    REQUEST_LEVELS = [getattr(REQUEST_LEVELS, err) for err in REQUEST_LEVELS]\n\n    SENSITIVE_FIELDS = []\n\n    def __init__(self, app_name, app_module):\n        super(TrackingConfig, self).__init__(app_name, app_module)\n        self._prefix = TrackingConfig.Meta.prefix\n\n        global_settings = {n: getattr(settings, n) for n in dir(settings) if n.startswith(self._prefix)}\n        for s in global_settings:\n            delattr(settings, s)\n\n        if len(global_settings.keys()) == 1 and self._prefix in global_settings:\n            global_settings = global_settings.get(self._prefix)\n\n        # Validate URL Prefixes\n        prefxs = global_settings.get('URL_PREFIXES', self.URL_PREFIXES)\n        if not isinstance(prefxs, (list, tuple)):\n            raise ValueError(f\"{self._prefix}_URL_PREFIXES is improperly formatted, expected list/tuple got {type(prefxs)}\")\n        if not all(isinstance(url, str) for url in prefxs):\n            raise ValueError(f\"{self._prefix}_URL_PREFIXES is improperly formatted, values should be regex strings\")\n        setattr(settings, f\"{self._prefix}_URL_PREFIXES\", prefxs)\n\n        # Validate Event Levels\n        evt_lvls = global_settings.get('EVENT_LEVELS', self.EVENT_LEVELS)\n        if not isinstance(evt_lvls, (list, tuple)):\n            raise ValueError(f\"{self._prefix}_EVENT_LEVELS is improperly formatted, expected list/tuple got {type(evt_lvls)}\")\n        if not all(isinstance(lvl, str) and len(lvl) == 1 for lvl in evt_lvls):\n            raise ValueError(f\"{self._prefix}_EVENT_LEVELS is improperly formatted, values should be single character string\")\n        setattr(settings, f\"{self._prefix}_EVENT_LEVELS\", evt_lvls)\n\n        # Validate Request Levels\n        rqst_lvls = global_settings.get('REQUEST_LEVELS', self.REQUEST_LEVELS)\n        if not isinstance(rqst_lvls, (list, tuple)):\n            raise ValueError(f\"{self._prefix}_REQUEST_LEVELS is improperly formatted, expected list/tuple got {type(rqst_lvls)}\")\n        if not all(isinstance(lvl, (list, range, tuple)) for lvl in rqst_lvls):\n            raise ValueError(f\"{self._prefix}_REQUEST_LEVELS is improperly formatted, values should be list/range/tuple\")\n        setattr(settings, f\"{self._prefix}_REQUEST_LEVELS\", rqst_lvls)\n\n        # Validate Sensitive fields\n        sensitive_fields = global_settings.get('SENSITIVE_FIELDS', self.SENSITIVE_FIELDS)\n        if not isinstance(sensitive_fields, (list, tuple)):\n            raise ValueError(f\"{self._prefix}_SENSITIVE_FIELDS is improperly formatted, expected list/tuple got {type(sensitive_fields)}\")\n        if not all(isinstance(field, str) for field in sensitive_fields):\n            raise ValueError(f\"{self._prefix}_SENSITIVE_FIELDS is improperly formatted, values should be str\")\n        setattr(settings, f\"{self._prefix}_SENSITIVE_FIELDS\", sensitive_fields)\n\n    class Meta:\n        prefix = 'TRACKING'\n" }, { "alpha_fraction": 0.5905172228813171, "alphanum_fraction": 0.5905172228813171, "avg_line_length": 30.280899047851562, "blob_id": "1efc384e3cdc2959ff0c9ec092a1019ff3a79547", "content_id": "c96de6d5e09d6f48aceb369eee46bb51fb916d00", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2784, "license_type": "permissive", "max_line_length": 72, "num_lines": 89, "path": "/orchestrator/core/orc_server/orchestrator/middleware.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": 
"UTF-8", "text": "import json\n\nfrom django.http import QueryDict\nfrom django.http.multipartparser import MultiValueDict\nfrom django.utils.deprecation import MiddlewareMixin\n\n\nclass RESTMiddleware(MiddlewareMixin):\n \"\"\"\n REST API Middleware for proper handling of REST HTTP methods\n \"\"\"\n def process_request(self, request):\n \"\"\"\n Process REST request\n :param request: request instance\n :return: None\n \"\"\"\n request.PUT = QueryDict('')\n request.DELETE = QueryDict('')\n method = request.META.get('REQUEST_METHOD', '').upper()\n if method == 'PUT':\n self.handle_PUT(request)\n elif method == 'DELETE':\n self.handle_DELETE(request)\n\n def handle_DELETE(self, request):\n \"\"\"\n Handle REST DELETE request\n :param request: request instance\n :return: None\n \"\"\"\n request.DELETE, request._files = self.parse_request(request)\n\n def handle_PUT(self, request):\n \"\"\"\n Handle REST PUT request\n :param request: request instance\n :return: None\n \"\"\"\n request.PUT, request._files = self.parse_request(request)\n if not hasattr(request, 'data'):\n request.data = dict(request.PUT.dict())\n\n def parse_request(self, request):\n \"\"\"\n Parse data sent with request\n :param request: request instance\n :return: processed request data\n \"\"\"\n if request.META.get('CONTENT_TYPE', '').startswith('multipart'):\n return self.parse_multipart(request)\n if request.META.get('CONTENT_TYPE', '').endswith('json'):\n return self.parse_json(request), MultiValueDict()\n return self.parse_form(request), MultiValueDict()\n\n def parse_json(self, request):\n \"\"\"\n Parse request as json data\n :param request: request instance\n :return: processed data\n \"\"\"\n data = QueryDict('', mutable=True)\n try:\n data.update(json.loads(request.body))\n except json.JSONDecodeError:\n if request.body not in ['', b'', None]:\n data = QueryDict(request.body)\n\n return data.copy()\n\n def parse_form(self, request):\n \"\"\"\n Parse request as form data\n :param request: request instance\n :return: processed data\n \"\"\"\n try:\n return QueryDict(request.raw_post_data)\n except AttributeError as e:\n print(f'Form Parse Error: {e}')\n return QueryDict(request.body)\n\n def parse_multipart(self, request):\n \"\"\"\n Parse request as multipart from data\n :param request: request instance\n :return: processed data\n \"\"\"\n return request.parse_file_upload(request.META, request)\n" }, { "alpha_fraction": 0.6278145909309387, "alphanum_fraction": 0.6331126093864441, "avg_line_length": 19.97222137451172, "blob_id": "125119de6bbcbea37589137cb27459e3d401f1c7", "content_id": "5205ebe85950cc6a6a85eba62db0879bea556f36", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1510, "license_type": "permissive", "max_line_length": 49, "num_lines": 72, "path": "/orchestrator/core/orc_server/command/documents.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from es_mirror.document import Document, InnerDoc\nfrom es_mirror.field import (\n Date,\n Integer,\n Nested,\n Object,\n Text\n)\n\n\nclass UserDocument(InnerDoc):\n username = Text()\n email = Text()\n\n\nclass DeviceDocument(InnerDoc):\n name = Text()\n device_id = Text()\n\n\nclass ActuatorDocument(InnerDoc):\n name = Text()\n actuator_id = Text()\n device = Object(DeviceDocument)\n\n\nclass OpenC2_CommandDocument(InnerDoc):\n action = 
Text()\n target = Object(dynamic=True)\n args = Object(dynamic=True)\n actuator = Object(dynamic=True)\n command_id = Text()\n\n\nclass OpenC2_ResponseDocument(InnerDoc):\n status = Integer()\n status_text = Text()\n results = Object(dynamic=True)\n\n\nclass CommandDocument(Document):\n command_id = Text()\n user = Object(UserDocument)\n received_on = Date(default_timezone='UTC')\n actuators = Nested(ActuatorDocument)\n command = Object(OpenC2_CommandDocument)\n\n class Index:\n name = 'commands'\n\n settings = {\n 'number_of_shards': 1,\n 'number_of_replicas': 0\n }\n\n\nclass ResponseDocument(Document):\n command = Text()\n received_on = Date(default_timezone='UTC')\n actuator = Object(ActuatorDocument)\n response = Object(OpenC2_ResponseDocument)\n\n class Index:\n name = 'responses'\n\n settings = {\n 'number_of_shards': 1,\n 'number_of_replicas': 0\n }\n\n def prepare_command(self, instance):\n return instance.command.command_id\n" }, { "alpha_fraction": 0.6847826242446899, "alphanum_fraction": 0.6847826242446899, "avg_line_length": 22, "blob_id": "d8a01f323e1df699b34cdcd240be03d31ee9e1a9", "content_id": "8d29a53f3c6a00f1f63b7a346a2db71ca2132ce8", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 184, "license_type": "permissive", "max_line_length": 49, "num_lines": 8, "path": "/orchestrator/core/orc_server/command/views/stats.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from ..models import SentHistory, ResponseHistory\n\n\ndef app_stats():\n return dict(\n sent=SentHistory.objects.count(),\n responses=ResponseHistory.objects.count()\n )\n" }, { "alpha_fraction": 0.629363477230072, "alphanum_fraction": 0.6344969272613525, "avg_line_length": 27.902076721191406, "blob_id": "e4c95652949a4f817059b55991367901a75d1fea", "content_id": "12e635068bff37481391935e7d62c17bfa81eb81", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9740, "license_type": "permissive", "max_line_length": 105, "num_lines": 337, "path": "/orchestrator/core/orc_server/orchestrator/settings.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import datetime\nimport os\nimport re\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nDATA_DIR = os.path.join(BASE_DIR, 'data')\n\nFIXTURE_DIRS = [\n os.path.join(DATA_DIR, 'fixtures')\n]\n\nif not os.path.isdir(DATA_DIR):\n os.mkdir(DATA_DIR)\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'vcj0le7zphvkzdcmnh7)i2sd(+ba2@k4pahqss&nbbpk4cpk@y'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = not os.getenv('DJANGO_ENV') == 'prod'\n\nALLOWED_HOSTS = ['*']\n\nIP = '0.0.0.0'\n\nPORT = \"8080\"\n\nSOCKET = f'{IP}:{PORT}'\n\nAPPEND_SLASH = True\n\n# Application definition\nINSTALLED_APPS = [\n # Custom Modules - MUST BE IN DEPENDENCY ORDER!!\n 'orchestrator',\n 'es_mirror',\n 'device',\n 'actuator',\n 'account',\n 'command',\n 
'conformance',\n    'backup',\n    'tracking',\n    # Default Modules\n    'django.contrib.admin',\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    'django.contrib.messages',\n    'django.contrib.staticfiles',\n    # REST API\n    'rest_framework',\n    'rest_framework.authtoken',\n    # Swagger REST API View\n    'rest_framework_swagger',\n    # DataTables AJAX Addin\n    'rest_framework_datatables',\n    # Dynamic config from database\n    'dynamic_preferences',\n    # Dynamic user config - uncomment to enable\n    # 'dynamic_preferences.users.apps.UserPreferencesConfig',\n    # CORS (Cross-Origin Resource Sharing)\n    'corsheaders',\n]\n\nMIDDLEWARE = [\n    'django.middleware.security.SecurityMiddleware',\n    'whitenoise.middleware.WhiteNoiseMiddleware',\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'corsheaders.middleware.CorsMiddleware',\n    'django.middleware.common.CommonMiddleware',\n    'django.middleware.csrf.CsrfViewMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n    'django.contrib.messages.middleware.MessageMiddleware',\n    'django.middleware.clickjacking.XFrameOptionsMiddleware',\n    'orchestrator.middleware.RESTMiddleware',\n    'tracking.middleware.LoggingMiddleware'\n]\n\nCORS_ORIGIN_ALLOW_ALL = True\n\nROOT_URLCONF = 'orchestrator.urls'\n\nTEMPLATES = [\n    {\n        'BACKEND': 'django.template.backends.django.DjangoTemplates',\n        'DIRS': [\n            os.path.join(DATA_DIR, 'templates')\n        ],\n        'APP_DIRS': True,\n        'OPTIONS': {\n            'context_processors': [\n                'django.template.context_processors.debug',\n                'django.template.context_processors.request',\n                'django.contrib.auth.context_processors.auth',\n                'django.contrib.messages.context_processors.messages',\n                'django.template.context_processors.request',\n                'dynamic_preferences.processors.global_preferences'\n            ]\n        }\n    }\n]\n\nWSGI_APPLICATION = 'orchestrator.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/1.11/ref/settings/#databases\n# MySQL/MariaDB\nDATABASES = {\n    'default': {\n        'ENGINE': 'mysql.connector.django',\n        'NAME': os.environ.get('DATABASE_NAME', 'orchestrator'),\n        'USER': os.environ.get('DATABASE_USER', 'orc_root'),\n        'PASSWORD': os.environ.get('DATABASE_PASSWORD', '0Rch35Tr@t0r'),\n        'HOST': os.environ.get('DATABASE_HOST', 'localhost'),\n        'PORT': os.environ.get('DATABASE_PORT', '3306'),\n        'CONN_MAX_AGE': 5\n    }\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators\nAUTH_PASSWORD_VALIDATORS = [\n    {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},\n    {'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator'},\n    {'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},\n    {'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.11/topics/i18n/\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.11/howto/static-files/\nSTATIC_URL = '/static/'\n\n# Central location for all static files\nSTATIC_ROOT = os.path.join(DATA_DIR, \"static\")\n\nSTATICFILES_DIRS = []\n\nif DEBUG:\n    # App Static Dirs\n    for app in INSTALLED_APPS:\n        app_static_dir = os.path.join(BASE_DIR, app, 'static')\n        if os.path.isdir(app_static_dir):\n            STATICFILES_DIRS.append(app_static_dir)\n\nMEDIA_URL = '/uploads/'\n\n# Email Config - maybe...\n# https://docs.djangoproject.com/en/2.0/topics/email/\n\n# Auth Config\nLOGIN_URL = '/account/login/'\n\nLOGIN_REDIRECT_URL = '/'\n\nLOGOUT_URL = '/account/logout/'\n\n# Dynamic Preferences\nDYNAMIC_PREFERENCES = {\n    'REGISTRY_MODULE': 'preferences_registry'\n}\n\n# JWT\nJWT_AUTH = {\n    'JWT_SECRET_KEY': SECRET_KEY,\n    'JWT_GET_USER_SECRET_KEY': None,\n    'JWT_PUBLIC_KEY': None,\n    'JWT_PRIVATE_KEY': None,\n    'JWT_ALGORITHM': 'HS512',\n    'JWT_VERIFY': True,\n    'JWT_VERIFY_EXPIRATION': True,\n    'JWT_LEEWAY': 0,\n    'JWT_EXPIRATION_DELTA': datetime.timedelta(minutes=30),\n    'JWT_AUDIENCE': None,\n    'JWT_ISSUER': None,\n    'JWT_ALLOW_REFRESH': True,\n    'JWT_REFRESH_EXPIRATION_DELTA': datetime.timedelta(days=7),\n    # 'JWT_PAYLOAD_HANDLER': 'rest_framework_jwt.utils.jwt_payload_handler',  # Original\n    'JWT_PAYLOAD_HANDLER': 'orchestrator.jwt_handlers.jwt_payload_handler',  # Custom\n    'JWT_PAYLOAD_GET_USER_ID_HANDLER': 'rest_framework_jwt.utils.jwt_get_user_id_from_payload_handler',\n    'JWT_PAYLOAD_GET_USERNAME_HANDLER': 'rest_framework_jwt.utils.jwt_get_username_from_payload_handler',\n    'JWT_RESPONSE_PAYLOAD_HANDLER': 'rest_framework_jwt.utils.jwt_response_payload_handler',  # Original\n    # 'JWT_RESPONSE_PAYLOAD_HANDLER': 'orchestrator.jwt_handlers.jwt_response_payload_handler',  # Custom\n    'JWT_AUTH_HEADER_PREFIX': 'JWT',\n    'JWT_AUTH_COOKIE': None,\n    # Not listed in docs, but in example.....\n    'JWT_ENCODE_HANDLER': 'rest_framework_jwt.utils.jwt_encode_handler',\n    'JWT_DECODE_HANDLER': 'rest_framework_jwt.utils.jwt_decode_handler',\n}\n\n# Rest API\nREST_FRAMEWORK = {\n    'DEFAULT_AUTHENTICATION_CLASSES': (\n        'rest_framework.authentication.BasicAuthentication',\n        'rest_framework.authentication.TokenAuthentication',\n        'rest_framework_jwt.authentication.JSONWebTokenAuthentication',\n        'rest_framework.authentication.SessionAuthentication'\n    ),\n    'DEFAULT_PARSER_CLASSES': [\n        'rest_framework.parsers.JSONParser'\n    ],\n    'DEFAULT_PERMISSION_CLASSES': [\n        'rest_framework.permissions.AllowAny'\n    ],\n    'DEFAULT_RENDERER_CLASSES': [\n        'rest_framework.renderers.JSONRenderer',\n        'rest_framework.renderers.BrowsableAPIRenderer',\n        'rest_framework_datatables.renderers.DatatablesRenderer',\n    ],\n    'DEFAULT_FILTER_BACKENDS': [\n        'rest_framework_datatables.filters.DatatablesFilterBackend',\n    ],\n    'DEFAULT_PAGINATION_CLASS': 'rest_framework_datatables.pagination.DatatablesPageNumberPagination',\n    'PAGE_SIZE': 10,\n    'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema',\n    'DATETIME_FORMAT': \"%Y-%m-%dT%H:%M:%S.%fZ\"\n}\n\n\n# Logging\nIGNORE_LOGS = (\n    r'^pyexcel_io.*',\n    r'^lml.*'\n)\n\nLOGGING = {\n    'version': 1,\n    'disable_existing_loggers': False,\n    'filters': {\n        'ignore_logs': {\n            '()': 'django.utils.log.CallbackFilter',\n            'callback': lambda r: not any([re.match(reg, r.name) for reg in IGNORE_LOGS])\n        }\n    },\n    'formatters': {\n        'requests': {\n            'format': '{asctime} [{levelname}] {name}: {message}',\n            'style': '{',\n        },\n        'stream': {\n            'format': '{levelname} {module} {message}',\n            'style': '{',\n        },\n        'verbose': {\n            'format': '{levelname} {asctime} {module} {process:d} {thread:d} {message}',\n            'style': '{',\n        },\n    },\n    'handlers': {\n        'console': {\n            'class': 'logging.StreamHandler',\n            'level': 'DEBUG',\n            'formatter': 'stream',\n            'filters': ['ignore_logs']\n        },\n        'requests': {\n            'class': 'logging.StreamHandler',\n            'level': 'DEBUG',\n            'formatter': 'requests',\n            'filters': ['ignore_logs']\n        }\n    },\n    'loggers': {\n        'django.request': {\n            'handlers': ['requests'],\n            'level': 'DEBUG',\n            'propagate': False\n        }\n    },\n    'root': {\n        'handlers': ['console'],\n        'level': 'DEBUG'\n    }\n}\n\n# 
Tracking\nfrom tracking import REQUEST_LEVELS # pylint: disable=wrong-import-position\nTRACKING = {\n 'URL_PREFIXES': [\n '^/(?!admin)' # Don't log /admin/*\n ],\n 'REQUEST_LEVELS': [\n REQUEST_LEVELS.Redirect,\n REQUEST_LEVELS.Client_Error,\n REQUEST_LEVELS.Server_Error\n ]\n}\n\n# Elasticsearch Model Mirroring\nES_MIRROR = {\n 'host': os.environ.get('ES_HOST', None),\n 'prefix': os.environ.get('ES_PREFIX', ''),\n}\n\n# Message Queue\nQUEUE = {\n 'hostname': os.environ.get('QUEUE_HOST', 'localhost'),\n 'port': os.environ.get('QUEUE_PORT', 5672),\n 'auth': {\n 'username': os.environ.get('QUEUE_USER', 'guest'),\n 'password': os.environ.get('QUEUE_PASSWORD', 'guest')\n },\n 'exchange': 'orchestrator',\n 'consumer_key': 'response',\n 'producer_exchange': 'transport'\n}\n\nMESSAGE_QUEUE = None\n\n# Valid Schema Formats\nSCHEMA_FORMATS = (\n 'jadn',\n 'json'\n)\n\n# App stats function\nSTATS_FUN = 'app_stats'\n\n# GUI Configuration\nADMIN_GUI = True\n" }, { "alpha_fraction": 0.6066176295280457, "alphanum_fraction": 0.658088207244873, "avg_line_length": 27.63157844543457, "blob_id": "fa0e83779cde3a1c662174c8ad9aca04a3d36b37", "content_id": "db0f783ff917037991e3718e1b54e2869d5edd3a", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 544, "license_type": "permissive", "max_line_length": 191, "num_lines": 19, "path": "/orchestrator/core/orc_server/device/migrations/0002_auto_20190416_1225.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2 on 2019-04-16 12:25\n\nimport django.core.validators\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('device', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='transport',\n name='port',\n field=models.IntegerField(default=8080, help_text='Port of the device', validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(65535)]),\n ),\n ]\n" }, { "alpha_fraction": 0.6693548560142517, "alphanum_fraction": 0.6693548560142517, "avg_line_length": 16.714284896850586, "blob_id": "8190dff1a6180700375e6e9375328ab0ab098c7e", "content_id": "1724702a0ee78ba8c65d2496d1994506d3be31b6", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 124, "license_type": "permissive", "max_line_length": 51, "num_lines": 7, "path": "/orchestrator/core/orc_server/conformance/views/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from .viewsets import ConformanceViewSet, UnitTests\n\n__all__ = [\n # ViewSets\n 'ConformanceViewSet',\n 'UnitTests'\n]\n" }, { "alpha_fraction": 0.8039950132369995, "alphanum_fraction": 0.8080970048904419, "avg_line_length": 113.38775634765625, "blob_id": "f964d181c56933ba5042194c3e38e88dc4e4ab22", "content_id": "12cc91071f2658e1bc2894b54c85830a9e95c71f", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5607, "license_type": "permissive", "max_line_length": 812, "num_lines": 
49, "path": "/README.md", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# README\n\n## <a id=\"readme-general\"></a>OASIS TC Open Repository: openc2-oif-orchestrator\n\nThis GitHub public repository [openc2-oif-orchestrator](https://github.com/oasis-open/openc2-oif-orchestrator) was created at the request of the [OASIS OpenC2 Technical Committee](https://www.oasis-open.org/committees/openc2/) as an [OASIS TC Open Repository](https://www.oasis-open.org/resources/open-repositories/) to support development of open source resources related to Technical Committee work.\n\nWhile this TC Open Repository remains associated with the sponsor TC, its development priorities, leadership, intellectual property terms, participation rules, and other matters of governance are separate and distinct from the OASIS TC Process and related policies.\n\nAll contributions made to this TC Open Repository are subject to open source license terms expressed in [Apache License v 2.0](https://www.oasis-open.org/sites/www.oasis-open.org/files/Apache-LICENSE-2.0.txt). That license was selected as the declared [Applicable License](https://www.oasis-open.org/resources/open-repositories/licenses) when the TC voted to create this Open Repository.\n\nAs documented in [Public Participation Invited](https://github.com/oasis-open/openc2-oif-orchestrator/blob/master/CONTRIBUTING.md#public-participation-invited), contributions to this TC Open Repository are invited from all parties, whether affiliated with OASIS or not. Participants must have a GitHub account, but no fees or OASIS membership obligations are required. Participation is expected to be consistent with the [OASIS TC Open Repository Guidelines and Procedures](https://www.oasis-open.org/policies-guidelines/open-repositories), the open source [LICENSE.md](LICENSE.md) designated for this particular repository, and the requirement for an [Individual Contributor License Agreement](href=\"https://www.oasis-open.org/resources/open-repositories/cla/individual-cla) that governs intellectual property.\n\n## <a id=\"purposeStatement\"></a>Statement of Purpose\n\nOpenC2 Integration Framework (OIF) is a project that will enable developers to create and test OpenC2 specifications and implementations without having to recreate an entire OpenC2 ecosystem.\n\nOIF consists of two major parts. The \"orchestrator\" which functions as an OpenC2 producer and the \"Device\" which functions as an OpenC2 consumer.\n\nThis particular repository contains the code required to set up an OpenC2 Orchestrator. The Device repository can be found [here](https://github.com/oasis-open/openc2-oif-device). Due to port bindings it is recommended that the orchestrator and the device not be run on the same machine.\n\nThe OIF Orchestrator was created with the intent of being an easy-to-configure OpenC2 producer that can be used in the creation of reference implemetations to control multiple devices. 
To that end it allows for the addition of multiple serializations and trasnportation types.\n\nTo get started please reference [https://github.com/oasis-open/openc2-oif-orchestrator/blob/master/docs/Orchestrator.md](https://github.com/oasis-open/openc2-oif-orchestrator/blob/master/docs/Orchestrator.md)\n\n**Additions to Statement of Purpose**\n\nRepository Maintainers may include here any additional sections, subsections, and paragraphs that the Maintainer(s) wish to add as descriptive text, reflecting project status, milestones, releases, modifications to statement of purpose, etc. The project Maintainers will create and maintain this content on behalf of the participants.\n\n## <a id=\"currentMaintainers\"></a>Maintainers\n\nTC Open Repository [Maintainers](https://www.oasis-open.org/resources/open-repositories/maintainers-guide) are responsible for oversight of this project's community development activities, including evaluation of GitHub [pull requests](https://github.com/oasis-open/openc2-oif-orchestrator/blob/master/CONTRIBUTING.md#fork-and-pull-collaboration-model) and [preserving open source principles of openness and fairness](https://www.oasis-open.org/policies-guidelines/open-repositories#repositoryManagement). Maintainers are recognized and trusted experts who serve to implement community goals and consensus design preferences.\n\nInitially, the associated TC members have designated one or more persons to serve as Maintainer(s); subsequently, participating community members may [select additional or substitute Maintainers](https://www.oasis-open.org/resources/open-repositories/maintainers-guide#additionalMaintainers).\n\n*Current Maintainers of this TC Open Repository*\n\n- Danny Martinez, [email protected], [G2, Inc.](http://g2-inc.com)\n\n## <a id=\"aboutOpenRepos\"></a>About OASIS TC Open Repositories\n\n- [TC Open Repositories: Overview and Resources](https://www.oasis-open.org/resources/open-repositories)\n- [Frequently Asked Questions](https://www.oasis-open.org/resources/open-repositories/faq)\n- [Open Source Licenses](https://www.oasis-open.org/resources/open-repositories/licenses)\n- [Contributor License Agreements (CLAs)](https://www.oasis-open.org/resources/open-repositories/cla)\n- [Maintainers' Guidelines and Agreement](https://www.oasis-open.org/resources/open-repositories/maintainers-guide)\n\n## <a id=\"feedback\"></a>Feedback\n\nQuestions or comments about this TC Open Repository's activities should be composed as GitHub issues or comments. If use of an issue/comment is not possible or appropriate, questions may be directed by email to the Maintainer(s) <a href=\"#currentMaintainers\">listed above</a>. Please send general questions about TC Open Repository participation to OASIS Staff at [email protected] and any specific CLA-related questions to [email protected]. 
\n\n" }, { "alpha_fraction": 0.7924528121948242, "alphanum_fraction": 0.7924528121948242, "avg_line_length": 52, "blob_id": "65baab6e3435c96b49c4404ed02d4b366a710ece", "content_id": "814b85902d99d3eee83693456c1346f5d6ae8bf3", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 53, "license_type": "permissive", "max_line_length": 52, "num_lines": 1, "path": "/orchestrator/core/orc_server/es_mirror/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "default_app_config = 'es_mirror.apps.EsMirrorConfig'\n" }, { "alpha_fraction": 0.6159695982933044, "alphanum_fraction": 0.6159695982933044, "avg_line_length": 22.909090042114258, "blob_id": "c2ac4d3e30d8a6769d5095181b7819a6dcf18d5b", "content_id": "bbada3c0540325ddfc2b75fd688b3c5185bdb92a", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1841, "license_type": "permissive", "max_line_length": 80, "num_lines": 77, "path": "/orchestrator/core/orc_server/tracking/log.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from . import EVENT_LEVELS, LEVEL_EVENTS\nfrom .conf import settings, TrackingConfig\nfrom .models import EventLog\n\n\ndef log(level=EVENT_LEVELS.Info, usr=None, msg=''):\n \"\"\"\n Log a message at the specified level\n :param level: level of the error\n :param usr: user that caused the message\n :param msg: message to log\n :return: None\n \"\"\"\n level = level if level in EVENT_LEVELS else EVENT_LEVELS.Info\n usr = None if getattr(usr, 'is_anonymous', True) else usr\n\n if level in getattr(settings, f\"{TrackingConfig.Meta.prefix}_EVENT_LEVELS\"):\n print(f\"{LEVEL_EVENTS.get(level, '')} Log: {usr} - {msg}\")\n EventLog.objects.create(\n user=usr,\n level=level,\n message=msg\n )\n\n\ndef debug(usr=None, msg=''):\n \"\"\"\n Log debug message\n :param usr: user that caused the message\n :param msg: message to log\n \"\"\"\n log(EVENT_LEVELS.Debug, usr, msg)\n\n\ndef error(usr=None, msg=''):\n \"\"\"\n Log error message\n :param usr: user that caused the message\n :param msg: message to log\n \"\"\"\n log(EVENT_LEVELS.Error, usr, msg)\n\n\ndef fatal(usr=None, msg=''):\n \"\"\"\n Log fatal message\n :param usr: user that caused the message\n :param msg: message to log\n \"\"\"\n log(EVENT_LEVELS.Fatal, usr, msg)\n\n\ndef info(usr=None, msg=''):\n \"\"\"\n Log info message\n :param usr: user that caused the message\n :param msg: message to log\n \"\"\"\n log(EVENT_LEVELS.Info, usr, msg)\n\n\ndef trace(usr=None, msg=''):\n \"\"\"\n Log trace message\n :param usr: user that caused the message\n :param msg: message to log\n \"\"\"\n log(EVENT_LEVELS.Trace, usr, msg)\n\n\ndef warn(usr=None, msg=''):\n \"\"\"\n Log warning message\n :param usr: user that caused the message\n :param msg: message to log\n \"\"\"\n log(EVENT_LEVELS.Warn, usr, msg)\n" }, { "alpha_fraction": 0.5609493255615234, "alphanum_fraction": 0.5660733580589294, "avg_line_length": 25.869565963745117, "blob_id": "13123f3e22e03c7de9820212fd1c27c3bdcb91b0", "content_id": "059611f182fca4a40cc6b775945ab1f74cebc97b", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", 
"LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3708, "license_type": "permissive", "max_line_length": 127, "num_lines": 138, "path": "/orchestrator/gui/client/src/components/command/pages/generate/lib/jadn_field/arrayOf.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\n\nimport {\n Button,\n Form,\n FormGroup,\n FormText,\n Input,\n Label,\n} from 'reactstrap'\n\nimport { FontAwesomeIcon } from '@fortawesome/react-fontawesome'\nimport { faMinusSquare, faPlusSquare } from '@fortawesome/free-solid-svg-icons'\n\nimport {\n isOptional_jadn,\n keys,\n opts2arr,\n zip,\n Field\n} from './'\n\nimport * as GenActions from '../../../../../../actions/generate'\n\n\nclass ArrayOfField extends Component {\n constructor(props, context) {\n super(props, context)\n this.name = this.props.name || this.props.def.name\n this.msgName = (this.props.parent ? [this.props.parent, this.name] : [this.name]).join('.')\n this.opts = opts2arr(this.props.def.opts)\n\n this.state = {\n min: false,\n max: false,\n count: 1,\n opts: {}\n }\n }\n\n addOpt(e) {\n e.preventDefault()\n let max = this.opts.hasOwnProperty('max') ? this.opts.max : 20\n\n this.setState((prevState) => {\n let max_bool = prevState.count < max\n return {\n count: max_bool ? ++prevState.count : prevState.count,\n max: !max_bool\n }\n }, () => {\n this.props.optChange(this.msgName, Array.from(new Set(Object.values(this.state.opts))))\n })\n }\n\n removeOpt(e) {\n e.preventDefault()\n let min = this.opts.hasOwnProperty('min') ? this.opts.min : 0\n\n this.setState((prevState) => {\n let min_bool = prevState.count > min\n let opts = prevState.opts\n if (min_bool) {\n delete opts[Math.max.apply(Math, Object.keys(opts))]\n }\n\n return {\n count: min_bool ? --prevState.count : prevState.count,\n min: !min_bool,\n opts: opts\n }\n }, () => {\n this.props.optChange(this.msgName, Array.from(new Set(Object.values(this.state.opts))))\n })\n }\n\n optChange(k, v, i) {\n this.setState((prevState) => {\n return {\n opts: {\n ...prevState.opts,\n [i]: v\n }\n }\n }, () => {\n this.props.optChange(this.msgName, Array.from(new Set(Object.values(this.state.opts))))\n })\n }\n\n render() {\n let arrDef = this.props.schema.types.filter((type) => type[0] == this.opts.rtype)\n\n if (arrDef.length === 1) {\n arrDef = arrDef[0]\n arrDef = [0, arrDef[0].toLowerCase(), arrDef[0], [], arrDef[arrDef.length-2]]\n } else {\n arrDef = [0, arrDef[1], \"String\", [], \"\"]\n }\n arrDef = zip(keys.Gen_Def, arrDef)\n\n let fields = []\n for (let i=0; i < this.state.count; ++i) {\n fields.push(<Field key={ i } def={ arrDef } parent={ this.msgName } optChange={ this.optChange.bind(this) } idx={ i } />)\n }\n\n return (\n <FormGroup tag=\"fieldset\" className=\"border border-dark p-2\">\n <legend>\n { (isOptional_jadn(this.props.def) ? '' : '*') + this.name }\n <Button\n color=\"danger\"\n className={ 'float-right p-1' + (this.state.min ? ' disabled' : '') }\n onClick={ this.removeOpt.bind(this) }\n >\n <FontAwesomeIcon icon={ faMinusSquare } size=\"lg\"/>\n </Button>\n <Button\n color=\"primary\"\n className={ 'float-right p-1' + (this.state.max ? ' disabled' : '') }\n onClick={ this.addOpt.bind(this) }\n >\n <FontAwesomeIcon icon={ faPlusSquare } size=\"lg\"/>\n </Button>\n </legend>\n { this.props.def.desc != '' ? 
<FormText color=\"muted\">{ this.props.def.desc }</FormText> : '' }\n { fields }\n </FormGroup>\n )\n }\n}\n\nconst mapStateToProps = (state) => ({\n schema: state.Generate.selectedSchema,\n baseTypes: state.Generate.types.base\n})\n\nexport default connect(mapStateToProps)(ArrayOfField)\n" }, { "alpha_fraction": 0.5872990489006042, "alphanum_fraction": 0.5972668528556824, "avg_line_length": 32.80434799194336, "blob_id": "cc811a71a18d6888878b6531021c9748520f40f3", "content_id": "3a48e1963658a5734e403de2c735327b7817f0bc", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6220, "license_type": "permissive", "max_line_length": 166, "num_lines": 184, "path": "/logger/server/syslog.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport logging\nimport os\nimport time\nimport urllib3\n\nfrom datetime import datetime, timezone\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch.exceptions import TransportError\nfrom logging.handlers import RotatingFileHandler\nfrom syslog_rfc5424_parser import SyslogMessage, ParseError\n# https://github.com/EasyPost/syslog-rfc5424-parser\n\ntry:\n from queue import Queue\nexcept ImportError:\n import Queue\n\n# Testing\nfrom twisted.internet import reactor\nfrom twisted.internet.protocol import DatagramProtocol\nfrom twisted.internet.task import LoopingCall\n\n# Based on - https://gist.github.com/marcelom/4218010\nfrom sb_utils import ObjectDict\n\n\n\"\"\"\nTiny Syslog Server in Python.\n\nThis is a tiny syslog server that is able to receive UDP based syslog\nentries on a specified port and save them to a file.\nThat's it... 
it does nothing else...\nThere are a few configuration parameters.\n\"\"\"\n\n# Generic Config\nreactor.singleton = ObjectDict(\n    DEFAULT_LOG='syslog.log',\n    HOST=\"0.0.0.0\",\n    UDP_PORT=514,\n    HOST_PORT=os.environ.get('HOST_PORT', 514),\n    LOG_DIR=os.path.join('/', 'var', 'log', 'syslog'),\n    # NO USER SERVICEABLE PARTS BELOW HERE...\n    COUNT=ObjectDict(\n        RECEIVED=0,\n        PROCESSED=0,\n        FORWARDED=0\n    ),\n    LOG_COUNT=0,\n    LOG_PREFIX=os.environ.get('LOG_PREFIX', 'logger'),\n    # ElasticSearch Config\n    ES=ObjectDict(\n        HOST=os.environ.get('ES_HOST', None),\n        PORT=os.environ.get('ES_PORT', '9200'),\n        RETRIES=os.environ.get('ES_TRIES', 60),\n        QUEUE=Queue(),\n        CONN=None\n    ),\n    APP_LOGS={},\n    LOG_LEVELS=dict(\n        crit=logging.CRITICAL,\n        err=logging.ERROR,\n        warn=logging.WARNING,\n        info=logging.INFO,\n        debug=logging.DEBUG,\n        notset=logging.NOTSET\n    )\n)\n\n\ndef openLog(app):\n    tmp_log = logging.getLogger(app)\n    tmp_log.setLevel(logging.DEBUG)\n    LOG_NAME = f'{app}.log' if app.startswith(f'{reactor.singleton.LOG_PREFIX}_') else f'{reactor.singleton.LOG_PREFIX}_{app}.log'\n\n    # Rotating after 10 MB\n    handler = RotatingFileHandler(os.path.join(reactor.singleton.LOG_DIR, LOG_NAME), maxBytes=10485760, backupCount=5)\n    tmp_log.addHandler(handler)\n    reactor.singleton.APP_LOGS[app] = tmp_log\n\n\ndef connectElasticsearch():\n    ES = reactor.singleton.ES\n\n    if ES.HOST is not None:\n        while ES.RETRIES > 0:\n            try:\n                http = urllib3.PoolManager()\n                rsp = http.request('GET', f'{ES.HOST}:{ES.PORT}', retries=False, timeout=1.0)\n                print(f'{datetime.now(timezone.utc):%Y.%m.%d %H:%M:%S%z} - Connected to ElasticSearch at {ES.HOST}:{ES.PORT}', flush=True)\n                break\n\n            except Exception as e:\n                print(f'{datetime.now(timezone.utc):%Y.%m.%d %H:%M:%S%z} - ElasticSearch at {ES.HOST}:{ES.PORT} not up', flush=True)\n                ES.RETRIES -= 1\n                time.sleep(2)\n\n        if ES.RETRIES > 0:\n            ES_CONN = Elasticsearch(f'{ES.HOST}:{ES.PORT}')\n            while True:\n                if not ES.QUEUE.empty():\n                    msg = ES.QUEUE.get()\n                    try:\n                        rsp = ES_CONN.index(**msg)\n                        reactor.singleton.COUNT.FORWARDED += 1\n\n                    except (TransportError, Exception) as e:\n                        print(f'{datetime.now(timezone.utc):%Y.%m.%d %H:%M:%S%z} - Log Error: {e}', flush=True)\n                        ES.QUEUE.put(msg)\n\n                else:\n                    time.sleep(1)\n        else:\n            print(f'{datetime.now(timezone.utc):%Y.%m.%d %H:%M:%S%z} - ElasticSearch at {ES.HOST}:{ES.PORT} not up, max retries reached', flush=True)\n            reactor.singleton.ES.HOST = None\n            while not ES.QUEUE.empty():\n                msg = ES.QUEUE.get()\n                time.sleep(0.5)\n\n\nclass SyslogUDPHandler(DatagramProtocol):\n    def datagramReceived(self, data, addr):\n        data = bytes.decode(data.strip())\n\n        try:\n            message = SyslogMessage.parse(data).as_dict()\n            message['hostname'] = reactor.singleton.LOG_PREFIX\n            msg = message.get('msg', None)\n\n            if msg not in ['', ' ', None]:\n                reactor.singleton.COUNT.RECEIVED += 1\n                appName = message.get('appname', 'default')\n                level = message.get('severity', 'info')\n\n                if appName not in reactor.singleton.APP_LOGS:\n                    openLog(appName)\n\n                log_msg = f\"{message.get('timestamp', datetime.now())} - {level} - {msg}\"\n                reactor.singleton.APP_LOGS[appName].log(reactor.singleton.LOG_LEVELS.get(level, logging.INFO), log_msg)\n                reactor.singleton.COUNT.PROCESSED += 1\n\n                if reactor.singleton.ES.HOST is not None:\n                    reactor.singleton.ES.QUEUE.put(dict(\n                        index=f'log_{reactor.singleton.LOG_PREFIX}-{datetime.now():%Y.%m.%d}',\n                        doc_type='log',\n                        body=message\n                    ))\n\n        except ParseError as e:\n            print(f\"Error {e.__class__.__name__} - {getattr(e, 'message', e)}\", flush=True)\n\n\ndef stats():\n    COUNT = 
reactor.singleton.COUNT\n    print(f'{datetime.now(timezone.utc):%Y.%m.%d %H:%M:%S%z} - Received {COUNT.RECEIVED:,}, Processed {COUNT.PROCESSED:,}, Forwarded {COUNT.FORWARDED:,}', flush=True)\n\n\nif __name__ == \"__main__\":\n    print(\"Starting Syslog forwarding to ElasticSearch\", flush=True)\n    for log in os.listdir(reactor.singleton.LOG_DIR):\n        name, ext = os.path.splitext(log)\n        if ext == '.log':\n            openLog(name)\n\n    if 'default' not in reactor.singleton.APP_LOGS:\n        openLog('default')\n\n    print(f'Syslog UDP Listening {reactor.singleton.HOST}:{reactor.singleton.HOST_PORT}', flush=True)\n\n    udpServer = SyslogUDPHandler()\n    reactor.listenUDP(reactor.singleton.UDP_PORT, udpServer)\n\n    reactor.callInThread(connectElasticsearch)\n\n    status = LoopingCall(stats)\n    status.start(60)\n\n    try:\n        reactor.run()\n    except (IOError, SystemExit):\n        raise\n    except KeyboardInterrupt:\n        print(f'{datetime.now(timezone.utc):%Y.%m.%d %H:%M:%S%z} - Ctrl+C Pressed. Shutting down.', flush=True)\n" }, { "alpha_fraction": 0.6422253251075745, "alphanum_fraction": 0.6494413614273071, "avg_line_length": 35.71794891357422, "blob_id": "256003bfdf8b1245f83124aa9352628489fa5bd1", "content_id": "b8957a0226197086f368a1592505480d346cbc26", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4296, "license_type": "permissive", "max_line_length": 118, "num_lines": 117, "path": "/orchestrator/core/orc_server/account/views/viewsets.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import base64\nimport bleach\nimport coreschema\n\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404\nfrom rest_framework import filters, permissions, status, viewsets\nfrom rest_framework.compat import coreapi\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\n\n# Local imports\nfrom command.models import SentHistory, HistorySerializer\nfrom utils import get_or_none, IsAdminOrIsSelf, OrcSchema\nfrom ..models import UserSerializer, PasswordSerializer\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n    \"\"\"\n    API endpoint that allows users to be viewed or edited.\n    \"\"\"\n    permission_classes = (permissions.IsAdminUser, )\n    serializer_class = UserSerializer\n    lookup_field = 'username'\n\n    queryset = User.objects.all().order_by('-date_joined')\n    filter_backends = (filters.OrderingFilter,)\n    ordering_fields = ('last_name', 'first_name', 'username', 'email_address', 'active')\n\n    @action(methods=['POST'], detail=False, permission_classes=[IsAdminOrIsSelf], serializer_class=PasswordSerializer)\n    def change_password(self, request, username=None):  # pylint: disable=unused-argument\n        \"\"\"\n        Change user password, passwords sent as base64 encoded strings\n        \"\"\"\n        serializer = PasswordSerializer(data=request.data)\n        user = self.get_object()\n\n        if serializer.is_valid():\n            if not user.check_password(base64.b64decode(serializer.data.get('old_password'))):\n                return Response({'old_password': ['Wrong password.']}, status=status.HTTP_400_BAD_REQUEST)\n            # set_password also hashes the password that the user will get\n            user.set_password(base64.b64decode(serializer.data.get('new_password_1')))\n            user.save()\n            return Response({'status': 'password changed'}, status=status.HTTP_200_OK)\n\n        return Response(serializer.errors, 
status=status.HTTP_400_BAD_REQUEST)\n\n\nclass UserHistoryViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n API endpoint that allows users to view their history\n \"\"\"\n permission_classes = (permissions.IsAdminUser,)\n serializer_class = HistorySerializer\n lookup_field = 'command_id'\n\n queryset = SentHistory.objects.order_by('-received_on')\n\n schema = OrcSchema(\n manual_fields=[\n coreapi.Field(\n \"username\",\n required=True,\n location=\"path\",\n schema=coreschema.String(\n description='Username to list the command history'\n )\n )\n ]\n )\n\n def list(self, request, *args, **kwargs): # pylint: disable=unused-argument\n \"\"\"\n Return a list of a users command history\n \"\"\"\n username = kwargs.get('username', None)\n self.pagination_class.page_size_query_param = 'length'\n self.pagination_class.max_page_size = 100\n queryset = self.filter_queryset(self.get_queryset())\n\n username = bleach.clean(username)\n\n if request.user.is_staff: # Admin User\n user = get_or_none(User, username=username)\n if user is None:\n raise Http404\n queryset = queryset.filter(user=user)\n\n else: # Standard User\n if request.user.username == username:\n queryset = queryset.filter(user=request.user)\n else:\n raise PermissionDenied\n\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n\n def retrieve(self, request, *args, **kwargs):\n \"\"\"\n Return a specific user's command\n \"\"\"\n username = kwargs.get('username', None)\n instance = self.get_object()\n\n if not request.user.is_staff: # Standard User\n username = bleach.clean(username)\n if request.user.username != username or request.user != instance.user:\n raise PermissionDenied\n\n serializer = self.get_serializer(instance)\n return Response(serializer.data)\n" }, { "alpha_fraction": 0.682758629322052, "alphanum_fraction": 0.6965517401695251, "avg_line_length": 20.799999237060547, "blob_id": "d46d70f1cc9767bf6ee08f352758b2f399df2a8e", "content_id": "3d04a8b895b8a7f3397c41064b975f143c306d2e", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 435, "license_type": "permissive", "max_line_length": 58, "num_lines": 20, "path": "/orchestrator/transport/coap/docker_dev_start.sh", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\necho \"Running COAP Transport Module.\"\ndockerize -wait tcp://$QUEUE_HOST:$QUEUE_PORT -timeout 30s\n\necho \"Starting CoAP Server\"\npython3 -u coap_server.py &\nstatus=$?\nif [[ $status -ne 0 ]]; then\n echo \"Failed to start CoAP Server: $status\"\n exit $status\nfi\n\necho \"Starting CoAP Client\"\npython3 -u coap_client.py\nstatus=$?\nif [[ $status -ne 0 ]]; then\n echo \"Failed to start CoAP Client: $status\"\n exit $status\nfi" }, { "alpha_fraction": 0.6810126304626465, "alphanum_fraction": 0.6822784543037415, "avg_line_length": 23.6875, "blob_id": "eda679eafb67ea1b60d6273606ecba300fe9f302", "content_id": "de789389d50033d050a850d38be8ec64507e2a53", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 790, "license_type": "permissive", "max_line_length": 78, "num_lines": 32, "path": "/orchestrator/gui/server/gui_server/tracking/views/gui.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.shortcuts import render, reverse\n\n\n@login_required\n@permission_required('logs.can_view')\ndef gui_root(request):\n page_args = {\n 'page_title': 'Logs'\n }\n return render(request, 'tracking/index.html', page_args)\n\n\n@login_required\n@permission_required('logs.can_view')\ndef gui_requests(request):\n page_args = {\n 'page_title': 'Request Logs'\n }\n return render(request, 'tracking/request.html', page_args)\n\n\n@login_required\n@permission_required('logs.can_view')\ndef gui_events(request):\n page_args = {\n 'page_title': 'Event Logs'\n }\n return render(request, 'tracking/event.html', page_args)\n" }, { "alpha_fraction": 0.707676112651825, "alphanum_fraction": 0.7192429304122925, "avg_line_length": 22.774999618530273, "blob_id": "2eaf6a96f388723c53c18be175184249f9910aed", "content_id": "1f68275488d5c4d8edd87b6b35b82bb45b43ca0e", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 951, "license_type": "permissive", "max_line_length": 78, "num_lines": 40, "path": "/logger/server/Dockerfile", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "FROM g2inc/oif-python\nRUN apk upgrade --update && apk add --no-cache dos2unix && rm /var/cache/apk/*\n\nMAINTAINER Screaming_Bunny\n\nLABEL name=\"Logger Server\" \\\nvendor=\"OpenC2\" \\\nlicense=\"BSD\" \\\nversion=\"2.0\" \\\ndescription=\"This is the Logger Server container\"\n\nADD requirements.txt /tmp/requirements.txt\nADD syslog.py /opt/syslog/\nCOPY docker-entrypoint.sh /\n\n# Package installation\n# Packages - https://pkgs.alpinelinux.org/packages\n#\n# Requirements install\nRUN apk add --no-cache --virtual .build-deps gcc python3-dev libc-dev && \\\npip3 install -r /tmp/requirements.txt && \\\n# Entrypint config\nchmod +x /docker-entrypoint.sh && \\\n# Cleanup\napk del .build-deps && \\\nrm -rf /var/cache/apk/* *.tar.gz* /usr/src /root/.gnupg /tmp/*\n\n# Ports\nEXPOSE 514/tcp\n\n# Orchestrator Core Working Directory\nWORKDIR /opt/syslog/\n\n# Persistant volume\nVOLUME '/var/log/syslog'\n\n# Startup Command\nENTRYPOINT [\"/docker-entrypoint.sh\"]\n\nCMD [\"python3\", \"-u\", \"syslog.py\"]\n" }, { "alpha_fraction": 0.5682774186134338, "alphanum_fraction": 0.5715925097465515, "avg_line_length": 38.81218338012695, "blob_id": "fc101d6073ea1f5225bc9dc31622a733fd1fdae3", "content_id": "407ca115e26ad9d6fbf023fa918f7453ece88056", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7843, "license_type": "permissive", "max_line_length": 115, "num_lines": 197, "path": "/orchestrator/transport/mqtt/MQTT/callbacks.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# callbacks.py\n\nimport json\nimport os\nimport paho.mqtt.client as mqtt\nimport paho.mqtt.publish as publish\nimport re\n\nfrom sb_utils 
import Consumer, Producer, encode_msg, decode_msg, safe_cast\n\n# maintains a list of active devices we can receive responses from\nACTIVE_CONNECTIONS = []\n\n\nclass Callbacks(object):\n    required_device_keys = {\"encoding\", \"profile\", \"socket\"}\n\n    @staticmethod\n    def on_connect(client, userdata, flags, rc):\n        \"\"\"\n        MQTT Callback for when client receives connection-acknowledgement response from MQTT server.\n        :param client: Class instance of connection to server\n        :param userdata: User-defined data passed to callbacks\n        :param flags: Response flags sent by broker\n        :param rc: Connection result, Successful = 0\n        \"\"\"\n        print(f\"Connected with result code {rc}\")\n        # Subscribing in on_connect() allows us to renew subscriptions if disconnected\n\n        if isinstance(userdata, list):\n            for topic in userdata:\n                if not isinstance(topic, str):\n                    print(\"Error in on_connect: expected each topic in userdata to be a string.\")\n                    continue\n                client.subscribe(topic.lower(), qos=1)\n                print(f\"Listening on {topic.lower()}\")\n\n    @staticmethod\n    def on_message(client, userdata, msg):\n        \"\"\"\n        MQTT Callback for when a PUBLISH message is received from the server.\n        :param client: Class instance of connection to server.\n        :param userdata: User-defined data passed to callbacks\n        :param msg: Contains payload, topic, qos, retain\n        \"\"\"\n        payload = json.loads(msg.payload)\n        payload_header = payload.get(\"header\", {})\n\n        encoding = re.search(r\"(?<=\\+)(.*?)(?=;)\", payload_header.get(\"content_type\", \"\")).group(1)\n        orc_id, broker_socket = payload_header.get(\"from\", \"\").rsplit(\"@\", 1)\n        corr_id = payload_header.get(\"correlationID\", \"\")\n\n        # copy necessary headers\n        header = {\n            \"socket\": broker_socket,\n            \"correlationID\": corr_id,\n            \"orchestratorID\": orc_id,\n            \"encoding\": encoding,\n        }\n\n        # Connect and publish to internal buffer\n        exchange = \"orchestrator\"\n        route = \"response\"\n        producer = Producer(\n            os.environ.get(\"QUEUE_HOST\", \"localhost\"),\n            os.environ.get(\"QUEUE_PORT\", \"5672\")\n        )\n\n        producer.publish(\n            headers=header,\n            message=decode_msg(payload.get(\"body\", \"\"), encoding),\n            exchange=exchange,\n            routing_key=route\n        )\n\n        print(f\"Received: {payload} \\nPlaced message onto exchange [{exchange}] queue [{route}].\")\n\n    @staticmethod\n    def send_mqtt(body, message):\n        \"\"\"\n        AMQP Callback when we receive a message from internal buffer to be published\n        :param body: Contains the message to be sent.\n        :param message: Contains data about the message as well as headers\n        \"\"\"\n        # check for certs if TLS is enabled\n        if os.environ.get(\"MQTT_TLS_ENABLED\", False) and os.listdir(\"/opt/transport/MQTT/certs\"):\n            tls = dict(\n                ca_certs=os.environ.get(\"MQTT_CAFILE\", None),\n                certfile=os.environ.get(\"MQTT_CLIENT_CERT\", None),\n                keyfile=os.environ.get(\"MQTT_CLIENT_KEY\", None)\n            )\n        else:\n            tls = None\n\n        # iterate through all devices within the list of destinations\n        for device in message.headers.get(\"destination\", []):\n            # check that all necessary parameters exist for device\n            key_diff = Callbacks.required_device_keys.difference({*device.keys()})\n            if len(key_diff) == 0:\n                encoding = device.get(\"encoding\", \"json\")\n                ip, port = device.get(\"socket\", \"localhost:1883\").split(\":\")\n\n                # iterate through actuator profiles to send message to\n                for actuator in device.get(\"profile\", []):\n                    payload = {\n                        \"header\": format_header(message.headers, device, actuator),\n                        \"body\": encode_msg(json.loads(body), encoding)\n                    }\n                    print(f\"Sending {ip}:{port} - {payload}\")\n\n                    try:\n                        publish.single(\n                            actuator,\n                            payload=json.dumps(payload),\n                            qos=1,\n                            hostname=ip,\n                            port=safe_cast(port, int, 1883),\n                            will={\n                                \"topic\": actuator,\n                                \"payload\": json.dumps(payload),\n                                \"qos\": 1\n                            },\n                            tls=tls\n                        )\n                        print(f\"Placed payload onto topic {actuator} Payload Sent: {payload}\")\n                    except Exception as e:\n                        print(f\"There was an error sending command to {ip}:{port} - {e}\")\n                        send_error_response(e, payload[\"header\"])\n                        return\n                get_response(ip, port, message.headers.get(\"source\", {}).get(\"orchestratorID\", \"\"))\n            else:\n                err_msg = f\"Missing required header data to successfully transport message - {', '.join(key_diff)}\"\n                # no payload was built for this device, so construct a best-effort header for the error response\n                send_error_response(err_msg, format_header(message.headers, device, \"\"))\n\n\ndef send_error_response(e, header):\n    \"\"\"\n    If error occurs before leaving the transport on the orchestrator side, then send back a message\n    response to the internal buffer indicating so.\n    :param e: Exception thrown\n    :param header: Include headers which would have been sent for Orchestrator to read.\n    \"\"\"\n    producer = Producer(\n        os.environ.get(\"QUEUE_HOST\", \"localhost\"),\n        os.environ.get(\"QUEUE_PORT\", \"5672\")\n    )\n\n    err = json.dumps(str(e))\n    print(f\"Send error response: {err}\")\n\n    producer.publish(\n        headers=header,\n        message=err,\n        exchange=\"orchestrator\",\n        routing_key=\"response\"\n    )\n\n\ndef get_response(ip, port, orc_id):\n    \"\"\"\n    Waits for response from actuator at server at given ip:port\n    :param ip: IP Address specified from destination sent from orchestrator\n    :param port: Port specified from destination sent from orchestrator\n    :param orc_id: Indicates where message was sent from - used in topic to receive responses\n    \"\"\"\n    # if we are already connected to an ip, don't try to connect again\n    if ip not in ACTIVE_CONNECTIONS:\n        ACTIVE_CONNECTIONS.append(ip)\n        client = mqtt.Client()\n        print(f\"New connection: {ip}:{port}\")\n\n        try:\n            client.connect(ip, int(port))\n        except Exception as e:\n            print(f\"ERROR: Connection to {ip}:{port} has been refused - {e}\")\n\n        response_topic = f\"{orc_id}/response\"\n        client.user_data_set([response_topic])\n        client.on_connect = Callbacks.on_connect\n        client.on_message = Callbacks.on_message\n        client.loop_start()\n\n\ndef format_header(header, device, actuator):\n    \"\"\"\n    Takes relevant info from header and organizes it into a format that the orchestrator is expecting\n    :param header: Header data from the orchestrator message, used to trace back the original command\n    :param device: Device the command was sent to\n    :param actuator: Actuator profile (topic) the command was published to\n    \"\"\"\n    broker_socket = header.get(\"source\", {}).get(\"transport\", {}).get(\"socket\", \"\")\n    orc_id = header.get(\"source\", {}).get(\"orchestratorID\", \"\")\n\n    return {\n        \"to\": f\"{actuator}@{broker_socket}\",\n        \"from\": f\"{orc_id}@{broker_socket}\",\n        \"correlationID\": header.get(\"source\", {}).get(\"correlationID\", \"\"),\n        \"created\": header.get(\"source\", {}).get(\"date\", \"\"),\n        \"content_type\": f\"application/openc2-cmd+{device.get('encoding', 'json')};version=1.0\",\n    }\n" }, { "alpha_fraction": 0.5503711700439453, "alphanum_fraction": 0.5694591999053955, "avg_line_length": 31.517240524291992, "blob_id": "3769ce4be4fa470be20e318ca014c33a5ec2f06c", "content_id": "209c06043a4eba9c7bb38bf9a469c463079ef3c7", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 943, "license_type": "permissive", "max_line_length": 114, "num_lines": 29, "path": 
"/orchestrator/core/orc_server/orchestrator/migrations/0001_initial.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2 on 2019-04-04 18:39\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Protocol',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(help_text='Name of the Protocol', max_length=30)),\n ('pub_sub', models.BooleanField(default=False, help_text='Protocol is Pub/Sub')),\n ],\n ),\n migrations.CreateModel(\n name='Serialization',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(help_text='Name of the Serialization', max_length=30)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6007556915283203, "alphanum_fraction": 0.6108312606811523, "avg_line_length": 19.894737243652344, "blob_id": "89d0ff9740842205478a103f06def7a8efbcb27f", "content_id": "a3515a47c943f3320790c115e9159bdb0548bbcc", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 794, "license_type": "permissive", "max_line_length": 49, "num_lines": 38, "path": "/orchestrator/gui/client/config/dev.config.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "const generalConfig = require('./general.config')\nconst webpack = require('webpack')\nconst merge = require('webpack-merge')\nconst path = require('path')\n\nenv = 'development'\nconsole.log('NODE_ENV: ' + env)\n\nconst ROOT_DIR = path.join(__dirname, '..')\nconst BUILD_DIR = path.join(ROOT_DIR, 'build')\n\nmodule.exports = merge.smart(generalConfig, {\n mode: env,\n devtool: 'eval',\n plugins: [\n new webpack.DefinePlugin({\n NODE_ENV: env\n }),\n new webpack.NoEmitOnErrorsPlugin(),\n ],\n devServer: {\n contentBase: BUILD_DIR,\n compress: true,\n port: 3000,\n hot: true,\n open: false,\n historyApiFallback: true,\n proxy: {\n '/api': {\n target: 'http://localhost:8081',\n secure: false\n }\n }\n },\n optimization: {\n usedExports: true,\n }\n})\n" }, { "alpha_fraction": 0.7279411554336548, "alphanum_fraction": 0.7279411554336548, "avg_line_length": 18.428571701049805, "blob_id": "7029a26e7f0a67620e951cef3cabc1cacfa4ddf7", "content_id": "e1d657d5c7486eb591ac2dd108c4e0f0d54a9830", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 272, "license_type": "permissive", "max_line_length": 57, "num_lines": 14, "path": "/orchestrator/core/orc_server/actuator/urls.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.urls import include, path\n\nfrom rest_framework import routers\n\nfrom . 
import views\n\nrouter = routers.DefaultRouter()\nrouter.register('', views.ActuatorViewSet)\n\n\nurlpatterns = [\n # Actuator Router\n path('', include(router.urls), name='actuator.root'),\n]\n" }, { "alpha_fraction": 0.6055402159690857, "alphanum_fraction": 0.6101139187812805, "avg_line_length": 32.298851013183594, "blob_id": "e85a6cff3c79874d10a804e729e691498df255e4", "content_id": "599fde55e092ac9ed4f6c7edd8fe4088e5b2b49b", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11588, "license_type": "permissive", "max_line_length": 144, "num_lines": 348, "path": "/base/modules/script_utils.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# Utility functions for update_subs and configure\nimport fnmatch\nimport importlib\nimport io\nimport os\nimport re\nimport shutil\nimport stat\nimport subprocess\nimport sys\n\nfrom getpass import getpass\n\n\ntry:\n input = raw_input\nexcept NameError:\n pass\n\n\n# Classes\nclass FrozenDict(dict):\n def __init__(self, *args, **kwargs):\n self._hash = None\n super(FrozenDict, self).__init__(*args, **kwargs)\n\n def __hash__(self):\n if self._hash is None:\n self._hash = hash(tuple(sorted(self.items()))) # iteritems() on py2\n return self._hash\n\n def __getattr__(self, item):\n return self.get(item, None)\n\n def _immutable(self, *args, **kws):\n raise TypeError('cannot change object - object is immutable')\n\n __setitem__ = _immutable\n __delitem__ = _immutable\n pop = _immutable\n popitem = _immutable\n clear = _immutable\n update = _immutable\n setdefault = _immutable\n\n\nclass ConsoleStyle:\n def __init__(self, verbose=False, log=None):\n import colorama\n colorama.init()\n self._verbose = verbose if isinstance(verbose, bool) else False\n self._logFile = log if isinstance(log, (str, io.TextIOWrapper)) else None\n\n self._encoding = sys.getdefaultencoding()\n self._format_regex = re.compile(r\"\\[\\d+m\", flags=re.MULTILINE)\n self._textStyles = FrozenDict({\n # Styles\n \"RESET\": colorama.Fore.RESET,\n \"NORMAL\": colorama.Style.NORMAL,\n \"DIM\": colorama.Style.DIM,\n \"BRIGHT\": colorama.Style.BRIGHT,\n # Text Colors\n \"FG_BLACK\": colorama.Fore.BLACK,\n \"FG_BLUE\": colorama.Fore.BLUE,\n \"FG_CYAN\": colorama.Fore.CYAN,\n \"FG_GREEN\": colorama.Fore.GREEN,\n \"FG_MAGENTA\": colorama.Fore.MAGENTA,\n \"FG_RED\": colorama.Fore.RED,\n \"FG_WHITE\": colorama.Fore.WHITE,\n \"FG_YELLOW\": colorama.Fore.YELLOW,\n \"FG_RESET\": colorama.Fore.RESET,\n # Background Colors\n \"BG_BLACK\": colorama.Back.BLACK,\n \"BG_BLUE\": colorama.Back.BLUE,\n \"BG_CYAN\": colorama.Back.CYAN,\n \"BG_GREEN\": colorama.Back.GREEN,\n \"BG_MAGENTA\": colorama.Back.MAGENTA,\n \"BG_RED\": colorama.Back.RED,\n \"BG_WHITE\": colorama.Back.WHITE,\n \"BG_YELLOW\": colorama.Back.YELLOW,\n \"BG_RESET\": colorama.Back.RESET,\n })\n\n def _toStr(self, txt):\n return txt.decode(self._encoding, \"backslashreplace\") if hasattr(txt, \"decode\") else txt\n\n def colorize(self, txt, *styles):\n txt = self._toStr(txt)\n self._log(txt)\n color_text = \"\".join([self._textStyles.get(s.upper(), \"\") for s in styles]) + txt\n return f\"\\033[0m{color_text}\\033[0m\"\n\n def _log(self, txt):\n if self._logFile:\n if isinstance(self._logFile, str):\n with open(self._logFile, 'a') as f:\n f.write(f\"{self._format_regex.sub('', self._toStr(txt))}\\n\")\n elif 
isinstance(self._logFile, io.TextIOWrapper):\n self._logFile.write(f\"{self._format_regex.sub('', self._toStr(txt))}\\n\")\n\n # Headers\n def underline(self, txt):\n print(self.colorize(txt, \"UNDERLINE\", \"BOLD\"))\n\n def h1(self, txt):\n tmp = self.colorize(f\"\\n{txt}\", \"UNDERLINE\", \"BOLD\", \"FG_CYAN\")\n print(tmp)\n\n def h2(self, txt):\n print(self.colorize(f\"\\n{txt}\", \"UNDERLINE\", \"BOLD\", \"FG_WHITE\"))\n\n def debug(self, txt):\n print(self.colorize(txt, \"FG_WHITE\"))\n\n def info(self, txt):\n print(self.colorize(f\"> {txt}\", \"FG_WHITE\"))\n\n def success(self, txt):\n print(self.colorize(txt, \"FG_GREEN\"))\n\n def error(self, txt):\n print(self.colorize(f\"x {txt}\", \"FG_RED\"))\n\n def warn(self, txt):\n print(self.colorize(f\"-> {txt}\", \"FG_YELLOW\"))\n\n def bold(self, txt):\n print(self.colorize(txt, \"BOLD\"))\n\n def note(self, txt):\n print(f\"{self.colorize('Note:', 'UNDERLINE', 'BOLD', 'FG_CYAN')} {self.colorize(txt, 'FG_CYAN')}\")\n\n def default(self, txt):\n txt = self._toStr(txt)\n print(self.colorize(txt))\n\n def verbose(self, style, txt):\n if style != \"verbose\" and hasattr(self, style) and callable(getattr(self, style)):\n if self._verbose:\n getattr(self, style)(txt)\n else:\n self._log(txt)\n\n\n# Config\nCONFIG = FrozenDict(\n DefaultBranch=\"master\",\n EmptyString=(\"\", b\"\", None),\n Remove=FrozenDict(\n Dirs=(\".git\", \".idea\"),\n Files=(\".git\", \".gitlab-ci.yml\", \"dev-compose.yaml\", \".gitmodules\", \".pipeline_trigger*\")\n ),\n MinVersions=FrozenDict(\n Docker=(18, 0, 0),\n DockerCompose=(1, 20, 0)\n )\n)\n\n\n# Functions\ndef checkRequiredArguments(opts, parser):\n missing_options = []\n for option in parser.option_list:\n if re.match(r'^\\[REQUIRED\\]', option.help) and eval('opts.' 
+ option.dest) is None:\n missing_options.extend(option._long_opts)\n if len(missing_options) > 0:\n parser.error('Missing REQUIRED parameters: ' + str(missing_options))\n\n\ndef set_rw(operation, name, exc):\n os.chmod(name, stat.S_IWRITE)\n os.remove(name)\n\n\ndef install_pkg(package):\n try:\n importlib.import_module(package[0])\n except ImportError:\n print(f'{package[1]} not installed')\n try:\n pkg_install = subprocess.Popen([sys.executable, \"-m\", \"pip\", \"install\", package[1]], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = pkg_install.communicate()\n except Exception as e:\n print(e)\n finally:\n setattr(sys.modules[__name__], package[0], importlib.import_module(package[0]))\n\n\ndef import_mod(mod=None):\n if mod not in sys.modules:\n setattr(sys.modules[__name__], mod, importlib.import_module(mod))\n\n\ndef recursive_find(rootdir='.', patterns=('*', ), directory=False):\n results = []\n for (base, dirs, files) in os.walk(rootdir):\n search = dirs if directory else files\n matches = [fnmatch.filter(search, pattern) for pattern in patterns]\n matches = [v for sl in matches for v in sl]\n results.extend(os.path.join(base, f) for f in matches)\n return results\n\n\ndef git_lsremote(url):\n import_mod('git')\n\n remote_refs = {}\n g = git.cmd.Git()\n for ref in g.ls_remote(url).split('\\n'):\n hash_ref_list = ref.split('\\t')\n remote_refs[hash_ref_list[1]] = hash_ref_list[0]\n return remote_refs\n\n\ndef update_repo(repo_url, repo_path, branch=\"master\"):\n import_mod('git')\n\n if os.path.isdir(repo_path):\n shutil.rmtree(repo_path, onerror=set_rw)\n try:\n branch = branch if f\"refs/heads/{branch}\" in git_lsremote(repo_url) else CONFIG.DefaultBranch\n repo = git.Repo.clone_from(repo_url, repo_path, branch=branch)\n except git.cmd.GitCommandError as e:\n return e\n\n os.chdir(repo_path)\n\n for f in recursive_find(patterns=CONFIG.Remove.Files):\n os.remove(f)\n\n for d in recursive_find(patterns=CONFIG.Remove.Dirs, directory=True):\n shutil.rmtree(d, onerror=set_rw)\n\n os.chdir('../')\n\n\ndef check_docker(console=None):\n msg = \"Checking installed docker version\"\n console.h2(msg) if isinstance(console, ConsoleStyle) else print(msg)\n\n installed_docker = subprocess.Popen([\"docker\", \"--version\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = installed_docker.communicate()\n\n if err in CONFIG.EmptyString:\n installed_version = re.search(r\"\\d{,2}\\.\\d{,2}\\.\\d{,2}\", str(out)).group()\n version = tuple(int(n) for n in installed_version.split(\".\"))\n\n msg = f\"required min docker: {version_str(CONFIG.MinVersions.Docker)}\"\n console.info(msg) if isinstance(console, ConsoleStyle) else print(msg)\n\n if CONFIG.MinVersions.Docker <= version:\n msg = f\"installed docker version: {installed_version}\"\n console.note(msg) if isinstance(console, ConsoleStyle) else print(msg)\n else:\n msg = f\"Need to upgrade docker package to {version_str(CONFIG.MinVersions.Docker)}+\"\n console.error(msg) if isinstance(console, ConsoleStyle) else print(msg)\n exit(1)\n else:\n msg = \"Failed to parse docker version\"\n console.error(msg) if isinstance(console, ConsoleStyle) else print(msg)\n exit(1)\n\n\ndef check_docker_compose(console=None):\n msg = \"Checking installed docker-compose version\"\n console.h2(msg) if isinstance(console, ConsoleStyle) else print(msg)\n\n installed_compose = subprocess.Popen([\"docker-compose\", \"--version\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = installed_compose.communicate()\n\n if err in 
CONFIG.EmptyString:\n installed_version = re.search(r\"\\d{,2}\\.\\d{,2}\\.\\d{,2}\", str(out)).group()\n version = tuple(int(n) for n in installed_version.split(\".\"))\n\n msg = f\"required min docker-compose: {version_str(CONFIG.MinVersions.DockerCompose)}\"\n console.info(msg) if isinstance(console, ConsoleStyle) else print(msg)\n\n if CONFIG.MinVersions.DockerCompose <= version:\n msg = f\"installed docker-compose version: {installed_version}\"\n console.note(msg) if isinstance(console, ConsoleStyle) else print(msg)\n\n else:\n msg = f\"Need to upgrade docker-compose to {version_str(CONFIG.MinVersions.DockerCompose)}+\"\n console.error(msg) if isinstance(console, ConsoleStyle) else print(msg)\n exit(1)\n\n else:\n msg = \"Failed to parse docker-compose version\"\n console.error(msg) if isinstance(console, ConsoleStyle) else print(msg)\n\n exit(1)\n\n\ndef build_image(docker_sys=None, console=None, **kwargs):\n import_mod('docker')\n if docker_sys is None:\n msg = \"docker_sys arg is required\"\n console.error(msg) if isinstance(console, ConsoleStyle) else print(msg)\n exit(1)\n\n img = None\n try:\n img = docker_sys.images.build(**kwargs)\n except docker.errors.ImageNotFound as e:\n msg = f\"Cannot build image, base image not found: {e}\"\n console.error(msg) if isinstance(console, ConsoleStyle) else print(msg)\n exit(1)\n except docker.errors.APIError as e:\n msg = f\"Docker API error: {e}\"\n console.error(msg) if isinstance(console, ConsoleStyle) else print(msg)\n exit(1)\n except TypeError as e:\n msg = \"Cannot build image, neither the path nor the fileobj arg was specified\"\n console.error(msg) if isinstance(console, ConsoleStyle) else print(msg)\n exit(1)\n except KeyboardInterrupt:\n msg = \"Keyboard Interrupt\"\n console.error(msg) if isinstance(console, ConsoleStyle) else print(msg)\n exit(1)\n\n msg = \"\".join(line.get(\"stream\", \"\") for line in img[1])\n console.verbose(\"default\", msg) if isinstance(console, ConsoleStyle) else print(msg)\n return img\n\n\ndef human_size(size, units=(\" bytes\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\")):\n \"\"\" Returns a human readable string representation of bytes\"\"\"\n return f\"{size:,d}{units[0]}\" if size < 1024 else human_size(size >> 10, units[1:])\n\n\ndef version_str(ver):\n return \".\".join(str(x) for x in ver)\n\n\ndef prompt(msg, err_msg, isvalid, password=False):\n res = None\n password = password if type(password) == bool else False\n\n while res is None:\n if password:\n res = getpass()\n else:\n res = input(str(msg)+': ')\n\n if not isvalid(res):\n print(str(err_msg))\n res = None\n return res\n" }, { "alpha_fraction": 0.663772702217102, "alphanum_fraction": 0.667192816734314, "avg_line_length": 34.52336502075195, "blob_id": "ed7b63c2deb1899efe7b73180f634b0b595ac7a5", "content_id": "ca96bb82e28b81293c6ecfd7f6ffc43ed8e45be9", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3801, "license_type": "permissive", "max_line_length": 106, "num_lines": 107, "path": "/orchestrator/gui/server/gui_server/orchestrator/views/viewsets.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom rest_framework import permissions, status, viewsets\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.response import 
Response\n\nfrom ..models import Orchestrator, OrchestratorSerializer, OrchestratorAuth, OrchestratorAuthSerializer\n\nfrom utils import get_or_none\n\n\nclass OrchestratorViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows Orchestrators to be viewed or edited\n \"\"\"\n permission_classes = (permissions.IsAuthenticated, )\n serializer_class = OrchestratorSerializer\n\n lookup_field = 'orc_id'\n queryset = Orchestrator.objects.order_by('-name')\n\n permissions = {\n 'create': (permissions.IsAdminUser,),\n 'destroy': (permissions.IsAdminUser,),\n 'partial_update': (permissions.IsAdminUser,),\n 'update': (permissions.IsAdminUser,),\n }\n\n def get_permissions(self):\n \"\"\"\n Instantiates and returns the list of permissions that this view requires.\n \"\"\"\n return [permission() for permission in self.permissions.get(self.action, self.permission_classes)]\n\n\nclass OrchestratorAuthViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows Orchestrator auth tokens to be viewed or edited\n \"\"\"\n permission_classes = (permissions.IsAuthenticated, )\n serializer_class = OrchestratorAuthSerializer\n\n lookup_field = 'orc_id'\n queryset = OrchestratorAuth.objects.order_by('-user')\n\n def create(self, request, *args, **kwargs):\n data = request.data\n if not request.user.is_staff:\n data['user'] = request.user.username\n\n serializer = self.get_serializer(data=data)\n serializer.is_valid(raise_exception=True)\n\n self.perform_create(serializer)\n\n headers = self.get_success_headers(serializer.data)\n\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n\n def list(self, request, *args, **kwargs):\n \"\"\"\n Return a list of all auth tokens that the user has permissions for\n \"\"\"\n queryset = self.filter_queryset(self.get_queryset())\n\n if not request.user.is_staff: # Standard User\n queryset = queryset.filter(user=request.user)\n\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n\n def retrieve(self, request, *args, **kwargs):\n \"\"\"\n Return a specific auth token that the user has permissions for\n \"\"\"\n auth = self.get_object()\n\n if not request.user.is_staff: # Standard User\n if auth is not None and auth.user != request.user:\n raise PermissionDenied(detail='User not authorised to access auth token', code=401)\n\n serializer = self.get_serializer(auth)\n return Response(serializer.data)\n\n def update(self, request, *args, **kwargs):\n auth = self.get_object()\n\n if not request.user.is_staff: # Standard User\n if auth is not None and auth.user != request.user:\n raise PermissionDenied(detail='User not authorised to update auth token', code=401)\n\n return super(OrchestratorAuthViewSet, self).update(request, *args, **kwargs)\n\n def destroy(self, request, *args, **kwargs):\n auth = self.get_object()\n\n if not request.user.is_staff: # Standard User\n if auth is not None and auth.user != request.user:\n raise PermissionDenied(detail='User not authorised to delete auth token', code=401)\n\n return super(OrchestratorAuthViewSet, self).destroy(request, *args, **kwargs)\n" }, { "alpha_fraction": 0.6254432797431946, "alphanum_fraction": 0.633865237236023, "avg_line_length": 47, "blob_id": "c667136a995d96304700e781e33aa55dba8e6fe4", "content_id": "88c2cc1601eda8aefc62c3fce81064724a450fcf", "detected_licenses": [ "Apache-2.0", 
"LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2256, "license_type": "permissive", "max_line_length": 177, "num_lines": 47, "path": "/orchestrator/core/orc_server/command/migrations/0001_initial.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2 on 2019-04-04 18:39\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\nimport jsonfield.fields\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('actuator', '0001_initial'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='SentHistory',\n fields=[\n ('command_id', models.UUIDField(default=uuid.uuid4, editable=False, help_text='Unique UUID of the command', primary_key=True, serialize=False)),\n ('received_on', models.DateTimeField(default=django.utils.timezone.now, help_text='Time the command was received')),\n ('command', jsonfield.fields.JSONField(blank=True, help_text='Command that was received', null=True)),\n ('actuators', models.ManyToManyField(help_text='Actuators the command was sent to', to='actuator.Actuator')),\n ('user', models.ForeignKey(help_text='User that sent the command', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name_plural': 'Sent History',\n },\n ),\n migrations.CreateModel(\n name='ResponseHistory',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('received_on', models.DateTimeField(default=django.utils.timezone.now, help_text='Time the respose was received')),\n ('response', jsonfield.fields.JSONField(blank=True, help_text='Response that was received', null=True)),\n ('actuator', models.ForeignKey(help_text='Actuator response was received from', null=True, on_delete=django.db.models.deletion.PROTECT, to='actuator.Actuator')),\n ('command', models.ForeignKey(help_text='Command that was received', on_delete=django.db.models.deletion.CASCADE, to='command.SentHistory')),\n ],\n options={\n 'verbose_name_plural': 'Response History',\n },\n ),\n ]\n" }, { "alpha_fraction": 0.5091181397438049, "alphanum_fraction": 0.5161129236221313, "avg_line_length": 41.585105895996094, "blob_id": "0f073880b8f3f7a9a36d51276d3061f323e8dfd5", "content_id": "bcc3c74cca87b9939c2e60fb84e886b9364b0758", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4003, "license_type": "permissive", "max_line_length": 137, "num_lines": 94, "path": "/orchestrator/transport/https/HTTPS/https_transport.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import re\nimport urllib3\n\nfrom datetime import datetime\nfrom sb_utils import Producer, Consumer, default_encode, decode_msg, encode_msg, safe_json\n\n\ndef process_message(body, message):\n \"\"\"\n Callback when we receive a message from internal buffer to publish to waiting flask.\n :param body: Contains the message to be sent.\n :param message: Contains data about the message as well as headers\n \"\"\"\n http = 
urllib3.PoolManager(cert_reqs=\"CERT_NONE\")\n producer = Producer()\n\n body = body if isinstance(body, dict) else safe_json(body)\n rcv_headers = message.headers\n\n orc_socket = rcv_headers[\"source\"][\"transport\"][\"socket\"] # orch IP:port\n orc_id = rcv_headers[\"source\"][\"orchestratorID\"] # orchestrator ID\n corr_id = rcv_headers[\"source\"][\"correlationID\"] # correlation ID\n\n for device in rcv_headers[\"destination\"]:\n device_socket = device[\"socket\"] # device IP:port\n encoding = device[\"encoding\"] # message encoding\n\n if device_socket and encoding and orc_socket:\n for profile in device[\"profile\"]:\n print(f\"Sending command to {profile}@{device_socket}\")\n\n try:\n rsp = http.request(\n method=\"POST\",\n url=f\"https://{device_socket}\",\n body=encode_msg(body, encoding), # command being encoded\n headers={\n \"Content-type\": f\"application/openc2-cmd+{encoding};version=1.0\",\n # \"Status\": ..., # Numeric status code supplied by Actuator's OpenC2-Response\n \"X-Request-ID\": corr_id,\n \"Date\": f\"{datetime.utcnow():%a, %d %b %Y %H:%M:%S GMT}\", # RFC7231-7.1.1.1 -> Sun, 06 Nov 1994 08:49:37 GMT\n \"From\": f\"{orc_id}@{orc_socket}\",\n \"Host\": f\"{profile}@{device_socket}\",\n }\n )\n\n rsp_headers = dict(rsp.headers)\n if \"Content-type\" in rsp_headers:\n rsp_enc = re.sub(r\"^application/openc2-(cmd|rsp)\\+\", \"\", rsp_headers[\"Content-type\"])\n rsp_enc = re.sub(r\"(;version=\\d+\\.\\d+)?$\", \"\", rsp_enc)\n else:\n rsp_enc = \"json\"\n\n rsp_headers = {\n \"socket\": device_socket,\n \"correlationID\": corr_id,\n \"profile\": profile,\n \"encoding\": rsp_enc,\n \"transport\": \"https\"\n }\n\n data = {\n \"headers\": rsp_headers,\n \"content\": decode_msg(rsp.data.decode(\"utf-8\"), rsp_enc)\n }\n\n print(f\"Response from request: {rsp.status} - {safe_json(data)}\")\n producer.publish(message=data[\"content\"], headers=rsp_headers, exchange=\"orchestrator\", routing_key=\"response\")\n except Exception as err:\n err = str(getattr(err, \"message\", err))\n rcv_headers[\"error\"] = True\n producer.publish(message=err, headers=rcv_headers, exchange=\"orchestrator\", routing_key=\"response\")\n print(f\"HTTPS error: {err}\")\n\n else:\n response = \"Destination/Encoding/Orchestrator Socket of command not specified\"\n rcv_headers[\"error\"] = True\n producer.publish(message=str(response), headers=rcv_headers, exchange=\"orchestrator\", routing_key=\"response\")\n print(response)\n\n\nif __name__ == \"__main__\":\n print(\"Connecting to RabbitMQ...\")\n consumer = None\n try:\n consumer = Consumer(\n exchange=\"transport\",\n routing_key=\"https\",\n callbacks=[process_message],\n debug=True\n )\n\n except Exception as err:\n print(f\"Consumer Error: {err}\")\n # only shut down if the consumer was actually created\n if consumer:\n consumer.shutdown()\n" }, { "alpha_fraction": 0.5597765445709229, "alphanum_fraction": 0.5597765445709229, "avg_line_length": 18.88888931274414, "blob_id": "cd9753e2eb18a479c8f0b9112a5ad1b6e46f6fbb", "content_id": "8d2329eeae8b4f4b6b6edefc117a9097f3fe983d", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 895, "license_type": "permissive", "max_line_length": 48, "num_lines": 45, "path": "/orchestrator/gui/server/gui_server/tracking/admin.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import EventLog, RequestLog\n\n\nclass 
RequestLogAdmin(admin.ModelAdmin):\n \"\"\"\n Request Log model admin\n \"\"\"\n date_hierarchy = 'requested_at'\n list_display = (\n 'id',\n 'requested_at',\n 'response_ms',\n 'status_code',\n 'user',\n 'method',\n 'path',\n 'remote_addr',\n 'host'\n )\n\n list_filter = ('method', 'status_code')\n search_fields = ('path', 'user__email',)\n raw_id_fields = ('user', )\n\n\nclass EventLogAdmin(admin.ModelAdmin):\n \"\"\"\n Event Log model admin\n \"\"\"\n date_hierarchy = 'occurred_at'\n list_display = (\n 'id',\n 'user',\n 'occurred_at',\n 'level',\n 'message'\n )\n\n list_filter = ('level', )\n\n\n# Register Models\nadmin.site.register(RequestLog, RequestLogAdmin)\nadmin.site.register(EventLog, EventLogAdmin)\n" }, { "alpha_fraction": 0.6054848432540894, "alphanum_fraction": 0.6129285097122192, "avg_line_length": 34.45138931274414, "blob_id": "8897eb8849c3a8e6efb15809fbc8f0fa5a7c13fa", "content_id": "e6a1c346c3e712e93ed3d4d0996305a7ccf24c81", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5109, "license_type": "permissive", "max_line_length": 243, "num_lines": 144, "path": "/base/modules/utils/root/sb_utils/message_obj.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import struct\nimport uuid\n\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import (\n List,\n Union\n)\n\nfrom .message import decode_msg, encode_msg\n\n\nclass ContentType(Enum):\n \"\"\"\n The content format of an OpenC2 Message\n \"\"\"\n Binn = 1\n BSON = 2\n CBOR = 3\n JSON = 4\n MsgPack = 5\n S_Expression = 6\n # smile = 7\n XML = 8\n UBJSON = 9\n YAML = 10\n VPack = 11\n\n\nclass MessageType(Enum):\n \"\"\"\n The type of an OpenC2 Message\n \"\"\"\n Command = 1 # The Message content is an OpenC2 Command\n Response = 2 # The Message content is an OpenC2 Response\n\n\nclass Message:\n \"\"\"\n Message parameter holding class\n \"\"\"\n # to - Authenticated identifier(s) of the authorized recipient(s) of a message\n recipients: List[str]\n # from - Authenticated identifier of the creator of or authority for execution of a message\n origin: str\n # Creation date/time of the content.\n created: datetime\n # The type of OpenC2 Message\n msg_type: MessageType\n # Populated with a numeric status code in Responses\n status: int\n # A unique identifier created by the Producer and copied by Consumer into all Responses, in order to support reference to a particular Command, transaction, or event chain\n request_id: uuid.UUID\n # Media Type that identifies the format of the content, including major version\n # Incompatible content formats must have different content_types\n # Content_type application/openc2 identifies content defined by OpenC2 language specification versions 1.x, i.e., all versions that are compatible with version 1.0\n content_type: ContentType\n # Message body as specified by content_type and msg_type\n content: dict\n\n __slots__ = (\"recipients\", \"origin\", \"created\", \"msg_type\", \"status\", \"request_id\", \"content_type\", \"content\")\n\n def __init__(self, recipients: Union[str, List[str]] = \"\", origin: str = \"\", created: datetime = None, msg_type: MessageType = None, status: int = None, request_id: uuid.UUID = None, content_type: ContentType = None, content: dict = None):\n self.recipients = (recipients if isinstance(recipients, list) else 
[recipients]) if recipients else []\n self.origin = origin\n self.created = created or datetime.utcnow()\n self.msg_type = msg_type or MessageType.Command\n self.status = status or 404\n self.request_id = request_id or uuid.uuid4()\n self.content_type = content_type or ContentType.JSON\n self.content = content or {}\n\n def __setattr__(self, key, val):\n if key in self.__slots__:\n object.__setattr__(self, key, val)\n return\n raise AttributeError(\"Cannot set an unknown attribute\")\n\n def __str__(self):\n return f\"OpenC2 Message: <{self.msg_type.name}; {self.content}>\"\n\n @classmethod\n def load(cls, m: bytes) -> 'Message':\n msg = m.split(b\"\\xF5\\xBE\")\n if len(msg) != 8:\n raise ValueError(\"The OpenC2 message was not properly loaded\")\n [recipients, origin, created, msg_type, status, request_id, content_type, content] = msg\n return cls(\n recipients=list(filter(None, map(bytes.decode, recipients.split(b\"\\xF5\\xBD\")))),\n origin=origin.decode(),\n created=datetime.fromtimestamp(float(\".\".join(map(str, struct.unpack('LI', created))))),\n msg_type=MessageType(struct.unpack(\"B\", msg_type)[0]),\n status=struct.unpack(\"I\", status)[0],\n request_id=uuid.UUID(bytes=request_id),\n content_type=ContentType(struct.unpack(\"B\", content_type)[0]),\n content=decode_msg(content, 'cbor', raw=True)\n )\n\n @property\n def serialization(self) -> str:\n return self.content_type.name # message encoding\n\n @property\n def dict(self) -> dict:\n return dict(\n recipients=self.recipients,\n origin=self.origin,\n created=self.created,\n msg_type=self.msg_type,\n status=self.status,\n request_id=self.request_id,\n content_type=self.content_type,\n content=self.content\n )\n\n @property\n def list(self) -> list:\n return [\n self.recipients,\n self.origin,\n self.created,\n self.msg_type,\n self.status,\n self.request_id,\n self.content_type,\n self.content\n ]\n\n def serialize(self) -> Union[bytes, str]:\n return encode_msg(self.content, self.content_type.name.lower(), raw=True)\n\n def dump(self) -> bytes:\n return b\"\\xF5\\xBE\".join([ # §¥\n b\"\\xF5\\xBD\".join(map(str.encode, self.recipients)), # §¢\n self.origin.encode(),\n struct.pack('LI', *map(int, str(self.created.timestamp()).split(\".\"))),\n struct.pack(\"B\", self.msg_type.value),\n struct.pack(\"I\", self.status),\n self.request_id.bytes,\n struct.pack(\"B\", self.content_type.value),\n encode_msg(self.content, 'cbor', raw=True)\n ])\n" }, { "alpha_fraction": 0.5721271634101868, "alphanum_fraction": 0.5777156949043274, "avg_line_length": 26.528846740722656, "blob_id": "4c0c8ef4e8b7baa1a
e1858a0a570527bd987cbb3", "content_id": "f574a0c1434b1321a7e2b25bbb9bd35cda030c96", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2863, "license_type": "permissive", "max_line_length": 128, "num_lines": 104, "path": "/orchestrator/gui/client/src/reducers/actuator.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import * as actuator from '../actions/actuator'\n\nimport {\n checkSchema,\n mergeByProperty\n} from '../components/utils'\n\nconst initialState = {\n actuators: [],\n sort: '',\n count: 0,\n errors: {}\n}\n\nexport default (state=initialState, action=null) => {\n let actuators = []\n\n switch(action.type) {\n case actuator.GET_ACTUATORS_SUCCESS:\n let newActs = action.payload.results || 
[]\n actuators = action.meta.refresh ? newActs : mergeByProperty(state.actuators, newActs, 'actuator_id')\n\n return {\n ...state,\n count: action.payload.count || 0,\n actuators: actuators.map((act, i) => ({ ...act, schema: checkSchema(act.schema || {})})),\n sort: action.meta.sort,\n errors: {\n ...state.errors,\n [actuator.GET_ACTUATORS_FAILURE]: {}\n }\n }\n\n case actuator.CREATE_ACTUATOR_SUCCESS:\n setTimeout(() => {\n action.asyncDispatch(actuator.getActuators({page: 1, count: state.actuators.length+1, sort: state.sort, refresh: true}))\n }, 500)\n\n return {\n ...state,\n errors: {\n ...state.errors,\n [actuator.CREATE_ACTUATOR_FAILURE]: {}\n }\n }\n\n case actuator.GET_ACTUATOR_SUCCESS:\n let newAct = action.payload ? [action.payload] : []\n actuators = action.meta.refresh ? newAct : mergeByProperty(state.actuators, newAct, 'actuator_id')\n\n return {\n ...state,\n count: action.payload.count || 1,\n actuators: actuators.map((act, i) => ({ ...act, schema: checkSchema(act.schema || {})})),\n errors: {\n ...state.errors,\n [actuator.GET_ACTUATOR_FAILURE]: {}\n }\n }\n\n case actuator.UPDATE_ACTUATOR_SUCCESS:\n setTimeout(() => {\n action.asyncDispatch(actuator.getActuators({page: 1, count: state.actuators.length, sort: state.sort, refresh: true}))\n }, 500)\n\n return {\n ...state,\n errors: {\n ...state.errors,\n [actuator.UPDATE_ACTUATOR_FAILURE]: {}\n }\n }\n\n case actuator.DELETE_ACTUATOR_SUCCESS:\n setTimeout(() => {\n action.asyncDispatch(actuator.getActuators({page: 1, count: state.actuators.length, sort: state.sort, refresh: true}))\n }, 500)\n\n return {\n ...state,\n errors: {\n ...state.errors,\n [actuator.DELETE_ACTUATOR_FAILURE]: {}\n }\n }\n\n case actuator.GET_ACTUATORS_FAILURE:\n case actuator.CREATE_ACTUATOR_FAILURE:\n case actuator.GET_ACTUATOR_FAILURE:\n case actuator.UPDATE_ACTUATOR_FAILURE:\n case actuator.DELETE_ACTUATOR_FAILURE:\n console.log('Actuator Failure', action.type, action)\n return {\n ...state,\n errors: {\n ...state.errors,\n [action.type]: action.payload.response || {'non_field_errors': action.payload.statusText},\n }\n }\n\n default:\n return state\n }\n}\n" }, { "alpha_fraction": 0.6099071502685547, "alphanum_fraction": 0.6191950440406799, "avg_line_length": 22.851852416992188, "blob_id": "08c626a254751232e8e1692ad2a31944ad21d549", "content_id": "9a6a6747b1bc25994208eb0db1b1008dc45e4be8", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 646, "license_type": "permissive", "max_line_length": 101, "num_lines": 27, "path": "/orchestrator/gui/server/gui_server/utils/general.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport sys\nimport uuid\n\n\ndef prefixUUID(pre='PREFIX', max=30):\n uid_max = max - (len(pre) + 10)\n uid = str(uuid.uuid4()).replace('-', '')[:uid_max]\n return f'{pre}-{uid}'[:max]\n\n\ndef safe_cast(val, to_type, default=None):\n try:\n return to_type(val)\n except (ValueError, TypeError):\n return default\n\n\ndef to_str(s):\n \"\"\"\n Convert a given type to a default string\n :param s: item to convert to a string\n :return: converted string\n \"\"\"\n return s.decode(sys.getdefaultencoding(), \"backslashreplace\") if hasattr(s, \"decode\") else str(s)\n\n\n" }, { "alpha_fraction": 0.6275773048400879, "alphanum_fraction": 0.6288659572601318, 
"avg_line_length": 25.758621215820312, "blob_id": "577ace453e031ce60be755e45e04427ec80f8510", "content_id": "1ee107facfd6e0464867da327da5d03d361c50d0", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 776, "license_type": "permissive", "max_line_length": 67, "num_lines": 29, "path": "/orchestrator/gui/server/gui_server/webApp/management/commands/makemigrations_apps.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import os\nimport sys\n\nfrom django.conf import settings\nfrom django.core.management import ManagementUtility\nfrom django.core.management.base import BaseCommand\n\n\nclass Command(BaseCommand):\n \"\"\"\n Custom django command - makemigrations_apps\n Make migrations for the custom apps available to the Django app\n \"\"\"\n def handle(self, *args, **kwargs):\n \"\"\"\n Handle command execution\n :param args:\n :param kwargs:\n :return: None\n \"\"\"\n args = [sys.argv[0], 'makemigrations']\n\n for app in settings.INSTALLED_APPS:\n app_dir = os.path.join(settings.BASE_DIR, app)\n if os.path.isdir(app_dir):\n args.append(app)\n\n utility = ManagementUtility(args)\n utility.execute()\n" }, { "alpha_fraction": 0.7647058963775635, "alphanum_fraction": 0.7647058963775635, "avg_line_length": 16.14285659790039, "blob_id": "9ea39f58daab71a5a9b53d6ff9be0c8b2a8eb94d", "content_id": "1908079094d526e251791f86d87f0449c105b5e9", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 119, "license_type": "permissive", "max_line_length": 42, "num_lines": 7, "path": "/orchestrator/gui/client/src/components/admin/lib/index.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import SettingsModal from './settingModal'\nimport UserModal from './userModal'\n\nexport {\n SettingsModal,\n UserModal\n}" }, { "alpha_fraction": 0.738246500492096, "alphanum_fraction": 0.7594239711761475, "avg_line_length": 68.47058868408203, "blob_id": "ac553f6374d26279ff98baa04ee0c08d63039718", "content_id": "ffec40f4cbbc7552ce5b2ea9d5f02dcc3a1bd7cb", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2361, "license_type": "permissive", "max_line_length": 580, "num_lines": 34, "path": "/orchestrator/transport/coap/ReadMe.md", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# OASIS TC Open: oif-orchestrator-transport-coap\n## OpenC2 CoAP Transport\n\n### About this Image\n- This image is the CoAP transfer container for use with the O.I.F.\n- This Transfer is not standardized as of July 9, 2019\n- Implements CoAP utilizing [CoAPthon3](https://github.com/Tanganelli/CoAPthon3)\n\n### How to use this image\n#### Running Transport\n\nThe CoAP Transport Module is configured to run from a docker container as a part of the OIF-Orchestrator docker stack. 
Use the [configure.py](../../../configure.py) script to build the images needed to run the entirety of this Transport as a part of the Orchestrator.\n\n#### CoAP and OpenC2 Headers\n\nAt the time of writing, neither the OpenC2 specification nor the OpenC2 CoAP Transport spec has been finalized. The OpenC2 Headers have been included into the CoAP Request as follows:\n\n```python\nrequest.source = (\"localhost\", \"5683\") # From - IP, Port of CoAP Client sending\nrequest.destination = (\"localhost\", \"5682\") # To - IP, Port of CoAP Server receiving\nrequest.content_type = \"application/json\" # Content Type\nrequest.mid = \"0x1AB2FE\" # Request_ID - limited to 16-bits using CoAP\nrequest.timestamp = \"Wed, 22 May 2019 16:12:23 UTC\", # Created - message was created by Orchestrator\n```\n\nIn addition to the required OpenC2 headers above, our O.I.F. implementation needs two CoAP Options added to work properly. A minimal sketch of setting both options is shown below under *Example: Setting the Custom Options*.\n\n* `profile` - Given option number 8 (a number that nominally belongs to another, unused option with the same data type); it contains the actuator profile name needed to route the OpenC2 Command to the proper actuator on the OIF-Device side.\n\n* `source_socket` - Given option number 3 (a number that nominally belongs to another, unused option with the same data type); it contains the IP/Port of the Orchestrator which sent the command. This value is included here as well as in request.source because of the docker implementation: the request.source value is overwritten by the library with the location of the Docker Network rather than the actual machine, which prevents a proper OpenC2 Response from reaching the Orchestrator once the actuator has created and sent one. This option only needs to be set if run using Docker.\n\n#### Port Info\n\nThe default port for the CoAP Transport on the Orchestrator side is 5683, the default for registering the demo-device is 5682.
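\n\n#### Example: Setting the Custom Options\n\nThe snippet below is a minimal, untested sketch of how the two custom options described above could be attached to an outgoing request with CoAPthon3. The option numbers follow the section above; the destination socket, payload, profile name (`slpf` here), and orchestrator socket are placeholder values, not part of any spec.\n\n```python\nfrom coapthon.messages.option import Option\nfrom coapthon.messages.request import Request\n\nrequest = Request()\nrequest.destination = (\"localhost\", 5682) # CoAP server on the device side\nrequest.content_type = \"application/json\"\nrequest.payload = '{\"action\": \"query\", \"target\": {\"features\": []}}' # sample OpenC2 command\n\n# actuator profile name - routes the command on the OIF-Device side (hypothetical profile)\nprofile = Option()\nprofile.number = 8\nprofile.value = \"slpf\"\nrequest.add_option(profile)\n\n# IP:port of the sending Orchestrator, so the response can find its way back\nsource_socket = Option()\nsource_socket.number = 3\nsource_socket.value = \"localhost:5683\"\nrequest.add_option(source_socket)\n```\n"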
}, { "alpha_fraction": 0.6902356743812561, "alphanum_fraction": 0.6902356743812561, "avg_line_length": 21.846153259277344, "blob_id": "609a4f3a73382160444ad865628ef4f011b8c524", "content_id": "1e545636a8d299b6ccca7b6b37f63a9ff18dabe0", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 297, "license_type": "permissive", "max_line_length": 56, "num_lines": 13, "path": "/orchestrator/core/orc_server/tracking/urls/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.urls import include, path\n\nfrom .api import urlpatterns as api_patterns\nfrom .gui import urlpatterns as gui_patterns\n\n\nurlpatterns = [\n # API Patterns\n path('api/', include(api_patterns), name='log.api'),\n\n # GUI Patterns\n path('', include(gui_patterns), name='log.gui')\n]\n" }, { "alpha_fraction": 0.49173155426979065, "alphanum_fraction": 0.5003584027290344, "avg_line_length": 29.85888671875, "blob_id": "cde51bd34b293fdea16625b839869b2f79f1e52a", "content_id": "e8c6054a21e633002e0b50dda3a3da80f3ebd2d2", "detected_licenses": [ "MIT", "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 40455, "license_type": "permissive", "max_line_length": 141, "num_lines": 1311, "path": "/orchestrator/gui/client/src/components/utils/jadn-editor/tokenize.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import { format } from 'react-json-editor-ajrm/locale';\nimport defaultLocale from 'react-json-editor-ajrm/locale/en';\n\nimport {\n deleteCharAt,\n followedBySymbol,\n followsSymbol,\n newSpan,\n typeFollowed\n} from './utils'\n\n\n// DOM Node || OnBlue or Update\n// Helper Functions\nconst finalPush = (buffer, prefix='') => {\n if (buffer.active) {\n buffer.quarks.push({\n string: buffer[buffer.active],\n type: prefix + '-' + buffer.active\n });\n buffer[buffer.active] = '';\n buffer.active = false;\n }\n}\n\nconst pushAndStore = (buffer, char, type, prefix='') => {\n switch(type) {\n case 'symbol':\n case 'delimiter':\n if (buffer.active) {\n buffer.quarks.push({\n string: buffer[buffer.active],\n type: prefix + '-' + buffer.active\n });\n }\n buffer[buffer.active] = '';\n buffer.active = type;\n buffer[buffer.active] = char;\n break;\n default:\n if (type !== buffer.active || ([buffer.string, char].indexOf('\\n') > -1)) {\n if (buffer.active) {\n buffer.quarks.push({\n string: buffer[buffer.active],\n type: prefix + '-' + buffer.active\n });\n }\n buffer[buffer.active] = '';\n buffer.active = type;\n buffer[buffer.active] = char;\n } else{\n buffer[type] += char;\n }\n break;\n }\n}\n\nconst quarkize = (text, prefix='') => {\n let buffer = {\n active: false,\n string: '',\n number: '',\n symbol: '',\n space: '',\n delimiter: '',\n quarks: []\n };\n\n text.split('').forEach((char, i) => {\n switch(char) {\n case '\"':\n case \"'\":\n pushAndStore(buffer, char, 'delimiter', prefix);\n break;\n case ' ':\n case '\\u00A0':\n pushAndStore(buffer, char, 'space', prefix);\n break;\n case '{':\n case '}':\n case '[':\n case ']':\n case ':':\n case ',':\n pushAndStore(buffer, char, 'symbol', prefix);\n break;\n case '0':\n case '1':\n case '2':\n case '3':\n case '4':\n case '5':\n case '6':\n case 
'7':\n case '8':\n case '9':\n pushAndStore(buffer, char, buffer.active === 'string' ? 'string' : 'number', prefix);\n break;\n case '-':\n if (i < text.length - 1 && '0123456789'.indexOf(text.charAt(i + 1)) > -1) {\n pushAndStore(buffer, char, 'number', prefix);\n break;\n }\n case '.':\n if (i < text.length - 1 && i > 0 && '0123456789'.indexOf(text.charAt(i + 1)) > -1 && '0123456789'.indexOf(text.charAt(i - 1)) > -1) {\n pushAndStore(buffer, char, 'number', prefix);\n break;\n }\n default:\n pushAndStore(buffer, char, 'string', prefix);\n break;\n }\n })\n\n finalPush(buffer, prefix);\n return buffer.quarks;\n}\n\nconst validToken = (string, type) => {\n const quotes = '\\'\"';\n let firstChar = '',\n lastChar = '',\n quoteType = false;\n\t\n switch(type) {\n case 'primitive':\n if (['true','false','null','undefined'].indexOf(string) === -1) return false;\n break;\n case 'string':\n if (string.length < 2) return false;\n firstChar = string.charAt(0);\n lastChar = string.charAt(string.length-1);\n quoteType = quotes.indexOf(firstChar);\n if (quoteType === -1 || firstChar !== lastChar) return false;\n for (var i = 0; i < string.length; i++) {\n if (i > 0 && i < string.length - 1 && string.charAt(i) === quotes[quoteType] && string.charAt(i - 1) !== '\\\\') return false;\n }\n break;\n case 'key':\n if (string.length === 0) return false;\n firstChar = string.charAt(0);\n lastChar = string.charAt(string.length-1);\n quoteType = quotes.indexOf(firstChar);\n if (quoteType > -1) {\n if (string.length===1 || firstChar !== lastChar) return false;\n for (var i = 0; i < string.length; i++) {\n if (i > 0 && i < string.length - 1 && string.charAt(i)===quotes[quoteType] && string.charAt(i - 1) !== '\\\\') return false;\n }\n } else {\n const nonAlphanumeric = '\\'\"`.,:;{}[]&<>=~*%\\\\|/-+!?@^ \\xa0';\n for (var i = 0; i < nonAlphanumeric.length; i++) {\n const nonAlpha = nonAlphanumeric.charAt(i);\n if (string.indexOf(nonAlpha) > -1) return false;\n }\n }\n break;\n case 'number':\n for (var i = 0; i < string.length ; i++) {\n if ('0123456789'.indexOf(string.charAt(i)) === -1) {\n if (i === 0) {\n if ('-' !== string.charAt(0)) return false;\n } else if ('.' !== string.charAt(i)) return false;\n }\n }\n break;\n case 'symbol':\n if (string.length > 1 || '{[:]},'.indexOf(string) === -1) return false;\n break;\n case 'colon':\n if (string.length > 1 || ':' !== string) return false;\n break;\n default:\n return true;\n break;\n }\n return true;\n}\n\nconst tokenFollowed = (buffer) => {\n const last = buffer.tokens_normalize.length - 1;\n if (last < 1) return false;\n\n for (var i = last; i >= 0; i--) {\n const previousToken = buffer.tokens_normalize[i];\n switch(previousToken.type) {\n case 'space':\n case 'linebreak':\n break;\n default:\n return previousToken;\n break;\n }\n }\n return false;\n}\n\n// Main Function\nexport const DomNode_Update = (obj, locale=defaultLocale, colors) => {\n const containerNode = obj.cloneNode(true),\n hasChildren = containerNode.hasChildNodes();\n\n if (!hasChildren) return '';\n const children = containerNode.childNodes;\n\n let buffer = {\n tokens_unknown: [],\n tokens_proto: [],\n tokens_split: [],\n tokens_fallback: [],\n tokens_normalize: [],\n tokens_merge: [],\n tokens_plainText: '',\n indented: '',\n json: '',\n jsObject: undefined,\n markup: ''\n }\n\n children.forEach((child, i) => {\n switch(child.nodeName) {\n case 'SPAN':\n buffer.tokens_unknown.push({\n string: child.textContent,\n type: child.attributes.type.textContent\n });\n break;\n case 'DIV':\n buffer.tokens_unknown.push({ string : child.textContent, type : 'unknown' });\n break;\n case 'BR' :\n if (child.textContent==='') {\n buffer.tokens_unknown.push({ string : '\\n', type : 'unknown' });\n }\n break;\n case '#text':\n buffer.tokens_unknown.push({ string : child.wholeText, type : 'unknown' });\n break;\n case 'FONT':\n buffer.tokens_unknown.push({ string : child.textContent, type : 'unknown' });\n break;\n default:\n console.error('Unrecognized node:', { child })\n break;\n }\n })\n\n buffer.tokens_proto = buffer.tokens_unknown.map(token => quarkize(token.string, 'proto')).reduce((all, quarks) => all.concat(quarks), [])\n\n buffer.tokens_proto.forEach(token => {\n if (token.type.indexOf('proto') === -1) {\n if (!validToken(token.string,token.type)) {\n buffer.tokens_split = buffer.tokens_split.concat(quarkize(token.string, 'split'));\n } else {\n buffer.tokens_split.push(token);\n }\n } else {\n buffer.tokens_split.push(token);\n }\n })\n\n buffer.tokens_fallback = buffer.tokens_split.map(token => {\n let type = token.type,\n fallback = [];\n\n if (type.indexOf('-') > -1) {\n type = type.slice(type.indexOf('-') + 1);\n if (type !== 'string') {\n fallback.push('string')\n }\n fallback.push('key');\n fallback.push('error');\n }\n\n return {\n string: token.string,\n length: token.string.length,\n type: type,\n fallback: fallback\n }\n })\n\n let buffer2 = {\n brackets: [],\n isValue: false,\n stringOpen: false\n };\n\n buffer.tokens_normalize = buffer.tokens_fallback.map((token, i) => {\n let normalToken = {\n type: token.type,\n string: token.string\n };\n\n switch(normalToken.type) {\n case 'symbol':\n case 'colon':\n if (buffer2.stringOpen) {\n normalToken.type = buffer2.isValue ? 
'string' : 'key'\n break;\n }\n switch(normalToken.string) {\n case '[':\n case '{':\n buffer2.brackets.push(normalToken.string);\n buffer2.isValue = buffer2.brackets[buffer2.brackets.length - 1] === '[';\n break;\n case ']':\n case '}':\n buffer2.brackets.pop();\n buffer2.isValue = buffer2.brackets[buffer2.brackets.length - 1] === '[';\n break;\n case ',':\n if (tokenFollowed(buffer).type === 'colon') break;\n buffer2.isValue = buffer2.brackets[buffer2.brackets.length - 1] === '[';\n break;\n case ':':\n normalToken.type = 'colon';\n buffer2.isValue = true;\n break;\n }\n break;\n case 'delimiter':\n normalToken.type = buffer2.isValue ? 'string' : 'key'\n if (!buffer2.stringOpen) {\n buffer2.stringOpen = normalToken.string;\n break;\n }\n if (i > 0) {\n const previousToken = buffer.tokens_fallback[i - 1],\n _string = previousToken.string,\n _type = previousToken.type,\n _char = _string.charAt(_string.length - 1);\n if (_type === 'string' && _char === '\\\\') break;\n }\n if (buffer2.stringOpen === normalToken.string) {\n buffer2.stringOpen = false;\n break;\n }\n break;\n case 'primitive':\n case 'string':\n if (['false','true','null','undefined'].indexOf(normalToken.string) > -1) {\n const lastIndex = buffer.tokens_normalize.length - 1;\n if (lastIndex >= 0) {\n if (buffer.tokens_normalize[lastIndex].type !== 'string') {\n normalToken.type = 'primitive';\n break;\n }\n normalToken.type = 'string';\n break;\n }\n normalToken.type = 'primitive';\n break;\n }\n if (normalToken.string === '\\n') {\n if (!buffer2.stringOpen) {\n normalToken.type = 'linebreak';\n break;\n }\n }\n normalToken.type = buffer2.isValue ? 'string' : 'key';\n break;\n case 'space':\n if (buffer2.stringOpen) {\n normalToken.type = buffer2.isValue ? 'string' : 'key';\n }\n break;\n case 'number':\n if (buffer2.stringOpen) {\n normalToken.type = buffer2.isValue ? 'string' : 'key';\n }\n break;\n default:\n break;\n }\n return normalToken\n })\n\n for (var i = 0; i < buffer.tokens_normalize.length; i++) {\n const token = buffer.tokens_normalize[i];\n let mergedToken = {\n string: token.string,\n type: token.type,\n tokens: [i]\n };\n\n if (['symbol', 'colon'].indexOf(token.type) === -1 && i + 1 < buffer.tokens_normalize.length) {\n let count = 0;\n for (var u = i + 1; u < buffer.tokens_normalize.length; u++) {\n const nextToken = buffer.tokens_normalize[u];\n if (token.type !== nextToken.type) break;\n mergedToken.string += nextToken.string;\n mergedToken.tokens.push(u);\n count++;\n }\n i += count;\n }\n buffer.tokens_merge.push(mergedToken);\n }\n\n const quotes = '\\'\"',\n alphanumeric = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$';\n\t\n var error = false,\n line = buffer.tokens_merge.length > 0 ? 
1 : 0;\n\t\n const setError = (tokenID, reason, offset=0) => {\n error = {\n token: tokenID,\n line: line,\n reason: reason\n };\n buffer.tokens_merge[tokenID + offset].type = 'error';\n }\n\n let bracketList = [];\n\t\n // Break apart??\n for (var i = 0; i < buffer.tokens_merge.length; i++) {\n if (error) break;\n let token = buffer.tokens_merge[i],\n string = token.string,\n type = token.type,\n found = false;\n\n switch(type) {\n case 'space':\n break;\n case 'linebreak':\n line++;\n break;\n case 'symbol':\n switch(string) {\n case '{':\n case '[':\n found = followsSymbol(buffer.tokens_merge, i, ['}', ']']);\n if (found) {\n setError(i, format(locale.invalidToken.tokenSequence.prohibited, {\n firstToken: buffer.tokens_merge[found].string,\n secondToken: string\n }));\n break;\n } else if (string === '[' && i > 0) {\n if (!followsSymbol(buffer.tokens_merge, i ,[':', '[', ','])){\n setError(i, format(locale.invalidToken.tokenSequence.permitted, {\n firstToken: \"[\",\n secondToken: [\":\", \"[\", \",\"]\n }));\n break;\n }\n } else if (string === '{') {\n if (followsSymbol(buffer.tokens_merge, i, ['{'])) {\n setError(i, format(locale.invalidToken.double, {\n token: \"{\"\n }));\n break;\n }\n }\n buffer2.brackets.push(string);\n buffer2.isValue = buffer2.brackets[buffer2.brackets.length - 1] === '[';\n bracketList.push({\n i: i,\n line: line,\n string: string\n });\n break;\n case '}':\n case ']':\n if (string === '}') {\n if (buffer2.brackets[buffer2.brackets.length-1] !== '{') {\n setError(i, format(locale.brace.curly.missingOpen));\n break;\n } else if (followsSymbol(buffer.tokens_merge, i, [','])) {\n setError(i, format(locale.invalidToken.tokenSequence.prohibited, {\n firstToken: \",\",\n secondToken: \"}\"\n }));\n break;\n }\n } else if (string === ']') {\n if (buffer2.brackets[buffer2.brackets.length-1] !== '[') {\n setError(i, format(locale.brace.square.missingOpen));\n break;\n } else if (followsSymbol(buffer.tokens_merge, i, [':'])){\n setError(i, format(locale.invalidToken.tokenSequence.prohibited, {\n firstToken: \":\",\n secondToken: \"]\"\n }));\n break;\n }\n }\n buffer2.brackets.pop();\n buffer2.isValue = buffer2.brackets[buffer2.brackets.length - 1] === '[';\n bracketList.push({\n i: i,\n line: line,\n string: string\n });\n break;\n case ',':\n found = followsSymbol(buffer.tokens_merge, i, ['{']);\n if (found) {\n if (followedBySymbol(buffer.tokens_merge, i, ['}'])) {\n setError(i, format(locale.brace.curly.cannotWrap, {\n token: \",\"\n }));\n break;\n }\n setError(i, format(locale.invalidToken.tokenSequence.prohibited, {\n firstToken: \"{\",\n secondToken: \",\"\n }));\n break;\n } else if (followedBySymbol(buffer.tokens_merge, i, ['}', ',', ']'])) {\n setError(i, format(locale.noTrailingOrLeadingComma));\n break;\n }\n found = typeFollowed(buffer.tokens_merge, i);\n switch(found) {\n case 'key':\n case 'colon':\n setError(i, format(locale.invalidToken.termSequence.prohibited, {\n firstTerm: found === 'key' ? 
locale.types.key : locale.symbols.colon,\n secondTerm: locale.symbols.comma\n }));\n break;\n case 'symbol':\n if (followsSymbol(buffer.tokens_merge, i, ['{'])) {\n setError(i, format(locale.invalidToken.tokenSequence.prohibited, {\n firstToken: \"{\",\n secondToken: \",\"\n }));\n break;\n }\n break;\n default:\n break;\n }\n buffer2.isValue = buffer2.brackets[buffer2.brackets.length - 1] === '[';\n break;\n default:\n break;\n }\n buffer.json += string;\n break;\n case 'colon':\n found = followsSymbol(buffer.tokens_merge, i, ['[']);\n if (found && followedBySymbol(buffer.tokens_merge, i, [']'])) {\n setError(i, format(locale.brace.square.cannotWrap, {\n token: \":\"\n }));\n break;\n }\n if (found) {\n setError(i, format(locale.invalidToken.tokenSequence.prohibited, {\n firstToken: \"[\",\n secondToken: \":\"\n }));\n break;\n }\n if (typeFollowed(buffer.tokens_merge, i) !== 'key') {\n setError(i, format(locale.invalidToken.termSequence.permitted, {\n firstTerm: locale.symbols.colon,\n secondTerm: locale.types.key\n }));\n break;\n }\n if (followedBySymbol(buffer.tokens_merge, i, ['}', ']'])) {\n setError(i, format(locale.invalidToken.termSequence.permitted, {\n firstTerm: locale.symbols.colon,\n secondTerm: locale.types.value\n }));\n break;\n }\n buffer2.isValue = true;\n buffer.json += string;\n break;\n case 'key':\n case 'string':\n let firstChar = string.charAt(0),\n lastChar = string.charAt(string.length - 1),\n quote_primary = quotes.indexOf(firstChar);\n\n if (quotes.indexOf(firstChar) === -1 && quotes.indexOf(lastChar) !== -1) {\n setError(i, format(locale.string.missingOpen, {\n quote: firstChar\n }));\n break;\n }\n if (quotes.indexOf(lastChar) === -1 && quotes.indexOf(firstChar) !== -1) {\n setError(i,format(locale.string.missingClose, {\n quote: firstChar,\n }));\n break;\n }\n if (quotes.indexOf(firstChar) > -1 && firstChar !== lastChar) {\n setError(i,format(locale.string.missingClose, {\n quote: firstChar,\n }));\n break;\n }\n if ('string' === type && quotes.indexOf(firstChar) === -1 && quotes.indexOf(lastChar) === -1) {\n setError(i,format(locale.string.mustBeWrappedByQuotes));\n break;\n }\n if ('key' === type && followedBySymbol(buffer.tokens_merge, i,['}',']'])) {\n setError(i,format(locale.invalidToken.termSequence.permitted, {\n firstTerm: locale.types.key,\n secondTerm: locale.symbols.colon\n }));\n }\n if (quotes.indexOf(firstChar)===-1 && quotes.indexOf(lastChar) === -1) {\n for (var h = 0; h < string.length; h++) {\n if (error) break;\n const c = string.charAt(h);\n if (alphanumeric.indexOf(c) === -1) {\n setError(i, format(locale.string.nonAlphanumeric, {\n token: c,\n }));\n break;\n }\n }\n }\n string = '\"' + (firstChar === \"'\" ? 
string.slice(1,-1) : string) + '\"';\n if ('key' === type) {\n if ('key' === typeFollowed(buffer.tokens_merge, i)) {\n if (i > 0 && !isNaN(buffer.tokens_merge[i-1])) {\n buffer.tokens_merge[i-1] += buffer.tokens_merge[i];\n setError(i,format(locale.key.numberAndLetterMissingQuotes));\n break;\n }\n setError(i,format(locale.key.spaceMissingQuotes));\n break;\n }\n if (!followsSymbol(buffer.tokens_merge, i,['{', ','])) {\n setError(i,format(locale.invalidToken.tokenSequence.permitted, {\n firstToken: type,\n secondToken: [\"{\", \",\"]\n }));\n break;\n }\n if (buffer2.isValue) {\n setError(i, format(locale.string.unexpectedKey));\n break;\n }\n }\n if ('string' === type) {\n if (!followsSymbol(buffer.tokens_merge, i,['[', ':', ','])) {\n setError(i,format(locale.invalidToken.tokenSequence.permitted, {\n firstToken: type,\n secondToken: [\"[\", \":\", \",\"]\n }));\n break;\n }\n if (!buffer2.isValue) {\n setError(i,format(locale.key.unexpectedString));\n break;\n }\n }\n buffer.json += string;\n break;\n case 'number':\n case 'primitive':\n if (followsSymbol(buffer.tokens_merge, i, ['{'])) {\n buffer.tokens_merge[i].type = 'key';\n type = buffer.tokens_merge[i].type;\n string = '\"' + string + '\"';\n } else if (typeFollowed(buffer.tokens_merge, i) === 'key') {\n buffer.tokens_merge[i].type = 'key';\n type = buffer.tokens_merge[i].type;\n } else if (!followsSymbol(buffer.tokens_merge, i, ['[', ':', ','])) {\n setError(i,format(locale.invalidToken.tokenSequence.permitted, {\n firstToken: type,\n secondToken: [\"[\", \":\", \",\"]\n }));\n break;\n }\n if (type !== 'key' && !buffer2.isValue) {\n buffer.tokens_merge[i].type = 'key';\n type = buffer.tokens_merge[i].type;\n string = '\"' + string + '\"';\n }\n if (type === 'primitive' && string === 'undefined') {\n setError(i,format(locale.invalidToken.useInstead, {\n badToken: \"undefined\",\n goodToken: \"null\"\n }));\n }\n buffer.json += string;\n break;\n }\n }\n\n let noEscapedSingleQuote = '';\n\n for (var i = 0; i < buffer.json.length; i++) {\n let current = buffer.json.charAt(i),\n next = buffer.json.charAt(i+1) || '';\n if (i + 1 < buffer.json.length) {\n if (current === '\\\\' && next === \"'\") {\n noEscapedSingleQuote += next;\n i++;\n continue;\n }\n }\n noEscapedSingleQuote += current;\n }\n\n buffer.json = noEscapedSingleQuote;\n\n if (!error) {\n const maxIterations = Math.ceil(bracketList.length / 2);\n let round = 0,\n delta = false;\n\n const removePair = (index) => {\n bracketList.splice(index + 1,1);\n bracketList.splice(index, 1);\n if (!delta) {\n delta = true;\n }\n }\n\n while (bracketList.length > 0) {\n delta = false;\n for (var tokenCount = 0; tokenCount < bracketList.length - 1; tokenCount++) {\n const pair = bracketList[tokenCount].string + bracketList[tokenCount+1].string;\n if (['[]', '{}'].indexOf(pair) > -1) {\n removePair(tokenCount);\n }\n }\n round++;\n if (!delta) {\n break;\n }\n if (round >= maxIterations) {\n break;\n }\n }\n\n if (bracketList.length > 0) {\n const _tokenString = bracketList[0].string,\n _tokenPosition = bracketList[0].i,\n _closingBracketType = _tokenString === '[' ? ']' : '}';\n line = bracketList[0].line;\n setError(_tokenPosition, format(locale.brace[_closingBracketType === ']' ? 
'square' : 'curly'].missingClose));\n }\n\n if ([undefined, ''].indexOf(buffer.json) === -1) {\n try {\n buffer.jsObject = JSON.parse(buffer.json);\n } catch(err) {\n const errorMessage = err.message,\n subsMark = errorMessage.indexOf('position');\n if (subsMark === -1) {\n throw new Error('Error parsing failed');\n }\n const errPositionStr = errorMessage.substring(subsMark + 9, errorMessage.length),\n errPosition = parseInt(errPositionStr);\n let charTotal = 0,\n tokenIndex = 0,\n token = false,\n _line = 1,\n exitWhile = false;\n\n while (charTotal < errPosition && !exitWhile) {\n token = buffer.tokens_merge[tokenIndex];\n if ('linebreak'===token.type) {\n _line++;\n }\n if (['space','linebreak'].indexOf(token.type)===-1) {\n charTotal += token.string.length;\n }\n if (charTotal >= errPosition) {\n break;\n }\n tokenIndex++;\n if (!buffer.tokens_merge[tokenIndex+1]) {\n exitWhile = true;\n }\n }\n\n line = _line;\n let backslashCount = 0;\n for (let i = 0; i < token.string.length; i++) {\n const char = token.string.charAt(i);\n if (char==='\\\\') {\n backslashCount = backslashCount > 0 ? backslashCount + 1 : 1;\n } else {\n if (backslashCount % 2 !== 0 || backslashCount === 0) {\n if ('\\'\"bfnrt'.indexOf(char) === -1) {\n setError(tokenIndex,format(locale.invalidToken.unexpected, {\n token: '\\\\'\n }));\n }\n }\n backslashCount = 0;\n }\n }\n if (!error) {\n setError(tokenIndex,format(locale.invalidToken.unexpected, {\n token: token.string\n }));\n }\n }\n }\n }\n\n let _line = 1,\n _depth = 0;\n const lastIndex = buffer.tokens_merge.length - 1;\n\t\n const newIndent = () => Array(_depth * 2).fill('&nbsp;').join('');\n\t\n const newLineBreak = (byPass=false) => {\n _line++;\n return (_depth > 0 || byPass) ? '<br>' : '';\n }\n\t\n const newLineBreakAndIndent = (byPass=false) => newLineBreak(byPass) + newIndent();\n\t\n if (error) {\n let _line_fallback = 1;\n const countCarrigeReturn = (string) => {\n let count = 0;\n for (var i = 0; i < string.length; i++) {\n if (['\\n','\\r'].indexOf(string[i]) > -1) count++;\n }\n return count;\n }\n _line = 1;\n\n for (var i = 0; i < buffer.tokens_merge.length; i++) {\n const token = buffer.tokens_merge[i],\n type = token.type,\n string = token.string;\n if (type === 'linebreak') {\n _line++;\n }\n buffer.markup += newSpan(i, token, _depth, colors);\n _line_fallback += countCarrigeReturn(string);\n }\n\n _line++;\n _line_fallback++;\n if (_line < _line_fallback) {\n _line = _line_fallback;\n }\n const isFunction = (functionToCheck) => functionToCheck && {}.toString.call(functionToCheck) === '[object Function]';\n\n /*\n if ('modifyErrorText' in this.props && isFunction(this.props.modifyErrorText)) {\n error.reason = this.props.modifyErrorText(error.reason);\n }\n */\n } else {\n // FORMAT BY TOKEN!!\n // TODO: Simplify this....\n for (var i = 0; i < buffer.tokens_merge.length; i++) {\n const token = buffer.tokens_merge[i];\n switch(token.type) {\n case 'string':\n case 'number':\n case 'primitive':\n case 'error':\n // buffer.markup += followsSymbol(buffer.tokens_merge, i, [',', '[']) ? 
newLineBreakAndIndent() : '';\n buffer.markup += newSpan(i, token, _depth, colors);\n break;\n case 'key':\n buffer.markup += (newLineBreakAndIndent() + newSpan(i, token, _depth, colors));\n break;\n case 'colon':\n buffer.markup += (newSpan(i, token, _depth, colors) + '&nbsp;');\n break;\n case 'symbol':\n const islastToken = i === lastIndex;\n switch(token.string) {\n case '{':\n buffer.markup += newSpan(i, token, _depth, colors);\n _depth++;\n break;\n case '}':\n _depth = _depth > 0 ? _depth - 1 : _depth;\n const _adjustment = i > 0 ? followsSymbol(buffer.tokens_merge, i, ['[', '{']) ? '' : newLineBreakAndIndent(islastToken) : '';\n buffer.markup += (_adjustment + newSpan(i, token, _depth, colors));\n break;\n case '[':\n if (followsSymbol(buffer.tokens_merge, i, ['['])) {\n _depth++;\n buffer.markup += newLineBreakAndIndent();\n }\n buffer.markup += newSpan(i, token, _depth, colors);\n break;\n case ']':\n let tmp_token = { ...token },\n ind_bool = false;\n if (followsSymbol(buffer.tokens_merge, i, [']'])) {\n if (followedBySymbol(buffer.tokens_merge, i, [']'])) {\n if (followedBySymbol(buffer.tokens_merge, i+1, [','])) {\n _depth = _depth >= 1 ? _depth - 1 : _depth;\n ind_bool = true;\n i++;\n } else if (followedBySymbol(buffer.tokens_merge, i+1, [']'])) {\n _depth = _depth >= 1 ? _depth - 1 : _depth;\n ind_bool = true;\n }\n } else if (followedBySymbol(buffer.tokens_merge, i, ['}'])) {\n _depth = _depth >= 1 ? _depth - 1 : _depth;\n ind_bool = true;\n }\n }\n buffer.markup += ((ind_bool ? newLineBreakAndIndent() : '') + newSpan(i, tmp_token, _depth, colors));\n break;\n case ',':\n buffer.markup += newSpan(i, token, _depth, colors);\n if (followsSymbol(buffer.tokens_merge, i, [']']) && followedBySymbol(buffer.tokens_merge, i, ['['])) {\n buffer.markup += newLineBreakAndIndent();\n }\n break;\n default:\n buffer.markup += newSpan(i, token, _depth, colors);\n break;\n }\n break;\n }\n }\n }\n\n for (var i = 0; i < buffer.tokens_merge.length; i++) {\n let token = buffer.tokens_merge[i];\n buffer.indented += token.string;\n if (['space', 'linebreak'].indexOf(token.type) === -1) {\n buffer.tokens_plainText += token.string;\n }\n }\n\t\n return {\n tokens: buffer.tokens_merge,\n noSpaces: buffer.tokens_plainText,\n indented: buffer.indented,\n json: buffer.json,\n jsObject: buffer.jsObject,\n markup: buffer.markup,\n lines: _line,\n error: error\n };\n}\n\n\n// JS OBJECTS || PLACEHOLDER\n// Helper Functions\nconst stringHasQuotes = (str) => str.match(/^[\\'\\\"].*[\\'\\\"]$/) ? 
true : false;\n\nconst stringMayRemoveQuotes = (nonAlphaNumeric, text) => {\n let numberAndLetter = false;\n\t\n for (var i = 0; i < text.length; i++) {\n if (i === 0) if (isNaN(text.charAt(i))) break;\n if (isNaN(text.charAt(i))) {\n numberAndLetter = true;\n break;\n }\n }\n return !(nonAlphaNumeric.length > 0 || numberAndLetter);\n}\n\nconst stripQuotesFromKey = (text) => {\n if (text.length === 0) return text;\n if (['\"\"', \"''\"].indexOf(text) > -1) return \"''\";\n let wrappedInQuotes = false;\n\n if (text.match(/^[\\\"\\'].*[\\\"\\']$/)) {\n wrappedInQuotes = true;\n }\n\n if (wrappedInQuotes && text.length >= 2) text = text.slice(1, -1);\n const nonAlphaNumeric = text.replace(/\\w/g,''),\n alphaNumeric = text.replace(/\\W+/g,''),\n mayRemoveQuotes = stringMayRemoveQuotes(nonAlphaNumeric, text),\n hasQuotes = stringHasQuotes(nonAlphaNumeric);\n\n if (hasQuotes) {\n let newText = '';\n const charList = text.split('');\n for (var ii = 0; ii < charList.length; ii++) {\n let char = charList[ii];\n if ([\"'\",'\"'].indexOf(char)>-1) char = '\\\\' + char;\n newText += char;\n }\n text = newText;\n }\n return mayRemoveQuotes ? text : \"'\" + text + \"'\";\n}\n\n// Cleanup??\nconst add_tokenSecondary = (buffer) => {\n if (buffer.tokenSecondary.length === 0) return false;\n buffer.tokens.push(buffer.tokenSecondary);\n buffer.tokenSecondary = '';\n return true;\n}\n\n// Cleanup??\nconst add_tokenPrimary = (buffer, value) => {\n if (value.length === 0) return false;\n buffer.tokens.push(value);\n return true;\n}\n\n// Cleanup??\nconst escape_character = (buffer) => {\n if (buffer.currentChar !== '\\\\') return false;\n buffer.inputText = deleteCharAt(buffer.inputText, buffer.position);\n return true;\n}\n\n// Cleanup??\nconst determine_string = (buffer) => {\n if ('\\'\"'.indexOf(buffer.currentChar) === -1) return false;\n if (!buffer.stringOpen) {\n add_tokenSecondary(buffer);\n buffer.stringStart = buffer.position;\n buffer.stringOpen = buffer.currentChar;\n return true;\n }\n\n if (buffer.stringOpen === buffer.currentChar) {\n add_tokenSecondary(buffer);\n const stringToken = buffer.inputText.substring(buffer.stringStart, buffer.position + 1);\n add_tokenPrimary(buffer, stringToken);\n buffer.stringOpen = false;\n return true;\n }\n return false;\n}\n\n// Cleanup??\nconst determine_value = (buffer) => {\n if (':,{}[]'.indexOf(buffer.currentChar) === -1 || buffer.stringOpen) return false;\n add_tokenSecondary(buffer);\n add_tokenPrimary(buffer, buffer.currentChar);\n\t\n switch (buffer.currentChar) {\n case ':':\n buffer.isValue = true;\n return true;\n break;\n case '{':\n case '[':\n buffer.brackets.push(buffer.currentChar);\n break;\n case '}':\n case ']':\n buffer.brackets.pop();\n break;\n }\n\t\n if (buffer.currentChar !== ':') {\n buffer.isValue = (buffer.brackets[buffer.brackets.length - 1] === '[');\n }\n return true;\n}\n\n// Main Function\nexport const JSON_Placeholder = (obj, colors) => {\n let buffer = {\n inputText: JSON.stringify(obj),\n position: 0,\n currentChar: '',\n tokenPrimary: '',\n tokenSecondary: '',\n brackets: [],\n isValue: false,\n stringOpen: false,\n stringStart: 0,\n tokens: []\n };\n\n buffer.inputText.split('').forEach((char, i) => {\n buffer.position = i;\n buffer.currentChar = char;\n if (!determine_value(buffer) && !determine_string(buffer) && !escape_character(buffer)) {\n if (!buffer.stringOpen) {\n buffer.tokenSecondary += buffer.currentChar;\n }\n }\n })\n\t\n let buffer2 = {\n brackets: [],\n isValue: false,\n tokens: []\n };\n\n buffer2.tokens = 
buffer.tokens.map(token => {\n    switch(token){\n      case ',':\n        buffer2.isValue = (buffer2.brackets[buffer2.brackets.length - 1] === '[');\n        return {\n          type: 'symbol',\n          string: token,\n          value: token,\n          depth: buffer2.brackets.length\n        }\n      case ':':\n        buffer2.isValue = true;\n        return {\n          type: 'symbol',\n          string: token,\n          value: token,\n          depth: buffer2.brackets.length\n        }\n      case '{':\n      case '[' :\n        buffer2.brackets.push(token);\n        buffer2.isValue = (buffer2.brackets[buffer2.brackets.length - 1] === '[');\n        return {\n          type: 'symbol',\n          string: token,\n          value: token,\n          depth: buffer2.brackets.length\n        }\n      case '}':\n      case ']':\n        buffer2.brackets.pop();\n        buffer2.isValue = (buffer2.brackets[buffer2.brackets.length - 1] === '[');\n        return {\n          type: 'symbol',\n          string: token,\n          value: token,\n          depth: buffer2.brackets.length\n        }\n      case 'undefined':\n        return {\n          type: 'primitive',\n          string: token,\n          value: undefined,\n          depth: buffer2.brackets.length\n        }\n      case 'null':\n        return {\n          type: 'primitive',\n          string: token,\n          value: null,\n          depth: buffer2.brackets.length\n        }\n      case 'false':\n        return {\n          type: 'primitive',\n          string: token,\n          value: false,\n          depth: buffer2.brackets.length\n        }\n      case 'true':\n        return {\n          type: 'primitive',\n          string: token,\n          value: true,\n          depth: buffer2.brackets.length\n        }\n      default:\n        const C = token.charAt(0);\n        let rtn = {\n          type: '',\n          string: '',\n          value: '',\n          depth: buffer2.brackets.length\n        }\n        if ('\\'\"'.indexOf(C) > -1) {\n          rtn.type = buffer2.isValue ? 'string' : 'key';\n          if (rtn.type === 'key') rtn.string = stripQuotesFromKey(token);\n          if (rtn.type === 'string') {\n            rtn.string = '';\n            const charList2 = token.slice(1, -1).split('');\n            for (var ii = 0; ii < charList2.length; ii++) {\n              let char = charList2[ii];\n              if ('\\'\\\"'.indexOf(char) > -1) char = '\\\\' + char;\n              rtn.string += char;\n            }\n            rtn.string = \"'\" + rtn.string + \"'\";\n          }\n          rtn.value = rtn.string;\n          return rtn\n        }\n        if (!isNaN(token)) {\n          rtn.type = 'number';\n          rtn.string = token;\n          rtn.value = Number(token);\n          return rtn\n        }\n        if (token.length > 0 && !buffer2.isValue) {\n          rtn.type = 'key';\n          rtn.string = token;\n          if (rtn.string.indexOf(' ') > -1) rtn.string = \"'\" + rtn.string + \"'\";\n          rtn.value = rtn.string;\n          return rtn\n        }\n    }\n    return {\n      type: '',\n      string: '',\n      value: '',\n      depth: buffer2.brackets.length\n    }\n  });\n\n  let clean = buffer2.tokens.map(t => t.string).join('');\n\n  let _line = 1,\n    _depth = 0,\n    indentation = '',\n    markup = '';\n  const lastIndex = buffer2.tokens.length - 1;\n  const indent = (byPass=false) => ((_depth > 0 || byPass) ? '\\n' : '') + Array(_depth * 2).fill(' ').join('');\n\n  const indentII = (byPass=false) => {\n    if (_depth > 0) _line++;\n    return ((_depth > 0 || byPass) ? '<br>' : '') + Array(_depth * 2).fill('&nbsp;').join('');\n  };\n\t\n  // FORMAT BY TOKEN!!\n  buffer2.tokens.forEach((token, i) => {\n    switch(token.type) {\n      case 'string':\n      case 'number':\n        indentation += token.string;\n        markup += newSpan(i, token, _depth, colors);\n        break;\n      case 'key':\n        indentation += indent() + token.string;\n        markup += indentII() + newSpan(i, token, _depth, colors);\n        break;\n      case 'symbol':\n        const islastToken = i === lastIndex\n        switch(token.string) {\n          case '{':\n            indentation += token.string\n            markup += newSpan(i, token, _depth, colors)\n            _depth++;\n            if (followedBySymbol(buffer2.tokens, i, ['}'])) {\n              indentation += indent();\n              markup += indentII();\n            }\n            break;\n          case '}':\n            _depth = _depth >= 1 ? _depth - 1 : _depth;\n            const _adjustment = i > 0 ? 
followsSymbol(buffer2.tokens, i, ['[', '{']) ? '' : indent(islastToken) : '',\n _adjustmentII = i > 0 ? followsSymbol(buffer2.tokens, i, ['[', '{']) ? '' : indentII(islastToken) : '';\n indentation += (_adjustment + token.string);\n markup += (_adjustmentII + newSpan(i, token, _depth, colors));\n break;\n case '[':\n if (followsSymbol(buffer2.tokens, i, ['['])) {\n _depth++;\n indentation += indent();\n markup += indentII();\n }\n indentation += token.string;\n markup += newSpan(i, token, _depth, colors);\n break;\n case ']':\n let tmp_token = { ...token },\n ind_bool = false;\n if (followsSymbol(buffer2.tokens, i, [']'])) {\n if (followedBySymbol(buffer2.tokens, i, [']'])) {\n if (followedBySymbol(buffer2.tokens, i+1, [','])) {\n _depth = _depth >= 1 ? _depth - 1 : _depth;\n ind_bool = true;\n i++;\n } else if (followedBySymbol(buffer2.tokens, i+1, [']'])) {\n _depth = _depth >= 1 ? _depth - 1 : _depth;\n ind_bool = true;\n }\n } else if (followedBySymbol(buffer2.tokens, i, ['}'])) {\n _depth = _depth >= 1 ? _depth - 1 : _depth;\n ind_bool = true;\n }\n }\n indentation += ((ind_bool ? indent() : '') + tmp_token.string);\n markup += ((ind_bool ? indentII() : '') + newSpan(i, tmp_token, _depth, colors));\n break;\n case ':':\n indentation += token.string + ' ';\n markup += newSpan(i, token, _depth, colors) + '&nbsp;';\n break;\n case ',':\n indentation += token.string;\n markup += newSpan(i, token, _depth, colors);\n if (followsSymbol(buffer2.tokens, i, [']']) && followedBySymbol(buffer2.tokens, i, ['['])) {\n indentation += indent();\n markup += indentII();\n }\n break;\n default:\n indentation += token.string;\n markup += newSpan(i, token, _depth, colors);\n break;\n }\n break;\n }\n })\n _line += 1;\n\n return {\n tokens: buffer2.tokens,\n noSpaces: clean,\n indented: indentation,\n json: JSON.stringify(obj),\n jsObject: obj,\n markup: markup,\n lines: _line\n };\n}" }, { "alpha_fraction": 0.7218044996261597, "alphanum_fraction": 0.7218044996261597, "avg_line_length": 18, "blob_id": "d4372c66d5eda19146d6c5a4b039087ee909d2fe", "content_id": "de93f75d5295b3d33014a13e3f477d6102916f94", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 266, "license_type": "permissive", "max_line_length": 55, "num_lines": 14, "path": "/orchestrator/core/orc_server/device/urls.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.urls import include, path\n\nfrom rest_framework import routers\n\nfrom . 
import views\n\nrouter = routers.DefaultRouter()\nrouter.register('', views.DeviceViewSet)\n\n\nurlpatterns = [\n # Device Router\n path('', include(router.urls), name='device.root'),\n]\n" }, { "alpha_fraction": 0.6797243356704712, "alphanum_fraction": 0.6837142109870911, "avg_line_length": 45.72881317138672, "blob_id": "b13a3e43ac991fbb999c488d08dff1d2a9a8b583", "content_id": "e1be6896927fb93b9a3d5f8b5d3a3bc95f9a97b4", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2757, "license_type": "permissive", "max_line_length": 147, "num_lines": 59, "path": "/orchestrator/gui/client/src/components/static/home.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\nimport { Helmet } from 'react-helmet-async'\n\nimport OpenC2_Logo from '../dependencies/img/openc2-logo.png'\n\nconst str_fmt = require('string-format')\n\nclass Home extends Component {\n constructor(props, context) {\n super(props, context)\n\n this.meta = {\n title: str_fmt('{base} | {page}', {base: this.props.siteTitle, page: 'Home'}),\n canonical: str_fmt('{origin}{path}', {origin: window.location.origin, path: window.location.pathname})\n }\n }\n\n render() {\n return (\n <div className=\"row mx-auto\">\n <Helmet>\n <title>{ this.meta.title }</title>\n <link rel=\"canonical\" href={ this.meta.canonical } />\n </Helmet>\n <div className=\"col-12\">\n <img src={ OpenC2_Logo } alt=\"OpenC2 Logo\" className=\"float-left col-md-4 col-xs-12 mr-3 mb-3\" />\n\n <p>Spicy jalapeno bacon ipsum dolor amet dolore aliquip sirloin swine quis veniam magna in ipsum voluptate reprehenderit elit velit sunt.\n Landjaeger laboris buffalo excepteur bacon commodo fugiat.\n Pastrami landjaeger rump, id dolore corned beef flank ad beef officia velit meatball ex.\n Qui ipsum cupim, dolore adipisicing salami est in ham hock consectetur hamburger enim pork belly.\n Incididunt quis shankle magna, minim occaecat ham officia consectetur landjaeger burgdoggen leberkas pastrami.</p>\n\n <p>Hamburger pork chop nostrud ea minim dolore, venison flank exercitation sausage pork sirloin.\n Kielbasa frankfurter consequat cupidatat shoulder short loin non eu.\n Doner pig in hamburger, consequat eu veniam prosciutto.\n Pork sint tail biltong tenderloin do nulla in swine tempor strip steak adipisicing incididunt.\n Pastrami in anim ham officia ut excepteur dolor cupim ground round veniam biltong meatball.\n Enim frankfurter swine meatloaf spare ribs capicola.\n Do fatback chicken rump, est id pork chop leberkas shankle shank eu esse.</p>\n\n <p>Reprehenderit landjaeger kevin ut pork loin.\n Leberkas sirloin deserunt voluptate, veniam andouille quis tenderloin non ground round nisi.\n Cupidatat culpa sausage nisi filet mignon, tempor aliquip sed bresaola qui chicken do in veniam.\n Dolor in tri-tip buffalo ham fugiat, mollit pariatur.\n Nisi ut leberkas labore, shoulder shank cow turducken nostrud non et velit ad veniam.\n Fatback cupidatat pancetta est laborum chuck quis flank ipsum ribeye.</p>\n </div>\n </div>\n )\n }\n}\n\nconst mapStateToProps = (state) => ({\n siteTitle: state.Util.site_title\n})\n\nexport default connect(mapStateToProps)(Home)\n" }, { "alpha_fraction": 0.6542255282402039, "alphanum_fraction": 0.6632897853851318, "avg_line_length": 29.25, "blob_id": 
"a848caf1cf3c55cb5533b0948e4a2ab447d032e9", "content_id": "957eaeec438f2f1313c859e51e095f9d28e2a3ac", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3751, "license_type": "permissive", "max_line_length": 121, "num_lines": 124, "path": "/orchestrator/core/orc_server/orchestrator/preferences_registry.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import ipaddress\nimport os\nimport re\nimport uuid\n\nfrom django.forms import ValidationError\nfrom dynamic_preferences.admin import GlobalPreferenceAdmin, PerInstancePreferenceAdmin\nfrom dynamic_preferences.types import StringPreference\nfrom dynamic_preferences.preferences import Section\nfrom dynamic_preferences.registries import global_preferences_registry as global_registry\n\nGlobalPreferenceAdmin.has_add_permission = lambda *args, **kwargs: False\nGlobalPreferenceAdmin.has_delete_permission = lambda *args, **kwargs: False\nPerInstancePreferenceAdmin.has_add_permission = lambda *args, **kwargs: False\nPerInstancePreferenceAdmin.has_delete_permission = lambda *args, **kwargs: False\n\norchestrator = Section(\"orchestrator\")\n\n\n# Validation Functions\ndef is_valid_hostname(hostname):\n \"\"\"\n Validate a hostname\n :param hostname: hostname to validate\n :return: bool - valid hostname\n \"\"\"\n if len(hostname) > 255:\n return False\n if hostname[-1] == \".\":\n hostname = hostname[:-1] # strip exactly one dot from the right, if present\n allowed = re.compile(r\"(?!-)[A-Z\\d-]{1,63}(?<!-)$\", re.IGNORECASE)\n return all(allowed.match(x) for x in hostname.split(\".\"))\n\n\ndef is_valid_ipv4_address(address):\n \"\"\"\n Validate an IPv4 Address\n :param address: IP Address to validate\n :return: bool - valid address\n \"\"\"\n try:\n ipaddress.IPv4Address(address)\n except ValueError: # not a valid address\n return False\n return True\n\n\ndef is_valid_ipv6_address(address):\n \"\"\"\n Validate an IPv6 Address\n :param address: IP Address to validate\n :return: bool - valid address\n \"\"\"\n try:\n ipaddress.IPv6Address(address)\n except ValueError: # not a valid address\n return False\n return True\n\n\n# Orchestrator section\n@global_registry.register\nclass OrchestratorName(StringPreference):\n \"\"\"\n Dynamic Preference for Orchestrator Name\n \"\"\"\n section = orchestrator\n name = \"name\"\n help_text = \"The name of the orchestrator\"\n default = \"Jazzy Ocelot\"\n\n\n@global_registry.register\nclass OrchestratorID(StringPreference):\n \"\"\"\n Dynamic Preference for Orchestrator ID\n \"\"\"\n section = orchestrator\n name = \"id\"\n help_text = \"The uuid of the orchestrator\"\n default = \"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initialize the ID of the orchestrator\n :param args: positional args\n :param kwargs: key/value args\n \"\"\"\n super(StringPreference, self).__init__(*args, **kwargs)\n if self.default in (\"\", \" \", None):\n self.default = str(uuid.uuid4())\n\n def validate(self, value):\n \"\"\"\n Validate the ID when updated\n :param value: new value to validate\n :return: None/exception\n \"\"\"\n try:\n uuid.UUID(value, version=4)\n except Exception as e:\n raise ValidationError(str(e))\n\n\n@global_registry.register\nclass OrchestratorHost(StringPreference):\n \"\"\"\n Dynamic Preference for Orchestrator Hostname/IP\n \"\"\"\n section = orchestrator\n name = 
\"host\"\n help_text = \"The hostname/ip of the orchestrator\"\n _default = os.environ.get(\"ORC_IP\", \"127.0.0.1\")\n _val_host_addr = any([is_valid_hostname(_default), is_valid_ipv4_address(_default), is_valid_ipv6_address(_default)])\n default = _default if _val_host_addr else \"127.0.0.1\"\n\n def validate(self, value):\n \"\"\"\n Validate the Hostname/IP when updated\n :param value: new value to validate\n :return: None/exception\n \"\"\"\n if not any([is_valid_hostname(value), is_valid_ipv4_address(value), is_valid_ipv6_address(value)]):\n raise ValidationError(\"The host is not a valid Hostname/IPv4/IPv6\")\n" }, { "alpha_fraction": 0.579160749912262, "alphanum_fraction": 0.5834177732467651, "avg_line_length": 28.896968841552734, "blob_id": "ee81c851a36d6a8cf8f23b8d5b42e3b9cbb4eae0", "content_id": "33d41b2ab6519694c7e678d3aad6fb109c627d39", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4933, "license_type": "permissive", "max_line_length": 134, "num_lines": 165, "path": "/configure.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport atexit\nimport os\nimport re\nimport shutil\nimport sys\n\nfrom datetime import datetime\nfrom optparse import OptionParser\n\nfrom base.modules.script_utils import (\n # Functions\n build_image,\n check_docker,\n check_docker_compose,\n checkRequiredArguments,\n human_size,\n install_pkg,\n # Classes\n ConsoleStyle,\n FrozenDict\n)\n\nif sys.version_info < (3, 6):\n print(\"PythonVersionError: Minimum version of v3.6+ not found\")\n exit(1)\n\n# Option Parsing\nparser = OptionParser()\nparser.add_option(\"-f\", \"--log-file\", dest=\"log_file\", help=\"Write logs to LOG_FILE\")\nparser.add_option(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", default=False, help=\"Verbose output of container/GUI build\")\n\n(options, args) = parser.parse_args()\ncheckRequiredArguments(options, parser)\n\nlog_file = None\ninit_now = datetime.now()\n\nif options.log_file:\n name, ext = os.path.splitext(options.log_file)\n ext = \".log\" if ext is \"\" else ext\n fn = f\"{name}-{init_now:%Y.%m.%d_%H.%M.%S}{ext}\"\n # log_file = open(fn, \"w+\")\n log_file = open(options.log_file, \"w+\")\n log_file.write(f\"Configure run at {init_now:%Y.%m.%d_%H:%M:%S}\\n\\n\")\n log_file.flush()\n atexit.register(log_file.close)\n\n\n# Script Vars\nItemCount = 1\n\nRootDir = os.path.dirname(os.path.realpath(__file__))\n\nCONFIG = FrozenDict(\n WorkDir=RootDir,\n Requirements=(\n (\"docker\", \"docker\"),\n (\"colorama\", \"colorama\"),\n (\"yaml\", \"pyyaml\")\n ),\n ImagePrefix=\"g2inc\",\n Logging=FrozenDict(\n Default=(\n (\"orchestrator\", \"-p orchestrator -f orchestrator-compose.yaml -f orchestrator-compose.log.yaml\"),\n ),\n Central=(\n (\"orchestrator\", \"-p orchestrator -f orchestrator-compose.yaml\"),\n )\n ),\n Composes=tuple(file for file in os.listdir(RootDir) if re.match(r\"^\\w*?-compose(\\.\\w*?)?\\.yaml$\", file))\n)\n\n\n# Utility Functions\ndef get_count():\n global ItemCount\n c = ItemCount\n ItemCount += 1\n return c\n\n\nif __name__ == \"__main__\":\n os.chdir(CONFIG.WorkDir)\n print(\"Installing Requirements\")\n\n for PKG in CONFIG.Requirements:\n install_pkg(PKG)\n\n Stylize = ConsoleStyle(options.verbose, log_file)\n import docker\n\n Stylize.h1(f\"[Step {get_count()}]: Check Docker 
Environment\")\n check_docker(Stylize)\n check_docker_compose(Stylize)\n system = docker.from_env()\n\n try:\n system.info()\n except Exception as e:\n Stylize.error(\"Docker connection failed, check that docker is running\")\n exit(1)\n\n # -------------------- Build Base Images -------------------- #\n Stylize.h1(f\"[Step {get_count()}]: Build Base Images ...\")\n\n Stylize.info(\"Building base alpine python3 image\")\n build_image(\n docker_sys=system,\n console=Stylize,\n path=\"./base\",\n dockerfile=\"./Dockerfile_alpine-python3\",\n tag=f\"{CONFIG.ImagePrefix}/oif-python\",\n rm=True\n )\n\n # -------------------- Build Compose Images -------------------- #\n Stylize.h1(f\"[Step {get_count()}]: Creating compose images ...\")\n from yaml import load\n\n try:\n from yaml import CLoader as Loader\n except ImportError:\n from yaml import Loader\n\n compose_images = []\n\n Stylize.h1(f\"Build images ...\")\n for compose in CONFIG.Composes:\n with open(f\"./{compose}\", \"r\") as orc_compose:\n for service, opts in load(orc_compose.read(), Loader=Loader)[\"services\"].items():\n if \"build\" in opts and opts[\"image\"] not in compose_images:\n compose_images.append(opts[\"image\"])\n Stylize.info(f\"Building {opts['image']} image\")\n build_image(\n docker_sys=system,\n console=Stylize,\n rm=True,\n path=opts[\"build\"][\"context\"],\n dockerfile=opts[\"build\"].get(\"dockerfile\", \"Dockerfile\"),\n tag=opts[\"image\"]\n )\n\n # -------------------- Cleanup Images -------------------- #\n Stylize.h1(f\"[Step {get_count()}]: Cleanup unused images ...\")\n try:\n rm_images = system.images.prune()\n Stylize.info(f\"Space reclaimed {human_size(rm_images.get('SpaceReclaimed', 0))}\")\n if rm_images[\"ImagesDeleted\"]:\n for image in rm_images[\"ImagesDeleted\"]:\n Stylize.verbose(\"info\", f\"Image deleted: {image.get('Deleted', 'IMAGE')}\")\n\n except docker.errors.APIError as e:\n Stylize.error(f\"Docker API error: {e}\")\n exit(1)\n except KeyboardInterrupt:\n Stylize.error(\"Keyboard Interrupt\")\n exit(1)\n\n Stylize.success(\"\\nConfiguration Complete\")\n for key, opts in CONFIG.Logging.items():\n Stylize.info(f\"{key} logging\")\n for opt in opts:\n Stylize.info(f\"-- Run 'docker-compose {opt[1]} up' to start the {opt[0]} compose\")\n" }, { "alpha_fraction": 0.5581395626068115, "alphanum_fraction": 0.5629099607467651, "avg_line_length": 33.224491119384766, "blob_id": "c4a9e2edc2e89639910c1f058f3836afa26e3dc7", "content_id": "f2dfcf56051ed7a64c07fc8e784332891ad9b22f", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5031, "license_type": "permissive", "max_line_length": 120, "num_lines": 147, "path": "/orchestrator/gui/client/src/components/account/lib/change_password.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\nimport { Helmet } from 'react-helmet-async'\nimport qs from 'query-string'\nimport { toast } from 'react-toastify'\n\nimport {\n Button,\n Modal,\n ModalBody,\n ModalFooter,\n ModalHeader\n} from 'reactstrap'\n\nimport { confirmAlert } from 'react-confirm-alert'\nimport 'react-confirm-alert/src/react-confirm-alert.css'\n\nimport * as AccountActions from '../../../actions/account'\nimport { withGUIAuth } from '../../../actions/util'\n\nconst str_fmt = 
require('string-format')\n\nclass ChangePassword extends Component {\n constructor(props, context) {\n super(props, context)\n\n this.submitForm = this.submitForm.bind(this)\n\n this.meta = {\n title: str_fmt('{base} | {page}', {base: this.props.siteTitle, page: 'Account - Change Password'}),\n canonical: str_fmt('{origin}{path}', {origin: window.location.origin, path: window.location.pathname})\n }\n\n this.state = {\n password: {\n old: '',\n new_1: '',\n new_2: ''\n },\n errors: {},\n status: ''\n }\n }\n\n submitForm(e) {\n e.preventDefault()\n Promise.resolve(this.props.changePassword(this.props.username, ...Object.values(this.state.password))).then(rsp => {\n this.setState({\n errors: this.props.errors[AccountActions.CHANGE_ACCOUNT_PASSWORD_FAILURE],\n status: this.props.status[AccountActions.CHANGE_ACCOUNT_PASSWORD_SUCCESS]\n })\n })\n }\n\n render() {\n return (\n <div className='jumbotron col-md-6 mx-auto'>\n <Helmet>\n <title>{ this.meta.title }</title>\n <link rel=\"canonical\" href={ this.meta.canonical } />\n </Helmet>\n <h1 className='text-center'>Password Change</h1>\n\n <form className='col-md-10 mx-auto' onSubmit={ this.submitForm }>\n {\n this.state.status ? (\n <p className=\"form-text text-info\">{ this.state.status }</p>\n ) : ''\n }\n <div className='form-group'>\n <label htmlFor='old_password'>Old Password</label>\n <input\n type='password'\n className='form-control'\n id='old_password'\n required=''\n placeholder='password'\n value={ atob(this.state.password.old) }\n onChange={ e => this.setState({ password: { ...this.state.password, old: btoa(e.target.value) } }) }\n />\n {\n this.state.errors.old_password ? (\n <small className=\"form-text text-danger\">{ this.state.errors.old_password }</small>\n ) : ''\n }\n </div>\n <div className='form-group'>\n <label htmlFor='new_password_1'>New Password</label>\n <input\n type='password'\n className='form-control'\n id='new_password_1'\n required=''\n placeholder='password'\n value={ atob(this.state.password.new_1) }\n onChange={ e => this.setState({ password: { ...this.state.password, new_1: btoa(e.target.value) } }) }/>\n {\n this.state.errors.new_password_1 ? (\n <small className=\"form-text text-danger\">{ this.state.errors.new_password_1 }</small>\n ) : ''\n }\n </div>\n <div className='form-group'>\n <label htmlFor='new_password_2'>New Password Confirmation</label>\n <input\n type='password'\n className='form-control'\n id='new_password_2'\n required=''\n placeholder='password'\n value={ atob(this.state.password.new_2) }\n onChange={ e => this.setState({ password: { ...this.state.password, new_2: btoa(e.target.value) } }) }/>\n {\n this.state.errors.new_password_2 ? (\n <small className=\"form-text text-danger\">{ this.state.errors.new_password_2 }</small>\n ) : ''\n }\n </div>\n\n <small className='form-text text-muted'>\n <ul>\n <li>Your password can't be too similar to your other personal information.</li>\n <li>Your password must contain at least 8 characters.</li>\n <li>Your password can't be a commonly used password.</li>\n <li>Your password can't be entirely numeric.</li>\n </ul>\n </small>\n\n <Button type='submit' color='primary' className=\"float-right\">Save changes</Button>\n </form>\n </div>\n )\n }\n}\n\nconst mapStateToProps = (state) => ({\n username: state.Auth.access == undefined ? 
'User' : state.Auth.access.username,\n errors: state.Account.errors,\n status: state.Account.status,\n siteTitle: state.Util.site_title\n})\n\nconst mapDispatchToProps = (dispatch) => ({\n changePassword: (usrn, op, np1, np2) => dispatch(AccountActions.changeAccountPassword(usrn, op, np1, np2))\n})\n\nexport default connect(mapStateToProps, mapDispatchToProps)(ChangePassword)\n" }, { "alpha_fraction": 0.7483221292495728, "alphanum_fraction": 0.7483221292495728, "avg_line_length": 20.285715103149414, "blob_id": "21cbafafbfc184ea1cfcb35ab58bf1a88ac5f15d", "content_id": "2a2863fb958bf5cf8498249e5c9eaa77e22b1ce8", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 298, "license_type": "permissive", "max_line_length": 51, "num_lines": 14, "path": "/orchestrator/core/orc_server/tracking/urls/api.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.urls import include, path\nfrom rest_framework import routers\n\nfrom .. import views\n\n\nrouter = routers.DefaultRouter()\nrouter.register('event', views.EventLogViewSet)\nrouter.register('request', views.RequestLogViewSet)\n\nurlpatterns = [\n # Routers\n path('', include(router.urls))\n]\n" }, { "alpha_fraction": 0.6263227462768555, "alphanum_fraction": 0.6309523582458496, "avg_line_length": 25.526315689086914, "blob_id": "313151eb8ff089e2bd8af2f16ac2b51ef3a982a9", "content_id": "1091a4f24f9436ddc96e85939854ee1cb9df292d", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1512, "license_type": "permissive", "max_line_length": 101, "num_lines": 57, "path": "/orchestrator/core/orc_server/utils/general.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "\"\"\"\nGeneral Utilities\n\"\"\"\nimport random\nimport string\nimport sys\nimport uuid\n\nfrom typing import Any\n\nvalid_hex = set(string.hexdigits)\nvalid_hex.add(\" \")\n\n\ndef prefixUUID(pre: str = \"PREFIX\", max_length: int = 30) -> str:\n \"\"\"\n Create a unique name with a prefix and a UUID string\n :param pre: prefix to use\n :param max_length: max length of the unique name\n :return: unique name with the given prefix\n \"\"\"\n if len(pre) > max_length:\n raise ValueError(f\"max_length is greater than the length of the prefix: {len(pre)}\")\n\n uid_max = max_length - len(pre)\n uid = str(uuid.uuid4()).replace(\"-\", \"\")[:uid_max]\n if pre in [\"\", \" \", None]:\n return f\"{uid}\"[:max_length]\n return f\"{pre}-{uid}\"[:max_length]\n\n\ndef to_str(s: Any) -> str:\n \"\"\"\n Convert a given type to a default string\n :param s: item to convert to a string\n :return: converted string\n \"\"\"\n return s.decode(sys.getdefaultencoding(), \"backslashreplace\") if hasattr(s, \"decode\") else str(s)\n\n\ndef randBytes(b: int = 2) -> bytes:\n \"\"\"\n Get a random number of bytes\n :param b: number of bytes generate\n :return: random number of bytes requested\n \"\"\"\n return bytes([random.getrandbits(8) for _ in range(b)])\n\n\ndef isHex(val: str) -> bool:\n \"\"\"\n Determine if the given value is a valid hexadecimal string\n :param val: string to validate\n :return: bool - valid/invalid hexadecimal\n \"\"\"\n val = 
''.join(val.split(\"0x\"))\n return len(set(val) - valid_hex) == 0\n" }, { "alpha_fraction": 0.7163636088371277, "alphanum_fraction": 0.7236363887786865, "avg_line_length": 17.33333396911621, "blob_id": "53153f5ca6d60f2da188f96638c9364b34e6cfe4", "content_id": "cc457d5a82b7912ec76b7fbc5f6a443cb517afb9", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 275, "license_type": "permissive", "max_line_length": 51, "num_lines": 15, "path": "/orchestrator/core/orc_server/es_mirror/settings.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import logging\n\nfrom django.conf import settings\n\nSETTINGS = dict(\n host=None,\n prefix='',\n timeout=60\n)\n\nSETTINGS.update(getattr(settings, 'ES_MIRROR', {}))\n\n# elastic logger config\nes_logger = logging.getLogger('elasticsearch')\nes_logger.setLevel(logging.WARNING)\n" }, { "alpha_fraction": 0.5986095666885376, "alphanum_fraction": 0.603732168674469, "avg_line_length": 32.74074172973633, "blob_id": "e1b72ab3fb69c0b5a98e7763a40bce166e627ecc", "content_id": "ed5c04d42031b97c6288db5e53fe6737b7726193", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5466, "license_type": "permissive", "max_line_length": 167, "num_lines": 162, "path": "/update_subs.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport atexit\nimport os\nimport re\nimport sys\n\nfrom datetime import datetime\nfrom optparse import OptionParser\nfrom pathlib import Path\n\nfrom base.modules.script_utils import (\n # Functions\n checkRequiredArguments,\n install_pkg,\n recursive_find,\n update_repo,\n # Classes\n ConsoleStyle,\n FrozenDict\n)\n\n\nif sys.version_info < (3, 6):\n print('PythonVersionError: Minimum version of v3.6+ not found')\n exit(1)\n\n\n# Option Parsing\nparser = OptionParser()\nparser.add_option(\"-u\", \"--url_base\", dest=\"url_base\", default=\"\", help=\"[REQUIRED] Base URL for git repo\")\nparser.add_option(\"-r\", \"--repo_branch\", dest=\"repo_branch\", default=\"master\", help=\"Branch to clone from the repo\")\nparser.add_option(\"-l\", \"--log_file\", dest=\"log_file\", help=\"Write logs to LOG_FILE\")\nparser.add_option(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", default=False, help=\"Verbose output of container build\")\n\n(options, args) = parser.parse_args()\ncheckRequiredArguments(options, parser)\n\nlog_file = None\ninit_now = datetime.now()\n\nif options.log_file:\n name, ext = os.path.splitext(options.log_file)\n ext = '.log' if ext is '' else ext\n fn = f'{name}-{init_now:%Y.%m.%d_%H.%M.%S}{ext}'\n log_file = open(options.log_file, 'w+')\n log_file.write(f'Configure run at {init_now:%Y.%m.%d_%H:%M:%S}\\n\\n')\n log_file.flush()\n atexit.register(log_file.close)\n\n\n# Script Config\nItemCount = 1\n\nif options.url_base.startswith(\"http\"):\n Base_URL = options.url_base if options.url_base.endswith(\"/\") else options.url_base + '/'\nelse:\n Base_URL = options.url_base if options.url_base.endswith(\":\") else options.url_base + ':'\n\nCONFIG = FrozenDict(\n RootDir=os.path.dirname(os.path.realpath(__file__)),\n Requirements=(\n ('git', 'gitpython'),\n 
('colorama', 'colorama')\n ),\n BaseRepo=f\"{Base_URL}ScreamingBunny\",\n ImageReplace=(\n (\"base\", r\"gitlab.*?docker:alpine( as.*)?\", r\"alpine\\g<1>\\nRUN apk upgrade --update && apk add --no-cache dos2unix && rm /var/cache/apk/*\"),\n (\"python3\", r\"gitlab.*plus:alpine-python3( as.*)?\", fr\"g2inc/oif-python\\g<1>\\nRUN apk upgrade --update && apk add --no-cache dos2unix && rm /var/cache/apk/*\"),\n ),\n Repos=FrozenDict(\n Orchestrator=('Core', 'GUI'),\n Transport=('HTTPS', 'MQTT', 'CoAP'),\n )\n)\n\n\n# Utility Classes (Need Config)\nclass Stage:\n def __init__(self, name='Stage', root=CONFIG.RootDir):\n self.name = name\n self.root = root if root.startswith(CONFIG.RootDir) else os.path.join(CONFIG.RootDir, root)\n\n def __enter__(self):\n Stylize.h1(f\"[Step {get_count()}]: Update {self.name}\")\n return self._mkdir_chdir()\n\n def __exit__(self, type, value, traceback):\n global ItemCount\n ItemCount += 1\n os.chdir(CONFIG.RootDir)\n Stylize.success(f'Updated {self.name}')\n\n def _mkdir_chdir(self):\n Path(self.root).mkdir(parents=True, exist_ok=True)\n os.chdir(self.root)\n return self.root\n\n\n# Utility Functions\ndef get_count():\n global ItemCount\n c = ItemCount\n ItemCount += 1\n return c\n\n\nif __name__ == '__main__':\n os.chdir(CONFIG.RootDir)\n\n print('Installing Requirements')\n for PKG in CONFIG.Requirements:\n install_pkg(PKG)\n\n Stylize = ConsoleStyle(options.verbose, log_file)\n import git\n\n Stylize.default('')\n\n if sys.platform in [\"win32\", \"win64\"]: # Windows 32/64-bit\n git.Git.USE_SHELL = True\n\n Stylize.underline('Starting Update')\n\n # -------------------- Modules -------------------- #\n with Stage('Modules', 'base/modules'):\n Stylize.h2(\"Updating Utilities\")\n update_repo(f\"{CONFIG.BaseRepo}/Utils.git\", 'utils', options.repo_branch)\n\n # -------------------- Orchestrator -------------------- #\n with Stage('Orchestrator', 'orchestrator'):\n for repo in CONFIG.Repos.Orchestrator:\n Stylize.h2(f\"Updating {repo}\")\n update_repo(f\"{CONFIG.BaseRepo}/Orchestrator/{repo}.git\", repo.lower(), options.repo_branch)\n\n # -------------------- Orchestrator Transport -------------------- #\n with Stage(f'Orchestrator Transport', os.path.join('orchestrator', 'transport')):\n for transport in CONFIG.Repos.Transport:\n Stylize.h2(f\"Updating Orchestrator {transport}\")\n update_repo(f\"{CONFIG.BaseRepo}/Orchestrator/Transport/{transport}.git\", transport.lower(), options.repo_branch)\n\n # -------------------- Logger -------------------- #\n with Stage('Logger'):\n Stylize.h2(\"Updating Logger\")\n update_repo(f\"{CONFIG.BaseRepo}/Logger.git\", 'logger', options.repo_branch)\n\n # -------------------- Dockerfile -------------------- #\n with Stage('Dockerfiles'):\n for dockerfile in recursive_find(patterns=['Dockerfile']):\n with open(dockerfile, 'r') as f:\n tmpFile = f.read()\n\n for (name, orig_img, repl_img) in CONFIG.ImageReplace:\n if re.search(orig_img, tmpFile):\n Stylize.info(f'Updating {dockerfile}')\n Stylize.bold(f'- Found {name} image, updating for public repo\\n')\n tmpFile = re.sub(orig_img, repl_img, tmpFile)\n with open(dockerfile, 'w') as f:\n f.write(tmpFile)\n break\n\n Stylize.info(\"Run `configure.py` from the public folder to create the base containers necessary to run the OIF Orchestrator\")\n" }, { "alpha_fraction": 0.6220789551734924, "alphanum_fraction": 0.6486704349517822, "avg_line_length": 40.36666488647461, "blob_id": "74366df411a6d7452e3d6e4565e1c2dd4fee1851", "content_id": 
"fe801579624135227275d146165c5694f16620e8", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1241, "license_type": "permissive", "max_line_length": 169, "num_lines": 30, "path": "/orchestrator/core/orc_server/conformance/migrations/0001_initial.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.10 on 2020-03-09 15:14\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\nimport jsonfield.fields\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('actuator', '0002_auto_20190417_1319'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ConformanceTest',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('test_id', models.UUIDField(default=uuid.uuid4, help_text='Unique UUID of the test', unique=True)),\n ('test_time', models.DateTimeField(default=django.utils.timezone.now, help_text='Time the test was run')),\n ('tests_run', jsonfield.fields.JSONField(blank=True, help_text='Tests that were selected for conformance', null=True)),\n ('test_results', jsonfield.fields.JSONField(blank=True, help_text='Tests results', null=True)),\n ('actuator_tested', models.ForeignKey(help_text='Actuator tests were run against', on_delete=django.db.models.deletion.CASCADE, to='actuator.Actuator')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.5945626497268677, "alphanum_fraction": 0.5981087684631348, "avg_line_length": 23.171428680419922, "blob_id": "81ef3045e9bd953d0642188adca85c857ae20862", "content_id": "209e0b664f6f7e340a7ed1658507f1ab37185abc", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 846, "license_type": "permissive", "max_line_length": 112, "num_lines": 35, "path": "/orchestrator/gui/server/gui_server/webApp/views/api.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom rest_framework import permissions\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\n\n\n@api_view(['GET'])\n@permission_classes((permissions.AllowAny,))\ndef api_root(request):\n \"\"\"\n Orchestrator basic information\n \"\"\"\n attrs = {}\n\n for attr in dir(request):\n try:\n attrs[attr] = getattr(request, attr)\n except Exception as e:\n print(e)\n\n rtn = dict(\n message=\"Hello, {}. 
You're at the orchestrator gui api index.\".format(request.user.username or 'guest'),\n commands=dict(\n sent=0,\n responses=0\n ),\n name='GUI Server',\n id='',\n protocols=[],\n serializations=[]\n )\n\n return Response(rtn)\n" }, { "alpha_fraction": 0.5257108807563782, "alphanum_fraction": 0.538392961025238, "avg_line_length": 25.355091094970703, "blob_id": "dc2f728e33664a2c2fe4d88b09df18a7e327c8ad", "content_id": "e8bdcf26e9083e65ff09f5c66e604bacc30ce220", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference", "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 10093, "license_type": "permissive", "max_line_length": 111, "num_lines": 383, "path": "/orchestrator/gui/client/src/components/utils/jadn-editor/utils.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "// General\nconst deleteCharAt = (string, position) => string.slice(0, position) + string.slice(position + 1);\n\nconst add_tokenSecondary = (buffer) => {\n if (buffer.tokenSecondary.length === 0) return false;\n buffer.tokens.push(buffer.tokenSecondary);\n buffer.tokenSecondary = '';\n return true;\n}\n\nconst add_tokenPrimary = (buffer, value) => {\n if (value.length === 0) return false;\n buffer.tokens.push(value);\n return true;\n}\n\n// DomNode_Update\nexport const quarkize = (text, prefix='') => {\n let buffer = {\n active: false,\n string: '',\n number: '',\n symbol: '',\n space: '',\n delimiter: '',\n quarks: []\n };\n\n const pushAndStore = (char, type) => {\n switch (type) {\n case 'symbol':\n case 'delimiter':\n if (buffer.active) {\n buffer.quarks.push({\n string: buffer[buffer.active],\n type: prefix + '-' + buffer.active\n });\n }\n buffer[buffer.active] = '';\n buffer.active = type;\n buffer[buffer.active] = char;\n break;\n default:\n if (type !== buffer.active || ([buffer.string,char].indexOf('\\n') > -1)) {\n if (buffer.active) {\n buffer.quarks.push({\n string: buffer[buffer.active],\n type: prefix + '-' + buffer.active\n });\n }\n buffer[buffer.active] = '';\n buffer.active = type;\n buffer[buffer.active] = char;\n } else {\n buffer[type] += char;\n }\n break;\n }\n }\n\n const finalPush = () => {\n if (buffer.active) {\n buffer.quarks.push({\n string: buffer[buffer.active],\n type: prefix + '-' + buffer.active\n });\n buffer[buffer.active] = '';\n buffer.active = false;\n }\n }\n\n text.split('').forEach((char, i) => {\n switch (char) {\n case '\"':\n case \"'\":\n pushAndStore(char,'delimiter');\n break;\n case ' ':\n case '\\u00A0':\n pushAndStore(char,'space');\n break;\n case '{':\n case '}':\n case '[':\n case ']':\n case ':':\n case ',':\n pushAndStore(char,'symbol');\n break;\n case '0':\n case '1':\n case '2':\n case '3':\n case '4':\n case '5':\n case '6':\n case '7':\n case '8':\n case '9':\n pushAndStore(char, buffer.active === 'string' ? 'string' : 'number');\n break;\n case '-' :\n if (i < text.length - 1 && '0123456789'.indexOf(text.charAt(i + 1)) > -1) {\n pushAndStore(char, 'number');\n break;\n }\n case '.' 
:\n        if (i < text.length - 1 && i > 0) {\n          if ('0123456789'.indexOf(text.charAt(i + 1)) > -1 && '0123456789'.indexOf(text.charAt(i - 1)) > -1) {\n            pushAndStore(char, 'number');\n            break;\n          }\n        }\n      default:\n        pushAndStore(char, 'string');\n        break;\n    }\n  })\n\n  finalPush();\n  return buffer.quarks;\n}\n\nexport const validToken = (string, type) => {\n  const quotes = '\\'\"';\n  let firstChar = '',\n    lastChar = '',\n    quoteType = false;\n\n  // each case must end in break (or return); falling through would apply the\n  // string rules to primitives, keys, numbers, etc.\n  switch (type) {\n    case 'primitive':\n      if (['true','false','null','undefined'].indexOf(string) === -1) return false;\n      break;\n    case 'string':\n      if (string.length < 2) return false;\n      firstChar = string.charAt(0)\n      lastChar = string.charAt(string.length-1)\n      quoteType = quotes.indexOf(firstChar)\n\n      if (quoteType === -1 || firstChar !== lastChar) return false;\n      for (var i = 1; i < string.length - 1; i++) {\n        if (string.charAt(i) === quotes[quoteType] && string.charAt(i - 1) !== '\\\\') return false;\n      }\n      break;\n    case 'key':\n      if (string.length === 0) return false;\n      firstChar = string.charAt(0)\n      lastChar = string.charAt(string.length-1)\n      quoteType = quotes.indexOf(firstChar)\n\n      if (quoteType > -1) {\n        if (string.length === 1 || firstChar !== lastChar) return false;\n        for (var i = 0; i < string.length; i++) {\n          if (i > 0 && i < string.length - 1)\n            if (string.charAt(i) === quotes[quoteType])\n              if (string.charAt(i - 1) !== '\\\\') return false;\n        }\n      } else {\n        const nonAlphanumeric = '\\'\"`.,:;{}[]&<>=~*%\\\\|/-+!?@^ \\xa0';\n        for (var i = 0; i < nonAlphanumeric.length; i++) {\n          if (string.indexOf(nonAlphanumeric.charAt(i)) > -1) return false;\n        }\n      }\n      break;\n    case 'number':\n      for (var i = 0; i < string.length; i++) {\n        if ('0123456789'.indexOf(string.charAt(i)) === -1)\n          if (!(i === 0 && string.charAt(i) === '-') && string.charAt(i) !== '.') return false;\n      }\n      break;\n    case 'symbol':\n      if (string.length > 1 || '{[:]},'.indexOf(string) === -1) return false;\n      break;\n    case 'colon':\n      if (string.length > 1 || ':' !== string) return false;\n      break;\n    default:\n      return true;\n  }\n  return true;\n}\n\nexport const tokenFollowed = (buffer) => {\n  const last = buffer.tokens_normalize.length - 1;\n  if (last < 1) return false;\n  for (var i = last; i >= 0; i--) {\n    const previousToken = buffer.tokens_normalize[i];\n    switch (previousToken.type) {\n      case 'space':\n      case 'linebreak':\n        break;\n      default:\n        return previousToken;\n    }\n  }\n  return false;\n}\n\nexport const followedBySymbol = (buffer, tokenID, options) => {\n  if (tokenID === undefined) {\n    console.error('tokenID argument must be an integer.');\n  }\n  if (options === undefined) {\n    console.error('options argument must be an array.');\n  }\n  if (tokenID === buffer.tokens_merge.length-1) return false;\n\n  for (var i = tokenID + 1; i < buffer.tokens_merge.length; i++) {\n    const nextToken = buffer.tokens_merge[i];\n\n    switch (nextToken.type) {\n      case 'space':\n      case 'linebreak':\n        break;\n      case 'symbol':\n      case 'colon':\n        if (options.indexOf(nextToken.string)>-1) {\n          return i;\n        } else {\n          return false;\n        }\n        break;\n      default:\n        return false;\n        break;\n    }\n  }\n  return false;\n}\n\nexport const followsSymbol = (buffer, tokenID, options) => {\n  if (tokenID === undefined) {\n    console.error('tokenID argument must be an integer.');\n  }\n  if (options === undefined) {\n    console.error('options argument must be an array.');\n  }\n  if (tokenID === 0) return false;\n\n  for (var i = tokenID - 1; i >= 0; i--) {\n    const previousToken = buffer.tokens_merge[i];\n\n    switch (previousToken.type) {\n      case 'space':\n      case 'linebreak':\n        break;\n      case 'symbol':\n      case 'colon':\n        if 
(options.indexOf(previousToken.string) > -1) {\n return true;\n }\n return false;\n break;\n default:\n return false;\n break;\n }\n }\n return false;\n}\n\nexport const typeFollowed = (buffer, tokenID) => {\n if (tokenID === undefined) {\n console.error('tokenID argument must be an integer.');\n }\n if (tokenID === 0) return false;\n\n for (var i = tokenID - 1; i >= 0; i--) {\n const previousToken = buffer.tokens_merge[i];\n switch (previousToken.type) {\n case 'space':\n case 'linebreak':\n break;\n default:\n return previousToken.type;\n break;\n }\n }\n return false;\n}\n\n// JSON_Placeholder\nexport const escape_character = (buffer) => {\n if (buffer.currentChar !== '\\\\') return false;\n buffer.inputText = deleteCharAt(buffer.inputText, buffer.position);\n return true;\n}\n\nexport const determine_string = (buffer) => {\n if ('\\'\"'.indexOf(buffer.currentChar) === -1) return false;\n\n if (!buffer.stringOpen) {\n add_tokenSecondary(buffer);\n buffer.stringStart = buffer.position;\n buffer.stringOpen = buffer.currentChar;\n return true;\n }\n\n if (buffer.stringOpen === buffer.currentChar) {\n add_tokenSecondary(buffer);\n const stringToken = buffer.inputText.substring(buffer.stringStart, buffer.position + 1);\n add_tokenPrimary(buffer, stringToken);\n buffer.stringOpen = false;\n return true;\n }\n return false;\n}\n\nexport const determine_value = (buffer) => {\n if (':,{}[]'.indexOf(buffer.currentChar) === -1) return false;\n\n if (buffer.stringOpen) return false;\n add_tokenSecondary(buffer);\n add_tokenPrimary(buffer, buffer.currentChar);\n switch (buffer.currentChar) {\n case ':':\n buffer.isValue = true;\n return true;\n break;\n case '{':\n case '[':\n buffer.brackets.push(buffer.currentChar);\n break;\n case '}':\n case ']':\n buffer.brackets.pop();\n break;\n }\n\n if (buffer.currentChar !== ':') {\n buffer.isValue = (buffer.brackets[buffer.brackets.length-1]==='[');\n }\n return true;\n}\n\nexport const stripQuotesFromKey = (text) => {\n if (text.length === 0) return text;\n if (['\"\"',\"''\"].indexOf(text) > -1) return \"''\";\n\n let wrappedInQuotes = false;\n for (var i = 0; i < 2; i++) {\n if ([text.charAt(0), text.charAt(text.length-1)].indexOf(['\"',\"'\"][i]) > -1) {\n wrappedInQuotes = true;\n break;\n }\n }\n\n if (wrappedInQuotes && text.length >= 2) {\n text = text.slice(1, -1);\n }\n\n const nonAlphaNumeric = text.replace(/\\w/g,''),\n alphaNumeric = text.replace(/\\W+/g,''),\n mayRemoveQuotes = ((nonAlphaNumeric,text) => {\n let numberAndLetter = false;\n for (var i = 0; i < text.length; i++) {\n if (i === 0) if(isNaN(text.charAt(i))) break;\n if (isNaN(text.charAt(i))) {\n numberAndLetter = true;\n break;\n }\n }\n return !(nonAlphaNumeric.length > 0 || numberAndLetter);\n })(nonAlphaNumeric, text),\n hasQuotes = (string => {\n for (var i = 0; i < string.length; i++) {\n if ([\"'\",'\"'].indexOf(string.charAt(i)) > -1) return true;\n }\n return false;\n })(nonAlphaNumeric);\n\n if (hasQuotes) {\n let newText = '';\n const charList = text.split('');\n\n for (var ii = 0; ii < charList.length; ii++) {\n let char = charList[ii];\n if ([\"'\",'\"'].indexOf(char)>-1) char = '\\\\' + char;\n newText += char;\n }\n text = newText;\n }\n\n return mayRemoveQuotes ? text : \"'\" + text + \"'\";\n}\n\nexport const indent = (number) => (number > 0 ? 
'\\n' : '') + Array(number * 2).fill(' ').join('');" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6042194366455078, "avg_line_length": 24.212766647338867, "blob_id": "5d590c6c133edb080a4282c0a7ce327d595339f7", "content_id": "46c0ab56e257affb35d26d8b39c9f1b0aa023488", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1185, "license_type": "permissive", "max_line_length": 119, "num_lines": 47, "path": "/orchestrator/gui/client/src/components/command/pages/generate/lib/jadn_field/map.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\n\nimport {\n Button,\n Form,\n FormGroup,\n FormText,\n Input,\n Label,\n} from 'reactstrap'\n\nimport {\n isOptional_jadn,\n keys,\n zip,\n Field\n} from './'\n\nimport * as GenActions from '../../../../../../actions/generate'\n\n\nclass MapField extends Component {\n render() {\n let name = this.props.name || this.props.def.name\n let msgName = (this.props.parent ? [this.props.parent, name] : [name]).join('.')\n let fields = this.props.def.fields.map((field, i) => {\n return <Field key={ i } def={ zip(keys.Gen_Def, field) } parent={ msgName } optChange={ this.props.optChange } />\n })\n\n return (\n <FormGroup tag=\"fieldset\" className=\"border border-dark p-2\">\n <legend>{ (isOptional_jadn(this.props.def) ? '' : '*') + name }</legend>\n { this.props.def.desc != '' ? <FormText color=\"muted\">{ this.props.def.desc }</FormText> : '' }\n <div className=\"col-12 my-1 px-0\">\n { fields }\n </div>\n </FormGroup>\n )\n }\n}\n\nconst mapStateToProps = (state) => ({\n schema: state.Generate.selectedSchema\n})\n\nexport default connect(mapStateToProps)(MapField)\n" }, { "alpha_fraction": 0.6718506813049316, "alphanum_fraction": 0.6718506813049316, "avg_line_length": 26.95652198791504, "blob_id": "dbee85599606a951ff3b85447fbd402d4861f88e", "content_id": "d0fd3094dcd1b91857eaaa336b207edde7e9b0d2", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1286, "license_type": "permissive", "max_line_length": 78, "num_lines": 46, "path": "/orchestrator/core/orc_server/actuator/admin.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\n# Local imports\nfrom utils import ReadOnlyModelAdmin\nfrom .models import Actuator, ActuatorGroup, ActuatorProfile\n\n\nclass ActuatorAdmin(admin.ModelAdmin):\n \"\"\"\n Actuator admin\n \"\"\"\n readonly_fields = ('actuator_id', 'profile', 'schema_format')\n list_display = ('name', 'device', 'profile', 'schema_format')\n\n\nclass ActuatorGroupAdmin(admin.ModelAdmin):\n \"\"\"\n Actuator Group admin\n \"\"\"\n list_display = ('name', 'user_count', 'actuator_count')\n filter_horizontal = ('users', 'actuators')\n\n\nclass ActuatorProfileAdmin(ReadOnlyModelAdmin, admin.ModelAdmin):\n \"\"\"\n Actuator Profile admin\n \"\"\"\n list_display = ('name', 'actuator_count')\n filter_horizontal = ('actuators', )\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"\n Set name and actuator fields to read only if user is not the superuser\n :param request: request 
instance\n :param obj: ...\n :return: tuple - read only fields\n \"\"\"\n if request.user.is_superuser:\n return ()\n return 'name', 'actuators'\n\n\n# Register models\nadmin.site.register(Actuator, ActuatorAdmin)\nadmin.site.register(ActuatorGroup, ActuatorGroupAdmin)\nadmin.site.register(ActuatorProfile, ActuatorProfileAdmin)\n" }, { "alpha_fraction": 0.7078145146369934, "alphanum_fraction": 0.7108200788497925, "avg_line_length": 48.04210662841797, "blob_id": "525b1d80d324667a8b981957dd09ccbdcd9e96b9", "content_id": "6fbb2a5de48cf615a1a114e2f88f38baa8808b20", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4658, "license_type": "permissive", "max_line_length": 119, "num_lines": 95, "path": "/orchestrator/core/ReadMe.md", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# OASIS TC Open: oif-orchestrator-core\n## OpenC2 Orchestrator Server\n\n### About this image\n- Django based\n - Django REST Framework\n - User/Pass via JWT Token\n- Default info:\n - Login - admin/password\n - Ports\n - HTTPS/API - 8080\n- Serializations\n - JSON is currently the only approved OpenC2 serialization\n - All others are included for development/testing purposes\n\n#### Django Apps\n##### Orchestrator - /api/<orchestrator_urls>\n- Main application/Root \n\n##### Account - /api/account/<account_urls>\n- Note: Naming conflict with user, same concept different name\n- Handles all endpoints related to accounts\n\n##### Actuator - /api/actuator/<actuator_urls>\n- Handles all endpoints related to actuators\n\n##### Backup - /api/backup/<backup_url>\n- Handles all endpoints related to data backup\n \n##### Command - /api/command/<command_urls>\n- Handles all endpoints related to commands\n\n##### Device - /api/device/<device_urls>\n- Handles all endpoints related to devices\n\n##### Log - /api/log/<log_urls>\n- Handles all endpoints related to logs\n\n##### Admin GUI - /admin/<admin_urls>\n- Administration GUI, prebuilt and preconfigured by Django\n\n\n### How to use this image\n- Prior to the Core starting, the MySQL database and queue/buffer should be started and running.\n\nEnvironment Variables\n\n| Variable | Type | Description | Default |\n| ----------- | ----------- | ----------- | ----------- |\n| DATABASE_NAME | String | Name of the database to use, create if not created | orchestrator |\n| DATABASE_HOST | String | Hostname/IP address of the system running the MySQL Database | localhost |\n| DATABASE_PORT | Integer | Port the database has available for connections | 3306 |\n| DATABASE_USER | String | User to connect to the database | orc_root |\n| DATABASE_PASSWORD | String | Password of the connection user | 0Rch35Tr@t0r | \n| QUEUE_HOST | String | Hostname/IP address of the system running the AMQP capable queue | localhost |\n| QUEUE_PORT | Integer | Port the queue has available for connections | 5672 |\n| QUEUE_USER | String | User to connect to the queue | guest |\n| QUEUE_PASSWORD | String | Password of the connection user | guest |\n\n - Adding Certs\n\t- Certificates are not necessary for the `Core` container as it is not directly connected to by the user\n\t- For adding certificates to the web/API interface, see `orchestrator/gui/client/`\n\n\n### Resources\n- General\n - [HTTP Status Codes](https://www.restapitutorial.com/httpstatuscodes.html)\n\n- Core\n - 
[Django](https://www.djangoproject.com/) - Core Framework\n - [Cors Headers](https://pypi.org/project/django-cors-headers/) - Cross Origin Headers\n - [Django REST Framework](http://www.django-rest-framework.org/) - Core Framework REST\n - [DRF DataTables](https://django-rest-framework-datatables.readthedocs.io/en/latest/) - Server Side processing\n - [DRF Files](https://pypi.org/project/djangorestframework-files/) - File download/upload\n - [DRF JWT](https://getblimp.github.io/django-rest-framework-jwt/) - JSON WebTokens\n - [DRF MessagePack](https://pypi.org/project/djangorestframework-msgpack/) - MessagePack serialization support\n - [DRF QueryFields](https://djangorestframework-queryfields.readthedocs.io/en/latest/) - Dynamic fields in API\n - [DRF Swagger](https://django-rest-swagger.readthedocs.io/en/latest/) - Dynamic REST API documentation\n - [DRF Tracking](https://drf-tracking.readthedocs.io/en/latest/) - Basis for the request tracking app\n - [DRF Writable Nested](https://pypi.org/project/drf-writable-nested/) - Writable Nested Serializer\n - [DRF XML](https://pypi.org/project/djangorestframework-XML/) - XML serialization support\n - [Dynamic Preferences](https://django-dynamic-preferences.readthedocs.io/en/latest/) - Dynamic config\n - [Json Field](https://pypi.org/project/jsonfield/) - JSON field for database\n - [Bleach](https://bleach.readthedocs.io/en/latest/index.html) - String Sanitization\n - [PyExcel XLS](https://pypi.org/project/pyexcel-xls/) - XLS file parsing for Python\n - [uWSGI](https://uwsgi-docs.readthedocs.io/en/latest/) - Production Server\n - [Whitenoise](http://whitenoise.evans.io/en/stable/index.html#) - Static file serving\n \n#### Interesting Modules\n- [REST MultiToken Auth](https://pypi.org/project/django-rest-multitokenauth/)\n- [JWT Asymmetric Auth](https://pypi.org/project/asymmetric_jwt_auth/)\n- [Central Authentication Server](https://hub.docker.com/r/apereo/cas/)\n- [CAS Auth](https://github.com/mingchen/django-cas-ng)\n- [User Agents](https://github.com/selwin/django-user_agents)\n- [Django Channels](https://channels.readthedocs.io/en/latest/)" }, { "alpha_fraction": 0.5314285755157471, "alphanum_fraction": 0.5353845953941345, "avg_line_length": 23.77083396911621, "blob_id": "eb1f26a3d0f786691227f3e9971b37ba0a794842", "content_id": "cfe299e3f4dd905da1bee9455d8a7b1081529e98", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2275, "license_type": "permissive", "max_line_length": 108, "num_lines": 92, "path": "/orchestrator/gui/client/src/components/admin/pages/settings.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\nimport { Helmet } from 'react-helmet-async'\n\nconst str_fmt = require('string-format')\n\nimport { SettingsModal } from '../lib'\nimport { RemotePageTable } from '../../utils'\n\nclass Settings extends Component {\n constructor(props, context) {\n super(props, context)\n\n this.meta = {\n title: str_fmt('{base} | {page}', {base: this.props.siteTitle, page: 'Admin - Settings'}),\n canonical: str_fmt('{origin}{path}', {origin: window.location.origin, path: window.location.pathname})\n }\n\n this.tableColumns = [\n {\n text: 'Name',\n dataField: 'name',\n sort: true\n },{\n text: 'Value',\n dataField: 'value',\n sort: false\n },\n ]\n\n this.editOptions = 
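/* assumed usage: RemotePageTable renders this modal component for its row-edit action */ 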
{\n modal: SettingsModal\n }\n\n if (this.props.settings.loaded === 0) {\n this.props.getSettings()\n }\n }\n\n render() {\n return (\n <div className=\"row mx-auto\">\n <Helmet>\n <title>{ this.meta.title }</title>\n <link rel=\"canonical\" href={ this.meta.canonical } />\n </Helmet>\n <div className=\"col-12\">\n <div className=\"col-12\">\n {/* <SettingsModal register className=\"float-right\" /> */}\n <h1>Settings</h1>\n </div>\n\n <RemotePageTable\n keyField='id'\n dataKey='Settings.settings'\n dataGet={ this.props.getSettings }\n columns={ this.tableColumns }\n editRows\n editOptions={ this.editOptions }\n defaultSort={[\n {\n dataField: 'name',\n order: 'asc'\n }\n ]}\n />\n </div>\n </div>\n )\n }\n}\n\nconst mapStateToProps = (state) => ({\n siteTitle: state.Util.site_title,\n settings: {\n settings: [], //state.Settings.settings,\n loaded: 1, // state.Settings.settings.length\n total: 1, // state.Settings.count\n }\n})\n\nconst mapDispatchToProps = (dispatch) => ({\n updateSetting: (set, val) => {},\n getSettings: () => {}\n})\n\nexport default connect(mapStateToProps, mapDispatchToProps)(Settings)\n" }, { "alpha_fraction": 0.732876718044281, "alphanum_fraction": 0.7534246444702148, "avg_line_length": 28.399999618530273, "blob_id": "e4e10ebdc5a27a3eb7fbfda4cfba9d728cc6998f", "content_id": "d28c900a9271eed73aaf7f05fc3d8398c61486d0", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 146, "license_type": "permissive", "max_line_length": 58, "num_lines": 5, "path": "/orchestrator/transport/mqtt/docker_dev_start.sh", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\ndockerize -wait tcp://$QUEUE_HOST:$QUEUE_PORT -timeout 30s\n\necho \"Running MQTT Transport Module.\"\npython3 -u mqtt_transport.py" }, { "alpha_fraction": 0.6483309864997864, "alphanum_fraction": 0.6497414112091064, "avg_line_length": 29.811594009399414, "blob_id": "0baba56e3cfb0737aae2001a6381b25e4995b4d6", "content_id": "b0626fcb6bb5c30d3a51671fe9a408560e835285", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2127, "license_type": "permissive", "max_line_length": 130, "num_lines": 69, "path": "/orchestrator/gui/client/src/actions/command.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "// Actions for command API\nimport { RSAA } from 'redux-api-middleware'\nimport { withGUIAuth } from './util'\n\nconst str_fmt = require('string-format')\n\n// API Base URL\nconst baseAPI = '/api/command'\n\n// Helper Functions\n// None\n\n// API Calls\n// GET - /api/command/ - all commands for requesting user\nconst GET_COMMANDS_REQUEST = '@@command/GET_COMMANDS_REQUEST'\nexport const GET_COMMANDS_SUCCESS = '@@command/GET_COMMANDS_SUCCESS'\nexport const GET_COMMANDS_FAILURE = '@@command/GET_COMMANDS_FAILURE'\nexport const getCommands = ({page=1, count=10, sort='name', refresh=false}={}) => ({\n [RSAA]: {\n endpoint: str_fmt('{base}?page={page}&length={count}&ordering={sort}', {base: baseAPI, page: page, count: count, sort: sort}),\n method: 'GET',\n headers: withGUIAuth(),\n types: [\n 
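// redux-api-middleware convention: the REQUEST type dispatches when the call starts, then exactly one of SUCCESS or FAILURE\n 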
GET_COMMANDS_REQUEST,\n {\n type: GET_COMMANDS_SUCCESS,\n meta: {\n sort: sort,\n refresh: refresh\n }\n }, GET_COMMANDS_FAILURE\n ]\n }\n})\n\n// PUT - /api/command/send/ - send command\nconst SEND_COMMAND_REQUEST = '@@command/SEND_COMMAND_REQUEST'\nexport const SEND_COMMAND_SUCCESS = '@@command/SEND_COMMAND_SUCCESS'\nexport const SEND_COMMAND_FAILURE = '@@command/SEND_COMMAND_FAILURE'\nexport const sendCommand = (command, act, chan) => ({\n [RSAA]: {\n endpoint: str_fmt('{base}/send/', {base: baseAPI}),\n method: 'PUT',\n headers: withGUIAuth(),\n body: JSON.stringify({\n actuator: act,\n command: command,\n channel: chan\n }),\n types: [\n SEND_COMMAND_REQUEST, SEND_COMMAND_SUCCESS, SEND_COMMAND_FAILURE\n ]\n }\n})\n\n// GET - /api/command/{command_id} - get specific command\nconst GET_COMMAND_REQUEST = '@@command/GET_COMMAND_REQUEST'\nexport const GET_COMMAND_SUCCESS = '@@command/GET_COMMAND_SUCCESS'\nexport const GET_COMMAND_FAILURE = '@@command/GET_COMMAND_FAILURE'\nexport const getCommand = (command_id) => ({\n [RSAA]: {\n endpoint: str_fmt('{base}/{command_id}/', {base: baseAPI, command_id: command_id}),\n method: 'GET',\n headers: withGUIAuth(),\n types: [\n GET_COMMAND_REQUEST, GET_COMMAND_SUCCESS, GET_COMMAND_FAILURE\n ]\n }\n})\n\n" }, { "alpha_fraction": 0.6670992970466614, "alphanum_fraction": 0.667748212814331, "avg_line_length": 24.683332443237305, "blob_id": "430dca6e7551efa323babc9837c5071de5764d5b", "content_id": "43920207f10a212ea16ab3defd04b08f157649bd", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1541, "license_type": "permissive", "max_line_length": 89, "num_lines": 60, "path": "/orchestrator/core/orc_server/conformance/models.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import uuid\n\nfrom django.db import models\nfrom django.utils import timezone\nfrom drf_queryfields import QueryFieldsMixin\nfrom jsonfield import JSONField\nfrom rest_framework import serializers\n\n# Local imports\nfrom actuator.models import Actuator, ActuatorSerializer\n\n\nclass ConformanceTest(models.Model):\n \"\"\"\n Conformance Test instance base\n \"\"\"\n test_id = models.UUIDField(\n default=uuid.uuid4,\n help_text=\"Unique UUID of the test\",\n unique=True\n )\n\n actuator_tested = models.ForeignKey(\n Actuator,\n on_delete=models.CASCADE,\n help_text=\"Actuator tests were run against\"\n )\n\n test_time = models.DateTimeField(\n default=timezone.now,\n help_text=\"Time the test was run\"\n )\n\n tests_run = JSONField(\n blank=True,\n help_text=\"Tests that were selected for conformance\",\n null=True\n )\n\n test_results = JSONField(\n blank=True,\n help_text=\"Tests results\",\n null=True\n )\n\n\nclass ConformanceTestSerializer(QueryFieldsMixin, serializers.ModelSerializer):\n \"\"\"\n Actuator API Serializer\n \"\"\"\n test_id = serializers.UUIDField(format='hex_verbose')\n actuator_tested = ActuatorSerializer(read_only=True)\n test_time = serializers.DateTimeField()\n tests_run = serializers.JSONField()\n test_results = serializers.JSONField()\n\n class Meta:\n model = ConformanceTest\n fields = ('test_id', 'actuator_tested', 'test_time', 'tests_run', 'test_results')\n read_only_fields = fields\n" }, { "alpha_fraction": 0.7582417726516724, "alphanum_fraction": 0.7582417726516724, "avg_line_length": 17.200000762939453, "blob_id": 
"db71d36b076a0d54d6cbc9e9c6c851f77bc6852c", "content_id": "6b14619ef86ef837581c691358fc050f12084631", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 91, "license_type": "permissive", "max_line_length": 33, "num_lines": 5, "path": "/orchestrator/core/orc_server/actuator/apps.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass ActuatorConfig(AppConfig):\n name = 'actuator'\n" }, { "alpha_fraction": 0.5648577809333801, "alphanum_fraction": 0.5771636366844177, "avg_line_length": 32.49324417114258, "blob_id": "998ca54459e45882bcf8889ae048fbceb0850d02", "content_id": "d5e1e9bee6c68bbddb817956a8741c11a93b2e5a", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4957, "license_type": "permissive", "max_line_length": 111, "num_lines": 148, "path": "/base/modules/utils/root/sb_utils/message/pybinn/decoder.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "\"\"\"\nImplementation of BINNDecoder\n\"\"\"\nimport io\n\nfrom datetime import datetime, timedelta\nfrom functools import partial\nfrom struct import unpack\nfrom typing import (\n Callable,\n Dict,\n Union\n)\n\nfrom . import datatypes as types\n\n\nclass BINNDecoder:\n \"\"\"\n BINN <https://github.com/liteserver/binn> decoder for Python\n \"\"\"\n _decoders: Dict[bytes, Callable]\n\n def __init__(self, buffer=None, fp=None, *custom_decoders): # pylint: disable=keyword-arg-before-vararg\n if buffer:\n self._buffer = io.BytesIO(buffer)\n if fp:\n self._buffer = fp\n self._custom_decoders = custom_decoders\n\n self._decoders = {\n types.BINN_STRING: self._decode_str,\n types.BINN_UINT8: partial(self._unpack, 'B', 1),\n types.BINN_INT8: partial(self._unpack, 'b', 1),\n types.BINN_UINT16: partial(self._unpack, 'H', 2),\n types.BINN_INT16: partial(self._unpack, 'h', 2),\n types.BINN_UINT32: partial(self._unpack, 'I', 4),\n types.BINN_INT32: partial(self._unpack, 'i', 4),\n types.BINN_UINT64: partial(self._unpack, 'L', 8),\n types.BINN_INT64: partial(self._unpack, 'l', 8),\n types.BINN_FLOAT64: partial(self._unpack, 'd', 8),\n types.BINN_BLOB:self._decode_bytes,\n types.BINN_DATETIME: self._decode_datetime,\n types.BINN_LIST: self._decode_list,\n types.BINN_OBJECT: self._decode_dict,\n types.BINN_MAP: self._decode_dict,\n types.PYBINN_MAP: self._decode_dict,\n types.BINN_TRUE: lambda: True,\n types.BINN_FALSE: lambda: False,\n types.BINN_NULL: lambda: None\n }\n\n def decode(self):\n \"\"\"\n Decode date from buffer\n \"\"\"\n binntype = self._buffer.read(1)\n decoder = self._decoders.get(binntype, None)\n if decoder and binntype in (types.BINN_OBJECT, types.BINN_MAP, types.PYBINN_MAP):\n return decoder(binntype)\n\n if decoder:\n return decoder()\n\n # if type was not found, try using custom decoders\n for decoder in self._custom_decoders:\n if not issubclass(type(decoder), CustomDecoder):\n raise TypeError(\"Type {} is not CustomDecoder.\")\n if binntype == decoder.datatype:\n return self._decode_custom_type(decoder)\n\n raise TypeError(f\"Invalid data format: {binntype}\")\n\n def _decode_str(self):\n size = self._from_varint()\n value = 
self._buffer.read(size).decode('utf8')\n # Ready null terminator byte to advance position\n self._buffer.read(1)\n return value\n\n def _decode_bytes(self):\n size = unpack('I', self._buffer.read(4))[0]\n return self._buffer.read(size)\n\n def _decode_datetime(self):\n timestamp = float(unpack('d', self._buffer.read(8))[0])\n # datetime.utcfromtimestamp method in python3.3 has rounding issue (https://bugs.python.org/issue23517)\n return datetime(1970, 1, 1) + timedelta(seconds=timestamp)\n\n def _decode_list(self):\n # read container size\n self._from_varint()\n count = self._from_varint()\n result = []\n for _ in range(count):\n result.append(self.decode())\n return result\n\n def _decode_dict(self, binntype):\n # read container size\n self._from_varint()\n count = self._from_varint()\n result = {}\n for _ in range(count):\n if binntype == types.BINN_OBJECT:\n key_size = unpack('B', self._buffer.read(1))[0]\n key = self._buffer.read(key_size).decode('utf8')\n if binntype == types.BINN_MAP:\n key = unpack('I', self._buffer.read(4))[0]\n if binntype == types.PYBINN_MAP:\n key = self._buffer.read(types.PYBINN_MAP_SIZE)\n result[key] = self.decode()\n return result\n\n def _decode_custom_type(self, decoder):\n size = self._from_varint()\n return decoder.getobject(self._buffer.read(size))\n\n def _from_varint(self):\n value = unpack('B', self._buffer.read(1))[0]\n if value & 0x80:\n self._buffer.seek(self._buffer.tell() - 1)\n value = unpack('>I', self._buffer.read(4))[0]\n value &= 0x7FFFFFFF\n return value\n\n # Switch Helpers\n def _unpack(self, fmt: str, rb: int) -> Union[int, float]:\n return unpack(fmt, self._buffer.read(rb))[0]\n\n\nclass CustomDecoder:\n \"\"\"\n Base class for handling decoding user types\n \"\"\"\n\n def __init__(self, data_type):\n # check if custom data type is not BINN type\n if data_type in types.ALL:\n raise Exception(f\"Data type {data_type} is defined as internal type.\")\n\n self.datatype = data_type\n\n def getobject(self, data_bytes):\n \"\"\"\n Decode object from bytes\n \"\"\"\n raise NotImplementedError()\n" }, { "alpha_fraction": 0.7301204800605774, "alphanum_fraction": 0.7301204800605774, "avg_line_length": 28.64285659790039, "blob_id": "f9eb6b6c09bae36f03d99bc9a9dc797dafb963cb", "content_id": "32b392967b1c8016a2a5c8b90939a8c69a692a71", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 415, "license_type": "permissive", "max_line_length": 88, "num_lines": 14, "path": "/orchestrator/core/orc_server/utils/permissions.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "\"\"\"\nDjango View Permission Utilities\n\"\"\"\nfrom rest_framework import permissions\n\n\nclass IsAdminOrIsSelf(permissions.BasePermission):\n \"\"\"\n Object-level permission to only allow owners of an object to edit it.\n Only functional for User model functions\n \"\"\"\n\n def has_object_permission(self, request, view, obj):\n return obj == request.user or request.user.is_staff or request.user.is_superuser\n" }, { "alpha_fraction": 0.639072835445404, "alphanum_fraction": 0.639072835445404, "avg_line_length": 19.133333206176758, "blob_id": "e30af124f0d6abf25ad02684d2841f101be4b0f4", "content_id": "fa9ea093876842728dd9b9f8454d7d291f5e0908", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", 
"LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 302, "license_type": "permissive", "max_line_length": 56, "num_lines": 15, "path": "/orchestrator/core/orc_server/tracking/views/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from .api import api_root\nfrom .gui import gui_events, gui_requests, gui_root\nfrom .viewsets import EventLogViewSet, RequestLogViewSet\n\n__all__ = [\n # API\n 'api_root',\n # GUI\n 'gui_events',\n 'gui_requests',\n 'gui_root',\n # Viewsets\n 'EventLogViewSet',\n 'RequestLogViewSet'\n]\n" }, { "alpha_fraction": 0.5996510982513428, "alphanum_fraction": 0.6015409231185913, "avg_line_length": 36.79670333862305, "blob_id": "00ddd8a8d1d729f57df38d3c969c2201728c2657", "content_id": "1cff124d47f9687eb32a84608b19fe6fa2e10155", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6879, "license_type": "permissive", "max_line_length": 176, "num_lines": 182, "path": "/base/modules/utils/root/sb_utils/amqp_tools.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "\"\"\"\namqp_tools.py\nImplements wrapper for kombu module to more-easily read/write from message queue.\n\"\"\"\nimport kombu # handles interaction with AMQP server\nimport socket # identify exceptions that occur with timeout\nimport datetime # print time received message\nimport os # to determine localhost on a given machine\n\nfrom functools import partial\nfrom inspect import isfunction\nfrom multiprocessing import Event, Process\nfrom typing import Union\n\n\nclass Consumer(Process):\n \"\"\"\n The Consumer class reads messages from message queue and determines what to do with them.\n \"\"\"\n HOST = os.environ.get(\"QUEUE_HOST\", \"localhost\")\n PORT = os.environ.get(\"QUEUE_PORT\", 5672)\n EXCHANGE = \"transport\"\n ROUTING_KEY = \"*\"\n\n def __init__(self, host: str = HOST, port: int = PORT, exchange: str = EXCHANGE, routing_key: str = ROUTING_KEY, callbacks: Union[list, tuple] = None, debug: bool = False):\n \"\"\"\n Consume message from queue exchange.\n :param host: host running RabbitMQ\n :param port: port which handles AMQP (default 5672)\n :param exchange: specifies where to read messages from\n :param routing_key:\n :param callbacks: list of callback functions which are called upon receiving a message\n :param debug: print debugging messages\n \"\"\"\n super().__init__()\n self._exit = Event()\n\n self._url = f\"amqp://{host}:{port}\"\n self._exchange_name = exchange\n self._callbacks = ()\n self._debug = debug\n\n if isinstance(callbacks, (list, tuple)):\n for func in callbacks:\n self.add_callback(func)\n\n # Initialize connection we are consuming from based on defaults/passed params\n self._conn = kombu.Connection(hostname=host, port=port, userid=\"guest\", password=\"guest\", virtual_host=\"/\")\n self._exchange = kombu.Exchange(self._exchange_name, type=\"topic\")\n self._routing_key = routing_key\n\n # At this point, consumers are reading messages regardless of queue name\n # so I am just setting it to be the same as the exchange.\n self._queue = kombu.Queue(name=self._routing_key, exchange=self._exchange, routing_key=self._routing_key)\n\n # Start consumer as an independent process\n self.start()\n if self._debug:\n print(f\"Connected to 
{self._url}\")\n\n def run(self) -> None:\n \"\"\"\n Runs the consumer until stopped.\n \"\"\"\n with kombu.Consumer(self._conn, queues=self._queue, callbacks=[self._on_message], accept=[\"text/plain\", \"application/json\"]):\n if self._debug:\n print(f\"Connected to {self._url} on exchange [{self._exchange_name}], routing_key [{self._routing_key}] and waiting to consume...\")\n\n while not self._exit.is_set():\n try:\n self._conn.drain_events(timeout=5)\n except socket.timeout:\n pass\n except KeyboardInterrupt:\n self.shutdown()\n\n def _on_message(self, body, message):\n \"\"\"\n Default option for a consumer callback, prints out message and message data.\n :param body: contains the body of the message sent\n :param message: contains meta data about the message sent (ie. delivery_info)\n \"\"\"\n if self._debug:\n print(f\"Message Received @ {datetime.datetime.now()}\")\n\n message.ack()\n for func in self._callbacks:\n func(body, message)\n\n def add_callback(self, fun):\n \"\"\"\n Add a function to the list of callback functions.\n :param fun: function to add to callbacks\n \"\"\"\n if isfunction(fun) or isinstance(fun, partial):\n if fun in self._callbacks:\n raise ValueError(\"Duplicate function found in callbacks\")\n self._callbacks = (*self._callbacks, fun)\n\n def get_exchanges(self):\n \"\"\"\n Get a list of exchange names on the queue\n :return: list of exchange names\n \"\"\"\n exchanges = self._conn.get_manager().get_exchanges()\n return list(filter(None, [exc.get(\"name\", \"\")for exc in exchanges]))\n\n def get_queues(self):\n \"\"\"\n Get a list of queue names on the queue\n :return: list of queue names\n \"\"\"\n queues = self._conn.get_manager().get_queues()\n return list(filter(None, [que.get(\"name\", \"\") for que in queues]))\n\n def get_binds(self):\n \"\"\"\n Get a list of exchange/topic bindings\n :return: list of exchange/topic bindings\n \"\"\"\n binds = []\n manager = self._conn.get_manager()\n for queue in self.get_queues():\n for bind in manager.get_queue_bindings(vhost=\"/\", qname=queue):\n binds.append({\n \"exchange\": bind.get(\"source\", \"\"),\n \"routing_key\": bind.get(\"routing_key\", \"\")\n })\n\n return binds\n\n def shutdown(self):\n \"\"\"\n Shutdown the consumer and cleanly close the process\n \"\"\"\n self._exit.set()\n print(\"The consumer has shutdown.\")\n\n\nclass Producer:\n \"\"\"\n The Producer class writes messages to the message queue to be consumed.\n \"\"\"\n HOST = os.environ.get(\"QUEUE_HOST\", \"localhost\")\n PORT = os.environ.get(\"QUEUE_PORT\", 5672)\n EXCHANGE = \"transport\"\n ROUTING_KEY = \"*\"\n\n def __init__(self, host: str = HOST, port: int = PORT, debug: bool = False):\n \"\"\"\n Sets up connection to broker to write to.\n :param host: hostname for the queue server\n :param port: port for the queue server\n :param debug: print debugging messages\n \"\"\"\n self._url = f\"amqp://{host}:{port}\"\n self._debug = debug\n self._conn = kombu.Connection(hostname=host, port=port, userid=\"guest\", password=\"guest\", virtual_host=\"/\")\n\n def publish(self, message: Union[dict, str] = \"\", headers: dict = None, exchange: str = EXCHANGE, routing_key: str = ROUTING_KEY):\n \"\"\"\n Publish a message to th AMQP Queue\n :param message: message to be published\n :param headers: header key-values to publish with the message\n :param exchange: specifies the top level specifier for message publish\n :param routing_key: determines which queue the message is published to\n \"\"\"\n self._conn.connect()\n queue = 
kombu.Queue(routing_key, kombu.Exchange(exchange, type=\"topic\"), routing_key=routing_key)\n queue.maybe_bind(self._conn)\n queue.declare()\n\n producer = kombu.Producer(self._conn.channel())\n producer.publish(\n message,\n headers=headers or {},\n exchange=queue.exchange,\n routing_key=queue.routing_key,\n declare=[queue]\n )\n producer.close()\n self._conn.release()\n" }, { "alpha_fraction": 0.5325443744659424, "alphanum_fraction": 0.5887573957443237, "avg_line_length": 18.882352828979492, "blob_id": "344f928f975a2f444f9159725acb8c8c7b36fd94", "content_id": "f434454d8173b5d5e2b25cc7c3cfa900e0aa412d", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 338, "license_type": "permissive", "max_line_length": 49, "num_lines": 17, "path": "/orchestrator/core/orc_server/device/migrations/0004_remove_device_multi_actuator.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.7 on 2019-12-20 13:45\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('device', '0003_device_multi_actuator'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='device',\n name='multi_actuator',\n ),\n ]\n" }, { "alpha_fraction": 0.676616907119751, "alphanum_fraction": 0.6909204125404358, "avg_line_length": 35.54545593261719, "blob_id": "11e21780a913e1d9942e590b7d32ea8f6c0deb9f", "content_id": "eb4200a9a2b9c1b5581345ce271b732d7c49db6a", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1608, "license_type": "permissive", "max_line_length": 102, "num_lines": 44, "path": "/orchestrator/gui/server/gui_server/account/views/viewsets.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport base64\n\nfrom django.contrib.auth.models import User\n\nfrom rest_framework import permissions, status, viewsets\nfrom rest_framework.decorators import detail_route\nfrom rest_framework.response import Response\n\nfrom ..models import UserSerializer, PasswordSerializer\n\nfrom utils import IsAdminOrIsSelf\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n permission_classes = (permissions.IsAdminUser, )\n serializer_class = UserSerializer\n lookup_field = 'username'\n\n queryset = User.objects.all().order_by('-date_joined')\n\n @detail_route(methods=['post'], permission_classes=[IsAdminOrIsSelf], url_path='change_password')\n def change_password(self, request, username=None):\n \"\"\"\n Change user password\n passwords sent as base64 encoded strings\n \"\"\"\n serializer = PasswordSerializer(data=request.data)\n user = self.get_object()\n\n if serializer.is_valid():\n if not user.check_password(base64.b64decode(serializer.data.get('old_password'))):\n return Response({'old_password': ['Wrong password.']}, status=status.HTTP_400_BAD_REQUEST)\n # set_password also hashes the password that the user will get\n user.set_password(base64.b64decode(serializer.data.get('new_password_1')))\n user.save()\n return Response({'status': 'password changed'}, status=status.HTTP_200_OK)\n\n 
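# Validation failed: fall through and return the field errors with HTTP 400.\n # Hypothetical request body (passwords are base64 strings; values illustrative only):\n # {\"old_password\": \"b2xkcGFzcw==\", \"new_password_1\": \"bmV3cGFzcw==\"}\n 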
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n" }, { "alpha_fraction": 0.5455740690231323, "alphanum_fraction": 0.5482033491134644, "avg_line_length": 22.77083396911621, "blob_id": "847e0c9c05d1a530775b9b414fda21961c7f9d96", "content_id": "ba58f3919cac43643d038cd85b9711e1519d653f", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2282, "license_type": "permissive", "max_line_length": 123, "num_lines": 96, "path": "/orchestrator/gui/client/src/components/command/pages/generate/lib/json_field/basicField.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\n\nimport {\n Button,\n Form,\n FormGroup,\n FormText,\n Input,\n Label,\n} from 'reactstrap'\n\nimport {\n isOptional_json,\n Field\n} from './'\n\nimport * as GenActions from '../../../../../../actions/generate'\n\n\nclass BasicField extends Component {\n constructor(props, context) {\n super(props, context)\n\n this.BasicFieldTypes = [\n \"boolean\",\n \"integer\",\n \"number\",\n \"string\"\n ]\n }\n\n change(val) {\n let def_type = this.props.def.type\n switch(def_type) {\n case \"integer\":\n val = parseInt(val, 10) || null\n break;\n case \"number\":\n val = parseFloat(val.replace(\",\", \".\")) || null\n break;\n }\n this.props.optChange(this.msgName, val, this.props.arr ? true : false)\n }\n\n inputOpts(type, format) {\n switch (type) {\n case 'number':\n case \"integer\":\n return {\n type: 'number',\n placeholder: 0,\n }\n case 'boolean':\n return {\n type: 'checkbox',\n style: {\n position: 'inherit',\n marginLeft: 0,\n }\n }\n default:\n return {\n type: 'text'\n }\n }\n }\n\n render() {\n let name = this.props.name || this.props.def.name\n this.msgName = (this.props.parent ? [this.props.parent, name] : [name]).join(\".\")\n\n if (this.BasicFieldTypes.indexOf(this.props.def.type) == -1) { // name is type if not field\n return <Field parent={ this.props.parent } name={ name } def={ this.props.def } optChange={ this.props.optChange } />\n } else {\n let opts = this.inputOpts(this.props.def.type, this.props.def.format)\n return (\n <FormGroup tag=\"fieldset\" className=\"border border-dark p-2\">\n <legend>{ (this.props.required ? '*' : '') + name }</legend>\n <Input\n { ...opts }\n name={ name }\n onChange={ e => this.change(e.target.value) }\n />\n { this.props.def.description ? 
<FormText color=\"muted\">{ this.props.def.description }</FormText> : '' }\n </FormGroup>\n )\n }\n }\n}\n\nconst mapStateToProps = (state) => ({\n schema: state.Generate.selectedSchema\n})\n\nexport default connect(mapStateToProps)(BasicField)\n" }, { "alpha_fraction": 0.627407968044281, "alphanum_fraction": 0.6342355608940125, "avg_line_length": 36.62385177612305, "blob_id": "de10a49f4a243a17c27be1610aad9f411cf60050", "content_id": "9f117cb62415b784dd4db8f8614374f3e1499ccc", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4101, "license_type": "permissive", "max_line_length": 118, "num_lines": 109, "path": "/orchestrator/transport/coap/COAP/coap_client.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import json\n\nfrom coapthon import defines\nfrom coapthon.client.helperclient import HelperClient\nfrom coapthon.messages.option import Option\nfrom coapthon.utils import generate_random_token\n\nfrom sb_utils import encode_msg, safe_cast, Consumer\n\n\nclass CoapClient(HelperClient):\n def post(self, path, payload, callback=None, timeout=5, no_response=False, **kwargs):\n \"\"\"\n Perform a POST on a certain path.\n :param path: the path\n :param payload: the request payload\n :param callback: the callback function to invoke upon response\n :param timeout: the timeout of the request\n :return: the response\n \"\"\"\n request = kwargs.pop(\"request\", self.mk_request(defines.Codes.POST, path))\n request.payload = payload\n request.token = generate_random_token(2)\n request.version = 1\n\n if no_response:\n request.add_no_response()\n request.type = defines.Types[\"NON\"]\n\n for k, v in kwargs.items():\n if hasattr(request, k):\n setattr(request, k, v)\n\n return self.send_request(request, callback, timeout)\n\n\ndef send_coap(body, message):\n \"\"\"\n AMQP Callback when we receive a message from internal buffer to be published\n :param body: Contains the message to be sent.\n :param message: Contains data about the message as well as headers\n \"\"\"\n # Set destination and build requests for multiple potential endpoints.\n for device in message.headers.get(\"destination\", {}):\n host, port = device[\"socket\"].split(\":\", 1)\n encoding = device[\"encoding\"]\n\n # Check necessary headers exist\n if host and port and encoding:\n path = \"transport\"\n client = CoapClient(server=(host, safe_cast(port, int, 5683)))\n request = client.mk_request(defines.Codes.POST, path)\n response = client.post(\n path=path,\n payload=encode_msg(json.loads(body), encoding),\n request=build_request(request, message.headers.get(\"source\", {}), device)\n )\n\n if response:\n print(f\"Response from device: {response}\")\n client.stop()\n else:\n # send error back to orch\n print(f\"Error: not enough data - {host}, {port}, {encoding}\")\n\n\ndef build_request(request, headers, device):\n \"\"\"\n Helper method to organized required headers into the CoAP Request.\n :param request: Request being build\n :param headers: Data from AMQP message which contains data to forward OpenC2 Command.\n :param device: Device specific data from headers sent by O.I.F.\n \"\"\"\n orc_host, orc_port = headers[\"transport\"][\"socket\"].split(\":\", 1) # location of orchestrator-side CoAP server\n request.source = (orc_host, safe_cast(orc_port, int, 5683))\n\n dev_host, dev_port = 
device[\"socket\"].split(\":\", 1) # location of device-side CoAP server\n request.destination = (dev_host, safe_cast(dev_port, int, 5683))\n\n encoding = f\"application/{device['encoding']}\" # Content Serialization\n request.content_type = defines.Content_types[encoding] # using application/json, TODO: add define to openc2+json\n request.mid = int(\"0x\" + headers[\"correlationID\"], 16) # 16-bit correlationID\n request.timestamp = headers[\"date\"] # time message was sent from orchestrator\n\n # Add OIF-unique value used for routing to the desired actuator\n profile = Option()\n profile.number = 8\n profile.value = device.get(\"profile\", \"\")[0]\n request.add_option(profile)\n\n source_socket = Option()\n source_socket.number = 3\n source_socket.value = headers[\"transport\"][\"socket\"]\n request.add_option(source_socket)\n\n return request\n\n\nif __name__ == \"__main__\":\n # Begin consuming messages from internal message queue\n try:\n consumer = Consumer(\n exchange=\"transport\",\n routing_key=\"coap\",\n callbacks=[send_coap]\n )\n except Exception as e:\n print(f\"Consumer error: {e}\")\n consumer.shutdown()\n" }, { "alpha_fraction": 0.6928839087486267, "alphanum_fraction": 0.6928839087486267, "avg_line_length": 19.538461685180664, "blob_id": "6cdb15d3e5c9debb7f854c86c263694d488acdec", "content_id": "b1df89aeafa8113425a2af5d3912b126241a1752", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 267, "license_type": "permissive", "max_line_length": 53, "num_lines": 13, "path": "/orchestrator/core/orc_server/account/views/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from .api import actuatorDelete\nfrom .apiviews import ActuatorAccess\nfrom .viewsets import UserViewSet, UserHistoryViewSet\n\n__all__ = [\n # API\n 'actuatorDelete',\n # APIViews\n 'ActuatorAccess',\n # Viewsets\n 'UserViewSet',\n 'UserHistoryViewSet',\n]\n" }, { "alpha_fraction": 0.5750798583030701, "alphanum_fraction": 0.584664523601532, "avg_line_length": 27.08974266052246, "blob_id": "c0ccf9710a3d08e1f9b6eeec555695d757e9a765", "content_id": "180861e3da3c3291952540b85f86acc80d934500", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2191, "license_type": "permissive", "max_line_length": 136, "num_lines": 78, "path": "/orchestrator/gui/client/src/components/command/pages/generate/lib/jadn_field/choice.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react'\nimport { connect } from 'react-redux'\n\nimport {\n Button,\n Form,\n FormGroup,\n FormText,\n Input,\n Label,\n} from 'reactstrap'\n\nimport {\n isOptional_jadn,\n keys,\n zip,\n Field\n} from './'\n\nimport * as GenActions from '../../../../../../actions/generate'\n\n\nclass ChoiceField extends Component {\n constructor(props, context) {\n super(props, context)\n\n this.handleChange = this.handleChange.bind(this)\n\n this.state = {\n selected: -1,\n selectedBase: ''\n }\n }\n\n handleChange(e) {\n this.setState({\n selected: e.target.value\n }, () => {\n if (this.state.selected == -1) {\n this.props.optChange(this.props.def[1], undefined)\n }\n })\n }\n\n render() 
{\n let name = this.props.name || this.props.def.name\n let msgName = (this.props.parent ? [this.props.parent, name] : [name]).join('.')\n let def_opts = this.props.def.fields.map(opt => <option key={ opt[0] } data-subtext={ opt[2] } value={ opt[0] }>{ opt[1] }</option>)\n\n this.selectedDef = this.props.def.fields.filter(opt => opt[0] == this.state.selected)\n this.selectedDef = this.selectedDef.length === 1 ? zip(keys.Gen_Def, this.selectedDef[0]) : {}\n\n return (\n <FormGroup tag=\"fieldset\" className=\"border border-dark p-2\">\n <legend>{ (isOptional_jadn(this.props.def) ? '' : '*') + name }</legend>\n { this.props.def.desc != '' ? <FormText color=\"muted\">{ this.props.def.desc }</FormText> : '' }\n <div className=\"col-12 my-1 px-0\">\n <Input type=\"select\" name={ name } title={ name } className=\"selectpicker\" onChange={ this.handleChange } default={ -1 }>\n <option data-subtext={ name + ' options' } value={ -1 }>{ name } options</option>\n { def_opts }\n </Input>\n\n <div className=\"col-12 py-2\">\n {\n this.state.selected >= 0 ? <Field def={ this.selectedDef } parent={ msgName } optChange={ this.props.optChange } /> : ''\n }\n </div>\n </div>\n </FormGroup>\n )\n }\n}\n\nconst mapStateToProps = (state) => ({\n schema: state.Generate.selectedSchema\n})\n\nexport default connect(mapStateToProps)(ChoiceField)\n" }, { "alpha_fraction": 0.5712270736694336, "alphanum_fraction": 0.574518084526062, "avg_line_length": 24.327381134033203, "blob_id": "8f697eb4189cf4c6153e753ee2ad9cf8ee34aebb", "content_id": "82dafe02c34ab01ebdb058b7e70a4fb34c9149af", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4254, "license_type": "permissive", "max_line_length": 95, "num_lines": 168, "path": "/logger/gui/src/components/utils/theme-switcher/switcher.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react';\nimport PropTypes from 'prop-types';\nimport { Helmet } from 'react-helmet-async';\nimport validThemes from './themes';\nimport './assets/css/loader.css';\n\nimport * as themeActions from './theme-actions';\n\nconst setItem = (key, obj) => {\n if (!key) return null;\n try {\n localStorage.setItem(key, JSON.stringify(obj));\n } catch (err) {\n return null;\n }\n};\n\nconst getItem = key => {\n if (!key) return null;\n try {\n const item = localStorage.getItem(key);\n return item ? 
JSON.parse(item) : null;\n } catch (err) {\n return null;\n }\n};\n\n//------------------------------------------------------------------------------\n// Top level ThemeSwitcher Component\n//------------------------------------------------------------------------------\nclass ThemeSwitcher extends Component {\n constructor(props, context) {\n super(props, context);\n this.load = this.load.bind(this);\n this.loadTheme = this.loadTheme.bind(this);\n\n const themeOptions = new Set(this.props.themeOptions.filter(t => validThemes.includes(t)));\n\n let defaultTheme = getItem(this.props.storeThemeKey);\n defaultTheme = defaultTheme || this.props.defaultTheme;\n themeOptions.add(defaultTheme);\n\n this.state = {\n currentTheme: defaultTheme,\n themes: this.props.themes || {},\n themeOptions\n };\n\n this.loadTheme(defaultTheme);\n setTimeout(() => {\n themeOptions.forEach(theme => this.loadTheme(theme));\n }, 100);\n }\n\n // pass reference to this down to ThemeChooser component\n getChildContext() {\n return {\n defaultTheme: this.props.defaultTheme,\n themeSwitcher: this,\n themes: [ ...this.state.themeOptions ],\n currentTheme: this.state.currentTheme\n };\n }\n\n async loadTheme(theme) {\n if (!this.state.themeOptions.has(theme)) { return; }\n\n if (!(theme in this.state.themes)) {\n themeActions.loadTheme(theme).then(rsp => {\n this.setState(prevState => ({\n themes: {\n ...prevState.themes,\n [rsp.theme]: rsp.styles\n }\n }));\n });\n }\n }\n\n async load(theme) {\n if (!theme) {\n const storedTheme = getItem(this.props.storeThemeKey);\n // see if a theme was previously stored, will return null if storedThemeKey not set\n // eslint-disable-next-line no-param-reassign\n theme = storedTheme || this.props.defaultTheme;\n }\n\n if (!this.state.themeOptions.has(theme)) { return; }\n\n setItem(this.props.storeThemeKey, theme);\n this.setState({\n currentTheme: theme\n });\n\n if (Object.keys(this.state.themes).indexOf(theme) === -1) {\n return themeActions.loadTheme(theme).then(rsp => {\n this.setState(prevState => ({\n themes: {\n ...prevState.themes,\n [rsp.theme]: rsp.styles\n }\n }));\n });\n }\n }\n\n getContents() {\n if (Object.keys(this.state.themes).length === 0) {\n return (\n <div style={{\n display: 'table',\n position: 'fixed',\n top: 0,\n height: '100%',\n width: '100%'\n }}>\n <div style={{\n display: 'table-cell',\n textAlign: 'center',\n verticalAlign: 'middle'\n }}>\n <div className=\"loader\" />\n <p className='pt-0 mt-0'>Loading...</p>\n </div>\n </div>\n );\n }\n return this.props.children || <span />;\n }\n\n render() {\n return (\n <div>\n <Helmet>\n <style type=\"text/css\" data-type=\"theme\">\n { this.state.themes[this.state.currentTheme] || '' }\n </style>\n </Helmet>\n { this.getContents() }\n </div>\n );\n }\n}\n\nThemeSwitcher.childContextTypes = {\n defaultTheme: PropTypes.string,\n themeSwitcher: PropTypes.instanceOf(ThemeSwitcher),\n themes: PropTypes.array,\n currentTheme: PropTypes.string\n};\n\nThemeSwitcher.propTypes = {\n defaultTheme: PropTypes.string,\n storeThemeKey: PropTypes.string,\n themes: PropTypes.object,\n themeOptions: PropTypes.array,\n children: PropTypes.element\n};\n\nThemeSwitcher.defaultProps = {\n defaultTheme: 'lumen',\n storeThemeKey: null,\n themes: null,\n themeOptions: validThemes,\n children: null\n};\n\nexport default ThemeSwitcher;" }, { "alpha_fraction": 0.7426470518112183, "alphanum_fraction": 0.7426470518112183, "avg_line_length": 67.5, "blob_id": "f1f2009ff22e87b45b47092f45bc7413bb95cad3", "content_id": 
"2127e2e5da579141293825d7ab0b2266092137b8", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 136, "license_type": "permissive", "max_line_length": 116, "num_lines": 2, "path": "/.git_ssh", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\nssh -o IdentitiesOnly=yes -o StrictHostKeyChecking=no -o PasswordAuthentication=yes -i ~/.ssh/id_rsa -F /dev/null $@" }, { "alpha_fraction": 0.6966068148612976, "alphanum_fraction": 0.7085828185081482, "avg_line_length": 21.772727966308594, "blob_id": "42c308d28ca0c5e041efd5714ab9f0590f06df81", "content_id": "7f6f79a6a8202dfed965a103015d5a8043a6b137", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 501, "license_type": "permissive", "max_line_length": 58, "num_lines": 22, "path": "/orchestrator/transport/https/docker_dev_start.sh", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\necho \"Running HTTPS Transport Module.\"\n\ndockerize -wait tcp://$QUEUE_HOST:$QUEUE_PORT -timeout 30s\n\n# Start the first process\necho \"Starting flask app\"\npython3 -u ./HTTPS/main.py &\nstatus=$?\nif [ $status -ne 0 ]; then\n echo \"Failed to start flask app: $status\"\n exit $status\nfi\n\n# Start the second process\necho \"Starting message sender\"\npython3 -u ./HTTPS/https_transport.py\nstatus=$?\nif [ $status -ne 0 ]; then\n echo \"Failed to start message sender: $status\"\n exit $status\nfi\n" }, { "alpha_fraction": 0.5837838053703308, "alphanum_fraction": 0.5848648548126221, "avg_line_length": 27.030303955078125, "blob_id": "9590c4b92ed586cbd457fadc9c249f1f884fe03b", "content_id": "36f6368f8e3fefec1daf9f1d6ab7eb2ca712d40c", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 925, "license_type": "permissive", "max_line_length": 61, "num_lines": 33, "path": "/orchestrator/gui/server/gui_server/webApp/management/commands/loaddata_apps.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import os\nimport sys\n\nfrom django.conf import settings\nfrom django.core.management import ManagementUtility\nfrom django.core.management.base import BaseCommand\nfrom threading import Thread\n\n\nclass Command(BaseCommand):\n \"\"\"\n Custom django command - loaddata_apps\n Load data for the custom apps available to the Django app\n \"\"\"\n def handle(self, *args, **kwargs):\n \"\"\"\n Handle command execution\n :param args:\n :param kwargs:\n :return: None\n \"\"\"\n args = (sys.argv[0], 'loaddata')\n\n for app in settings.INSTALLED_APPS:\n app_dir = os.path.join(settings.BASE_DIR, app)\n if os.path.isdir(app_dir):\n print(f'Loading Fixtures for {app}')\n\n utility = ManagementUtility([*args, app])\n p = Thread(target=utility.execute)\n p.start()\n p.join()\n print('')\n" }, { "alpha_fraction": 0.6330645084381104, "alphanum_fraction": 0.6330645084381104, "avg_line_length": 13.588234901428223, "blob_id": "b66edb02a89430875975e02e46563b36866ef51c", "content_id": 
"b8fbfebe81a29d1fb1a795b3abe41995153752b9", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 248, "license_type": "permissive", "max_line_length": 28, "num_lines": 17, "path": "/orchestrator/core/orc_server/backup/views/__init__.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from .api import (\n backupRoot\n)\n\nfrom .import_export import (\n ActuatorImportExport,\n DeviceImportExport\n)\n\n__all__ = [\n # API\n 'backupRoot',\n # APIViews\n # Import/Export\n 'ActuatorImportExport',\n 'DeviceImportExport'\n]\n" }, { "alpha_fraction": 0.6660467386245728, "alphanum_fraction": 0.6668437719345093, "avg_line_length": 32.60714340209961, "blob_id": "73e340e3a259cf36e3f2e297cc447db8978ee351", "content_id": "ae392296d913ad8a83a32ceae9878d603cd05aef", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3764, "license_type": "permissive", "max_line_length": 130, "num_lines": 112, "path": "/orchestrator/gui/client/src/actions/device.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "// Actions for device API\nimport { RSAA } from 'redux-api-middleware'\nimport { withGUIAuth } from './util'\n\nconst str_fmt = require('string-format')\n\n// API Base URL\nconst baseAPI = '/api/device'\n\n// Helper Functions\n// None\n\n// API Calls\n// GET - /api/device/ - all devices\nconst GET_DEVICES_REQUEST = '@@device/GET_DEVICES_REQUEST'\nexport const GET_DEVICES_SUCCESS = '@@device/GET_DEVICES_SUCCESS'\nexport const GET_DEVICES_FAILURE = '@@device/GET_DEVICES_FAILURE'\nexport const getDevices = ({page=1, count=10, sort='name', refresh=false}={}) => ({\n [RSAA]: {\n endpoint: str_fmt('{base}?page={page}&length={count}&ordering={sort}', {base: baseAPI, page: page, count: count, sort: sort}),\n method: 'GET',\n headers: withGUIAuth(),\n types: [\n GET_DEVICES_REQUEST,\n {\n type: GET_DEVICES_SUCCESS,\n meta: {\n sort: sort,\n refresh: refresh\n }\n },\n GET_DEVICES_FAILURE\n ]\n }\n})\n\n// POST - /api/device/ - create device (name, host, port, protocol, serialization, type)\nconst CREATE_DEVICE_REQUEST = '@@device/CREATE_DEVICE_REQUEST'\nexport const CREATE_DEVICE_SUCCESS = '@@device/CREATE_DEVICE_SUCCESS'\nexport const CREATE_DEVICE_FAILURE = '@@device/CREATE_DEVICE_FAILURE'\nexport const createDevice = (device) => ({\n [RSAA]: {\n endpoint: str_fmt('{base}/', {base: baseAPI}),\n method: 'POST',\n headers: withGUIAuth(),\n body: JSON.stringify(device),\n types: [\n CREATE_DEVICE_REQUEST, CREATE_DEVICE_SUCCESS, CREATE_DEVICE_FAILURE\n ]\n }\n})\n\n// GET - /api/device/{name}/ - specific device\nconst GET_DEVICE_REQUEST = '@@device/GET_DEVICE_REQUEST'\nexport const GET_DEVICE_SUCCESS = '@@device/GET_DEVICE_SUCCESS'\nexport const GET_DEVICE_FAILURE = '@@device/GET_DEVICE_FAILURE'\nexport const getDevice = (deviceUUID) => ({\n [RSAA]: {\n endpoint: str_fmt('{base}/{device}/', {base: baseAPI, device: deviceUUID}),\n method: 'GET',\n headers: withGUIAuth(),\n types: [\n GET_DEVICE_REQUEST, GET_DEVICE_SUCCESS, GET_DEVICE_FAILURE\n ]\n }\n})\n\n// PATCH - /api/device/{name}/ - update specified device\nconst UPDATE_DEVICE_REQUEST = '@@device/UPDATE_DEVICE_REQUEST'\nexport 
const UPDATE_DEVICE_SUCCESS = '@@device/UPDATE_DEVICE_SUCCESS'\nexport const UPDATE_DEVICE_FAILURE = '@@device/UPDATE_DEVICE_FAILURE'\nexport const updateDevice = (deviceUUID, device) => ({\n [RSAA]: {\n endpoint: str_fmt('{base}/{device}/', {base: baseAPI, device: deviceUUID}),\n method: 'PATCH',\n headers: withGUIAuth(),\n body: JSON.stringify(device),\n types: [\n UPDATE_DEVICE_REQUEST, UPDATE_DEVICE_SUCCESS, UPDATE_DEVICE_FAILURE\n ]\n }\n})\n\n// DELETE - /api/device/{name}/ - delete specific device\nconst DELETE_DEVICE_REQUEST = '@@device/DELETE_DEVICE_REQUEST'\nexport const DELETE_DEVICE_SUCCESS = '@@device/DELETE_DEVICE_SUCCESS'\nexport const DELETE_DEVICE_FAILURE = '@@device/DELETE_DEVICE_FAILURE'\nexport const deleteDevice = (deviceUUID) => ({\n [RSAA]: {\n endpoint: str_fmt('{base}/{device}/', {base: baseAPI, device: deviceUUID}),\n method: 'DELETE',\n headers: withGUIAuth(),\n types: [\n DELETE_DEVICE_REQUEST, DELETE_DEVICE_SUCCESS, DELETE_DEVICE_FAILURE\n ]\n }\n})\n\n// GET - /api/device/{name}/users/ - users with access to device\nconst GET_DEVICE_USERS_REQUEST = '@@device/GET_DEVICE_USERS_REQUEST'\nexport const GET_DEVICE_USERS_SUCCESS = '@@device/GET_DEVICE_USERS_SUCCESS'\nexport const GET_DEVICE_USERS_FAILURE = '@@device/GET_DEVICE_USERS_FAILURE'\nexport const getDeviceUsers = (deviceUUID) => ({\n [RSAA]: {\n endpoint: str_fmt('{base}/{device}/users', {base: baseAPI, device: deviceUUID}),\n method: 'GET',\n headers: withGUIAuth(),\n types: [\n GET_DEVICE_USERS_REQUEST, GET_DEVICE_USERS_SUCCESS, GET_DEVICE_USERS_FAILURE\n ]\n }\n})\n" }, { "alpha_fraction": 0.6240601539611816, "alphanum_fraction": 0.6268302202224731, "avg_line_length": 34.843971252441406, "blob_id": "3d4a8eaf72a8a4ab4b40df4f707a5236f1e554fb", "content_id": "4fafcc7ae01686a379c670d05fa7c20a0356bfd9", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5054, "license_type": "permissive", "max_line_length": 128, "num_lines": 141, "path": "/orchestrator/core/orc_server/actuator/views/viewsets.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import bleach\n\nfrom rest_framework import viewsets, filters\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.permissions import IsAdminUser, IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom ..models import Actuator, ActuatorGroup, ActuatorSerializer\n\n\nclass ActuatorViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows Actuators to be viewed or edited.\n \"\"\"\n permission_classes = (IsAuthenticated,)\n serializer_class = ActuatorSerializer\n lookup_field = 'actuator_id'\n\n queryset = Actuator.objects.order_by('name')\n filter_backends = (filters.OrderingFilter,)\n ordering_fields = ('actuator_id', 'name', 'profile', 'type')\n\n permissions = {\n 'create': (IsAdminUser,),\n 'destroy': (IsAdminUser,),\n 'partial_update': (IsAdminUser,),\n 'retrieve': (IsAuthenticated,),\n 'update': (IsAdminUser,),\n }\n\n def get_permissions(self):\n \"\"\"\n Instantiates and returns the list of permissions that this view requires.\n \"\"\"\n return [permission() for permission in self.permissions.get(self.action, self.permission_classes)]\n\n def list(self, request, *args, **kwargs):\n \"\"\"\n Return a list of all actuators that the user has permissions 
for\n        \"\"\"\n        self.pagination_class.page_size_query_param = 'length'\n        self.pagination_class.max_page_size = 100\n\n        queryset = self.filter_queryset(self.get_queryset())\n\n        # TODO: set permissions\n        '''\n        if not request.user.is_staff:  # Standard User\n            user_actuators = ActuatorGroup.objects.filter(users__in=[request.user])\n            user_actuators = list(g.actuators.values_list('name', flat=True) for g in user_actuators)\n            queryset = queryset.filter(name__in=user_actuators)\n        '''  # pylint: disable=pointless-string-statement\n\n        page = self.paginate_queryset(queryset)\n        if page is not None:\n            serializer = self.get_serializer(page, many=True)\n            return self.get_paginated_response(serializer.data)\n\n        serializer = self.get_serializer(queryset, many=True)\n        return Response(serializer.data)\n\n    def retrieve(self, request, *args, **kwargs):\n        \"\"\"\n        Return a specific actuator that the user has permissions for\n        \"\"\"\n        actuator = self.get_object()\n\n        # TODO: set permissions\n        '''\n        if not request.user.is_staff:  # Standard User\n            user_actuators = ActuatorGroup.objects.filter(users__in=[request.user])\n            user_actuators = list(g.actuators.values_list('name', flat=True) for g in user_actuators)\n\n            if actuator is not None and actuator.name not in user_actuators:\n                raise PermissionDenied(detail='User not authorised to access actuator', code=401)\n        '''  # pylint: disable=pointless-string-statement\n\n        serializer = self.get_serializer(actuator)\n        return Response(serializer.data)\n\n    @action(methods=['PATCH'], detail=False)\n    def refresh(self, request, *args, **kwargs):\n        \"\"\"\n        API endpoint that allows Actuator data to be refreshed\n        \"\"\"\n        instance = self.get_object()\n        valid_refresh = ['all', 'info', 'schema']\n        refresh = bleach.clean(kwargs.get('refresh', 'info'))\n\n        if instance is not None:\n            if refresh not in valid_refresh:\n                refresh = 'info'\n\n            # TODO: refresh actuator data\n            # print('Valid instance')\n\n        # print('refresh')\n        return Response({\n            'refresh': refresh\n        })\n\n    @action(methods=['GET'], detail=False)\n    def profile(self, request, *args, **kwargs):\n        \"\"\"\n        API endpoint that allows for Actuator profile retrieval\n        \"\"\"\n        actuator = self.get_object()\n\n        if not request.user.is_staff:  # Standard User\n            actuator_groups = [g.name for g in ActuatorGroup.objects.filter(actuator=actuator).filter(users__in=[request.user])]\n\n            if len(actuator_groups) == 0:\n                raise PermissionDenied(detail='User not authorised to access actuator', code=401)\n\n        rtn = {\n            'schema': actuator.schema\n        }\n\n        return Response(rtn)\n\n    @action(methods=['GET'], detail=False)\n    def users(self, request, *args, **kwargs):\n        \"\"\"\n        API endpoint that allows for Actuator user retrieval\n        \"\"\"\n        actuator = self.get_object()\n\n        if not request.user.is_staff:  # Standard User\n            actuator_groups = [g.name for g in ActuatorGroup.objects.filter(actuator=actuator).filter(users__in=[request.user])]\n\n            if len(actuator_groups) == 0:\n                raise PermissionDenied(detail='User not authorised to access actuator', code=401)\n\n        group_users = [[u.username for u in ag.users.all()] for ag in ActuatorGroup.objects.filter(actuator=actuator)]\n\n        rtn = {\n            'users': sum(group_users, [])\n        }\n\n        return Response(rtn)\n" }, { "alpha_fraction": 0.5225191116333008, "alphanum_fraction": 0.5249027609825134, "avg_line_length": 34.26991271972656, "blob_id": "73e45a9af2ad4f20f28ce5e4e15b9b710d215331", "content_id": "6a5f458094ec0bb70547056ffd5c300aabdc1bdd", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", 
"LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7971, "license_type": "permissive", "max_line_length": 94, "num_lines": 226, "path": "/orchestrator/gui/server/gui_server/utils/orchestrator_api.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import atexit\nimport json\nimport re\nimport websocket\n\nfrom functools import partial\nfrom simple_rest_client.api import API\nfrom simple_rest_client.resource import Resource\n\nfrom sb_utils import FrozenDict, safe_cast\n\n\nclass RootResource(Resource):\n _base_url = \"\"\n actions = dict(\n info=dict(method=\"GET\", url=f\"{_base_url}\"),\n api=dict(method=\"GET\", params={\"format\": \"corejson\"}, url=f\"{_base_url}/schema\")\n )\n\n\nclass AccountResource(Resource):\n _base_url = \"/account\"\n actions = dict(\n info=dict(method=\"GET\", url=f\"{_base_url}\"),\n # Basic CRUD\n list=dict(method=\"GET\", url=f\"{_base_url}\"),\n create=dict(method=\"POST\", url=f\"{_base_url}\"),\n retrieve=dict(method=\"GET\", url=f\"{_base_url}/{{}}\"),\n update=dict(method=\"PUT\", url=f\"{_base_url}/{{}}\"),\n partial_update=dict(method=\"PATCH\", url=f\"{_base_url}/{{}}\"),\n destroy=dict(method=\"DELETE\", url=f\"{_base_url}/{{}}\"),\n # JWT\n jwt=dict(method=\"POST\", url=f\"{_base_url}/jwt\"),\n jwt_refresh=dict(method=\"POST\", url=f\"{_base_url}/jwt/refresh\"),\n jwt_verify=dict(method=\"POST\", url=f\"{_base_url}/jwt/verify\"),\n # User Actuator\n get_actuator=dict(method=\"GET\", url=f\"{_base_url}/{{}}/actuator\"),\n add_actuators=dict(method=\"PUT\", url=f\"{_base_url}/{{}}/actuator\"),\n delete_actuators=dict(method=\"DELETE\", url=f\"{_base_url}/{{}}/actuator/{{}}\"),\n # Password Update\n change_password=dict(method=\"POST\", url=f\"{_base_url}/{{}}/change_password\"),\n # User Commands\n history=dict(method=\"GET\", url=f\"{_base_url}/{{}}/history\"),\n history_command=dict(method=\"POST\", url=f\"{_base_url}/{{}}/history/{{}}\"),\n )\n\n\nclass ActuatorResource(Resource):\n _base_url = \"/actuator\"\n actions = dict(\n info=dict(method=\"GET\", url=f\"{_base_url}\"),\n # Basic CRUD\n list=dict(method=\"GET\", url=f\"{_base_url}\"),\n create=dict(method=\"POST\", url=f\"{_base_url}\"),\n retrieve=dict(method=\"GET\", url=f\"{_base_url}/{{}}\"),\n update=dict(method=\"PUT\", url=f\"{_base_url}/{{}}\"),\n partial_update=dict(method=\"PATCH\", url=f\"{_base_url}/{{}}\"),\n destroy=dict(method=\"DELETE\", url=f\"{_base_url}/{{}}\"),\n # Custom Functions\n profile=dict(method=\"GET\", url=f\"{_base_url}/{{}}/profile\"),\n refresh=dict(method=\"PATCH\", url=f\"{_base_url}/{{}}/refresh\"),\n users=dict(method=\"GET\", url=f\"{_base_url}/{{}}/users\")\n )\n\n\nclass CommandResource(Resource):\n _base_url = \"/command\"\n actions = dict(\n info=dict(method=\"GET\", url=f\"{_base_url}\"),\n # Basic CRUD\n list=dict(method=\"GET\", url=f\"{_base_url}\"),\n create=dict(method=\"POST\", url=f\"{_base_url}\"),\n retrieve=dict(method=\"GET\", url=f\"{_base_url}/{{}}\"),\n update=dict(method=\"PUT\", url=f\"{_base_url}/{{}}\"),\n partial_update=dict(method=\"PATCH\", url=f\"{_base_url}/{{}}\"),\n destroy=dict(method=\"DELETE\", url=f\"{_base_url}/{{}}\"),\n # Send Command\n send=dict(method=\"PUT\", url=f\"{_base_url}/send\")\n )\n\n\nclass DeviceResource(Resource):\n _base_url = \"/device\"\n actions = dict(\n info=dict(method=\"GET\", url=f\"{_base_url}\"),\n # Basic CRUD\n list=dict(method=\"GET\", 
url=f\"{_base_url}\"),\n create=dict(method=\"POST\", url=f\"{_base_url}\"),\n retrieve=dict(method=\"GET\", url=f\"{_base_url}/{{}}\"),\n update=dict(method=\"PUT\", url=f\"{_base_url}/{{}}\"),\n partial_update=dict(method=\"PATCH\", url=f\"{_base_url}/{{}}\"),\n destroy=dict(method=\"DELETE\", url=f\"{_base_url}/{{}}\"),\n # Custom Functions\n users=dict(method=\"GET\", url=f\"{_base_url}/{{}}/users\")\n )\n\n\nclass LogResource(Resource):\n _base_url = \"/log\"\n actions = dict(\n events=dict(method=\"GET\", url=f\"{_base_url}/event\"),\n event=dict(method=\"GET\", url=f\"{_base_url}/event/{{}}\"),\n requests=dict(method=\"GET\", url=f\"{_base_url}/request/{{}}\"),\n request=dict(method=\"GET\", url=f\"{_base_url}/request/{{}}\")\n )\n\n\nclass OrchestratorAPI(object):\n def __init__(self, root=\"http://localhost:8080\", ws=False):\n ws = ws if isinstance(ws, bool) else False\n self._root_url = root if root.endswith(\"/\") else f\"{root}/\"\n self._socket_url = re.sub(r\"^https?\", \"ws\", self._root_url)\n\n self._webSocket = None\n\n self._api = dict(\n root=RootResource,\n account=AccountResource,\n actuator=ActuatorResource,\n command=CommandResource,\n device=DeviceResource,\n log=LogResource\n )\n\n self.api = self._socket() if ws else self._rest()\n atexit.register(self._close)\n\n def __getattr__(self, item):\n if item in self.api:\n return self.api[item]\n else:\n super(OrchestratorAPI, self).__getattribute__(item)\n\n def _rest(self):\n api = API(\n api_root_url=self._root_url, # base api url\n headers={ # default headers\n \"Content-Type\": \"application/json\"\n },\n timeout=2, # default timeout in seconds\n append_slash=True, # append slash to final url\n json_encode_body=True, # encode body as json\n )\n for name, cls in self._api.items():\n api.add_resource(resource_name=name, resource_class=cls)\n\n _api = {}\n for resource in api.get_resource_list():\n res = getattr(api, resource)\n _api[resource] = FrozenDict({act: getattr(res, act) for act in res.actions})\n\n return FrozenDict(_api)\n\n def _socket(self):\n self._webSocket = websocket.create_connection(self._socket_url, timeout=2)\n init_msg = self._webSocket.recv()\n\n api = dict()\n for name, cls in self._api.items():\n res = {}\n for act, args in getattr(cls, \"actions\", {}).items():\n res[act] = partial(self._socketMsg, act, args)\n api[name] = FrozenDict(res)\n return api\n\n def _socketMsg(self, action, act_args, *args, **kwargs):\n auth = kwargs.get(\"headers\", {}).get(\"Authorization\", \"\")\n token = re.sub(r\"^JWT\\s+\", \"\", auth) if auth.startswith(\"JWT\") else \"\"\n\n url = f\"api{act_args['url'].format(*args)}\"\n url_params = act_args.get('params', {})\n if len(url_params) > 0:\n url += f\"?{'&'.join(f'{k}={v}' for k, v in url_params.items())}\"\n\n rtn = dict(\n body={},\n method=act_args[\"method\"],\n status_code=500,\n url=f\"{self._root_url}{url}\",\n # Extra Options\n meta={}\n )\n\n try:\n self._webSocket.send(json.dumps(dict(\n endpoint=url,\n method=act_args[\"method\"],\n jwt=token,\n data=kwargs.get(\"body\", {}),\n types=dict(\n success=f\"@@socket/{action.upper()}_SUCCESS\",\n failure=f\"@@socket/{action.upper()}_FAILURE\"\n )\n )))\n\n try:\n rslt = json.loads(self._webSocket.recv())\n except ValueError as e:\n rslt = {}\n\n rtn.update(\n body=rslt.get('payload', {}),\n status_code=safe_cast(rslt.get('meta', {}).get(\"status_code\", 200), int, 200),\n # Extra Options\n meta=rslt.get('meta', {})\n )\n return FrozenDict(rtn)\n\n except Exception as e:\n print(e)\n rtn.update(\n 
status_code=500,\n )\n return FrozenDict(rtn)\n\n def _close(self):\n if hasattr(self._webSocket, 'close'):\n try:\n self._webSocket.close()\n except Exception as e:\n print(f\"{e.__class__.__name__} - {e}\")\n\n\n__all__ = [\n \"OrchestratorAPI\"\n]\n" }, { "alpha_fraction": 0.7590159773826599, "alphanum_fraction": 0.7637926936149597, "avg_line_length": 82.73999786376953, "blob_id": "02a609efb3b37c75fdacb18cd13cffcc8ad8cb3c", "content_id": "1b75310a239e88a6af766d302d14941e3d9fd1e5", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4187, "license_type": "permissive", "max_line_length": 291, "num_lines": 50, "path": "/docs/OIF-FAQ.md", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "# OpenC2 Integration Framework FAQ\n\n_Updated 21 April 2020_\n\n* What is OIF?\n * A general-purpose prototyping and testing environment for OpenC2 that includes an Orchestrator (OpenC2 Producer) for generating commands, and a Device (OpenC2 Consumer) that can host one or more Actuators to process and respond to commands.\n* What language(s) is OIF implemented in?\n * Orchestrator (Producer)\n * Python (Django RESTful) for the server\n * Javascript (React) for the GUI\n * Device (Consumer)\n * Python\n* What external components are required / incorporated?\n * The OIF is built using Docker and as such the only requirement for using OIF is Docker\n * The OIF GUI is built using a standard JavaScript framework and is supported by modern browsers.\n * For the Orchestrator?\n * See ReadMe files of the OIF Orchestrator [services](https://github.com/oasis-open/openc2-oif-orchestrator/tree/master/orchestrator) \n * For the Device?\n * See ReadMe files of the OIF Device [services](https://github.com/oasis-open/openc2-oif-device/tree/master/device) \n* What OSes are supported for hosting the Docker images?\n * Hosting Docker images follows the same requirements as Docker. 
The Docker Registry is a container or set of containers depending on how it is installed/implemented by an individual\n    * This is not necessary as the containers are hosted on DockerHub\n    * DOCKERHUB URL: TBSL (pending development infrastructure updates)\n* What configuration is required once Docker images are installed?\n  * Docker configuration is dependent on the system it has been installed on.\n  * The configuration of the OIF Orchestrator/Device is handled by the docker-compose file\n  * Network IP assignments are irrelevant as that will be internal to Docker\n  * The ONLY IP required is that of the host (system running Docker) and the ports associated with the OIF Orchestrator/Device\n  * Example: A Docker host with core services would utilize a different configuration as compared to a development host.\n* What transfer protocols are supported Orchestrator-to-Device?\n  * Currently the OASIS-standard protocol is [HTTPS](https://docs.oasis-open.org/openc2/open-impl-https/v1.0/open-impl-https-v1.0.html), however CoAP and MQTT have beta implementations within OIF\n* What would be required to add a new transfer protocol?\n  * See [transport docs](https://github.com/oasis-open/openc2-oif-orchestrator/blob/master/docs/Transport.md)\n* What message encodings are supported Orchestrator-to-Device?\n  * Currently the only OASIS-approved serialization is JSON\n  * OIF supports schemaless serializations and has reference implementations for [BINN](https://github.com/liteserver/binn/blob/master/spec.md), [BSON](http://bsonspec.org/), [CBOR](https://cbor.io), [MessagePack](https://msgpack.org), S-Expression, XML, [UBJSON](http://ubjson.org/), YAML\n* What would be required to add a new message encoding?\n  * See [serialization docs](https://github.com/oasis-open/openc2-oif-orchestrator/blob/master/docs/Serializations.md)\n* How would a user integrate their own actuator for testing?\n  * See Orchestrator/docs/Orchestrator.md#registration\n  * Note: The actuator may need to be registered twice under OIF as both a Device and Actuator\n* What certificates are being used for HTTPS message transfer?\n  * Certificates used are self-signed by default with the ability to use custom certs, see the Orchestrator's [HTTPS transport ReadMe](https://github.com/oasis-open/openc2-oif-orchestrator/tree/master/orchestrator/transport/https)\n* What security is applied to MQTT message transfer?\n  * Currently there is no security implemented\n* How many OIF Devices can an OIF Orchestrator command?\n  * Currently there is no defined limit, further usability testing is required to determine this\n* How many OIF Orchestrators can an OIF Device respond to?\n  * An OIF device responds to the OIF Orchestrator that sent the command\n  * In the current implementation of OIF, there is no concept of linking a device to an orchestrator. 
So, a device can and will respond to ANY orchestrator that it receives a command from.\n" }, { "alpha_fraction": 0.5291500687599182, "alphanum_fraction": 0.532832145690918, "avg_line_length": 32.42564010620117, "blob_id": "42269047674f8868b1c4b2467cf3810062056992", "content_id": "2c250d03835f70c2aa97268ebcb19ed80c1a0101", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6518, "license_type": "permissive", "max_line_length": 109, "num_lines": 195, "path": "/orchestrator/core/orc_server/conformance/tests/utils.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "\"\"\"\nUnittest Utilities\n\"\"\"\nimport inspect\nimport os\nimport unittest\n\nfrom typing import (\n Dict,\n List,\n Tuple,\n Union\n)\n\nfrom .test_setup import SetupTestSuite, SetupTestCase\n\ntest_dirs = [\n os.path.dirname(os.path.realpath(__file__)), # Local Dir\n # \"./tests\" # Custom Dir\n]\n\n\ndef inherits_from(child, parents: Union[Tuple[type, ...], type]):\n parents = tuple(p.__name__ for p in ((parents, ) if isinstance(parents, type) else parents))\n if inspect.isclass(child):\n bases = [c.__name__ for c in inspect.getmro(child)[1:]]\n if any([p in bases for p in parents]):\n return True\n return False\n\n\ndef load_test_suite() -> SetupTestSuite:\n suite = SetupTestSuite()\n for d in test_dirs:\n suite.addTests(unittest.defaultTestLoader.discover(start_dir=d, pattern=f\"*_tests.py\"))\n return get_tests(suite)\n\n\ndef tests_in_suite(suite: unittest.TestSuite) -> Dict[str, Dict[str, Union[str, Dict[str, str]]]]:\n rtn = {}\n for test in suite:\n if unittest.suite._isnotsuite(test):\n t = test.id().split(\".\")[-2:]\n f = getattr(test, t[1])\n rtn.setdefault(t[0], dict(\n profile=test.profile,\n doc=(getattr(test, \"__doc__\", \"\") or \"\").strip(),\n tests={}\n ))[\"tests\"][t[1]] = (getattr(f, \"__doc__\", \"\") or \"\").strip()\n else:\n rtn.update(tests_in_suite(test))\n return rtn\n\n\ndef _load_tests(s: unittest.TestSuite, t: Union[Dict[str, List[str]], List[str]], **kwargs) -> list:\n rtn = []\n test_list = []\n if isinstance(t, dict):\n for c, ts in t.items():\n c = c if c.endswith(\"_UnitTests\") else f\"{c}_UnitTests\"\n test_list.extend([f\"{c}{f'.{f}' if f else ''}\" for f in ts] if ts else [c])\n else:\n test_list.extend(t)\n\n for test in s:\n if unittest.suite._isnotsuite(test):\n f = test.id().split(\".\")[-2:]\n cls = test.__class__\n if not inherits_from(cls, SetupTestCase):\n cls = type(\n cls.__name__,\n (SetupTestCase, ),\n {\n \"__doc__\": getattr(cls, \"__doc__\", \"\"),\n \"profile\": getattr(cls, \"profile\", \"Unknown\"),\n f[1]: getattr(test.__class__, f[1])\n }\n )\n for t in test_list:\n t = t.split(\".\")\n if (t[0] == f[0] and len(t) == 1) or (t[0] == f[0] and t[1] == f[1]):\n rtn.append(cls(f[1], **kwargs))\n else:\n rtn.extend(_load_tests(test, test_list))\n return rtn\n\n\ndef get_tests(suite: unittest.TestSuite, tests: Dict[str, List[str]] = None, **kwargs) -> SetupTestSuite:\n tests = tests or {k: [] for k in tests_in_suite(suite)}\n rtn = SetupTestSuite(**kwargs)\n rtn.addTests(_load_tests(suite, tests, **kwargs))\n return rtn\n\n\nclass TestResults(unittest.TextTestResult):\n _testReport: dict\n\n def __init__(self, stream, descriptions, verbosity):\n super().__init__(stream, descriptions, verbosity)\n self._testReport = {}\n\n def getReport(self, verbose: 
bool = False) -> dict:\n \"\"\"\n Returns the run tests as a list of the form of a dict\n \"\"\"\n rtn = dict(\n stats=dict(\n Overall=self._getStats(self._testReport, True)\n )\n )\n\n for profile, tests in self._testReport.items():\n rtn[profile] = {}\n for key, val in tests.items():\n if verbose:\n rtn[profile][key] = {k: v if isinstance(v, str) else \"\" for k, v in val.items()}\n else:\n rtn[profile][key] = list(val.keys())\n rtn[\"stats\"][profile] = self._getStats(rtn[profile])\n\n print(\"\")\n return rtn\n\n def addError(self, test: unittest.case.TestCase, err) -> None:\n super().addError(test, err)\n self._addReport(\"error\", test, err)\n\n def addFailure(self, test: unittest.case.TestCase, err) -> None:\n super().addFailure(test, err)\n self._addReport(\"failure\", test, err)\n\n def addSuccess(self, test: unittest.case.TestCase) -> None:\n super().addSuccess(test)\n self._addReport(\"success\", test)\n\n def addExpectedFailure(self, test: unittest.case.TestCase, err) -> None:\n super().addExpectedFailure(test, err)\n self._addReport(\"expected_failure\", test, err)\n\n def addSkip(self, test: unittest.case.TestCase, reason: str) -> None:\n super().addSkip(test, reason)\n self._addReport(\"skipped\", test, reason)\n\n def addUnexpectedSuccess(self, test: unittest.case.TestCase) -> None:\n super().addUnexpectedSuccess(test)\n self._addReport(\"unexpected_success\", test)\n\n def addSubTest(self, test, subtest, err):\n subparams = \", \".join([f\"{k}='{v}'\" for k, v in subtest.params.items()])\n subtest._testMethodName = f\"{test._testMethodName} subTest({subparams})\"\n subtest.profile = test.profile\n if err is None:\n self.addSuccess(subtest)\n else:\n self.addFailure(subtest, err)\n\n super(TestResults, self).addSubTest(test, subtest, err)\n # add to total number of tests run\n self.testsRun += 1\n\n # Helper Functions\n def _addReport(self, category: str, test: unittest.case.TestCase, err: Union[tuple, str] = None) -> None:\n profile = getattr(test, \"profile\", \"Unknown\")\n val = err or test\n if isinstance(val, tuple):\n exctype, value, _ = err\n val = f\"{exctype.__name__}: {value}\"\n\n self._testReport.setdefault(profile, {}).setdefault(category, {})[test._testMethodName] = val\n\n def _getStats(self, results: dict, overall: bool = False) -> Dict[str, int]:\n stats = (\"error\", \"failure\", \"success\", \"expected_failure\", \"skipped\", \"unexpected_success\")\n rtn = dict(\n total=0,\n error=0,\n failure=0,\n success=0,\n expected_failure=0,\n skipped=0,\n unexpected_success=0\n )\n\n if overall:\n for p in results:\n for s in stats:\n c = len(results[p].get(s, {}))\n rtn[s] += c\n rtn[\"total\"] += c\n else:\n for s in stats:\n c = len(results.get(s, {}))\n rtn[s] += c\n rtn[\"total\"] += c\n\n return rtn\n" }, { "alpha_fraction": 0.5651363134384155, "alphanum_fraction": 0.5686320066452026, "avg_line_length": 24.85542106628418, "blob_id": "7c41ec10573afb122c7c362d3b0cb04ede8c2f17", "content_id": "8789f5c8e2e662977a5915efe7b20b0df72f9956", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4291, "license_type": "permissive", "max_line_length": 229, "num_lines": 166, "path": "/orchestrator/gui/client/src/components/utils/theme-switcher/switcher.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 
'react'\nimport { Helmet } from 'react-helmet-async'\nimport PropTypes from 'prop-types'\nimport validThemes from './themes'\nimport './assets/css/loader.css'\n\nimport { sleep } from '../'\n\nconst setItem = (key, obj) => {\n if (!key) return null;\n try {\n localStorage.setItem(key, JSON.stringify(obj));\n } catch (err) {\n return null;\n }\n}\n\nconst getItem = (key) => {\n if (!key) return null;\n try {\n let item = localStorage.getItem(key)\n return item ? JSON.parse(item) : null;\n } catch (err) {\n return null;\n }\n}\n\n//------------------------------------------------------------------------------\n// Top level ThemeSwitcher Component\n//------------------------------------------------------------------------------\nclass ThemeSwitcher extends Component {\n constructor(props, context) {\n super(props, context);\n this.load = this.load.bind(this);\n this.loadTheme = this.loadTheme.bind(this);\n\n let validThemes = new Set(this.props.themeOptions)\n\n let defaultTheme = getItem(this.props.storeThemeKey)\n defaultTheme = defaultTheme ? defaultTheme : this.props.defaultTheme\n validThemes.add(defaultTheme)\n\n this.state = {\n currentTheme: defaultTheme,\n themes: {},\n validThemes: validThemes\n }\n\n this.loadTheme(defaultTheme)\n setTimeout(() => {\n for (let theme of validThemes) {\n this.loadTheme(theme)\n }\n }, 100)\n }\n\n async loadTheme(theme) {\n if (!this.state.validThemes.has(theme)) {\n return\n }\n\n if (Object.keys(this.state.themes).indexOf(theme) == -1) {\n return await fetch(window.location.origin + \"/assets/css/\" + theme + \".css\")\n .then(rsp => rsp.text())\n .then(data => {\n this.setState(prevState => ({\n themes: {\n ...prevState.themes,\n [theme]: data\n }\n }))\n }).catch(err => {\n console.error(err)\n })\n }\n }\n\n async load(theme) {\n if (!theme) {\n let storedTheme = getItem(this.props.storeThemeKey)\n // see if a theme was previously stored, will return null if storedThemeKey not set\n theme = storedTheme ? 
storedTheme : this.props.defaultTheme\n }\n\n if (!this.state.validThemes.has(theme)) { return }\n\n setItem(this.props.storeThemeKey, theme)\n this.setState({\n currentTheme: theme\n })\n if (Object.keys(this.state.themes).indexOf(theme) == -1) {\n return await this.loadTheme(theme)\n }\n }\n\n // pass reference to this down to ThemeChooser component\n getChildContext() {\n return {\n defaultTheme: this.props.defaultTheme,\n themeSwitcher: this,\n themes: [...this.state.validThemes],\n currentTheme: this.state.currentTheme\n }\n }\n\n getContents() {\n if (Object.keys(this.state.themes).length === 0) {\n return (\n <div style={{\n display: 'table',\n position: 'fixed',\n top: 0,\n height: '100%',\n width: '100%'\n }}>\n <div style={{\n display: 'table-cell',\n textAlign: 'center',\n verticalAlign: 'middle'\n }}>\n <div className=\"loader\" />\n <p className='pt-0 mt-0'>Loading...</p>\n </div>\n </div>\n )\n } else {\n return this.props.children || <span />\n }\n }\n\n render() {\n return (\n <div>\n <Helmet>\n <style type=\"text/css\" data-type=\"theme\">\n { this.state.themes[this.state.currentTheme] || \"\" }\n </style>\n </Helmet>\n { this.getContents() }\n </div>\n )\n }\n}\n\nThemeSwitcher.childContextTypes = {\n defaultTheme: PropTypes.string,\n themeSwitcher: PropTypes.object,\n themes: PropTypes.array,\n currentTheme: PropTypes.string\n};\n\nThemeSwitcher.propTypes = {\n defaultTheme: PropTypes.string,\n storeThemeKey: PropTypes.string,\n themes: PropTypes.object,\n themeOptions: PropTypes.array\n};\n\nThemeSwitcher.defaultProps = {\n defaultTheme: 'lumen',\n storeThemeKey: null,\n themes: null,\n themeOptions: ['cerulean', 'cosmo', 'cyborg', 'darkly', 'flatly', 'journal', 'litera', 'lumen', 'lux', 'materia', 'minty', 'pulse', 'sandstone', 'simplex', 'sketchy', 'slate', 'solar', 'spacelab', 'superhero', 'united', 'yeti']\n};\n\nexport default ThemeSwitcher;" }, { "alpha_fraction": 0.8461538553237915, "alphanum_fraction": 0.8461538553237915, "avg_line_length": 38, "blob_id": "b55593bc40b9d21b1197ce154faca5d78cb27bde", "content_id": "310f2e6f19a35ff82da0eb60aacda322bea23d48", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 117, "license_type": "permissive", "max_line_length": 46, "num_lines": 3, "path": "/orchestrator/gui/server/gui_server/webApp/models.py", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "from django.conf import settings\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n" }, { "alpha_fraction": 0.4951609671115875, "alphanum_fraction": 0.5058265924453735, "avg_line_length": 29.5, "blob_id": "0478f52f63b3de6ac2c752fe9214d6ccab51bf77", "content_id": "3b7c95604400033be565ebb929803ef1b65766cb", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5063, "license_type": "permissive", "max_line_length": 188, "num_lines": 166, "path": "/logger/gui/src/app.js", "repo_name": "g2-inc/openc2-oif-orchestrator", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react';\nimport {\n ActionBar,\n ActionBarRow,\n Hits,\n HitsStats,\n InitialLoader,\n NoHits,\n PageSizeSelector,\n Pagination,\n RefinementListFilter,\n 
ResetFilters,\n SearchBox,\n SearchkitManager,\n SearchkitProvider,\n Select,\n SelectedFilters,\n SortingSelector\n} from 'searchkit';\n\nimport { LogItem } from './components/lib';\nimport { ThemeChooser } from './components/utils';\n\nclass App extends Component {\n constructor(props, context) {\n super(props, context);\n\n this.searchkit = new SearchkitManager('/api/');\n this.searchkit.addDefaultQuery(query => query);\n this.refreshInterval = null;\n\n this.searchkit.translateFunction = key => {\n return {\n 'pagination.next': 'Next Page',\n 'pagination.previous': 'Previous Page'\n }[key];\n };\n\n this.themeOptionStyles = {\n position: 'fixed',\n bottom: '5px',\n right: '5px'\n };\n }\n\n componentDidMount() {\n this.refreshInterval = setInterval(this.refresh.bind(this), 5000);\n }\n\n componentWillUnmount() {\n clearInterval(this.refreshInterval);\n }\n\n refresh() {\n try {\n this.searchkit.performSearch();\n } catch (err) {\n console.error('Cannot connect to log data store');\n }\n }\n\n render() {\n return (\n <SearchkitProvider id=\"contents\" searchkit={ this.searchkit }>\n <nav className=\"navbar navbar-expand-sm fixed-top navbar-dark bg-dark\">\n <div className=\"container-fluid\">\n <a className=\"navbar-brand\" href=\"/\">Logger</a>\n <button className=\"navbar-toggler\" type=\"button\" data-toggle=\"collapse\" data-target=\"#searchNav\" aria-controls=\"searchNav\" aria-expanded=\"false\" aria-label=\"Toggle navigation\">\n <span className=\"navbar-toggler-icon\" />\n </button>\n\n <div className=\"collapse navbar-collapse\" id=\"searchNav\">\n <SearchBox\n translations={{'searchbox.placeholder': 'search logs'}}\n queryOptions={{'minimum_should_match': '50%'}}\n searchOnChange\n queryFields={['appname^1', 'severity^2', 'msg^3']}\n />\n </div>\n </div>\n </nav>\n\n <div className=\"container-fluid\" style={{ marginTop: '100px'}}>\n <div className=\"row mx-auto\">\n <div className=\"col-md-2 col-12\">\n <div className=\"pb-3 mb-3 border-bottom\">\n <h5>App</h5>\n <RefinementListFilter\n id=\"appnames\"\n field=\"appname.keyword\"\n operator=\"OR\"\n size={ 10 }\n />\n </div>\n <div className=\"pb-3 mb-3 border-bottom\">\n <h5>Severity</h5>\n <RefinementListFilter\n id=\"severity\"\n field=\"severity.keyword\"\n operator=\"OR\"\n size={ 10 }\n />\n </div>\n <div className=\"pb-3 mb-3 border-bottom\">\n <h5>Source</h5>\n <RefinementListFilter\n id=\"log_source\"\n field=\"hostname.keyword\"\n operator=\"OR\"\n size={ 10 }\n />\n </div>\n </div>\n\n <div className=\"col-12 m-2 d-md-none\" />\n\n <div className=\"col-md-10 col-12\">\n <ActionBar mod=\"row mx-auto\">\n <ActionBarRow>\n <SelectedFilters />\n <ResetFilters />\n </ActionBarRow>\n\n <ActionBarRow>\n <HitsStats translations={{'hitstats.results_found': '{hitCount} results found'}} />\n <PageSizeSelector\n showNumbers\n options={[10, 20, 30, 40, 50]}\n />\n <SortingSelector\n options={[\n {label: 'Most Recent', field: 'timestamp', order: 'desc', defaultOption: true},\n {label: 'App', field: 'appname.keyword', order: 'asc'},\n {label: 'Severity', field: 'severity.keyword', order: 'asc'}\n ]}\n />\n </ActionBarRow>\n </ActionBar>\n\n <Hits hitsPerPage={ 10 } sourceFilter={ ['appname', 'severity', 'timestamp', 'msg'] } listComponent={ LogItem } />\n\n <NoHits\n translations={{\n 'NoHits.NoResultsFound': 'No logs found were found for {query}',\n 'NoHits.DidYouMean': 'Search for {suggestion}',\n 'NoHits.SearchWithoutFilters': 'Search for {query} without filters'\n }}\n suggestionsField='appname'\n />\n\n <InitialLoader />\n\n 
<Pagination showNumbers={ true } />\n </div>\n </div>\n </div>\n\n <div style={ this.themeOptionStyles }>\n <ThemeChooser size='sm' />\n </div>\n </SearchkitProvider>\n );\n }\n}\n\nexport default App;\n" } ]
262
xji3/clemensCode
https://github.com/xji3/clemensCode
b0956e2f7b11b09a115a6c2a12072bf274153b1e
ae66ca5c4c93ed8d64edbdeaeab206cb93c93674
4656c57475dd30375d63002b19333ba993e85cfe
refs/heads/master
2020-04-14T15:08:56.808628
2013-10-10T20:03:28
2013-10-10T20:03:28
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4897187054157257, "alphanum_fraction": 0.500017523765564, "avg_line_length": 48.56076431274414, "blob_id": "c65e6f20ce393669c33c454e8d71ff3613101390", "content_id": "20d08184dc8d85909a9bb81c0316f24d203f8ec6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28547, "license_type": "no_license", "max_line_length": 189, "num_lines": 576, "path": "/protein.py", "repo_name": "xji3/clemensCode", "src_encoding": "UTF-8", "text": "from __future__ import division\nimport Bio.PDB\nfrom collections import defaultdict\nimport os.path, gzip\nfrom geneticCode import *\nimport itertools, numpy, string, subprocess, re\nimport math as math\nimport residueDepth as ResDepth\n\n#-------------------------------------------------------------------------------\nclass Protein:\n#-------------------------------------------------------------------------------\n def __init__(self, id, aa, ds_dir,criterion):\n print 'PDB:', id,\n self.out_dir = ds_dir\n self.pdb_ID = id\n self.pdb_AA = aa # the fasta sequence\n self.pdb_structure_AA = '' # the structure seq aligned to the fasta seq\n self.ccds_match = []\n self.hasMatch = False\n self.crit=criterion\n self.parsePDBfile()\n\n#-------------------------------------------------------------------------------\n def printFastaSequences(self):\n with gzip.open(self.out_dir + self.pdb_ID + '/' + self.pdb_ID + '_ccds.fasta.gz', 'w') as f_fasta:\n for a in self.ccds_match:\n f_fasta.write('>' + a.ccds_ID + '_protein\\n')\n f_fasta.write(a.ccds_AA + '\\n')\n\n f_fasta.write('>' + a.ccds_ID + '_dna\\n')\n f_fasta.write(a.ccds_DNA + '\\n')\n\n#-------------------------------------------------------------------------------\n def printPDBfasta2StructureAlignment(self):\n with gzip.open(self.out_dir + self.pdb_ID + '/' + self.pdb_ID + '_fasta2struct.fasta.gz', 'w') as f_fasta:\n f_fasta.write('>' + self.pdb_ID + '_fasta\\n')\n f_fasta.write(self.pdb_AA + '\\n')\n\n f_fasta.write('>' + self.pdb_ID + '_pdb_structure_residues\\n')\n f_fasta.write(self.pdb_structure_AA + '\\n')\n\n#-------------------------------------------------------------------------------\n def printCCDS2PDBAlignments(self):\n with gzip.open(self.out_dir + self.pdb_ID + '/' + self.pdb_ID + '_best_ungapped.fasta.gz', 'w') as f_fasta:\n ccds = self.ccds_match[0]\n \n tmpstr = ccds.ccds_AA[:ccds.ccds_local_alignment_start] + string.replace(ccds.ccds_alignedAA[:ccds.ungapped_segment_start], '-', '')\n idx = 3*len(tmpstr)\n offset = str(idx + 1)\n f_fasta.write('>' + ccds.ccds_ID + '_to_' + self.pdb_ID + '_fasta_longest_ungapped_segment' + '_Starting_DNA_CCDS_pos_' + offset + '\\n')\n f_fasta.write(ccds.ccds_DNA[idx:idx+3*ccds.ungapped_segment_length] + '\\n')\n\n offset = str(ccds.ungapped_segment_start + ccds.ccds_local_alignment_start + 1)\n f_fasta.write('>' + ccds.ccds_ID + '_to_' + self.pdb_ID + '_fasta_longest_ungapped_segment' + '_Starting_CCDS_pos_' + offset + '\\n')\n f_fasta.write(ccds.ccds_alignedAA[ccds.ungapped_segment_start:ccds.ungapped_segment_start+ccds.ungapped_segment_length] + '\\n')\n\n i1 = string.find(self.pdb_AA, ccds.pdb_alignedAA[ccds.ungapped_segment_start:ccds.ungapped_segment_start+ccds.ungapped_segment_length])\n i2 = i1 + ccds.ungapped_segment_length\n f_fasta.write('>' + self.pdb_ID + '_to_' + ccds.ccds_ID + '_longest_ungapped_segment' + '_Starting_aligned_PDB_struct_pos_' + str(i1 + 1) + '\\n')\n f_fasta.write(self.pdb_structure_AA[i1:i2] + 
'\\n')\n\n#-------------------------------------------------------------------------------\n def printLocalCCDSalignments(self):\n with gzip.open(self.out_dir + self.pdb_ID + '/' + self.pdb_ID + '_fasta2CCDS.fasta.gz', 'w') as f_fasta:\n for ccds in self.ccds_match:\n offset = str(ccds.ccds_local_alignment_start + 1)\n f_fasta.write('>' + ccds.ccds_ID + '_locally_aligned_to_' + self.pdb_ID + '_fasta' + '_starting_CCDS_pos_' + offset + '\\n')\n f_fasta.write(ccds.ccds_alignedAA + '\\n')\n \n offset = str(ccds.pdb_local_alignment_start + 1)\n f_fasta.write('>' + self.pdb_ID + '_fasta_locally_aligned_to_' + ccds.ccds_ID + '_starting_fasta_pos_' + offset + '\\n')\n f_fasta.write(ccds.pdb_alignedAA + '\\n')\n\n#-------------------------------------------------------------------------------\n def printDMat(self):\n ccds = self.ccds_match[0]\n r_from = string.find(self.pdb_AA, ccds.pdb_alignedAA[ccds.ungapped_segment_start:ccds.ungapped_segment_start+ccds.ungapped_segment_length])\n r_to = r_from + ccds.ungapped_segment_length\n self.printSubDMatrix(r_from, r_to)\n\n#-------------------------------------------------------------------------------\n def printMismatches(self):\n ccds = self.ccds_match[0]\n with gzip.open(self.out_dir + self.pdb_ID + '/' + self.pdb_ID + '.mm.gz', 'w') as f:\n start = ccds.ungapped_segment_start\n end = start + ccds.ungapped_segment_length\n ctr = 0\n for k, v in sorted(ccds.mismatch_or_gap.items()):\n if k >= start and k < end:\n ctr = ctr + 1\n f.write(str(ctr) + ' ' + self.pdb_ID + ' ' + ccds.ccds_ID + '\\n')\n for k, v in sorted(ccds.mismatch_or_gap.items()):\n if k >= start and k < end:\n\n f.write(str(k - ccds.ungapped_segment_start + 1) + '\\t')\n for i in range(len(v)-1):\n f.write(v[i] + '\\t')\n f.write(v[i+1] + '\\n')\n\n#-------------------------------------------------------------------------------\n def printSubDMatrix(self, r_from, r_to):\n assert(r_from <= r_to)\n\n dmat = self.calcDistMatrix()\n\n # Test version\n# with gzip.open(self.out_dir + self.pdb_ID + '/' + self.pdb_ID + '_' + 'v2.pw.gz', 'w') as f:\n# for i in range(r_from, r_to):\n# line = [ dmat[i, j] for j in range(r_from, r_to) ]\n# f.writelines([ \"%8.4f\\t\" % i for i in line ])\n# f.write('\\n')\n\n # Version for Jeff\n ccds = self.ccds_match[0] #alright, no loops here @Xiang\n tmpstr = ccds.ccds_AA[:ccds.ccds_local_alignment_start] + string.replace(ccds.ccds_alignedAA[:ccds.ungapped_segment_start], '-', '')\n idx = 3*len(tmpstr) # Maybe should record this location in the future, it's used several times @Xiang\n tmpstr = ccds.ccds_DNA[idx:idx+3*ccds.ungapped_segment_length]\n\n tmpstr2 = ccds.pdb_aligned_structure_AA[ccds.ungapped_segment_start:ccds.ungapped_segment_start+ccds.ungapped_segment_length]\n nres = len(string.replace(tmpstr2, '-', ''))\n\n with gzip.open(self.out_dir + self.pdb_ID + '/' + self.pdb_ID + '.pw.gz', 'w') as f:\n f.write(self.pdb_ID + ' ' + str(nres) + '\\n')\n idx1 = 1\n for i in range(r_from, r_to-1): # not that necessary to use -1 for the loop here @Xiang range(r_to, r_to)=[], anyhow, it's good habit @Xiang\n idx2 = idx1+1\n for j in range(i+1, r_to):\n if not math.isnan(dmat[i, j]):\n f.write(str(idx1) + ' ' + str(idx2) + ' ' + tmpstr[(idx1-1)*3:(idx1-1)*3+3] + ' ')\n f.write(tmpstr[(idx2-1)*3:(idx2-1)*3+3] + ' ' + str(dmat[i, j]) + '\\n')\n idx2 = idx2+1\n idx1 = idx1+1\n\n#-------------------------------------------------------------------------------\n def printSeqFile(self):\n with gzip.open(self.out_dir + self.pdb_ID + '/' + self.pdb_ID + '.seq.gz', 'w') 
as f:\n            ccds = self.ccds_match[0]\n            f.write(self.pdb_ID + ' ' + str(ccds.ungapped_segment_length) + '\\n')\n            tmpstr = ccds.ccds_AA[:ccds.ccds_local_alignment_start] + string.replace(ccds.ccds_alignedAA[:ccds.ungapped_segment_start], '-', '')\n            idx = 3*len(tmpstr)\n            f.write(ccds.ccds_DNA[idx:idx+3*ccds.ungapped_segment_length] + '\\n')\n            f.write(ccds.ccds_alignedAA[ccds.ungapped_segment_start:ccds.ungapped_segment_start+ccds.ungapped_segment_length] + '\\n') \n            i1 = string.find(self.pdb_AA, ccds.pdb_alignedAA[ccds.ungapped_segment_start:ccds.ungapped_segment_start+ccds.ungapped_segment_length])\n            i2 = i1 + ccds.ungapped_segment_length\n            f.write(self.pdb_structure_AA[i1:i2] + '\\n')\n\n#-------------------------------------------------------------------------------\n    def printSolvAcc(self):\n        with gzip.open(self.out_dir + self.pdb_ID + '/' + self.pdb_ID + '.sa.gz', 'w') as f:\n            ccds = self.ccds_match[0]\n            \n            pdb_file = self.out_dir + self.pdb_ID + '/' + self.pdb_ID + '.pdb'\n            handle = gzip.open(pdb_file + '.gz', \"r\")\n            structure = Bio.PDB.PDBParser().get_structure(self.pdb_ID, handle)\n            model = structure[0]\n            chain_list = [ a.get_id() for a in model ]\n            chain = model[chain_list[0]]\n            \n            rd = ResDepth.ResidueDepth(chain, pdb_file + '.gz')\n\n            if rd.terminate:\n                print '===========Warning: MSMS or pdb_to_xyzr does not work'\n                return False\n\n            # CCDS string up until the beginning of the ungapped segment without gaps\n            tmpstr = ccds.ccds_AA[:ccds.ccds_local_alignment_start] + string.replace(ccds.ccds_alignedAA[:ccds.ungapped_segment_start], '-', '')\n            # First codon of ungapped sequence in DNA sequence\n            idx = 3*len(tmpstr)\n            # The DNA sequence for the ungapped segment\n            strng = ccds.ccds_DNA[idx:idx+3*ccds.ungapped_segment_length]\n\n            \n            # Start of ungapped segment in the PDB fasta sequence\n            r_from = string.find(self.pdb_AA, ccds.pdb_alignedAA[ccds.ungapped_segment_start:ccds.ungapped_segment_start+ccds.ungapped_segment_length])\n            # End of ungapped segment in the PDB fasta sequence\n            r_to = r_from + ccds.ungapped_segment_length\n            # The structure sequence is aligned to the fasta sequence, may have gaps in the ungapped segment\n            nres = len(self.pdb_structure_AA[r_from:r_to]) - self.pdb_structure_AA[r_from:r_to].count('-')\n\n            idx = 0\n\n            # need to update idx to first residue that has structure information\n##            while self.pdb_structure_AA[idx] == '-':\n##                idx = idx + 1\n\n##            f.write(self.pdb_ID + ' ' + str(nres) + '\\n')\n##            nchck = 0\n##            for item in rd:\n##                if idx >= r_from and idx < r_to:\n##                    while self.pdb_structure_AA[idx] == '-':\n###                        f.write(str(idx+1) + '\\t-\\tnan\\tnan\\n')\n##                        idx = idx + 1\n##                    if item[0].has_id('CA') and item[0].get_id()[0] == ' ': # No Het\n##                        f.write(str(idx+1) + '\\t' + strng[idx*3:idx*3+3] + '\\t' + str(item[1][0]) + '\\t' + str(item[1][1]) + '\\n')\n##                        nchck = nchck + 1\n##                idx = idx + 1\n\n            #Xiang Version start\n            Non_Stand_pos=defaultdict(list) # recording where non-standard AA or un-classified cases like 'H_PTR' is\n            Strange_ID=[] # recording has_id('CA')==False cases\n            nchck=0\n            for item in rd:\n                while idx<r_to and self.pdb_structure_AA[idx] == '-':\n                    #f.write(str(idx+1) + '\\t-\\tnan\\tnan\\n')\n                    idx = idx + 1\n\n                if idx<r_to and idx>=r_from:\n                    if item[0].get_id()[0][0:2]=='H_' and item[0].get_id()[0]!='H_W': # exclude the non standard MSE case for now\n                        nres = nres-1\n                        nonStandID=item[0].get_id()[0]\n                        Non_Stand_pos[nonStandID].append(idx)\n                    if not item[0].has_id('CA'):\n                        nres = nres-1\n                        Strange_ID.append(idx)\n\n                    if item[0].has_id('CA') and item[0].get_id()[0] 
== ' ': # No Het\n                        f.write(str(idx+1) + '\\t' + strng[(idx-r_from)*3:(idx-r_from)*3+3] + '\\t' + str(self.pdb_structure_AA[idx]) + '\\t' + str(item[1][0]) + '\\t' + str(item[1][1]) + '\\n')\n                        nchck = nchck + 1\n                idx = idx + 1\n            if Non_Stand_pos:\n                print '=============Warning: non-standard Amino Acid occurring'\n                print 'pdb ID: ', self.pdb_ID\n                for nonStand in Non_Stand_pos:\n                    print 'non-standard AA: ', nonStand, 'occurred at positions: ', Non_Stand_pos[nonStand]\n            if Strange_ID:\n                print '=============Warning: non-CA Detected'\n                print 'pdb ID: ', self.pdb_ID, ' at position: ', Strange_ID\n\n            #End of Xiang Version\n            assert(nchck == nres)\n\n#-------------------------------------------------------------------------------\n    def printnAccess(self): \n        pdb_id=self.pdb_ID\n        pdb_file_dir=self.out_dir + pdb_id + '/'\n        pdb_file=pdb_file_dir+pdb_id+'.pdb'\n        \n        naccess_localfile_dir='/Users/xji3/Documents/Naccess'\n        if os.path.isfile(naccess_localfile_dir+'/naccess'):\n            naccess=naccess_localfile_dir+'/naccess'\n        else:\n            naccess= 'naccess'\n\n        if os.path.isfile(pdb_file_dir+pdb_id+'.pdb.gz'):\n            subprocess.check_output([\n                'gzip',\n                '-d',\n                '-f',\n                pdb_id+'.pdb.gz',\n                ],cwd=pdb_file_dir)\n            \n            print subprocess.check_output([\n                naccess,\n                pdb_file\n                ],cwd=pdb_file_dir)\n            \n        elif os.path.isfile(pdb_file_dir+pdb_id+'.pdb'):\n            print subprocess.check_output([\n                naccess,\n                pdb_file\n                ],cwd=pdb_file_dir)\n        else:\n            print '=============Warning: .pdb file not found for pdbID:',pdb_id\n            print 'Please check ftp query function and make sure', pdb_id, '.pdb.gz exists'\n            \n\n        subprocess.check_output([\n            'gzip',\n            pdb_id+'.pdb',\n            ],cwd=pdb_file_dir)\n\n#-------------------------------------------------------------------------------\n    def printInfo(self):\n        self.printFastaSequences()\n        self.printPDBfasta2StructureAlignment()\n        self.printCCDS2PDBAlignments()\n        self.printLocalCCDSalignments()\n        self.printDMat()\n        self.printMismatches()\n        self.printSeqFile()\n        self.printSolvAcc()\n        self.printnAccess()\n\n#-------------------------------------------------------------------------------\n    def alignCCDSlocal(self):\n        for pdb2ccds in self.ccds_match: # here pdb2ccds is class PDB_CCDS, not the dict in class data class @Xiang\n            out = self.alignLocalWater(pdb2ccds.ccds_AA) # use water for local alignment\n            iter = out.__iter__()\n            # Parse water's output\n            for line in iter:\n                if line.strip() and line.split() and line.split()[0] == 'asis':\n                    break\n            aligned_subseq1 = line.split()\n            pdb2ccds.pdb_local_alignment_start = int(aligned_subseq1[1]) - 1\n            s1 = pdb2ccds.pdb_alignedAA = aligned_subseq1[2] # PDB sequence\n            iter.next() # skip the vertical bars\n            aligned_subseq2 = iter.next().split()\n            assert(len(aligned_subseq1[2]) == len(aligned_subseq2[2]))\n            pdb2ccds.ccds_local_alignment_start = int(aligned_subseq2[1]) - 1\n            s2 = pdb2ccds.ccds_alignedAA = aligned_subseq2[2] # CCDS sequence\n            algnmt_length = pdb2ccds.local_alignment_length = len(aligned_subseq2[2])\n\n            # Keep track of the gaps also in the actual structure sequence (not just the fasta of the structure)\n            s3 = self.pdb_structure_AA[pdb2ccds.pdb_local_alignment_start:]\n            gaps = [ i.start() for i in re.finditer('-', s1) ] # record all gap positions in s1 @Xiang\n            string.replace(s3, '-', '')\n            for i in gaps:\n                s3 = self.insertInString(i, s3, '-')\n            s3 = pdb2ccds.pdb_aligned_structure_AA = s3[:algnmt_length] # PDB file sequence\n\n            # Find longest ungapped segment, mismatches, gaps, percent identity as compared to fasta seq\n            seg_len = longest_seg_len = mtch_ctr = ccds_gap_ctr = 0\n            
for idx in range(algnmt_length):\n p_res = s1[idx]\n c_res = s2[idx]\n if p_res == '-' or c_res == '-':\n if seg_len > longest_seg_len:\n longest_seg_len = pdb2ccds.ungapped_segment_length = seg_len\n pdb2ccds.percent_identity = 100 * mtch_ctr/longest_seg_len\n # if we hit a gap we don't need to add one to get the start pos\n pdb2ccds.ungapped_segment_start = idx - seg_len\n # Gap => reset counters\n mtch_ctr = seg_len = 0\n elif p_res == c_res:\n seg_len += 1\n mtch_ctr += 1\n else: # mismatch\n seg_len += 1\n if seg_len > longest_seg_len:\n longest_seg_len = pdb2ccds.ungapped_segment_length = seg_len\n pdb2ccds.ungapped_segment_start = 1 + idx - seg_len\n pdb2ccds.percent_identity = 100 * mtch_ctr/longest_seg_len\n\n # In longest ungapped segment find mismatches, gaps, percent identity as compared to pdb file seq\n ccds_gap_ctr = 0\n for idx in range(algnmt_length):\n p_res = s3[idx]\n c_res = s2[idx]\n tmp = 3*(pdb2ccds.ccds_local_alignment_start + idx - ccds_gap_ctr)\n codon = pdb2ccds.ccds_DNA[tmp:tmp+3]\n if p_res == '-' or c_res == '-':\n if c_res == '-':\n ccds_gap_ctr = ccds_gap_ctr+1\n # { aligned AA sequences offset : [ pdb_aa_res, ccds_aa_res, ccds_dna_triplet, triplet transl. ] }\n pdb2ccds.mismatch_or_gap[idx] = [ p_res, '-', '---', '-' ]\n else:\n pdb2ccds.mismatch_or_gap[idx] = [ '-', c_res, codon, universalCode[codon] ]\n elif p_res != c_res or c_res != universalCode[codon]: # mismatch or translation exception\n pdb2ccds.mismatch_or_gap[idx] = [ p_res, c_res, codon, universalCode[codon] ]\n \n\n # Sort the ccds alignments by longest ungapped segment length & percent identity\n # Sorting algorithm is stable; order of the first sort is preserved if second criterium is equal\n self.ccds_match = sorted(self.ccds_match, key=lambda prot: prot.percent_identity, reverse=True)\n self.ccds_match = sorted(self.ccds_match, key=lambda prot: prot.ungapped_segment_length, reverse=True)\n # test in XiangDraft3.py @Xiang\n\n#-------------------------------------------------------------------------------\n def insertInString(self, idx, str, insert):\n return str[:idx] + insert + str[idx:]\n\n#-------------------------------------------------------------------------------\n def alignLocalWater(self, ccds_AA):\n water_localfile_dir='/Users/xji3/Downloads/EMBOSS-6.6.0/emboss'\n if os.path.isfile(water_localfile_dir+'/water'):\n water = water_localfile_dir+'/water'\n else:\n water = 'water'\n return subprocess.check_output([\n water,\n '-stdout',\n '-auto',\n '-awidth3=100000',\n '-asequence=asis:' + self.pdb_AA,\n '-bsequence=asis:' + ccds_AA\n ]).split('\\n')\n\n#-------------------------------------------------------------------------------\n def parsePDBfile(self):\n pdb_file = self.out_dir + self.pdb_ID + '/' + self.pdb_ID + '.pdb'\n handle = gzip.open(pdb_file + '.gz', \"r\")\n structure = Bio.PDB.PDBParser().get_structure(self.pdb_ID, handle)\n\n model = structure[0]\n chain_list = [ a.get_id() for a in model ]\n print 'Using chain:', chain_list[0]\n chain = model[chain_list[0]]\n\n if self.crit=='C-N':\n ppb = Bio.PDB.PPBuilder()\n print 'Using C-N distance criterion'\n # pdBuilder() uses C-N distance criterion\n elif self.crit=='Ca-Ca':\n ppb = Bio.PDB.CaPPBuilder()\n print 'Using Ca-Ca distance criterion'\n # CaPPBuilder() uses Ca-Ca distance criterion\n else:\n ppb = Bio.PDB.CaPPBuilder()\n print 'Using Ca-Ca distance criterion'\n # CaPPBuilder() uses Ca-Ca distance criterion\n # Use Ca-Ca criterion by default\n \n\n \n # Include non-standard residues\n # more info. 
in biopdb_faq.pdf, page 11 @Xiang\n tmp = [ str(pp.get_sequence()) for pp in ppb.build_peptides(chain, aa_only=False) ]\n\n assert(not self.pdb_structure_AA)\n self.pdb_structure_AA = '-' * len(self.pdb_AA) #assign all gaps\n edges = [None] * len(tmp)\n multiple = []\n i = 0\n for a in tmp:\n # Only consider fragments that occur only once\n # This doesn't cause a lot of loss since only very short (~2 res) fragments occur more than once\n n = self.pdb_AA.count(a)\n if(n == 1):\n idx = string.find(self.pdb_AA, a) # return the first element in a's notion in pdb_AA, could be 0 @Xiang\n self.pdb_structure_AA = self.pdb_structure_AA[:idx] + a + self.pdb_structure_AA[idx+len(a):]\n edges[i] = [idx, idx+len(a)]\n elif n > 1 :\n multiple.append(i)\n else:\n print 'There is no match of this fragment in the pdb AA sequence', a # May need more operation here @Xiang\n i = i+1\n\n # Give fragments we couldn't place unambiguously before a second try\n for i in multiple:\n b = e = -1\n if i == 0 and edges[i+1]:\n b = 0\n e = edges[i+1][0]\n elif i == (len(tmp)-1) and edges[i-1]:\n b = edges[i-1][1]\n e = len(tmp)-1\n elif edges[i-1] and edges[i+1]:\n b = edges[i-1][1]\n e = edges[i+1][0]\n else:\n print 'I don\\'t know where to place this fragment. It appears >= 2x in the structure.'\n print 'It will be ignored in the distance matrix:', tmp[i]\n continue\n if self.pdb_AA[b:e].count(tmp[i]) == 1:\n idx = string.find(self.pdb_AA[b:e], tmp[i])\n self.pdb_structure_AA = self.pdb_structure_AA[:b+idx] + tmp[i] + self.pdb_structure_AA[b+idx+len(tmp[i]):]\n else:\n print 'I don\\'t know where to place this fragment. It appears >= 2x in the structure.',\n print 'It will be ignored in the distance matrix:', tmp[i]\n\n#-------------------------------------------------------------------------------\n def calcDistMatrix(self):\n\n pdb_file = self.out_dir + self.pdb_ID + '/' + self.pdb_ID + '.pdb'\n handle = gzip.open(pdb_file + '.gz', \"r\")\n structure = Bio.PDB.PDBParser().get_structure(self.pdb_ID, handle)\n model = structure[0]\n chain_list = [ a.get_id() for a in model ]\n chain = model[chain_list[0]]\n\n if self.crit=='C-N':\n ppb = Bio.PDB.PPBuilder()\n # Include non-standard residues\n print 'Using C-N distance criterion for DistMatrix Calculation'\n elif self.crit=='Ca-Ca':\n\n ppb = Bio.PDB.CaPPBuilder()\n print 'Using Ca-Ca distance criterion for DistMatrix Calculation'\n # CaPPBuilder() uses Ca-Ca distance criterion\n #@Xiang\n else:\n ppb = Bio.PDB.CaPPBuilder()\n print 'Using Ca-Ca distance criterion for DistMatrix Calculation'\n # Use Ca-Ca criterion by default\n \n tmp = [ str(pp.get_sequence()) for pp in ppb.build_peptides(chain, aa_only=False) ]\n\n # Join the lists to get nRes\n structure_residues = [None] * len(list(itertools.chain(*tmp)))\n idx = 0\n for r in chain: #.get_residues() seems like r=residues in chain, which doesn't need that function @Xiang\n if r.has_id('CA') and r.get_id()[0] == ' ': # No Het\n structure_residues[idx] = r\n idx = idx + 1\n\n dmat = numpy.empty((len(self.pdb_AA), len(self.pdb_AA)), numpy.float)\n dmat[:] = numpy.NAN\n nres = len(structure_residues)\n r_idx1 = 0\n tot = len(self.pdb_AA)\n for i in range(tot-1): #why -1 range(3)=[0,1,2] Stop Codon? 
@Xiang \n if self.pdb_structure_AA[i] != '-' and structure_residues[r_idx1]:\n dmat[i, i] = 0.0\n r1 = structure_residues[r_idx1]\n r_idx1 = r_idx1 + 1\n else:\n continue\n r_idx2 = r_idx1\n for j in range(i+1, tot):\n if self.pdb_structure_AA[j] != '-' and structure_residues[r_idx2]:\n r2 = structure_residues[r_idx2]\n r_idx2 = r_idx2 + 1\n else:\n continue\n if (r1.has_id('CA') and r2.has_id('CA')):\n dmat[i, j] = self.calcCAdist(r1, r2)\n dmat[j, i] = dmat[i, j]\n return dmat\n\n#-------------------------------------------------------------------------------\n def printDMatrix(self):\n tot = len(self.pdb_AA)\n for i in range(tot):\n for j in range(tot):\n print \"%6.3f\" % self.dmat[i, j], ' ',\n print\n print\n\n#-------------------------------------------------------------------------------\n def calcCAdist(self, a, b):\n return a['CA'] - b['CA']\n\n#-------------------------------------------------------------------------------\n def checkAlignmentThresholds(self, min_alignment_length = 50, min_pct_identity = 97):\n self.hasMatch = False\n for i in self.ccds_match:\n if (i.ungapped_segment_length >= min_alignment_length) and (i.percent_identity >= min_pct_identity):\n i.aboveThresholds = True\n self.hasMatch = True\n return self.hasMatch\n\n#-------------------------------------------------------------------------------\nclass PDB_CCDS:\n#-------------------------------------------------------------------------------\n def __init__(self, id, seqs):\n self.ccds_ID = id\n self.ccds_AA = seqs[0]\n self.ccds_DNA = seqs[1]\n\n # { ccds_AA sequence index : [ ccds_AA_res, ccds_DNA_triplet, translated_AA ] }\n # only exceptions are recorded\n # To some extent this is redundant, because translation exceptions in the ungapped segment are\n # now also recorded in mismatch_or_gap\n self.translationExceptions = defaultdict(list)\n self.checkSequences()\n self.pdb_alignedAA = '' # CCDS specific, missing residues in structure gapped out after alignment\n self.pdb_aligned_structure_AA = '' # as above but w. 
gaps for residues that are missing in the structure\n self.ccds_alignedAA = ''\n self.pdb_local_alignment_start = -1 # index, 0-based\n self.ccds_local_alignment_start = -1 # AA sequence index, 0-based\n self.local_alignment_length = 0\n self.ungapped_segment_length = 0\n self.ungapped_segment_start = -1 # index, 0-based\n self.percent_identity = 0.0\n\n # { aligned AA sequences offset : [ pdb_aa_res, ccds_aa_res, ccds_dna_triplet, ccds_dna_triplet_translation ] }\n # the triplet translation should match the ccds_aa_res; else it should also be reflected in translationExceptions\n self.mismatch_or_gap = defaultdict(list)\n\n # Flag for selective printing of entries; all entries are sorted by seg length\n # entries with equal seg lengths are ordered by percent identity\n aboveThresholds = False\n\n#-------------------------------------------------------------------------------\n def checkSequences(self):\n assert(len(self.ccds_AA)*3+3 == len(self.ccds_DNA)) # They all end with a stop codon\n j = 0\n for i in range(len(self.ccds_AA)):\n aa_res = self.ccds_AA[i]\n codon = self.ccds_DNA[j:j+3]\n if universalCode[codon] != aa_res:\n self.translationExceptions[i] = [ aa_res, codon, universalCode[codon] ]\n j = j+3\n\n#-------------------------------------------------------------------------------\n" }, { "alpha_fraction": 0.4583333432674408, "alphanum_fraction": 0.4583333432674408, "avg_line_length": 11, "blob_id": "26e147332fb841830bc630e77b3f2fd080a5773d", "content_id": "63933bd1545db24ba4c0cbb2810ea8a4bb444c25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 24, "license_type": "no_license", "max_line_length": 11, "num_lines": 2, "path": "/README.md", "repo_name": "xji3/clemensCode", "src_encoding": "UTF-8", "text": "clemensCode\n===========\n" }, { "alpha_fraction": 0.5952274203300476, "alphanum_fraction": 0.6074571013450623, "avg_line_length": 37.674556732177734, "blob_id": "673c64a5cdeb428c1e2f80a466fbebecd94415f9", "content_id": "e4dbb4f153356c8e1608d61acf93c5a822fe9388", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6705, "license_type": "no_license", "max_line_length": 960, "num_lines": 169, "path": "/XiangDraft.py", "repo_name": "xji3/clemensCode", "src_encoding": "UTF-8", "text": "from protein import *\r\nfrom data_test import *\r\n\r\n\r\n##ccds_seqs=defaultdict(list)\r\n##readme=False\r\n##idx=0\r\n##for line in f:\r\n## if line.strip(): # makes sure empty lines are not included\r\n#### print line\r\n## line = line.rstrip('\\n')\r\n#### print line\r\n#### a=raw_input()\r\n## if line[0] == '>':\r\n## line = line.split('|')\r\n## key = line[0].replace('>', '')\r\n## readme=True\r\n#### print line\r\n#### print key\r\n## elif key in ccds_seqs and len(ccds_seqs[key]) == (idx+1):\r\n## ccds_seqs[key][idx] = ccds_seqs[key][idx] + line\r\n## else:\r\n## ccds_seqs[key].append(line)\r\n## print key\r\n##\r\n## print ccds_seqs[key]\r\n## a=raw_input()\r\n \r\n \r\nPDB_id='4A14'\r\n#problem pdbIDs\r\nPDB_id='2V4U' #this protein has no match with the structure fragment from 
Bio.PDB\r\n\r\npdb_ID=PDB_id\r\n\r\npdb_AA='GAMGLPGAEEAPVRVALRVRPLLPKELLHGHQSCLQVEPGLGRVTLGRDRHFGFHVVLAEDAGQEAVYQACVQPLLEAFFEGFNATVFAYGQTGSGKTYTMGEASVASLLEDEQGIVPRAMAEAFKLIDENDLLDCLVHVSYLEVYKEEFRDLLEVGTASRDIQLREDERGNVVLCGVKEVDVEGLDEVLSLLEMGNAARHTGATHLNHLSSRSHTVFTVTLEQRGRAPSRLPRPAPGQLLVSKFHFVDLAGSERVLKTGSTGERLKESIQINSSLLALGNVISALGDPQRRGSHIPYRDSKITRILKDSLGGNAKTVMIACVSPSSSDFDETLNTLNYASRAQ'\r\n\r\nccds_id='CCDS32325.2'\r\n\r\nccds_AA='MGLEAQRLPGAEEAPVRVALRVRPLLPKELLHGHQSCLQVEPGLGRVTLGRDRHFGFHVVLAEDAGQEAVYQACVQPLLEAFFEGFNATVFAYGQTGSGKTYTMGEASVASLLEDEQGIVPRAMAEAFKLIDENDLLDCLVHVSYLEVYKEEFRDLLEVGTASRDIQLREDERGNVVLCGVKEVDVEGLDEVLSLLEMGNAARHTGATHLNHLSSRSHTVFTVTLEQRGRAPSRLPRPAPGQLLVSKFHFVDLAGSERVLKTGSTGERLKESIQINSSLLALGNVISALGDPQRRGSHIPYRDSKITRILKDSLGGNAKTVMIACVSPSSSDFDETLNTLNYASRAQNIRNRATVNWRPEAERPPEETASGARGPPRHRSETRIIHRGRRAPGPATASAAAAMRLGAECARYRACTDAAYSLLRELQAEPGLPGAAARKVRDWLCAVEGERSALSSASGPDSGIESASVEDQAAQGAGGRKEDEGAQQLLTLQNQVARLEEENRDFLAALEDAMEQYKLQSDRLREQQEEMVELRLRLELVRPGWGGPRLLNGLPPGSFVPRPHTAPLGGAHAHVLGMVPPACLPGDEVGSEQRGEQVTNGREAGAELLTEVNRLGSGSSAASEEEEEEEEPPRRTLHLRRNRISNCSQRAGARPGSLPERKGPELCLEELDAAIPGSRAVGGSKARVQARQVPPATASEWRLAQAQQKIRELAINIRMKEELIGELVRTGKAAQALNRQHSQRIRELEQEAEQVRAELSEGQRQLRELEGKELQDAGERSRLQEFRRRVAAAQSQVQVLKEKKQATERLVSLSAQSEKRLQELERNVQLMRQQQGQLQRRLREETEQKRRLEAEMSKRQHRVKELELKHEQQQKILKIKTEEIAAFQRKRRSGSNGSVVSLEQQQKIEEQKKWLDQEMEKVLQQRRALEELGEELHKREAILAKKEALMQEKTGLESKRLRSSQALNEDIVRVSSRLEHLEKELSEKSGQLRQGSAQSQQQIRGEIDSLRQEKDSLLKQRLEIDGKLRQGSLLSPEEERTLFQLDEAIEALDAAIEYKNEAITCRQRVLRASASLLSQCEMNLMAKLSYLSSSETRALLCKYFDKVVTLREEQHQQQIAFSELEMQLEEQQRLVYWLEVALERQRLEMDRQLTLQQKEHEQNMQLLLQQSRDHLGEGLADSRRQYEARIQALEKELGRYMWINQELKQKLGGVNAVGHSRGGEKRSLCSEGRQAPGNEDELHLAPELLWLSPLTEGAPRTREETRDLVHAPLPLTWKRSSLCGEEQGSPEELRQREAAEPLVGRVLPVGEAGLPWNFGPLSKPRRELRRASPGMIDVRKNPL'\r\n\r\nwater_localfile_dir='/Users/xji3/Downloads/EMBOSS-6.6.0/emboss'\r\nif os.path.isfile(water_localfile_dir+'/water'):\r\n water = water_localfile_dir+'/water'\r\nelse:\r\n water = 'water'\r\n\r\nout = subprocess.check_output([\r\n water,\r\n '-stdout',\r\n '-auto',\r\n '-awidth3=100000',\r\n '-asequence=asis:' + pdb_AA,\r\n '-bsequence=asis:' + ccds_AA\r\n ]).split('\\n')\r\n#print out\r\n\r\noffice_Mac_address='/Users/xji3/clemensCode'\r\naddress=office_Mac_address\r\nout_dir=address+'/newDataOutput/'\r\npdb_file = out_dir +'pdb/'+ pdb_ID + '/' + pdb_ID + '.pdb'\r\nhandle = gzip.open(pdb_file + '.gz', \"r\")\r\nstructure = Bio.PDB.PDBParser().get_structure(pdb_ID, handle)\r\n\r\nprint structure\r\nmodel = structure[0]\r\n\r\nchain_list = [ a.get_id() for a in model ]\r\nprint chain_list\r\nchain=model[chain_list[0]]\r\n\r\nppb=Bio.PDB.PPBuilder()\r\nprint ppb\r\nbb=ppb.build_peptides(chain)\r\nprint bb\r\ntmp = [ str(pp.get_sequence()) for pp in ppb.build_peptides(chain, aa_only=False) ]\r\nprint tmp\r\n\r\npdb_structure_AA = ''\r\nprint not pdb_structure_AA\r\nassert(not pdb_structure_AA)\r\npdb_structure_AA = '-' * len(pdb_AA)\r\n\r\nedges = [None] * len(tmp)\r\nprint edges\r\nmultiple=[]\r\n\r\na=tmp[0]\r\ni=0\r\nn = pdb_AA.count(a)\r\nif(n == 1):\r\n idx = string.find(pdb_AA, a) # return the first element in a's notion in pdb_AA, could be 0 @Xiang\r\n pdb_structure_AA = pdb_structure_AA[:idx] + a + pdb_structure_AA[idx+len(a):]\r\n edges[i] = [idx, idx+len(a)]\r\nelif n > 1:\r\n multiple.append(i)\r\nelse:\r\n print\r\ni=i+1\r\n\r\nfor i in multiple:\r\n b = e = -1\r\n if i == 0 and edges[i+1]:\r\n b = 0\r\n e = edges[i+1][0]\r\n elif i == (len(tmp)-1) and edges[i-1]:\r\n b = edges[i-1][1]\r\n e = len(tmp)-1\r\n elif edges[i-1] and 
edges[i+1]:\r\n b = edges[i-1][1]\r\n e = edges[i+1][0]\r\n else:\r\n print 'I don\\'t know where to place this fragment. It appears >= 2x in the structure.'\r\n print 'It will be ignored in the distance matrix:', tmp[i]\r\n continue\r\n if self.pdb_AA[b:e].count(tmp[i]) == 1:\r\n idx = string.find(self.pdb_AA[b:e], tmp[i])\r\n self.pdb_structure_AA = self.pdb_structure_AA[:b+idx] + tmp[i] + self.pdb_structure_AA[b+idx+len(tmp[i]):]\r\n else:\r\n print 'I don\\'t know where to place this fragment. It appears >= 2x in the structure.',\r\n print 'It will be ignored in the distance matrix:', tmp[i]\r\n\r\n#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n\r\niter = out.__iter__()\r\nfor line in iter:\r\n if line.strip() and line.split() and line.split()[0] == 'asis':\r\n break\r\naligned_subseq1 = line.split()\r\npdb_local_alignment_start = int(aligned_subseq1[1]) - 1\r\ns1 = pdb_alignedAA = aligned_subseq1[2] # PDB sequence\r\niter.next() # skip the vertical bars\r\naligned_subseq2 = iter.next().split()\r\nassert(len(aligned_subseq1[2]) == len(aligned_subseq2[2]))\r\nccds_local_alignment_start = int(aligned_subseq2[1]) - 1\r\ns2 = ccds_alignedAA = aligned_subseq2[2] # CCDS sequence\r\nalgnmt_length = local_alignment_length = len(aligned_subseq2[2])\r\n\r\ns3 = pdb_structure_AA[pdb_local_alignment_start:]\r\ngaps = [ i.start() for i in re.finditer('-', s1) ]\r\n#print gaps\r\nstring.replace(s3, '-', '')\r\n\r\n\r\n#################################################################################\r\n#################################################################################\r\n#################################################################################\r\n\r\n\r\nfor pp in Bio.PDB.PPBuilder().build_peptides(chain,aa_only=False):\r\n print pp.get_sequence()\r\n\r\ntmp = [ str(pp.get_sequence()) for pp in ppb.build_peptides(chain, aa_only=False) ]\r\nprint tmp\r\n\r\nstructure_residues = [None] * len(list(itertools.chain(*tmp)))\r\nidx = 0\r\nfor r in chain: #.get_residues() seems like r=residues in chain, which doesn't need that function @Xiang\r\n if r.has_id('CA') and r.get_id()[0] == ' ': # No Het\r\n structure_residues[idx] = r\r\n idx = idx + 1\r\n#print structure_residues\r\n\r\ndmat = numpy.empty((len(pdb_AA), len(pdb_AA)), numpy.float)\r\ndmat[:] = numpy.NAN\r\n\r\nprint dmat\r\n" }, { "alpha_fraction": 0.5486928224563599, "alphanum_fraction": 0.5545751452445984, "avg_line_length": 31.774192810058594, "blob_id": "06879e89adf72a5bf9630b817e7d11c0842ecc45", "content_id": "1af3fc2a86f1fb2fe27247415d72cfc94c8c9b82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3060, "license_type": "no_license", "max_line_length": 108, "num_lines": 93, "path": "/XiangDraft2.py", "repo_name": "xji3/clemensCode", "src_encoding": "UTF-8", "text": "from protein import *\nfrom data_test import *\n\noffice_Mac_address='/Users/xji3/Dropbox/My Files/BRC/Small Project/Clemens PY 
code'\ninitpdb_filename=office_Mac_address+'/input/init_pdbids.txt'\nxmlfilename=office_Mac_address+'/input/pdb_search.xml'\nds_dir=office_Mac_address+'/XiangTrialOutput'\ninitpdb_filename=office_Mac_address+'/input/init_pdbids.txt'\ninitmono_filename=office_Mac_address+'/input/monomers.txt'\nlocal_pisadir=office_Mac_address+'/XiangTrialOutput/pisa'\ninitmp_filename=office_Mac_address+'/input/init_mpids.txt'\n\ndata = Data(initpdb_filename, xmlfilename, ds_dir)\ndata.queryPDB(xmlfilename)\n\n \n#---------------------------------------------------------------------------\n# Apply additional filters (no membrane proteins, only monomers, ...)\n#---------------------------------------------------------------------------\ndata.filterMembraneProteins(initmp_filename)\ndata.filterNonMonomers(initmono_filename, local_pisadir)\n\n#---------------------------------------------------------------------------\n# Get the AA sequences for the PDB files and remove those with non-unique\n# chains\n#---------------------------------------------------------------------------\ndata.fasta4pdb()\ndata.removeNonUniqueChainPDBs()\n\n#---------------------------------------------------------------------------\n# Map PDB-IDs to CCDS-IDs (and keep track of all other mappings)\n#---------------------------------------------------------------------------\n#data.mapPDB2CCDS()\n\n\nccds_table = defaultdict(list)\npdb2ccds = defaultdict(list)\n\ntmp_ccds = []\nlocal_file = 'CCDS.current_short.txt'\nf = open(local_file, 'rb')\nfor line in f:\n if line.strip(): # makes sure empty lines are not included\n s = line.strip().split()\n## print s\n## print len(s)\n \n gene_id = s[3]\n\n chromosome = s[0]\n ccds_id = s[4]\n# a=raw_input()\n print s[3]\n print s[0]\n print s[4]\n\n## print chromosome\n## print ccds_id\n\n # Chromosome will be first in the list, followed by the CCDS-IDs\n if not ccds_table[gene_id]:\n ccds_table[gene_id].append(chromosome)\n\n if not chromosome==ccds_table[gene_id][0]:\n print\n print 'Attention: A gene id shows up in multiple choromosomes!'\n print 'Data.py readCCDStable function'\n print\n\n ccds_table[gene_id].append(ccds_id)\n tmp_ccds.append(ccds_id)\n\n## print ccds_table\n## a=raw_input()\n\nprint ccds_table.keys()\npdbID_trial=data.pdbIDs\n\nfor i in pdbID_trial:\n for gd in ccds_table.keys():\n for ccds in ccds_table[gd]:\n if ccds[0:4] == 'CCDS':\n if not ccds in pdb2ccds[i]:\n pdb2ccds[i].append(ccds)\n\n\nfor pdb in pdb2ccds:\n # Create a new instance of protein\n proteins[pdb] = Protein(pdb, data.pdb_seqs[pdb][0], data.out_dir + '/pdb/')\n\n # Assign CCDS\n for ccds in pdb2ccds[pdb]:\n proteins[pdb].ccds_match.append(PDB_CCDS(ccds, ccds_seqs[ccds])) # create a new instance of PDB_CCDS\n\n\n\n \n" }, { "alpha_fraction": 0.5691769123077393, "alphanum_fraction": 0.5814360976219177, "avg_line_length": 24.954545974731445, "blob_id": "6d95c2259f9c42372057818802b186b8d84b2822", "content_id": "328ec66002d3357f08a70bd0cdfe74f2bad521cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 571, "license_type": "no_license", "max_line_length": 58, "num_lines": 22, "path": "/XiangDraft3.py", "repo_name": "xji3/clemensCode", "src_encoding": "UTF-8", "text": "from protein import *\nfrom data_test import *\nfrom operator import attrgetter\n\nclass Student:\n def __init__(self, name, grade, age):\n self.name = name\n self.grade = grade\n self.age = age\n def __repr__(self):\n return repr((self.name, self.grade, self.age))\n\n\nstudent_objects = [\n 
Student('john', 'C', 15),\n Student('jane', 'A', 12),\n Student('dave', 'B', 10),\n Student('Adam', 'A', 9)\n]\n\nprint sorted(student_objects,key=attrgetter('age'))\nprint sorted(student_objects,key=attrgetter('grade'))\n" }, { "alpha_fraction": 0.5491580367088318, "alphanum_fraction": 0.5594785213470459, "avg_line_length": 24.21917724609375, "blob_id": "2baf09f85737ad81ee57ca52e5b292701e8a826e", "content_id": "f8244063a30980d8a7e9edb05b60513676d4078b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1841, "license_type": "no_license", "max_line_length": 65, "num_lines": 73, "path": "/XiangDraftSubProcess.py", "repo_name": "xji3/clemensCode", "src_encoding": "UTF-8", "text": "#import itertools, numpy, string, subprocess, re\n#from __future__ import division\nimport subprocess\nfrom collections import defaultdict\nimport os.path, gzip\nimport tempfile\nimport os\n\n\n##water_localfile_dir='/Users/xji3/Downloads/EMBOSS-6.6.0/emboss'\n## if os.path.isfile(water_localfile_dir+'/water'):\n## water = water_localfile_dir+'/water'\n## else:\n## water = 'water'\n## return subprocess.check_output([\n## water,\n## '-stdout',\n## '-auto',\n## '-awidth3=100000',\n## '-asequence=asis:' + self.pdb_AA,\n## '-bsequence=asis:' + ccds_AA\n## ]).split('\\n')\n\nnaccess_localfile_dir='/Users/xji3/Documents/Naccess'\n\npdb_file='/Users/xji3/Documents/Naccess/4A14.pdb'\npdb_id='4A14'\n\nif os.path.isfile(naccess_localfile_dir+'/naccess'):\n naccess=naccess_localfile_dir+'/naccess'\nelse:\n naccess= 'naccess'\n\nprint subprocess.check_output([\n 'ls',\n '-l'\n ],cwd=naccess_localfile_dir+'/')\n\nif os.path.isfile(naccess_localfile_dir+'/'+pdb_id+'.pdb.gz'):\n print subprocess.check_output([\n 'gzip',\n '-d',\n '-f',\n pdb_id+'.pdb.gz',\n ],cwd=naccess_localfile_dir+'/')\n \n print subprocess.check_output([\n naccess,\n pdb_file\n ],cwd=naccess_localfile_dir+'/')\n \nelif os.path.isfile(naccess_localfile_dir+'/'+pdb_id+'.pdb'):\n print subprocess.check_output([\n naccess,\n pdb_file\n ],cwd=naccess_localfile_dir+'/')\n \nprint subprocess.check_output([\n 'gzip',\n pdb_id+'.pdb',\n ],cwd=naccess_localfile_dir+'/')\n\n\n\n\n\n\n#subprocess.Popen(naccess,cwd=r'./pdb')\n##with open('./pdb','w') as outfile:\n## subprocess.check_output([\n## naccess,\n## pdb_file\n## ])\n" } ]
6
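The protein.py excerpt in the record above describes a two-pass strategy for anchoring Bio.PDB peptide fragments in the full PDB sequence: fragments that occur exactly once are placed first, and repeated fragments get a second try inside the window bounded by their already-placed neighbours. The following is a minimal, self-contained Python 3 sketch of that idea; the function name and the example strings are illustrative, not taken from the repository.

```python
def place_fragments(pdb_aa, fragments):
    """Mask pdb_aa to '-' everywhere except where fragments are placed."""
    structure = ['-'] * len(pdb_aa)
    edges = [None] * len(fragments)
    ambiguous = []

    # Pass 1: place fragments that occur exactly once in the full sequence.
    for i, frag in enumerate(fragments):
        count = pdb_aa.count(frag)
        if count == 1:
            idx = pdb_aa.find(frag)
            structure[idx:idx + len(frag)] = frag
            edges[i] = (idx, idx + len(frag))
        elif count > 1:
            ambiguous.append(i)

    # Pass 2: retry repeated fragments between their placed neighbours.
    for i in ambiguous:
        begin = edges[i - 1][1] if i > 0 and edges[i - 1] else 0
        end = edges[i + 1][0] if i + 1 < len(fragments) and edges[i + 1] else len(pdb_aa)
        window = pdb_aa[begin:end]
        if window.count(fragments[i]) == 1:
            idx = begin + window.find(fragments[i])
            structure[idx:idx + len(fragments[i])] = fragments[i]

    return ''.join(structure)

# 'AB' repeats in the sequence, but is unique between its placed neighbours.
print(place_fragments('XXABYYABZZ', ['XX', 'AB', 'YY', 'AB', 'ZZ']))  # XXABYYABZZ
```

Fragments that stay ambiguous even inside their neighbour window are simply left as gaps, which matches the "ignored in the distance matrix" behaviour described in the original comments.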
cm-rennie/scripts
https://github.com/cm-rennie/scripts
57e7433c64ca855c1a43c9f6eb9d2480597798f9
0ff595173e16dbc299a60c17a14e8df37dfde3ad
4e315282dcba5fecc0e954ec23e9b2a512a7cbbf
refs/heads/master
2022-02-19T04:33:24.312499
2019-09-19T23:30:48
2019-09-19T23:30:48
208,908,131
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5840639472007751, "alphanum_fraction": 0.5863847136497498, "avg_line_length": 37.57143020629883, "blob_id": "137d523635a78de6ec2b067a18826457b328e1f4", "content_id": "b3058ae99834c6d8d536d218e499171b5c87da08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3878, "license_type": "no_license", "max_line_length": 134, "num_lines": 98, "path": "/deactivate_py.py", "repo_name": "cm-rennie/scripts", "src_encoding": "UTF-8", "text": "import atexit\r\nimport json\r\nimport traceback\r\nfrom confluent_kafka import avro, Consumer as KafkaConsumer, KafkaError, TopicPartition, OFFSET_END, TIMESTAMP_NOT_AVAILABLE, Producer\r\nfrom confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient\r\nfrom confluent_kafka.avro import AvroProducer as OGProducer, AvroConsumer\r\nfrom confluent_kafka.avro.serializer import SerializerError\r\nfrom confluent_kafka.avro.serializer.message_serializer import MessageSerializer\r\n\r\n\r\nclass AvroProducer(object):\r\n def __init__(self, config, default_key_schema=None,\r\n default_value_schema=None, schema_registry=None):\r\n schema_registry_url = config.pop(\"schema.registry.url\", None)\r\n schema_registry_ca_location = config.pop(\r\n \"schema.registry.ssl.ca.location\", None)\r\n schema_registry_certificate_location = config.pop(\r\n \"schema.registry.ssl.certificate.location\", None)\r\n schema_registry_key_location = config.pop(\r\n \"schema.registry.ssl.key.location\", None)\r\n\r\n if schema_registry is None:\r\n if schema_registry_url is None:\r\n raise ValueError(\"Missing parameter: schema.registry.url\")\r\n\r\n schema_registry = CachedSchemaRegistryClient(url=schema_registry_url,\r\n ca_location=schema_registry_ca_location,\r\n cert_location=schema_registry_certificate_location,\r\n key_location=schema_registry_key_location)\r\n elif schema_registry_url is not None:\r\n raise ValueError(\r\n \"Cannot pass schema_registry along with schema.registry.url config\")\r\n\r\n self.producer = Producer(config)\r\n self._serializer = MessageSerializer(schema_registry)\r\n self._key_schema = default_key_schema\r\n self._value_schema = default_value_schema\r\n\r\n def flush(self):\r\n self.producer.flush()\r\n\r\n def produce(self, **kwargs):\r\n key_schema = kwargs.pop('key_schema', self._key_schema)\r\n value_schema = kwargs.pop('value_schema', self._value_schema)\r\n topic = kwargs.pop('topic', None)\r\n if not topic:\r\n raise ClientError(\"Topic name not specified.\")\r\n value = kwargs.pop('value', None)\r\n key = kwargs.pop('key', None)\r\n\r\n if value is not None:\r\n if value_schema:\r\n value = self._serializer.encode_record_with_schema(\r\n topic, value_schema, value)\r\n else:\r\n raise ValueSerializerError(\"Avro schema required for values\")\r\n\r\n if key is not None:\r\n if key_schema:\r\n key = self._serializer.encode_record_with_schema(\r\n topic, key_schema, key, True)\r\n\r\n self.producer.produce(topic, value, key, **kwargs)\r\n\r\n\r\ndef deactivate(record):\r\n if record['active'] and record['listId'].endswith('_recorded_future'):\r\n record['active'] = False\r\n try:\r\n producer.produce(\r\n key=record['id'],\r\n value=record,\r\n topic=TOPIC\r\n )\r\n except BufferError:\r\n producer.flush()\r\n deactivate(record)\r\n\r\n\r\nwith open('./recs.json', 'r') as f:\r\n recs = json.loads(f.read())\r\n for rec in recs.values():\r\n deactivate(rec)\r\n\r\nif __name__ == \"__main__\":\r\n producer = AvroProducer({\r\n 'bootstrap.servers': 
'localhost:29092',\r\n 'schema.registry.url': 'http://localhost:8081'\r\n }, default_value_schema=list_item_schema)\r\n\r\n TOPIC='sandbox_list_items'\r\n\r\n with open(\"schema.json\", \"r\") as f:\r\n list_item_schema = avro.loads(\r\n json.dumps(\r\n json.loads(f.read())['ListItem']\r\n )\r\n )\r\n" }, { "alpha_fraction": 0.5659213662147522, "alphanum_fraction": 0.5898226499557495, "avg_line_length": 25.489795684814453, "blob_id": "39f11a23792cb9df10ebe8fa1aed0fe509185835", "content_id": "f3f03bb07379c875ff2f1abe051ae15df30d5510", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1297, "license_type": "no_license", "max_line_length": 102, "num_lines": 49, "path": "/suppression/force_max_message.py", "repo_name": "cm-rennie/scripts", "src_encoding": "UTF-8", "text": "from __future__ import print_function\nimport sys\nimport datetime\n\nfrom max_message_vars import message, random_asset_from_group\nfrom confluent_kafka import Producer\n\n\nclass SimpleProducer(object):\n def __init__(self, config):\n self.producer = Producer(config)\n\n def flush(self):\n self.producer.flush()\n\n def produce(self, **kwargs):\n topic = kwargs.pop('topic', None)\n value = kwargs.pop('value', None)\n key = kwargs.pop('key', None)\n\n self.producer.produce(topic, value, key, **kwargs)\n\n\ndef main(**kwargs):\n topic = kwargs.pop('topic', 'sandbox_events')\n\n p = SimpleProducer({\n 'bootstrap.servers': 'localhost:9092'\n })\n\n i = 0\n timedelta = datetime.timedelta(hours=7)\n while True:\n asset_id = random_asset_from_group()\n msg = message(asset_id, timedelta, 40000)\n print(\"{0} -> Pushing message for asset {1} \\\\nTo topic: {2}\".format(str(i), asset_id, topic))\n p.produce(topic=topic, key=None, value=msg)\n if i % 10000 == 0:\n p.flush()\n if i % 50000 == 0:\n timedelta = timedelta + datetime.timedelta(minutes=10)\n i += 1\n\n\nif __name__ == \"__main__\":\n args = {}\n if len(sys.argv) > 0:\n args['topic'] = sys.argv[1].strip()\n main(**args)" }, { "alpha_fraction": 0.5871743559837341, "alphanum_fraction": 0.5908960700035095, "avg_line_length": 29.16964340209961, "blob_id": "adb6dba0b2960c7b43693504ab38049d9ce3f739", "content_id": "7a49218be087c4ce31cb7abcc9a2d16ee0915fde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3493, "license_type": "no_license", "max_line_length": 134, "num_lines": 112, "path": "/get_active_py.py", "repo_name": "cm-rennie/scripts", "src_encoding": "UTF-8", "text": "import atexit\r\nimport json\r\nimport traceback\r\nfrom confluent_kafka import avro, Consumer as KafkaConsumer, KafkaError, TopicPartition, OFFSET_END, TIMESTAMP_NOT_AVAILABLE, Producer\r\nfrom confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient\r\nfrom confluent_kafka.avro import AvroProducer as OGProducer, AvroConsumer\r\nfrom confluent_kafka.avro.serializer import SerializerError\r\nfrom confluent_kafka.avro.serializer.message_serializer import MessageSerializer\r\n\r\n\r\nclass StringKeyAvroConsumer(AvroConsumer):\r\n\r\n def poll(self, timeout=None):\r\n \"\"\"\r\n This is an overriden method from AvroConsumer class. 
This handles message\r\n deserialization using avro schema for the value only.\r\n\r\n @:param timeout\r\n @:return message object with deserialized key and value as dict objects\r\n \"\"\"\r\n if timeout is None:\r\n timeout = -1\r\n message = super(AvroConsumer, self).poll(timeout)\r\n if message is None:\r\n return None\r\n if not message.value() and not message.key():\r\n return message\r\n if not message.error():\r\n if message.value() is not None:\r\n try:\r\n decoded_value = self._serializer.decode_message(\r\n message.value())\r\n message.set_value(decoded_value)\r\n except Exception as e:\r\n print(\"bad value\", message.value())\r\n return message\r\n\r\n\r\ndef seek_to(consumer, topic_name, offset=OFFSET_END):\r\n topic = consumer.list_topics(topic_name).topics[topic_name]\r\n partition_ids = topic.partitions.keys()\r\n consumer.assign([\r\n TopicPartition(topic_name, pid, offset)\r\n for pid in partition_ids\r\n ])\r\n\r\n\r\nconsumer = StringKeyAvroConsumer({\r\n 'bootstrap.servers': 'kafka-sharedv1.internal.jask.ai:9092',\r\n 'schema.registry.url': 'http://schema-registry.internal.jask.ai:8081',\r\n 'group.id': 'crennie',\r\n 'auto.offset.reset': 'earliest',\r\n 'enable.auto.commit': False\r\n})\r\n\r\nstate={}\r\n\r\ndef stop():\r\n with open('recs.json', 'w') as f:\r\n f.write(json.dumps(state))\r\n consumer.close()\r\n producer.flush()\r\n\r\natexit.register(stop)\r\n\r\n\r\ndef cycle(consumer, on_msg=lambda x: None):\r\n msg = None\r\n try:\r\n msg = consumer.poll(10)\r\n except SerializerError:\r\n print(\"Message deserialization failed\")\r\n traceback.print_exc()\r\n\r\n if msg is not None:\r\n if msg.error():\r\n if msg.error().code() != KafkaError._PARTITION_EOF:\r\n print(\"err\", msg.error())\r\n else:\r\n record = msg.value()\r\n print('rec', record)\r\n on_msg(record)\r\n\r\nTOPIC='vertiv-us_list_items'\r\n\r\ndef to_state(record):\r\n if record['listId'].endswith('_recorded_future'):\r\n if record['active']:\r\n state[record['id']] = record\r\n else:\r\n try:\r\n del state[record['id']]\r\n except KeyError:\r\n pass\r\n\r\ndef deactivate(record):\r\n if record['active'] and record['listId'].endswith('_recorded_future'):\r\n record['active'] = False\r\n try:\r\n producer.produce(\r\n key=record['id'],\r\n value=record,\r\n topic=TOPIC\r\n )\r\n except BufferError:\r\n producer.flush()\r\n deactivate(record)\r\n\r\nseek_to(consumer, TOPIC, 0)\r\n\r\nwhile True:\r\n cycle(consumer, to_state)\r\n\r\n" }, { "alpha_fraction": 0.6328532099723816, "alphanum_fraction": 0.6374502182006836, "avg_line_length": 41.94736862182617, "blob_id": "5e23a1e37bdf7c90387c3f30cc70e342f6a96ee5", "content_id": "03ab9d12d75b83b53831bdfd805fe1eb3ed79364", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3263, "license_type": "no_license", "max_line_length": 108, "num_lines": 76, "path": "/suppression/checks.py", "repo_name": "cm-rennie/scripts", "src_encoding": "UTF-8", "text": "import re\nimport os\nfrom collections import Counter\n\n# first find list of files in /var/log.radar\nlog_files = [str(f) if str(f).endswith(\".log\") else None for f in os.listdir(\"/var/log/radar\")]\ninitial_anchors = []\nrevisited_anchors = []\nduplicates = []\n\n# Reg ex'es\nduplicate_re = r'(?<=Transformer publishing DUPLICATE: Some\\()\\w+-\\w+-\\w+-\\w+-\\w+'\nanchor_re = r'(?<=of anchor: Some\\()\\w+-\\w+-\\w+-\\w+-\\w+'\n\ninitial_anchor_re = r'(?<=publishing INITIAL anchor: Some\\(Event\\()\\w+-\\w+-\\w+-\\w+-\\w+'\nduplicates_re = r'(?<=with 
duplicates Vector\\()(\\w+-\\w+-\\w+-\\w+-\\w+,?)+' # note: need to parse this by comma\n\npunctuator_duplicate_re = r'(?<=DUPLICATE FROM PUNCTUATOR: Some\\()\\w+-\\w+-\\w+-\\w+-\\w+'\npunctuator_anchor_re = r'(?<=Punctuator publishing anchor: Some\\(Event\\()\\w+-\\w+-\\w+-\\w+-\\w+'\n\nfor _f in log_files:\n with open(\"/var/log/radar/{}\".format(_f), \"r\") as this_file:\n for line in this_file:\n anchor_res = re.search(anchor_re, line)\n if anchor_res:\n revisited_anchors.append(anchor_res.group(0))\n\n initial_anchor_res = re.search(initial_anchor_re, line)\n if initial_anchor_res:\n initial_anchors.append(initial_anchor_res.group(0))\n\n punctuator_anchor_res = re.search(punctuator_anchor_re, line)\n if punctuator_anchor_res:\n initial_anchors.append(punctuator_anchor_res.group(0))\n\n gathered_anchors = set([punctuator_anchor_res.group(0) if punctuator_anchor_res else None,\n initial_anchor_res.group(0) if initial_anchor_res else None,\n anchor_res.group(0) if anchor_res else None])\n\n duplicate_res = re.search(duplicate_re, line)\n if duplicate_res and duplicate_res.group(0) not in gathered_anchors:\n duplicates.append(duplicate_res.group(0))\n\n punctuator_duplicate_res = re.search(punctuator_duplicate_re, line)\n if punctuator_duplicate_res and punctuator_duplicate_res.group(0) not in gathered_anchors:\n duplicates.append(punctuator_duplicate_res.group(0))\n\n duplicates_res = re.search(duplicates_re, line)\n if duplicates_res:\n for dupe in duplicates_res.group(0).split(','):\n if dupe not in gathered_anchors:\n duplicates.append(dupe)\n\nprint \"Anchors Republished as duplicates:\"\nfor item in set(initial_anchors).union(set(revisited_anchors)).intersection(set(duplicates)):\n print item\n\nprint \"Anchors Published Twice:\"\nanchors_twice = [i for i in Counter(initial_anchors).iteritems() if i[1] > 1]\nfor item in anchors_twice:\n print item\nprint \"Count: {}\".format(len(anchors_twice))\n\nprint \"Duplicates Published Twice:\"\ndupes_twice = [i for i in Counter(duplicates).iteritems() if i[1] > 1]\nfor item in dupes_twice:\n print item\nprint \"Count: {}\".format(len(dupes_twice))\n\nprint \"Anchors with Duplicates and Never Initially Published:\"\nfor item in [i for i in revisited_anchors if i not in set(initial_anchors)]:\n print item\n\nprint \"Anchor Count: {}\".format(len(initial_anchors))\nprint \"Anchor Revisited Count: {}\".format(len(revisited_anchors))\nprint \"Duplicates Count: {}\".format(len(duplicates))" } ]
4
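Both deactivate_py.py and get_active_py.py in the record above handle confluent-kafka's BufferError by flushing the producer and retrying the produce call, using recursion. A minimal iterative sketch of the same backpressure pattern follows; the broker address, topic, and payload are placeholders, not values from the repo's configuration.

```python
from confluent_kafka import Producer

producer = Producer({'bootstrap.servers': 'localhost:9092'})  # placeholder broker


def produce_with_retry(topic, key, value, retries=3):
    """Produce a message, draining the local queue if it fills up."""
    for _ in range(retries):
        try:
            producer.produce(topic, key=key, value=value)
            return
        except BufferError:
            # Local queue is full; flush to drain it, then try again.
            producer.flush()
    raise RuntimeError('local queue still full after %d flushes' % retries)


produce_with_retry('sandbox_list_items', b'id-1', b'{"active": false}')
producer.flush()
```

Flushing inside the except block drains the client's in-memory queue before the retry, so the loop is bounded, unlike an unbounded recursive retry.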
Pranavvks/Testing-demo
https://github.com/Pranavvks/Testing-demo
20600f2280c1af41e1e08edc93807747557db90c
ffffd97a47360c703996bb7e947a0f00c755d15a
2e29edcdc06bfdb2a119990a266304e8729e31d0
refs/heads/master
2023-02-27T06:15:56.397402
2021-02-06T20:38:05
2021-02-06T20:38:05
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7810218930244446, "alphanum_fraction": 0.7810218930244446, "avg_line_length": 16.25, "blob_id": "4dacbb355287f275c3d9d9317e6f2a27a7a53a62", "content_id": "e7a6ad9ef9b5e25aa1980a16bcd7599a4e224ed9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 137, "license_type": "no_license", "max_line_length": 44, "num_lines": 8, "path": "/framework/framework/schema.py", "repo_name": "Pranavvks/Testing-demo", "src_encoding": "UTF-8", "text": "import graphene\nfrom user.schema import Query as PersonQuery\n\nclass Query( PersonQuery):\n pass\n\n\nschema = graphene.Schema(query=Query)" }, { "alpha_fraction": 0.643478274345398, "alphanum_fraction": 0.643478274345398, "avg_line_length": 25.58823585510254, "blob_id": "a1e4cf36084c76a639679b9d3ade68264e981cee", "content_id": "969ada3bc710df5fb852e4844d96be8a4c5d32b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 460, "license_type": "no_license", "max_line_length": 63, "num_lines": 17, "path": "/framework/user/schema.py", "repo_name": "Pranavvks/Testing-demo", "src_encoding": "UTF-8", "text": "import graphene\nfrom graphene_django import DjangoObjectType\nfrom .models import Person\n\nclass PersonType(DjangoObjectType):\n class Meta:\n model = Person\n fields = ('name', 'content')\n \n \nclass Query(graphene.ObjectType):\n person = graphene.Field(PersonType, name=graphene.String())\n \n \n def resolve_person(self, info, **kwargs):\n name = kwargs.get('name')\n return Person.objects.get(name=name)\n " } ]
2
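The graphene schema in the record above resolves a person field by name against a Django model, which needs a configured database to run. Below is a framework-free sketch of the same field/resolver pattern that executes standalone; the sample data is made up for illustration.

```python
import graphene

# Stand-in for the Django Person table.
PEOPLE = {'ada': 'Wrote the first program'}


class PersonType(graphene.ObjectType):
    name = graphene.String()
    content = graphene.String()


class Query(graphene.ObjectType):
    # Field takes a 'name' argument, just like the Django-backed version.
    person = graphene.Field(PersonType, name=graphene.String())

    def resolve_person(self, info, name=None):
        if name in PEOPLE:
            return PersonType(name=name, content=PEOPLE[name])
        return None


schema = graphene.Schema(query=Query)
result = schema.execute('{ person(name: "ada") { name content } }')
print(result.data)  # {'person': {'name': 'ada', 'content': 'Wrote the first program'}}
```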
david-micallef/Sandbox
https://github.com/david-micallef/Sandbox
7ae595649ebae15e48b9fabf26830b8e3004d873
54c4d548215a81e810f40364397cbe90379d1d36
5204fb418cbdce89efaaea669c105d87f03232e5
refs/heads/master
2021-01-06T20:39:05.601891
2017-08-29T07:52:19
2017-08-29T07:52:19
99,534,564
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8108108043670654, "alphanum_fraction": 0.8108108043670654, "avg_line_length": 26.75, "blob_id": "161bcb12547c3a1707652b8874c06fd86d502564", "content_id": "8b72a726b6bf14b4a8b62a2c211ed1bed1854bc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 111, "license_type": "no_license", "max_line_length": 63, "num_lines": 4, "path": "/README.md", "repo_name": "david-micallef/Sandbox", "src_encoding": "UTF-8", "text": "# Sandbox\nProject for various tests and demos\n\nPython project that prints every second letter of entered name.\n" }, { "alpha_fraction": 0.6123347878456116, "alphanum_fraction": 0.6211453676223755, "avg_line_length": 21.799999237060547, "blob_id": "7b819b4d695e6b151c997cbd1449b18ccd961d62", "content_id": "dfac9bc057fa8a2af78361dc2bbdedabe9785ce7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 227, "license_type": "no_license", "max_line_length": 45, "num_lines": 10, "path": "/oddName.py", "repo_name": "david-micallef/Sandbox", "src_encoding": "UTF-8", "text": "\"\"\" David \"\"\"\n\n\nusername = str(input(\"Enter username: \"))\nwhile username is \"\":\n print(\"No username entered.\")\n username = str(input(\"Enter username: \"))\n\nfor i in range(0,len(username),2):\n print(username[i], end= \"\")" } ]
2
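For reference, the every-second-letter loop in oddName.py above can be expressed with extended slicing, which steps through the string two characters at a time:

```python
# Equivalent to the index loop in oddName.py: take every second character.
username = "David"
print(username[::2])  # Dvd
```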
ananta399/MLPreprocessor
https://github.com/ananta399/MLPreprocessor
09ae594525c771950f8f1f184ad0724a2b913d7d
9e12ca1c47e292ac2d8f8bd827513f685c3d4a63
a6676278abdb152313f99154489a4c39a4d6501f
refs/heads/master
2022-11-13T16:42:06.026244
2020-07-02T15:56:26
2020-07-02T15:56:26
275,212,458
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.8003315329551697, "alphanum_fraction": 0.8049364686012268, "avg_line_length": 83.828125, "blob_id": "44a3be7f41a5e3485f57510f05c82f84bd76d794", "content_id": "d4d2836e9f137518c2ecfda9afb71b19d32714d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5433, "license_type": "no_license", "max_line_length": 654, "num_lines": 64, "path": "/README.md", "repo_name": "ananta399/MLPreprocessor", "src_encoding": "UTF-8", "text": "# OpenCV Preprocessor\nPreprocessor for shark detection ML algorithm that eliminates false positives from Aerial drone based shark footage at the shores. The ML algorithm is not included however, it can be assumed to analyze movement patterns.\n\nTested and designed for a diverse set of drone based shark footage collected by our sponsers. \nObject recognition algorithms could not be used due to the wide field of view. The individual objects are low resolution and have little color gradient as submerged objects take the color of the ocean. The sharks lost all the defining features.\n\n## Approach\n\n![alt text](https://github.com/ananta399/MLPreprocessor/blob/master/readmeImages/original.PNG)\n\n(Note, the above image is not representative of the output or the approcah. It just is there to clarify the hurdles) \n\nObject tracker tracks all objects. A ocean is very dynamic and has a lot of \"objects\". So, we needed to identify common false positives and try to eliminate them.\n\nCommon flase positives: Boats, Seals,Algae, Birds, Waves, Glare, Fish, Noise\n\n\nMost of the time was spent experimenting rather than coding.\n\nMajor hurdles:\n\n* Low resolution of sharks given the wide field of view of the camera. For standard height of the drone, the area of a shark in our images was only around 15x5 pixels. Sharks further lost clarity when submerged. Sharks looked like black blobs in the ocean and black blobs looked like sharks.\n\n* Sophisticated human visual processing. As you can see in the above image, our brain “tricks” us into believing the shark is black, however, isolating the shark reveals that it is actually still green. So, our images actually have low color gradient. A submerged shark may look black to the eye however, in an image, it takes the color of whatever water it is in. Water color depends on weather and other conditions.\n\nWe analyzed different color spaces for images in different conditions (having sharks, land, glare, boats, seals, different weather conditions etc). Analysis of the different color channels led us to realize that some features (such as waves) were greatly removed in some color spaces while some features (such as glare) were highly noticeable in some color spaces.\n\n\n![alt text](https://github.com/ananta399/MLPreprocessor/blob/master/readmeImages/colorspaces.PNG)\n\nNote that the image above is just an example of the concept and not necessarily the algorithm.\n\nImage is processed under different colorspaces and masked using thresholding. After every individual colorspace processing, image was gradually blurred using the mask to make it blend with the background. 
Only the region of no interest was blurred, sharks were not, maintaining the quality of the image in the region of interest.\n\nFurthermore, linear combinations of colorspaces were obtained experimentally to produce a noise-free image.\n\n\n![alt text](https://github.com/ananta399/MLPreprocessor/blob/master/readmeImages/mask1.png)\n\nThe above example image has a shark in the center, a bird in the top right corner, waves, shore, glare, and a boat. The final output image is given below.\n\n![alt text](https://github.com/ananta399/MLPreprocessor/blob/master/readmeImages/out.PNG)\n\nAs we can see in the above image, the background, glare, noise, etc. are removed. So is the bird.\nThe shark remains, but so does the boat shadow. This is because submerged sharks and shadows look similar and are not differentiated by the preprocessor alone.\nHowever, this is much more noise-free than the original image.\n\n\n\n## Results on a Matlab Object Tracker\nWe evaluated the results on a Matlab object tracker just to assess the preprocessor. The tracker parameters were changed to only track objects with a duration greater than a second, to remove random noise.\n\n![alt text](https://github.com/ananta399/MLPreprocessor/blob/master/readmeImages/tracker1.png)\n\nAs we can see from the middle and bottom images, the tracker tracks only the shark in the preprocessed image, while the unprocessed image has a lot of noise.\n\nWe tested our algorithm against 50 selected videos from the video database. To effectively test the success and adaptability of our algorithm, videos in different weather, tidal, and other ocean conditions, as well as videos containing different objects, were selected, including a few videos of vacant oceans. Clips where the camera was stationary were extracted from the videos to accommodate our motion-based object tracker, as this is a requirement for our approach. Ideal videos refer to scenarios where the shark is obviously spotted by eye, which translates to minimal glare and the shark not being too deep. Otherwise the video is marked as non-ideal.\n\n![alt text](https://github.com/ananta399/MLPreprocessor/blob/master/readmeImages/tables.PNG)\n\nAlgae, seals, and other fish were the most common false positives. This distinction is left to other steps in the classification process. Deeply submerged sharks were the most common false negatives.\n\n## CONCLUSION\nThe image preprocessor, while it needs to be improved, is a good proof-of-concept and can be used (with modifications) to remove false positives from object tracking algorithms or machine learning algorithms. 
The algorithm, although better under ideal conditions, was able to preprocess videos under different weather conditions, videos with a wide field of view and therefore low resolution of objects, and submerged/hazy sharks, enough to qualify as a proof-of-concept.\n" }, { "alpha_fraction": 0.7019193768501282, "alphanum_fraction": 0.732821524143219, "avg_line_length": 36.753623962402344, "blob_id": "789f80859123eafcf3fd6ee41ad4328daaaf4723", "content_id": "bc625507954a7ebd808ac2d74d3df69728a9bef1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5210, "license_type": "no_license", "max_line_length": 123, "num_lines": 138, "path": "/final.py", "repo_name": "ananta399/MLPreprocessor", "src_encoding": "UTF-8", "text": "import cv2\nimport sys\nimport numpy as np\nimport os\n\n###Input arguments: filename followed by two numbers, indicating the start time and end time in seconds\n###Example: python toSubmit.py DJI_0007.mov 314 362\n###Takes an input RGB image and removes common false positives\n\ndef removeObstacles(src):\n    currentImage = src\n    radius = 17\n    valueChannelHighThreshold = 130\n    valueChannelLowThreshold = 0\n    saturationChannelHighThreshold = 255\n    saturationChannelLowThreshold = 0\n    #Slowly Blur the image excluding regions of interest with increasing intensity\n    for i in range(0,3):\n        radius = radius + 5\n        hsv = cv2.cvtColor(currentImage, cv2.COLOR_BGR2HSV)\n        color_mask = cv2.inRange(hsv, (0, saturationChannelLowThreshold, valueChannelLowThreshold),\n(255,saturationChannelHighThreshold,valueChannelHighThreshold))\n        kernel = np.ones((2,2),np.uint8)\n        #erode_color_mask = cv2.erode(color_mask,kernel,iterations = 2) ##Erosion may be helpful to get rid of noise\n        erode_color_mask = color_mask\n        ###Use either Average Blur or Median Blur\n        blurred = cv2.blur(currentImage, (radius,radius))\n        #blurred = cv2.medianBlur(currentImage, radius) #For Median Blur\n        colorMask_opposite = cv2.bitwise_not(erode_color_mask)\n        blurred_fill_holes = cv2.bitwise_and(blurred, blurred, mask = colorMask_opposite)\n        cv2.imshow(\"removeObstacles mask\", color_mask)\n        maskedImg = cv2.bitwise_and(currentImage, currentImage, mask = erode_color_mask)\n        temp = cv2.add(blurred_fill_holes, maskedImg)\n    return temp\n    \n    \n###Final blurring of the image before sending to the object tracker.\n###src is the original RGB image while gray is the grayscale image to be sent to the tracker\ndef finalBlur(src, gray):\n    valueChannelHighThreshold = 130\n    valueChannelLowThreshold = 0\n    saturationChannelHighThreshold = 255\n    saturationChannelLowThreshold = 0\n    radius = 500\n    currentImage = src\n    hsv = cv2.cvtColor(currentImage, cv2.COLOR_BGR2HSV)\n    color_mask = cv2.inRange(hsv, (0, saturationChannelLowThreshold, valueChannelLowThreshold),\n(255,saturationChannelHighThreshold,valueChannelHighThreshold))\n    #erode_color_mask = cv2.erode(color_mask,kernel,iterations = 2) ##Erosion may be helpful to get rid of noise\n    erode_color_mask = color_mask\n    ###Use either Average Blur or Median Blur\n    kernel = np.ones((4,4),np.uint8) ##Currently unused, but use this for Median Blur\n    blurred = cv2.blur(gray, (radius,radius)) #For Average Blur\n    #blurred = cv2.medianBlur(currentImage, radius) #For Median Blur\n    colorMask_opposite = cv2.bitwise_not(erode_color_mask)\n    blurred_fill_holes = cv2.bitwise_and(blurred, blurred, mask = colorMask_opposite)\n    maskedImg = cv2.bitwise_and(gray,gray, mask = erode_color_mask)\n    temp = cv2.add(blurred_fill_holes, maskedImg)\n    return temp\n    \n    
\n##Increases Contrast and Brightness\ndef bright(image):\n alpha = 2\n beta = -100\n new_image = cv2.convertScaleAbs(image, alpha=alpha, beta=beta)\n return new_image\n \n \n####################### MAIN ####################################\nfilename = sys.argv[1] ##Filename of the input video\nfps = 30 ##Frames per second of the input video\ncap = cv2.VideoCapture(filename) ##Either camera or Video File\n# Check if camera opened successfully\nif (cap.isOpened()== False):\n print(\"Error opening video stream or file\")\n#Output Files\nframe_width = int(cap.get(3))\nframe_height = int(cap.get(4))\nout3 = cv2.VideoWriter('output_to_motion_object_tracker.avi',cv2.VideoWriter_fourcc('M','J','P','G'), int(fps), (1024,768))\nout4 = cv2.VideoWriter('orgi.avi',cv2.VideoWriter_fourcc('M','J','P','G'), int(fps), (1024,768))\n##Convert seconds into frames\nstart = int(sys.argv[2])\nstart = int(start*fps)\nend = int(sys.argv[3])\nend = int(end*fps)\n##Current frame\ncount = 0\ncap.set(1,start)\n\n# Read until video is completed\nwhile(cap.isOpened()):\n # Capture frame-by-frame\n ret, img = cap.read()\n if ret == True:\n count = count + 1\n if (count > end - start):\n break\n img = cv2.resize(img,(1024,768))\n originalImage = cv2.resize(img,(1024,768))\n img = removeObstacles(img)\n height, width, channels = img.shape\n upper_left = (int(width / 2) - 5, int(height / 2) - 2)\n bottom_right = (int(width / 2) + 2, int(height / 2) + 2)\n \n ##Convert to HSV and LAB color spaces\n lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n h,s,v = cv2.split( hsv);\n l,a,b = cv2.split(lab);\n a_blur = finalBlur(img, a)\n cv2.imshow('Original',img)\n \n ##Perform linear operation on various channels, values and channels selected through experimentation\n vby4 = np.floor_divide(v,4)\n sub = np.subtract(a,vby4)\n cv2.imshow('Linear combination of Channels',sub)\n sub_blur_a_modified = bright(sub) ##Increase the contrast of the output frame\n sub_blur_a = finalBlur(img, sub_blur_a_modified) ##Increasing Contrast increases noise so get rid of them\n cv2.imshow('sub a mod',sub_blur_a_modified)\n \n ###Write output video and original video to file\n outfile3 = cv2.cvtColor(sub_blur_a_modified, cv2.COLOR_GRAY2RGB)\n out3.write(outfile3)\n out4.write(originalImage)\n # Press Q on keyboard to exit\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n # Break the loop\n else:\n break\n \n \n# When everything done, release the video capture object\ncap.release()\nout3.release()\nout4.release()\ncv2.destroyAllWindows()\n" } ]
2
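The README in the record above describes the core trick as threshold-masking a colorspace channel and blurring only the region of no interest. Below is a minimal sketch of that mask-then-selective-blur step, condensed from the pattern used in final.py; the input path and the value-channel threshold are placeholders.

```python
import cv2

# Dark pixels in the HSV value channel are treated as possible submerged
# objects; everything else is the "region of no interest".
img = cv2.imread('frame.png')  # placeholder input frame
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (0, 0, 0), (255, 255, 130))

# Blur the whole frame, then keep the blur only where the mask is off,
# so the region of interest stays sharp.
blurred = cv2.blur(img, (25, 25))
background = cv2.bitwise_and(blurred, blurred, mask=cv2.bitwise_not(mask))
foreground = cv2.bitwise_and(img, img, mask=mask)

cv2.imwrite('out.png', cv2.add(background, foreground))
```

Running this repeatedly with a growing kernel, as removeObstacles does, makes the masked-out background blend progressively while the candidate objects keep their original pixels.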
blisteringherb/quasar
https://github.com/blisteringherb/quasar
6a933ebc616f03037ceb21c9513782d32fd7c07e
86a834ea0d0f39e61f84187c08181df54714c887
d131fa36be601d38631020deb241a7044637f223
refs/heads/master
2020-09-10T16:58:22.134254
2019-11-14T17:39:26
2019-11-14T17:39:26
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6545960903167725, "alphanum_fraction": 0.6545960903167725, "avg_line_length": 20.117647171020508, "blob_id": "feba2bcf30b4b791037017b80b47ee7473b53d49", "content_id": "440677d42458c2e3212f9489bd3683dc145726c5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 359, "license_type": "permissive", "max_line_length": 66, "num_lines": 17, "path": "/quasar/users.py", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "from .sql_utils import run_sql_file_raw, refresh_materialized_view\n\n\ndef create():\n run_sql_file_raw('./data/sql/derived-tables/users_table.sql')\n\n\ndef refresh():\n refresh_materialized_view('public.cio_latest_status')\n refresh_materialized_view('public.users')\n\n\nif __name__ == '__create__':\n create()\n\nif __name__ == '__refresh__':\n refresh()\n" }, { "alpha_fraction": 0.7289719581604004, "alphanum_fraction": 0.7289719581604004, "avg_line_length": 33, "blob_id": "b840e349448a8f1926c7bbad8b197ea611032349", "content_id": "9895f1c0f0a754245ea73c78894ab8d8764d610d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 107, "license_type": "permissive", "max_line_length": 62, "num_lines": 3, "path": "/docs/compiled/ds_dbt/schema_test/not_null_snowplow_payload_event_event_id.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "\n\n\n\nselect count(*)\nfrom \"quasar_prod_warehouse\".\"public\".\"snowplow_payload_event\"\nwhere event_id is null\n\n" }, { "alpha_fraction": 0.6276595592498779, "alphanum_fraction": 0.6276595592498779, "avg_line_length": 41.727272033691406, "blob_id": "69e892f7fb998c0db5d373e82c36c95541b12d5f", "content_id": "7e04a3d2e498947421a810300e4193a6ab034908", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 470, "license_type": "permissive", "max_line_length": 55, "num_lines": 11, "path": "/quasar/dbt/models/phoenix_events/snowplow_payload_event.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "SELECT\n event_id,\n payload::jsonb #>> '{utmSource}' AS utm_source,\n payload::jsonb #>> '{utmMedium}' AS utm_medium,\n payload::jsonb #>> '{utmCampaign}' AS utm_campaign,\n payload::jsonb #>> '{url}' AS url,\n payload::jsonb #>> '{campaignId}' AS campaign_id,\n payload::jsonb #>> '{modalType}' AS modal_type,\n payload::jsonb #>> '{searchQuery}' AS search_query,\n _fivetran_synced AS ft_timestamp\n FROM {{ env_var('FT_SNOWPLOW') }}.snowplow_event\n" }, { "alpha_fraction": 0.7363636493682861, "alphanum_fraction": 0.7363636493682861, "avg_line_length": 34, "blob_id": "057b89df0c495df2f5d99f3d174a0022d236d634", "content_id": "04375222d0640a024a95b1adcd3ea45a1f644021", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 110, "license_type": "permissive", "max_line_length": 59, "num_lines": 3, "path": "/docs/compiled/ds_dbt/schema_test/not_null_snowplow_base_event_event_datetime.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "\n\n\n\nselect count(*)\nfrom \"quasar_prod_warehouse\".\"public\".\"snowplow_base_event\"\nwhere event_datetime is null\n\n" }, { "alpha_fraction": 0.7264150977134705, "alphanum_fraction": 0.7264150977134705, "avg_line_length": 32.66666793823242, "blob_id": "8941204bf86e76b1d76ff2b1d9542d2331342389", "content_id": "5153f72ec90e1c7758b401faaefe47309393a9de", "detected_licenses": [ "MIT" 
], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 106, "license_type": "permissive", "max_line_length": 59, "num_lines": 3, "path": "/docs/compiled/ds_dbt/schema_test/not_null_snowplow_raw_events_event_type.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "\n\n\n\nselect count(*)\nfrom \"quasar_prod_warehouse\".\"public\".\"snowplow_raw_events\"\nwhere event_type is null\n\n" }, { "alpha_fraction": 0.6090342402458191, "alphanum_fraction": 0.6098130941390991, "avg_line_length": 32.81578826904297, "blob_id": "fca67dd820e6987ab727d35e132fbdeb7454a15a", "content_id": "56698c408efabfc5c3ad19abe677fd1880cf5a72", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1284, "license_type": "permissive", "max_line_length": 91, "num_lines": 38, "path": "/docs/compiled/ds_dbt/campaign_activity/reportbacks.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "SELECT\n pd.northstar_id,\n pd.id as post_id,\n pd.signup_id,\n pd.campaign_id,\n pd.\"action\" as post_action,\n pd.\"type\" as post_type,\n pd.status as post_status,\n pd.post_class,\n pd.created_at as post_created_at,\n pd.source as post_source,\n pd.source_bucket as post_source_bucket,\n pd.reportback_volume,\n pd.civic_action,\n pd.scholarship_entry,\n pd.location,\n pd.postal_code,\n CASE\n WHEN (pd.post_class ilike '%%vote%%' AND pd.status = 'confirmed')\n THEN 'self-reported registrations'\n WHEN (pd.post_class ilike '%%vote%%' AND pd.status <> 'confirmed')\n THEN 'voter_registrations'\n WHEN pd.\"type\" ilike '%%photo%%' AND pd.post_class NOT ilike '%%vote%%'\n THEN 'photo_rbs'\n WHEN pd.\"type\" ilike '%%text%%'\n THEN 'text_rbs'\n WHEN pd.\"type\" ilike '%%social%%'\n THEN 'social'\n WHEN pd.\"type\" ilike '%%call%%'\n THEN 'phone_calls'\n ELSE NULL END AS post_bucket\nFROM \"quasar_prod_warehouse\".\"public\".\"posts\" pd\nWHERE pd.id IN\n (SELECT\n min(id)\n FROM \"quasar_prod_warehouse\".\"public\".\"posts\" p\n WHERE p.is_reportback = 'true' AND p.is_accepted = 1\n GROUP BY p.northstar_id, p.campaign_id, p.signup_id, p.post_class, p.reportback_volume)" }, { "alpha_fraction": 0.7447724342346191, "alphanum_fraction": 0.7447724342346191, "avg_line_length": 32.18367385864258, "blob_id": "61beb6a3064590801539bae61312e1678ee88492", "content_id": "8a2461586a02de9265544af55d5c00ad7d3b4d6e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1626, "license_type": "permissive", "max_line_length": 88, "num_lines": 49, "path": "/quasar/dbt/models/phoenix_events/snowplow_sessions.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "WITH sessions AS (\nSELECT\n session_id,\n min(device_id) AS device_id,\n min(event_datetime) AS landing_datetime,\n max(event_datetime) AS ending_datetime,\n date_part(\n\t'seconds', max(event_datetime) - min(event_datetime)\n ) AS session_duration_seconds,\n count(DISTINCT CASE WHEN event_name = 'view' THEN \"path\" END) AS num_pages_viewed\nFROM {{ ref('snowplow_phoenix_events') }}\nGROUP BY session_id\n),\nentry_exit_pages AS (\nSELECT DISTINCT\n session_id,\n first_value(\"path\") OVER (PARTITION BY session_id ORDER BY event_datetime \n\tROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS landing_page,\n first_value(event_id) OVER (PARTITION BY session_id ORDER BY event_datetime \n\tROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS event_id,\n last_value(\"path\") OVER 
(PARTITION BY session_id ORDER BY event_datetime \n\tROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS exit_page\nFROM {{ ref('snowplow_phoenix_events') }}\n),\ntime_between_sessions AS (\nSELECT DISTINCT\n device_id,\n session_id,\n LAG(ending_datetime) OVER (PARTITION BY device_id ORDER BY landing_datetime\n\tROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING\n ) AS prev_session_endtime\nFROM sessions\n)\nSELECT\ns.session_id,\np.event_id,\ns.device_id,\ns.landing_datetime,\ns.ending_datetime,\ns.session_duration_seconds,\ns.num_pages_viewed,\np.landing_page,\np.exit_page,\ndate_part('day', s.landing_datetime - t.prev_session_endtime) AS days_since_last_session\nFROM sessions s\nLEFT JOIN entry_exit_pages p\nON p.session_id = s.session_id\nLEFT JOIN time_between_sessions t\nON t.session_id = s.session_id\n" }, { "alpha_fraction": 0.7281553149223328, "alphanum_fraction": 0.7281553149223328, "avg_line_length": 31.66666603088379, "blob_id": "6929b48d94b860f4a9f6ee9597814ae04cf04712", "content_id": "819a94d847f34dcd3c1f6e45f2cd7c36a15fc5b1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 103, "license_type": "permissive", "max_line_length": 57, "num_lines": 3, "path": "/docs/compiled/ds_dbt/schema_test/not_null_snowplow_sessions_exit_page.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "\n\n\n\nselect count(*)\nfrom \"quasar_prod_warehouse\".\"public\".\"snowplow_sessions\"\nwhere exit_page is null\n\n" }, { "alpha_fraction": 0.6083441376686096, "alphanum_fraction": 0.6083441376686096, "avg_line_length": 33.35555648803711, "blob_id": "1e3e370a5617a1f3c0a46b4118bf08aa18a1f0ae", "content_id": "38454db97d4584dcca4430111fd22c04e579e8fe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3092, "license_type": "permissive", "max_line_length": 79, "num_lines": 90, "path": "/quasar/cio_bounced_backfill.py", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "from concurrent.futures import ThreadPoolExecutor as PoolExecutor\nimport os\nimport requests\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.engine.url import URL\nfrom sqlalchemy.sql import text\n\nfrom .utils import log\n\npg_vars = {\n 'drivername': os.getenv('PG_DRIVER'),\n 'username': os.getenv('PG_USER'),\n 'password': os.getenv('PG_PASSWORD'),\n 'host': os.getenv('PG_HOST'),\n 'port': os.getenv('PG_PORT'),\n 'database': os.getenv('PG_DATABASE')\n}\n\n\npg_ssl = os.getenv('PG_SSL')\n\n\n# Setup SQL Alchemy postgres connection.\nengine = create_engine(URL(**pg_vars),\n connect_args={'sslmode': pg_ssl})\nconn = engine.connect()\n\n\n# Grab a page from C.io messages API with optional next param for pagination.\ndef get_page(next_page=None):\n params = {'metric': os.getenv('CIO_API_METRIC'),\n 'type': os.getenv('CIO_API_TYPE'),\n 'limit': os.getenv('CIO_API_LIMIT'),\n 'start': next_page}\n user = os.getenv('CIO_API_USER')\n password = os.getenv('CIO_API_PASSWORD')\n uri = os.getenv('CIO_API_URI')\n r = requests.get(uri, params=params, auth=(user, password))\n return r.json()\n\n\n# Insert C.io email_bounced record atomically.\ndef insert_record(message):\n query = text(''.join((\"INSERT INTO cio.email_bounced_backfill(email_id, \"\n \"customer_id, email_address, template_id, subject, \"\n \"timestamp) VALUES (:email_id, :customer_id, \"\n \":email_address, :template_id, :subject, \"\n \"to_timestamp(:timestamp)) ON CONFLICT (email_id, \"\n \" 
email_address, timestamp) DO NOTHING\")))\n record = {\n 'email_id': message['id'],\n 'customer_id': message['customer_id'],\n 'email_address': message['recipient'],\n 'template_id': message['msg_template_id'],\n 'subject': message['subject'],\n 'timestamp': message['metrics']['sent']\n }\n conn.execute(query, **record)\n log('Message ID {} processed.'.format(message['id']))\n\n\n# Get next page location.\ndef get_bookmark():\n s = \"SELECT * FROM cio.bounced_backfill_track\"\n result = conn.execute(s)\n return result.fetchall()\n\n\n# Keep track of next page location.\ndef insert_bookmark(next_page):\n query = text(''.join((\"INSERT INTO cio.bounced_backfill_track(next_page) \"\n \"VALUES (:next_page)\")))\n conn.execute(\"TRUNCATE cio.bounced_backfill_track\")\n conn.execute(query, next_page=next_page)\n\n\ndef main():\n # Check if this is start of the run. If not, resume from last page.\n if get_bookmark() is None:\n page = get_page()\n else:\n page = get_page(next_page=get_bookmark())\n insert_bookmark(page['next'])\n # While there is a page of results, continue processing.\n while page:\n with PoolExecutor(max_workers=int(os.getenv('POOL_SIZE'))) as executor:\n for _ in executor.map(insert_record, page['messages']):\n pass\n page = get_page(next_page=get_bookmark())\n insert_bookmark(page['next'])\n" }, { "alpha_fraction": 0.5581074357032776, "alphanum_fraction": 0.5631151795387268, "avg_line_length": 34.96894454956055, "blob_id": "b70b15bef627dd247db35ffa160e1c0f9a54af10", "content_id": "eadc597e4674305b03eb4834050a8c5ce7ab9cb2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5791, "license_type": "permissive", "max_line_length": 78, "num_lines": 161, "path": "/quasar/northstar_to_user_table.py", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "from datetime import datetime as dt\nimport os\nimport sys\nimport time\nimport json\n\nfrom .northstar_scraper import NorthstarScraper\nfrom .sa_database import Database\nfrom .utils import Duration, validate_date\n\n\"\"\"DS Northstar to Quasar User ETL script.\n\nThis ETL scripts scrapes the DoSomething Thor Northstar User API and ETL's the\noutput to our MySQL Quasar data warehouse.\n\nThe script takes an optional argument for what Northstar page result to start\non. This is mostly used to backfill from a certain page, or from the dawn\nof time. 
Otherwise, pagination is stored in a small status tracking table\nthat gets updated on each ingestion loop.\n\n\"\"\"\n\ndb = Database()\nscraper = NorthstarScraper(os.environ.get('NS_URI'))\n\n\ndef _undict_value(value):\n    if isinstance(value, dict):\n        return value['value']\n    else:\n        return value\n\n\n# Returns a proper NULL value when the API returns the 'null' string\ndef _null_value(value):\n    if json.dumps(value) == 'null':\n        return None\n    else:\n        return json.dumps(value)\n\n\ndef _save_user(user):\n    record = {\n        'id': user['id'],\n        'first_name': user['first_name'],\n        'last_name': user['last_name'],\n        'last_initial': user['last_initial'],\n        'photo': user['photo'],\n        'email': user['email'],\n        'mobile': user['mobile'],\n        'facebook_id': user['facebook_id'],\n        'interests': user['interests'],\n        'birthdate': validate_date(user['birthdate']),\n        'addr_street1': _undict_value(user['addr_street1']),\n        'addr_street2': _undict_value(user['addr_street2']),\n        'addr_city': _undict_value(user['addr_city']),\n        'addr_state': _undict_value(user['addr_state']),\n        'addr_zip': _undict_value(user['addr_zip']),\n        'source': user['source'],\n        'source_detail': user['source_detail'],\n        'slack_id': user['slack_id'],\n        'sms_status': user['sms_status'],\n        'sms_paused': user['sms_paused'],\n        'voter_registration_status': user['voter_registration_status'],\n        'language': user['language'],\n        'country': user['country'],\n        'role': user['role'],\n        'last_accessed_at': user['last_accessed_at'],\n        'last_authenticated_at': user['last_authenticated_at'],\n        'last_messaged_at': user['last_messaged_at'],\n        'updated_at': user['updated_at'],\n        'created_at': user['created_at'],\n        'email_subscription_status': user['email_subscription_status'],\n        'feature_flags': _null_value(user['feature_flags']),\n        'school_id': user['school_id']\n    }\n    query = ''.join((\"INSERT INTO northstar.users (id, \"\n                     \"first_name, last_name, last_initial, \"\n                     \"photo, email, mobile, facebook_id, \"\n                     \"interests, birthdate, addr_street1, \"\n                     \"addr_street2, addr_city, addr_state, \"\n                     \"addr_zip, source, \"\n                     \"source_detail, slack_id, sms_status, \"\n                     \"sms_paused, voter_registration_status, \"\n                     \"language, country, \"\n                     \"role, last_accessed_at, \"\n                     \"last_authenticated_at, \"\n                     \"last_messaged_at, updated_at,\"\n                     \"created_at, email_subscription_status, feature_flags,\"\n                     \"school_id) VALUES (:id,:first_name,:last_name,\"\n                     \":last_initial,:photo,:email,:mobile,:facebook_id,\"\n                     \":interests,:birthdate,:addr_street1,:addr_street2,\"\n                     \":addr_city,:addr_state,:addr_zip,\"\n                     \":source,:source_detail,\"\n                     \":slack_id,:sms_status,:sms_paused,\"\n                     \":voter_registration_status,:language,:country,\"\n                     \":role,:last_accessed_at,\"\n                     \":last_authenticated_at,:last_messaged_at,:updated_at,\"\n                     \":created_at,:email_subscription_status, \"\n                     \":feature_flags, :school_id) \"\n                     \"ON CONFLICT (id, created_at, updated_at) \"\n                     \"DO UPDATE SET \"\n                     \"email_subscription_status = :email_subscription_status\"\n                     \"\"))\n    db.query_str(query, record)\n\n\ndef _interval(hours_ago):\n    # Return list of ISO8601 formatted timestamps\n    # from hours_ago in format (hours_ago, hours_ago-1).\n    def _format(hr):\n        # Get ISO8601 formatted time from 'hr' hours ago.\n        _time = int(time.time()) - (int(hr) * 3600)\n        formatted = dt.fromtimestamp(_time).isoformat()\n        return formatted\n\n    start = _format(hours_ago)\n    end = _format(hours_ago - 1)\n    return (start, end)\n\n\ndef _process_page(results):\n    users = results\n    for user in users['data']:\n        _save_user(user)\n\n\ndef _backfill(hours_ago):\n    
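# Example: for hours_ago=3, _interval() yields the one-hour windows\n    # (now-1h, now), (now-2h, now-1h) and (now-3h, now-2h); each [start, end)\n    # window is then re-queried from /v2/users with cursor pagination until\n    # meta.cursor.next comes back None.\n    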
duration = Duration()\n # Get list of 1 hour chunks for total backfill hours_ago.\n intervals = [_interval(hour) for hour in\n range(int(hours_ago) + 1) if hour > 0]\n # Backfill from most recent going backwards.\n intervals.reverse()\n\n for start, end in intervals:\n params = {'after[updated_at]': str(start),\n 'before[updated_at]': str(end),\n 'pagination': 'cursor'}\n\n # Set page param and next page to true assuming at least\n # one page of results exist.\n i = 1\n params['page'] = i\n path = '/v2/users'\n next = True\n while next is True:\n response = scraper.get(path, params).json()\n _process_page(response)\n if response['meta']['cursor']['next'] is None:\n next = False\n else:\n i += 1\n params['page'] = i\n\n db.disconnect()\n duration.duration()\n\n\ndef backfill():\n _backfill(sys.argv[1])\n" }, { "alpha_fraction": 0.7387387156486511, "alphanum_fraction": 0.7387387156486511, "avg_line_length": 34.33333206176758, "blob_id": "c330191aae19828881e1f23477c5b45afe718f30", "content_id": "4d3182911bfe51a9efa0f0a024b0a69260c11186", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 111, "license_type": "permissive", "max_line_length": 62, "num_lines": 3, "path": "/docs/compiled/ds_dbt/schema_test/not_null_snowplow_payload_event_ft_timestamp.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "\n\n\n\nselect count(*)\nfrom \"quasar_prod_warehouse\".\"public\".\"snowplow_payload_event\"\nwhere ft_timestamp is null\n\n" }, { "alpha_fraction": 0.6757441163063049, "alphanum_fraction": 0.6953768134117126, "avg_line_length": 24.658536911010742, "blob_id": "6bed3105785064d6f69bd02507993d98fca47126", "content_id": "5e30a8b72967a465d43fa20f62468ba898032706", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3158, "license_type": "permissive", "max_line_length": 156, "num_lines": 123, "path": "/quasar/dbt/models/phoenix_events/schema.md", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "\n\n{% docs event_id %}\nThis is a unique identifier for each event\n{% enddocs %}\n\n{% docs event_source %}\nApplication source for event (eg. Phoenix, Northstar)\n{% enddocs %}\n\n{% docs event_datetime %}\nWhen the event occurred in UTC (eg. 2018-01-01 12:00:00)\n{% enddocs %}\n\n{% docs event_name %}\nName of the event (eg. northstar_submitted_register, https://docs.google.com/spreadsheets/d/1lm-fGrIm85nUTxSojqyCt_Ehmm1zEbViFhKpxcJiz1A/edit#gid=406441516)\n{% enddocs %}\n\n{% docs event_type %}\nType of event (pv = 'Page View', se = 'Structured Event')\n{% enddocs %}\n\n{% docs host %}\nURL host where event occurred (eg. www.dosomething.org or identity.dosomething.org)\n{% enddocs %}\n\n{% docs path %}\nURL path event occurred at (eg. /login or /us/campaigns/huddle-for-heroes)\n{% enddocs %}\n\n{% docs query_parameters %}\nOptional query parameters for the request (eg. query=huddle)\n{% enddocs %}\n\n{% docs se_category %}\nCategory of event (eg. focused_field, authentication) - Custom structured event\n{% enddocs %}\n\n{% docs se_action %}\nAction performed / event name (eg. form_submitted, action_failed) - Custom structured event\n{% enddocs %}\n\n{% docs se_label %}\nThe object of the action (eg. 
first_name, register, voter_reg_status) - Custom structured event\n{% enddocs %}\n\n{% docs session_id %}\nUnique identifier of the user's session\n{% enddocs %}\n\n{% docs session_counter %}\nHow many sessions a user has started\n{% enddocs %}\n\n{% docs browser_size %}\nWhich type of browser a user is using (eg. Mobile, Desktop)\n{% enddocs %}\n\n{% docs northstar_id %}\nThe Northstar ID of the user who generated the event\n{% enddocs %}\n\n{% docs device_id %}\nID of the device used\n{% enddocs %}\n\n{% docs referrer_host %}\nURL host of the referring site (eg. google.com)\n{% enddocs %}\n\n{% docs referrer_path %}\nURL path from referring site (eg. /10-stats-on-teen-drug-and-alcohol-use/)\n{% enddocs %}\n\n{% docs referrer_source %}\nReferrer source name (eg. Google, Facebook)\n{% enddocs %}\n\n{% docs utm_source %}\nTracks where the traffic is coming from. (eg. scholarship_listing, Facebook)\n{% enddocs %}\n\n{% docs utm_medium %}\nHow the traffic got to the platform (eg. referral, CPC)\n{% enddocs %}\n\n{% docs utm_campaign %}\nTracks which campaign the traffic was generated by. Shows up in Google Analytics as Campaign Name (eg. editorial_newsletter)\n{% enddocs %}\n\n{% docs url %}\nURL of campaign (eg. https://dosome.click/nyn5m7)\n{% enddocs %}\n\n{% docs modal_type %}\nNULL or SURVEY_MODAL\n{% enddocs %}\n\n{% docs landing_datetime %}\nWhen the session started in UTC (eg. 2018-01-01 12:00:00)\n{% enddocs %}\n\n{% docs ending_datetime %}\nWhen the session ended in UTC (eg. 2018-01-01 12:00:00)\n{% enddocs %}\n\n{% docs session_duration_seconds %}\nSession duration in seconds\n{% enddocs %}\n\n{% docs num_pages_views %}\nNumber of pages viewed in session\n{% enddocs %}\n\n{% docs landing_page %}\nFirst page the user viewed in the session (eg. /us/facts/11-facts-about-bp-oil-spill)\n{% enddocs %}\n\n{% docs exit_page %}\n\"Which page the user ended or exited their session from (eg. 
/us/campaigns/green-your-getaway)\"\n{% enddocs %}\n\n{% docs days_since_last_session %}\n\"Days since their last session.\"\n{% enddocs %}\n" }, { "alpha_fraction": 0.5668903589248657, "alphanum_fraction": 0.5668903589248657, "avg_line_length": 37.534481048583984, "blob_id": "9531349d3ccbf5c61c81320b36609dbf19b4e915", "content_id": "e02b83e241aa9147d69533d0bf89eb2cd701487b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2235, "license_type": "permissive", "max_line_length": 78, "num_lines": 58, "path": "/quasar/campaign_info.py", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "import os\n\nfrom .sa_database import Database\nfrom .sql_utils import run_sql_file\nfrom .utils import Duration\n\n\ndata = {\n 'campaign_info_all': os.getenv('CAMPAIGN_INFO_ALL'),\n 'field_data_field_campaigns': os.getenv('FIELD_DATA_FIELD_CAMPAIGNS'),\n 'node': os.getenv('NODE'),\n 'field_data_field_campaign_type': os.getenv(''.join(('FIELD_DATA_FIELD_'\n 'CAMPAIGN_TYPE'))),\n 'field_data_field_run_date': os.getenv('FIELD_DATA_FIELD_RUN_DATE'),\n 'field_data_field_call_to_action': os.getenv(''.join(('FIELD_DATA_FIELD_'\n 'CALL_TO_ACTION'))),\n 'field_data_field_reportback_noun': os.getenv(''.join(('FIELD_DATA_FIELD_'\n 'REPORTBACK_'\n 'NOUN'))),\n 'field_data_field_reportback_verb': os.getenv(''.join(('FIELD_DATA_FIELD_'\n 'REPORTBACK_'\n 'VERB'))),\n 'field_data_field_action_type': os.getenv('FIELD_DATA_FIELD_ACTION_TYPE'),\n 'taxonomy_term_data': os.getenv('TAXONOMY_TERM_DATA'),\n 'field_data_field_cause': os.getenv('FIELD_DATA_FIELD_CAUSE'),\n 'campaign_info': os.getenv('CAMPAIGN_INFO'),\n 'campaigns': os.getenv('CAMPAIGNS'),\n 'campaign_info_international': os.getenv('CAMPAIGN_INFO_INTERNATIONAL')\n}\n\n\ndef create():\n \"\"\"(Re)create materialized views: campaign_info_all, campaign_info,\n campaign_info_international.\n \"\"\"\n duration = Duration()\n run_sql_file('./data/sql/derived-tables/campaign_info.sql', data)\n duration.duration()\n\n\ndef refresh():\n db = Database()\n duration = Duration()\n\n # Setting statement for schema diffs of campaign_info_all\n campaign_all = \"REFRESH MATERIALIZED VIEW \" + data['campaign_info_all']\n db.query(campaign_all)\n db.query('REFRESH MATERIALIZED VIEW public.campaign_info')\n db.query('REFRESH MATERIALIZED VIEW public.campaign_info_international')\n db.disconnect()\n duration.duration()\n\n\nif __name__ == \"__create__\":\n create()\n\nif __name__ == \"__refresh__\":\n refresh()\n" }, { "alpha_fraction": 0.7433628439903259, "alphanum_fraction": 0.7433628439903259, "avg_line_length": 35, "blob_id": "f399546211ad625ece868a67817e7f28ecad30ad", "content_id": "3d90dc523490c8c8de4a43f899b51abf682788d0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 113, "license_type": "permissive", "max_line_length": 63, "num_lines": 3, "path": "/docs/compiled/ds_dbt/schema_test/not_null_phoenix_events_combined_referrer_path.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "\n\n\n\nselect count(*)\nfrom \"quasar_prod_warehouse\".\"public\".\"phoenix_events_combined\"\nwhere referrer_path is null\n\n" }, { "alpha_fraction": 0.47247904539108276, "alphanum_fraction": 0.4725865423679352, "avg_line_length": 44.155338287353516, "blob_id": "e5a639fe3faccd3f6af9cae038636fccf423e6c1", "content_id": "9ae7f37e6e43b99ba8d53d533cd0bf18b004de85", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 9302, "license_type": "permissive", "max_line_length": 75, "num_lines": 206, "path": "/quasar/cio_queue.py", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "import json\nimport os\nimport pydash\nimport sys\n\nfrom .sa_database import Database\nfrom .queue import QuasarQueue\nfrom .utils import log, logerr\n\n\nclass CioQueue(QuasarQueue):\n\n def __init__(self):\n self.amqp_uri = os.environ.get('AMQP_URI')\n self.blink_queue = os.environ.get('BLINK_QUEUE')\n self.blink_exchange = os.environ.get('BLINK_EXCHANGE')\n super().__init__(self.amqp_uri, self.blink_queue,\n self.blink_exchange)\n self.db = Database()\n\n # Save customer sub data and dates.\n def _add_sub_event(self, data):\n record = {\n 'email_id': data['data']['email_id'],\n 'customer_id': data['data']['customer_id'],\n 'email_address': data['data']['email_address'],\n 'event_id': data['event_id'],\n 'timestamp': data['timestamp'],\n 'event_type': data['event_type']\n }\n query = ''.join((\"INSERT INTO cio.customer_event_scratch \"\n \"(email_id, customer_id, email_address, \"\n \"event_id, timestamp, \"\n \"event_type) VALUES (:email_id,\"\n \":customer_id,:email_address,:event_id,\"\n \"to_timestamp(:timestamp),:event_type)\"))\n self.db.query_str(query, record)\n return data['event_id']\n\n # Save customer unsub data and dates.\n def _add_unsub_event(self, data):\n if pydash.get(data, 'template_id'):\n record = {\n 'email_id': data['data']['email_id'],\n 'customer_id': data['data']['customer_id'],\n 'email_address': data['data']['email_address'],\n 'template_id': data['data']['template_id'],\n 'event_id': data['event_id'],\n 'timestamp': data['timestamp'],\n 'event_type': data['event_type']\n }\n query = ''.join((\"INSERT INTO cio.customer_event_scratch \"\n \"(email_id, customer_id,\"\n \"email_address, template_id, event_id,\"\n \"timestamp, event_type) \"\n \"VALUES (:email_id,:customer_id,\"\n \":email_address,:template_id,:event_id,\"\n \"to_timestamp(:timestamp),:event_type)\"))\n self.db.query_str(query, record)\n else:\n record = {\n 'email_id': data['data']['email_id'],\n 'customer_id': data['data']['customer_id'],\n 'email_address': data['data']['email_address'],\n 'event_id': data['event_id'],\n 'timestamp': data['timestamp'],\n 'event_type': data['event_type']\n }\n query = ''.join((\"INSERT INTO cio.customer_event_scratch \"\n \"(email_id, customer_id,\"\n \"email_address, event_id, \"\n \"timestamp, event_type) \"\n \"VALUES (:email_id,:customer_id,\"\n \":email_address,:event_id,\"\n \"to_timestamp(:timestamp),:event_type)\"))\n self.db.query_str(query, record)\n log(''.join((\"Added customer event from \"\n \"C.IO event id {}.\")).format(data['event_id']))\n\n # Save email event data and dates, e.g. 
email_click.\n def _add_email_event(self, data):\n record = {\n 'email_id': data['data']['email_id'],\n 'customer_id': data['data']['customer_id'],\n 'email_address': data['data']['email_address'],\n 'template_id': data['data']['template_id'],\n 'event_id': data['event_id'],\n 'timestamp': data['timestamp'],\n 'event_type': data['event_type']\n }\n query = ''.join((\"INSERT INTO cio.email_event_scratch \"\n \"(email_id, customer_id, email_address, \"\n \"template_id, event_id, timestamp, \"\n \"event_type) VALUES \"\n \"(:email_id,:customer_id,:email_address,\"\n \":template_id,:event_id,\"\n \"to_timestamp(:timestamp),:event_type)\"))\n self.db.query_str(query, record)\n log(''.join((\"Added email event from \"\n \"C.IO event id {}.\")).format(data['event_id']))\n\n # Save email sent event.\n def _add_email_sent_event(self, data):\n record = {\n 'email_id': data['data']['email_id'],\n 'customer_id': data['data']['customer_id'],\n 'email_address': data['data']['email_address'],\n 'template_id': data['data']['template_id'],\n 'subject': data['data']['subject'],\n 'event_id': data['event_id'],\n 'timestamp': data['timestamp']\n }\n query = ''.join((\"INSERT INTO cio.email_sent_scratch \"\n \"(email_id, customer_id, email_address, \"\n \"template_id, subject, event_id, \"\n \"timestamp) VALUES \"\n \"(:email_id,:customer_id,:email_address,\"\n \":template_id,:subject,:event_id,\"\n \"to_timestamp(:timestamp))\"))\n self.db.query_str(query, record)\n log(''.join((\"Added email event from \"\n \"C.IO event id {}.\")).format(data['event_id']))\n\n # Save email event data and dates, e.g. email_click.\n def _add_email_click_event(self, data):\n record = {\n 'email_id': data['data']['email_id'],\n 'customer_id': data['data']['customer_id'],\n 'email_address': data['data']['email_address'],\n 'template_id': data['data']['template_id'],\n 'subject': data['data']['subject'],\n 'href': data['data']['href'],\n 'link_id': data['data']['link_id'],\n 'event_id': data['event_id'],\n 'timestamp': data['timestamp'],\n 'event_type': data['event_type']\n }\n query = ''.join((\"INSERT INTO cio.email_event_scratch \"\n \"(email_id, customer_id, email_address, \"\n \"template_id, subject, href, link_id, \"\n \"event_id, timestamp, \"\n \"event_type) VALUES \"\n \"(:email_id,:customer_id,:email_address,\"\n \":template_id,:subject,:href,:link_id,\"\n \":event_id,to_timestamp(:timestamp),\"\n \":event_type)\"))\n self.db.query_str(query, record)\n log(''.join((\"Added email event from \"\n \"C.IO event id {}.\")).format(data['event_id']))\n\n # Save email bounced event.\n def _add_email_bounced_event(self, data):\n record = {\n 'email_id': data['data']['email_id'],\n 'customer_id': data['data']['customer_id'],\n 'email_address': data['data']['email_address'],\n 'template_id': data['data']['template_id'],\n 'subject': data['data']['subject'],\n 'event_id': data['event_id'],\n 'timestamp': data['timestamp']\n }\n query = ''.join((\"INSERT INTO cio.email_bounced_scratch \"\n \"(email_id, customer_id, email_address, \"\n \"template_id, subject, event_id, \"\n \"timestamp) VALUES \"\n \"(:email_id,:customer_id,:email_address,\"\n \":template_id,:subject,:event_id,\"\n \"to_timestamp(:timestamp))\"))\n self.db.query_str(query, record)\n log(''.join((\"Added email bounced event from \"\n \"C.IO event id {}.\")).format(data['event_id']))\n\n def process_message(self, message_data):\n if pydash.get(message_data, 'data.meta.message_source') == 'rogue':\n message_id = pydash.get(message_data, 'data.data.id')\n log(\"Ack'ing 
Rogue message id {}\".format(message_id))\n else:\n data = message_data['data']\n event_type = pydash.get(data, 'event_type')\n # Set for checking email event types.\n email_event = {\n 'email_bounced',\n 'email_converted',\n 'email_opened',\n 'email_unsubscribed'\n }\n try:\n if event_type == 'customer_subscribed':\n self._add_sub_event(data)\n elif event_type == 'customer_unsubscribed':\n self._add_unsub_event(data)\n elif event_type == 'email_clicked':\n self._add_email_click_event(data)\n elif event_type == 'email_sent':\n self._add_email_sent_event(data)\n elif event_type == 'email_bounced':\n self._add_email_bounced_event(data)\n elif event_type in email_event:\n self._add_email_event(data)\n else:\n pass\n except KeyError as e:\n logerr(\"C.IO message missing {}\".format(e))\n except:\n logerr(\"Something went wrong with C.IO consumer!\")\n sys.exit(1)\n" }, { "alpha_fraction": 0.6379310488700867, "alphanum_fraction": 0.642241358757019, "avg_line_length": 17.91666603088379, "blob_id": "f9b978700b0a88ae411afda71ca9b76dc926280d", "content_id": "a15efd21afc5d10058a9623b9330cf27b1ecf158", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 232, "license_type": "permissive", "max_line_length": 63, "num_lines": 12, "path": "/docs/compiled/ds_dbt/schema_test/unique_snowplow_raw_events_event_source.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "\n\n\n\nselect count(*)\nfrom (\n\n select\n event_source\n\n from \"quasar_prod_warehouse\".\"public\".\"snowplow_raw_events\"\n where event_source is not null\n group by event_source\n having count(*) > 1\n\n) validation_errors\n\n" }, { "alpha_fraction": 0.7797619104385376, "alphanum_fraction": 0.7797619104385376, "avg_line_length": 55.16666793823242, "blob_id": "931bff7434fc790cb3db6ce6671d3a0f26974763", "content_id": "c54fb0351efc962271557c6fe9dea15645c23d72", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 336, "license_type": "permissive", "max_line_length": 95, "num_lines": 6, "path": "/docs/compiled/ds_dbt/news_subscription/email_subscription_topics_raw.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "SELECT DISTINCT\n\t_id as northstar_id,\n\t(audit #>> '{email_subscription_topics,updated_at,date}')::timestamp AS newsletter_updated_at,\n\tjson_array_elements(u.email_subscription_topics)::TEXT AS newsletter_topic\nFROM northstar_ft_userapi.northstar_users_snapshot u\nWHERE audit #>> '{email_subscription_topics,updated_at,date}' IS NOT NULL" }, { "alpha_fraction": 0.7314814925193787, "alphanum_fraction": 0.7314814925193787, "avg_line_length": 33.33333206176758, "blob_id": "b1b614329e1caaea56b617c01b1536fb8ce1494b", "content_id": "5c8a43583538f00027857902824223ba8c7ef9d2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 108, "license_type": "permissive", "max_line_length": 63, "num_lines": 3, "path": "/docs/compiled/ds_dbt/schema_test/not_null_phoenix_events_combined_event_id.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "\n\n\n\nselect count(*)\nfrom \"quasar_prod_warehouse\".\"public\".\"phoenix_events_combined\"\nwhere event_id is null\n\n" }, { "alpha_fraction": 0.7431192398071289, "alphanum_fraction": 0.7431192398071289, "avg_line_length": 33.66666793823242, "blob_id": "18fef856be2ab24a9c739b9d25d263fbd2817920", "content_id": "af3dc05adc436822463b4bc3e8ecc20c44d8e120", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 109, "license_type": "permissive", "max_line_length": 57, "num_lines": 3, "path": "/docs/compiled/ds_dbt/schema_test/not_null_snowplow_sessions_ending_datetime.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "\n\n\n\nselect count(*)\nfrom \"quasar_prod_warehouse\".\"public\".\"snowplow_sessions\"\nwhere ending_datetime is null\n\n" }, { "alpha_fraction": 0.6045589447021484, "alphanum_fraction": 0.6045589447021484, "avg_line_length": 18.764705657958984, "blob_id": "bf7925003f7183f99024b14e9f6fcafd47d2bc78", "content_id": "3a5b3d75b32f5a794196495aa9ea890dfd29c192", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1009, "license_type": "permissive", "max_line_length": 42, "num_lines": 51, "path": "/quasar/dbt/models/phoenix_events/phoenix_events_combined.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "SELECT\n p.event_id,\n p.event_datetime,\n p.event_name,\n p.event_source,\n p.\"path\",\n p.\"host\",\n NULL AS query_parameters,\n NULL AS clicked_link_url,\n p.page_utm_source,\n p.page_utm_medium,\n p.page_utm_campaign,\n p.referrer_host,\n p.referrer_path,\n p.referrer_source,\n p.campaign_id,\n p.campaign_name,\n p.modal_type,\n NULL AS search_query,\n p.session_id,\n p.browser_size,\n p.northstar_id,\n p.device_id\nFROM\n public.puck_phoenix_events p\nUNION ALL\nSELECT\n s.event_id,\n s.event_datetime,\n s.event_name,\n s.event_source,\n s.\"path\",\n s.\"host\",\n s.query_parameters,\n s.clicked_link_url,\n s.page_utm_source,\n s.page_utm_medium,\n s.page_utm_campaign,\n s.referrer_host,\n s.referrer_path,\n s.referrer_source,\n s.campaign_id,\n s.campaign_name,\n s.modal_type,\n s.search_query,\n s.session_id,\n s.browser_size,\n s.northstar_id,\n s.device_id\nFROM\n {{ ref('snowplow_phoenix_events') }} s\n\n" }, { "alpha_fraction": 0.7542277574539185, "alphanum_fraction": 0.7542277574539185, "avg_line_length": 58, "blob_id": "3626a5df0f7c5e5c67311340739976c7a3d191dd", "content_id": "9468098badd3fc72f2ee6abc66eaae2bc2e64c48", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 887, "license_type": "permissive", "max_line_length": 289, "num_lines": 15, "path": "/quasar/dbt/models/gambit_messages/schema.md", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "\n\n{% docs agent_id %}\nIf set, the content type of the picture the member is sending us. Exp. image/png.\n{% enddocs %}\n\n{% docs attachment_content_type %}\nIf set, the handle of the Front agent this outbound support message is from.\n{% enddocs %}\n\n{% docs topic %}\nHolds a reference to the **last** campaign topic the member's conversation was in. This is useful to allow members to talk to Gambit and get quick responses (through Rivescript), without Gambit completely forgetting what state the member's interaction with a campaign topic was in.\n{% enddocs %}\n\n{% docs campaign_id %}\nIf set, holds a reference to the **last** campaign topic the member's conversation was in. 
This is useful to allow members to talk to Gambit and get quick responses (through Rivescript), without Gambit completely forgetting what state the member's interaction with a campaign topic was in.\n{% enddocs %}\n" }, { "alpha_fraction": 0.655339777469635, "alphanum_fraction": 0.655339777469635, "avg_line_length": 19.600000381469727, "blob_id": "31e783ee6a339d868522bbb9d8dd64cdc218e25e", "content_id": "013322a6558788866d6825ac1ee6addd113a1d4e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 412, "license_type": "permissive", "max_line_length": 72, "num_lines": 20, "path": "/quasar/mel.py", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "from .sql_utils import run_sql_file_raw, refresh_materialized_view\n\n\ndef create():\n run_sql_file_raw('./data/sql/derived-tables/mel.sql')\n\n\ndef create_for_dbt_validation():\n run_sql_file_raw('./data/sql/derived-tables/mel_dbt_validation.sql')\n\n\ndef refresh():\n refresh_materialized_view('public.member_event_log')\n\n\nif __name__ == \"__create__\":\n create()\n\nif __name__ == \"__refresh__\":\n refresh()\n" }, { "alpha_fraction": 0.7289156913757324, "alphanum_fraction": 0.7289156913757324, "avg_line_length": 28.294116973876953, "blob_id": "97d5ad6f2c0e4af5cf3d7abb341e1e20f968bae9", "content_id": "91ff55c99f6bd161bb3d8a379dfe757545a89e34", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 498, "license_type": "permissive", "max_line_length": 49, "num_lines": 17, "path": "/quasar/dbt/models/users_table/cio_latest_status.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "SELECT \n\tcio.customer_id,\n\tmax(CASE WHEN \n\t\t\tcio.event_type = 'customer_unsubscribed' \n\t\t\tTHEN 'customer_unsubscribed' \n\t\t\tELSE 'customer_subscribed' END) AS event_type,\n\tmax(cio.\"timestamp\") AS \"timestamp\"\nFROM cio.customer_event cio\nINNER JOIN \n\t(SELECT \n\t\tctemp.customer_id,\n\t\tmax(ctemp.\"timestamp\") AS max_update\n\tFROM cio.customer_event ctemp\n\tGROUP BY ctemp.customer_id) cio_max \n\t\tON cio_max.customer_id = cio.customer_id \n\t\tAND cio_max.max_update = cio.\"timestamp\"\nGROUP BY cio.customer_id\n" }, { "alpha_fraction": 0.746583878993988, "alphanum_fraction": 0.746583878993988, "avg_line_length": 52.66666793823242, "blob_id": "803eeceaa1af90fd26feb71257d90bb768e52d0e", "content_id": "5443ccd2e06897ee65eece5d3e0a97bf235a68b4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 805, "license_type": "permissive", "max_line_length": 83, "num_lines": 15, "path": "/quasar/dbt/models/campaign_activity/turbovote.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "SELECT id AS post_id, \n\tdetails::jsonb->>'hostname' AS hostname,\n\tdetails::jsonb->>'referral_code' AS referral_code,\n\tdetails::jsonb->>'partner_comms_opt_in' AS partner_comms_opt_in,\n\t(details::jsonb->>'created-at')::timestamp AS created_at,\n\t(details::jsonb->>'updated-at')::timestamp AS updated_at,\n\tsource_details,\n\tdetails::jsonb->>'voter_registration_status' AS voter_registration_status,\n\tdetails::jsonb->>'voter_registration_source' AS voter_registration_source,\n\tdetails::jsonb->>'voter_registration_method' AS voter_registration_method,\n\tdetails::jsonb->>'voter_registration_preference' AS voter_registration_preference,\n\tdetails::jsonb->>'email_subscribed' AS 
email_subscribed,\n\tdetails::jsonb->>'sms_subscribed' AS sms_subscribed\nFROM {{ env_var('FT_ROGUE') }}.posts\nWHERE source = 'turbovote'\n" }, { "alpha_fraction": 0.7051994204521179, "alphanum_fraction": 0.7122665047645569, "avg_line_length": 35.0363655090332, "blob_id": "96c8610ad63ed4d2c251594e8a47c22134a20407", "content_id": "40babeb2696c96e42c5b89501bf15d005d91d313", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1981, "license_type": "permissive", "max_line_length": 110, "num_lines": 55, "path": "/docs/compiled/ds_dbt/users_table/users.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "SELECT \n\tu.id AS northstar_id,\n\tu.created_at,\n\tumax.max_last_auth AS last_logged_in,\n\tumax.max_last_access AS last_accessed,\n\tumax.max_last_message AS last_messaged_at,\n\tu.drupal_id AS drupal_uid,\n\tu.\"source\",\n\tu.email,\n\tu.facebook_id,\n\tu.mobile,\n\tCASE WHEN \n\t\tu.birthdate < '1900-01-01' OR \n\t\tu.birthdate > (date('now') - INTERVAL '10 years') \n\t\tTHEN NULL ELSE u.birthdate END AS birthdate,\n\tu.first_name,\n\tu.last_name,\n\tu.voter_registration_status,\n\tu.addr_street1 AS address_street_1,\n\tu.addr_street2 AS address_street_2,\n\tu.addr_city AS city,\n\tu.addr_state AS state,\n\tu.addr_zip AS zipcode,\n\tu.country,\n\tu.\"language\",\n\temail_status.event_type AS cio_status,\n\temail_status.\"timestamp\" AS cio_status_timestamp,\n\tu.sms_status,\n\tu.source_detail,\n\tsubstring(u.source_detail from '(?<=utm_medium\\:)(\\w*)') AS utm_medium,\n\tsubstring(u.source_detail from '(?<=utm_source\\:)(\\w*)') AS utm_source,\n\tsubstring(u.source_detail from '(?<=utm_campaign\\:)(\\w*)') AS utm_campaign,\n\t(u.feature_flags #>> '{badges}')::boolean as badges,\n\t(u.feature_flags #>> '{refer-friends}')::boolean as refer_friends,\n\tCASE WHEN \n\t\tu.sms_status in ('active','less','pending') OR \n\t\temail_status.event_type = 'customer_subscribed' \n\t\tTHEN TRUE ELSE FALSE END AS subscribed_member,\n\tumax.max_update AS last_updated_at\nFROM northstar.users u\nINNER JOIN \n\t(SELECT\n\t\tutemp.id,\n\t\tmax(utemp.updated_at) AS max_update,\n\t\tmax(utemp.last_accessed_at) AS max_last_access,\n\t\tmax(utemp.last_authenticated_at) AS max_last_auth,\n\t\tmax(utemp.last_messaged_at) AS max_last_message\n\tFROM northstar.users utemp\n\tGROUP BY utemp.id) umax ON umax.id = u.id AND umax.max_update = u.updated_at\nLEFT JOIN \"quasar_prod_warehouse\".\"public\".\"cio_latest_status\" email_status ON email_status.customer_id = u.id\nWHERE u.\"source\" IS DISTINCT FROM 'runscope'\nAND u.\"source\" IS DISTINCT FROM 'runscope-client'\nAND u.email IS DISTINCT FROM '[email protected]'\nAND u.email IS DISTINCT FROM '[email protected]'\nAND (u.email NOT ILIKE '%%@example.org%%' OR u.email IS NULL)" }, { "alpha_fraction": 0.7542372941970825, "alphanum_fraction": 0.7542372941970825, "avg_line_length": 36.66666793823242, "blob_id": "9cc2851e6420f55ec5c86c19fc382d03eb64264e", "content_id": "458a1e0a09078ed56e3bcf98dfba74ab497dacf1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 118, "license_type": "permissive", "max_line_length": 57, "num_lines": 3, "path": "/docs/compiled/ds_dbt/schema_test/not_null_snowplow_sessions_session_duration_seconds.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "\n\n\n\nselect count(*)\nfrom \"quasar_prod_warehouse\".\"public\".\"snowplow_sessions\"\nwhere session_duration_seconds is 
null\n\n" }, { "alpha_fraction": 0.48757636547088623, "alphanum_fraction": 0.48757636547088623, "avg_line_length": 35.64179229736328, "blob_id": "d27bab4b85d47883a23e251067b487857fab32bb", "content_id": "a24b0638b7d899812fedae33b22959cb3c05cf8b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2455, "license_type": "permissive", "max_line_length": 79, "num_lines": 67, "path": "/quasar/ghost_killer.py", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "from .database import Database\nimport time\n\ndb = Database()\n\n\ndef remove_ghost_posts():\n # Get list of all posts to remove.\n posts = db.query(''.join((\"SELECT DISTINCT p.id \"\n \"FROM rogue.signups s \"\n \"INNER JOIN (SELECT g.id \"\n \"FROM rogue.signups g WHERE \"\n \"g.why_participated = \"\n \"'why_participated_ghost_test') ghost \"\n \"ON s.id = ghost.id \"\n \"INNER JOIN rogue.posts p \"\n \"ON p.signup_id = s.id\")))\n\n # Copy all posts into ghost posts table to remove from official counts.\n for post in posts:\n db.query_str(''.join((\"INSERT INTO rogue.ghost_posts SELECT * FROM \"\n \"rogue.posts p WHERE p.id = %s\")),\n (post,))\n\n # Remove posts from posts table.\n for post in posts:\n db.query_str(''.join((\"DELETE FROM rogue.posts p WHERE \"\n \"p.id = %s\")),\n (post,))\n\n\ndef remove_ghost_signups():\n # Get list of all signups to remove.\n signups = db.query(''.join((\"SELECT DISTINCT su.id FROM \"\n \"rogue.signups su INNER JOIN \"\n \"(SELECT DISTINCT s.id FROM rogue.signups s \"\n \"WHERE s.why_participated = \"\n \"'why_participated_ghost_test') ghost_ids \"\n \"ON ghost_ids.id = su.id\")))\n\n # Copy all signups into ghost signups table to remove from official counts.\n for signup in signups:\n db.query_str(''.join((\"INSERT INTO rogue.ghost_signups SELECT * FROM \"\n \"rogue.signups s WHERE s.id = %s\")),\n (signup,))\n\n # Remove signups from signups table.\n for signup in signups:\n db.query_str(''.join((\"DELETE FROM rogue.signups s WHERE \"\n \"s.id = %s\")),\n (signup,))\n\n\ndef main():\n start_time = time.time()\n \"\"\"Keep track of start time of script.\"\"\"\n\n remove_ghost_posts()\n remove_ghost_signups()\n db.disconnect()\n end_time = time.time() # Record when script stopped running.\n duration = end_time - start_time # Total duration in seconds.\n print('duration: ', duration)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.7413740754127502, "alphanum_fraction": 0.746870219707489, "avg_line_length": 33.11458206176758, "blob_id": "792ea8fc7f0a363cb29f720d05967b407190a4d7", "content_id": "bba96e9990448149755b0d7ebfefd924a05458e2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6550, "license_type": "permissive", "max_line_length": 346, "num_lines": 192, "path": "/README.md", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "# Quasar\n\n## DoSomething.Org Data Platform\n\n### Extended Description \n\n* All Infrastructure Tools and Code\n* All ETL Code and Scripts\n* Data Warehousing Code\n* A Bright Light and Hope towards illuminating the dark corners of social injustice with the power of Data\n\n## Getting Started\nThese instructions will get you a copy of the project up and running on your local macOS machine for development and testing purposes. 
See deployment for notes on how to deploy the project on a live system.\n\n### Prerequisites\n\nThese instructions use [pipenv](https://docs.pipenv.org/en/latest/) to manage dependencies and virtual environments.\n\nSet up Homebrew and Python 3 via:\n```\nhttp://docs.python-guide.org/en/latest/starting/install3/osx/\n```\n\nInstall Pipenv via:\n```\nbrew install pipenv\n```\n\n### Installing\n\nInstall Python requirements:\n\n```\ncd $QUASAR_PROJECT_DIR\npipenv install\n```\n\n### Development\n\n#### Environment\n\nTo test changes, Pipenv provides a default virtual environment. Access it using:\n```\npipenv shell\n```\n\nYou can then test commands after running `make build`.\n\nTo exit the virtual environment, simply type:\n```\nexit\n```\n\n*Note*: Your environment variables aren't pulled into the virtual environment by default, so you may have to `source` any env files.\n\n#### PostgreSQL (Docker) - [Troubleshooting](/docs/postgresql-docker-troubleshooting.md)\n\nWe use Docker to pull a PostgreSQL image based on a version/image tag.\n(Instructions here modified from [here](https://hackernoon.com/dont-install-postgres-docker-pull-postgres-bee20e200198).)\n\n* Install [Docker for Desktop](https://hub.docker.com/editions/community/docker-ce-desktop-mac) for macOS.\n* Create a Docker Hub account if you don't already have one. \n* From your terminal of choice, type `docker login`. Authenticate with your Docker Hub account, keeping in mind your username for the CLI isn't your email address, it's your profile name, which you can find by going to `https://hub.docker.com` and seeing your profile name in the upper right corner.\n* Set up a directory to make sure your Docker Postgres data is persisted: `mkdir -p $HOME/docker/volumes/postgres`\n* Add the following aliases (modify to taste) to your `~/.bash_profile` script and then run `source ~/.bash_profile`:\n\t* Specify the image tag (check the PostgreSQL version in this file, full image list [here](https://hub.docker.com/_/postgres/)): ```export QUASAR_PG_DOCKER_IMAGE=\"postgres:11\"```\n\t* Command to pull down images based on the image tag: ```alias qu=\"docker pull $QUASAR_PG_DOCKER_IMAGE\"```\n\t* Command to start up the Postgres container. The default username and database are `postgres`, and the password, controlled by `POSTGRES_PASSWORD`, is `postgres` in this case: ```alias qp=\"docker run --rm --name quasar-pg -e POSTGRES_PASSWORD=postgres -d -p 5432:5432 -v $HOME/docker/volumes/postgres:/var/lib/postgresql/data $QUASAR_PG_DOCKER_IMAGE\"```\n\t* Command to kill the running image: ```alias qpk=\"docker kill quasar-pg\"```\n* Run `qu` to check out the Postgres Docker image.\n* Run `qp` to bring up the Postgres Docker image.\n* You can kill the Docker image with `qpk`.\n\n#### DBT Profile\nYou need to set up a DBT profile file (default location is `~/.dbt/profile.yml`).\n\nAn example profile is provided [here](https://github.com/DoSomething/quasar/blob/master/docs/example-dbt-profile.yml), which has the doc block needed for `dbt docs generate`.\n\n## Usage\n\n```\ncd $QUASAR_PROJECT_DIR\npipenv install\nmake build\n```\n\nSee `setup.py` for the list of entry points.\n\nEntry points are how CLI commands are generated for Python code.\nFor instance, instead of having to run `python cio_queue_process.py` and\nhave that Python file contain all of the runtime code, you can provide\na preferred CLI command and link it to an `entry point` that has the command\nyou wish to run. For instance, for\n```\n$ cio_import\n```\nthe entry point looks like this:\n```\ncio_import = quasar.cio_queue_process:main\n```\nIt follows the format:\n```\ncommand_to_run = dir_path.filename:code_to_run\n```\nMore info on the Python setup.py file can be found here:\n```\nhttps://docs.python.org/3/distutils/setupscript.html\n```\n\n\n## Running the tests\n\nWe currently don't have any tests set up. :frowning:\n\n### End to end tests\n\n### Coding style tests\nMultiple options are available here, but usually we stick to PEP8 syntax checking. \nYou can set one up in your editor/IDE of choice.\nIf you'd like to stick to the CLI or run a manual check,\n`pycodestyle` is included as a Pipenv dev package, which can be installed via:\n```\npipenv install --dev\n```\n\nWe use [Stickler CI](https://stickler-ci.com/) for linting on PRs before merging to master.\n\n### Unit tests\n\n## Deployment\n\nWe currently deploy with Jenkins. The commands for the Jenkins deployment job's \nbash shell, using Pipenv for the install, are:\n```\n#!/bin/bash -e\n\nsource ~/.profile\ngit checkout ${BRANCH} (for QA) OR git checkout master && git pull (for Prod)\npipenv install\nsource $(pipenv --venv)/bin/activate\nmake build\ndeactivate\n```\nWe have to use a somewhat hacky pipenv virtualenv activation here since `pipenv shell` only \nruns interactively and fails out in the Jenkins job.\n\nDeployments are automatic to QA once a branch is merged into `master`. Prod deployments\nare handled manually from the Jenkins job once QA testing is done.\n\n## Running Jenkins Jobs using Pipenv\n\nWe use Pipenv to manage Quasar code, _and_ to run our commands in Jenkins jobs. The details\nfor DBT vs non-DBT jobs are close, but with a crucial difference.\n\nTo set up a non-DBT job, here's the syntax:\n```\n#!/bin/bash -e\n\nsource ~/.profile\nsource ~/quasar-env.src\ncd /home/quasar/workspace/\"Deploy Branch\" (for QA) or cd /home/quasar/workspace/\"Deploy Master\" (for Prod)\npipenv run COMMAND ARGS\n```\n\nTo set up DBT jobs, you need to include the proper path to the DBT directory:\n```\n#!/bin/bash -e\n\nsource ~/.profile\nsource ~/quasar-env.src\ncd /home/quasar/workspace/\"Deploy Branch\"/quasar/dbt (for QA) or cd /home/quasar/workspace/\"Deploy Master\"/quasar/dbt (for Prod)\npipenv run dbt ARGS\n```\n\n## Built With\n\n[SPECIFICATIONS.md](SPECIFICATIONS.md)\n\n## Contributing\n\n[CONTRIBUTING.md](CONTRIBUTING.md) // add process notes? \n[Pull request template](PULL_REQUEST_TEMPLATE) \n[Issue template](issue_template.md) \n\n## Versioning\n\nWe use a [CalVer](https://calver.org/#youtube-dl) versioning scheme similar to what the youtube-dl project uses.\n\nFormat is: `YYYY.MM.DD.MINORVERSION`, e.g. 
`2019.01.01.00` for the first release in 2019.\n\n## License\nThis project is licensed under the MIT License - see the [LICENSE.md](LICENSE.md) file for details.\n" }, { "alpha_fraction": 0.7615384459495544, "alphanum_fraction": 0.7615384459495544, "avg_line_length": 36.28571319580078, "blob_id": "ca6b3a08b4e155c0d8ee0b63001014c3ce468198", "content_id": "70bec866992b9db2e0aadd06a8bdd03441c6f8d2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 260, "license_type": "permissive", "max_line_length": 83, "num_lines": 7, "path": "/docs/compiled/ds_dbt/campaign_info/campaign_info_international.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "SELECT \n\tc.id AS campaign_id,\n\tc.internal_title AS campaign_name,\n\ti.*\nFROM \"quasar_prod_warehouse\".\"public\".\"campaign_info_all\" i\nLEFT JOIN ft_dosomething_rogue.campaigns c ON i.campaign_run_id = c.campaign_run_id\nWHERE campaign_language IS DISTINCT FROM 'en'" }, { "alpha_fraction": 0.7339449524879456, "alphanum_fraction": 0.7339449524879456, "avg_line_length": 33.66666793823242, "blob_id": "eba9ad18edae856e23c878d3b8e69fda947b7e55", "content_id": "561b2368566abdfa7efe92213269cae9e23346b7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 109, "license_type": "permissive", "max_line_length": 59, "num_lines": 3, "path": "/docs/compiled/ds_dbt/schema_test/not_null_snowplow_raw_events_referrer_host.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "\n\n\n\nselect count(*)\nfrom \"quasar_prod_warehouse\".\"public\".\"snowplow_raw_events\"\nwhere referrer_host is null\n\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.625, "avg_line_length": 23, "blob_id": "fc1cd87a545a2cae924375b2d088a14ec4abd124", "content_id": "199a63ba6a4b6461d568a3f00e90e2b579c66cc4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 48, "license_type": "permissive", "max_line_length": 38, "num_lines": 2, "path": "/quasar/dbt/models/post_actions/post_actions.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "SELECT *\nFROM {{ env_var('FT_ROGUE') }}.actions\n" }, { "alpha_fraction": 0.6304348111152649, "alphanum_fraction": 0.6304348111152649, "avg_line_length": 16.69230842590332, "blob_id": "abab3a3af499dfc0a2f7ab2a4009c0526b289830", "content_id": "15dc0eb92e09da068b407a1d9d790c623931154d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 230, "license_type": "permissive", "max_line_length": 68, "num_lines": 13, "path": "/quasar/create_post_actions.py", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "import os\n\nfrom .sql_utils import run_sql_file\n\ndata = {'ft_rogue_actions': os.getenv('FT_ROGUE_ACTIONS')}\n\n\ndef main():\n run_sql_file('./data/sql/derived-tables/post_actions.sql', data)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6654676198959351, "alphanum_fraction": 0.6654676198959351, "avg_line_length": 34.870967864990234, "blob_id": "d51d35db99e1679b1ce2d99635d20fb2e123095f", "content_id": "4a9db556071c91523ac15161e54d70244babe3b3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1112, "license_type": "permissive", "max_line_length": 73, "num_lines": 31, "path": "/quasar/dbt/models/campaign_activity/signups.sql", "repo_name": 
"blisteringherb/quasar", "src_encoding": "UTF-8", "text": "SELECT\n sd.northstar_id AS northstar_id,\n sd.id AS id,\n sd.campaign_id AS campaign_id,\n sd.campaign_run_id AS campaign_run_id,\n sd.why_participated AS why_participated,\n sd.\"source\" AS \"source\",\n sd.details,\n\tCASE WHEN sd.\"source\" = 'niche' THEN 'niche'\n\t WHEN sd.\"source\" ilike '%%sms%%' THEN 'sms'\n\t WHEN sd.\"source\" in ('rock-the-vote', 'turbovote') THEN 'voter-reg'\n\t ELSE 'web' END AS source_bucket,\n sd.created_at AS created_at,\n sd.source_details,\n CASE \n\t\tWHEN source_details ILIKE '%%\\}'\n\t\tTHEN (CAST(source_details as json) ->> 'utm_medium') \n\t\tELSE NULL END AS utm_medium,\n\tCASE \n\t\tWHEN source_details ILIKE '%%\\}'\n\t\tTHEN (CAST(source_details as json) ->> 'utm_source') \n\t\tELSE NULL END AS utm_source,\n\tCASE \n\t\tWHEN source_details ILIKE '%%\\}'\n\t\tTHEN (CAST(source_details as json) ->> 'utm_campaign') \n\t\tELSE NULL END AS utm_campaign\nFROM {{ env_var('FT_ROGUE') }}.signups sd\nWHERE sd._fivetran_deleted = 'false'\nAND sd.deleted_at IS NULL\nAND sd.\"source\" IS DISTINCT FROM 'rogue-oauth'\nAND sd.why_participated IS DISTINCT FROM 'Testing from Ghost Inspector!'\n" }, { "alpha_fraction": 0.531682014465332, "alphanum_fraction": 0.5385944843292236, "avg_line_length": 41.34146499633789, "blob_id": "f49ac85a4b93497a5728ba641403f5e91807af49", "content_id": "bb06a4c2e51c0dd2d6bbb6ca71abdd0ab00afcd2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1736, "license_type": "permissive", "max_line_length": 77, "num_lines": 41, "path": "/quasar/northstar_scraper.py", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "from oauthlib.oauth2 import BackendApplicationClient\nimport os\nfrom requests_oauthlib import OAuth2Session\nfrom .scraper import Scraper\n\n\nclass NorthstarScraper(Scraper):\n\n def __init__(self, url):\n Scraper.__init__(self, url, params={\n 'limit': 100, 'pagination': 'cursor',\n 'include': ''.join((\"last_name,email,mobile,\"\n \"birthdate,addr_street1,\"\n \"addr_street2,age,school_id\"))})\n self.auth_headers = self.fetch_auth_headers()\n\n def fetch_auth_headers(self):\n oauth = OAuth2Session(client=BackendApplicationClient(\n client_id=os.environ.get('NS_CLIENT_ID')))\n scopes = ['admin', 'user']\n ns_client_id = os.environ.get('NS_CLIENT_ID')\n ns_client_secret = os.environ.get('NS_CLIENT_SECRET')\n new_token = oauth.fetch_token(self.url + '/v2/auth/token',\n client_id=ns_client_id,\n client_secret=ns_client_secret,\n scope=scopes)\n return {'Authorization': 'Bearer ' + str(new_token['access_token'])}\n\n def authenticated(func):\n def _authenticated(self, *args, **kwargs):\n response = func(self, *args, **kwargs)\n if response.status_code == 401:\n self.auth_headers = self.fetch_auth_headers()\n response = func(self, *args, **kwargs)\n return response\n return _authenticated\n\n @authenticated\n def get(self, path, query_params=''):\n return super().get(path, headers=self.auth_headers,\n params=query_params)\n" }, { "alpha_fraction": 0.7270511984825134, "alphanum_fraction": 0.7384240627288818, "avg_line_length": 46.346153259277344, "blob_id": "7a6ae04bc876c55a988349bbe9456cfeebbf8390", "content_id": "936402f839acead9809298f27dcbde31ce8baeb1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1231, "license_type": "permissive", "max_line_length": 138, "num_lines": 26, "path": "/provision.sh", "repo_name": 
"blisteringherb/quasar", "src_encoding": "UTF-8", "text": "export DEBIAN_FRONTEND=noninteractive\n\necho 'deb http://apt.postgresql.org/pub/repos/apt/ xenial-pgdg main' >> /etc/apt/sources.list.d/pgdg.list\nwget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -\nsudo apt-get -y update\n\n# Install Basics: Utilities and some Python dev tools\nsudo apt-get -y install build-essential git vim curl wget unzip postgresql-10\n\n# listen for localhost connections\nPOSTGRE_VERSION=10\nsudo sed -i \"s/#listen_addresses = 'localhost'/listen_addresses = '*'/g\" /etc/postgresql/$POSTGRE_VERSION/main/postgresql.conf\n\n# identify users via \"md5\", rather than \"ident\", allowing us to make postgres\n# users separate from system users. \"md5\" lets us simply use a password\necho \"host all all 0.0.0.0/0 md5\" | sudo tee -a /etc/postgresql/$POSTGRE_VERSION/main/pg_hba.conf\nsudo service postgresql start\n\n# create new user \"root\" with defined password \"root\" not a superuser\nPASSWORD=password\nsudo -u postgres psql -c \"CREATE ROLE root LOGIN ENCRYPTED PASSWORD '$PASSWORD' SUPERUSER CREATEDB CREATEROLE INHERIT;\"\n\n# create new database \"database\"\nsudo -u postgres psql -c \"CREATE DATABASE database\"\n\nsudo service postgresql restart\n" }, { "alpha_fraction": 0.629305899143219, "alphanum_fraction": 0.6339331865310669, "avg_line_length": 41.28260803222656, "blob_id": "4f84b81493da0d3cac933aa3cba0ca11cbe045d5", "content_id": "bb538f78744be156c20e8de2e7de76cbaae5af4e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1945, "license_type": "permissive", "max_line_length": 103, "num_lines": 46, "path": "/setup.py", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "from setuptools import setup, find_packages\nfrom pipenv.project import Project\nfrom pipenv.utils import convert_deps_to_pip\n\npfile = Project(chdir=False).parsed_pipfile\nrequirements = convert_deps_to_pip(pfile['packages'], r=False)\n\nsetup(\n name=\"quasar\",\n version=\"2019.11.14.0\",\n packages=find_packages(),\n install_requires=requirements,\n entry_points={\n 'console_scripts': [\n 'bertly_refresh = quasar.bertly:refresh',\n 'campaign_info_recreate = quasar.campaign_info:create',\n 'campaign_info_refresh = quasar.campaign_info:refresh',\n 'cio_consume = quasar.cio_consumer:main',\n 'cio_import = quasar.cio_import_scratch_records:cio_import',\n 'cio_bounced_backfill = quasar.cio_bounced_backfill:main',\n 'cio_sent_backfill = quasar.cio_sent_backfill:main',\n 'etl_monitoring = quasar.etl_monitoring:run_monitoring',\n 'gdpr = quasar.gdpr_comply:gdpr_from_file',\n 'mel_create = quasar.mel:create',\n 'mel_create_for_dbt_validation = quasar.mel:create_for_dbt_validation',\n 'mel_refresh = quasar.mel:refresh',\n 'northstar_backfill = quasar.northstar_to_user_table:backfill',\n 'northstar_full_backfill = quasar.northstar_to_user_table_full_backfill:backfill',\n 'post_actions_create = quasar.create_post_actions:main',\n 'rogue_ghost_killer = quasar.ghost_killer:main',\n 'users_create = quasar.users:create',\n 'users_refresh = quasar.users:refresh',\n 'user_activity_create = quasar.user_activity:create',\n 'user_activity_create_for_dbt_validation = quasar.user_activity:create_for_dbt_validation',\n 'user_activity_refresh = quasar.user_activity:refresh'\n ],\n },\n author=\"\",\n author_email=\"\",\n description=\"\",\n license=\"MIT\",\n keywords=[],\n url=\"\",\n classifiers=[\n ],\n)\n" }, { "alpha_fraction": 
0.7339449524879456, "alphanum_fraction": 0.7339449524879456, "avg_line_length": 33.66666793823242, "blob_id": "636bb776b049bdf9c0be7a38ff172c9266a824bd", "content_id": "60f882594f8a4331df23507195aaa90d4deabc6e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 109, "license_type": "permissive", "max_line_length": 59, "num_lines": 3, "path": "/docs/compiled/ds_dbt/schema_test/not_null_snowplow_raw_events_referrer_path.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "\n\n\n\nselect count(*)\nfrom \"quasar_prod_warehouse\".\"public\".\"snowplow_raw_events\"\nwhere referrer_path is null\n\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.6294642686843872, "avg_line_length": 17.25, "blob_id": "cf86e65ae9457736d1c35ea6cc807e11b52ebcdc", "content_id": "2b5626ae79ac984e7fd533279feccc5969712ba3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 224, "license_type": "permissive", "max_line_length": 67, "num_lines": 12, "path": "/docs/compiled/ds_dbt/schema_test/unique_phoenix_events_combined_event_id.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "\n\n\n\nselect count(*)\nfrom (\n\n select\n event_id\n\n from \"quasar_prod_warehouse\".\"public\".\"phoenix_events_combined\"\n where event_id is not null\n group by event_id\n having count(*) > 1\n\n) validation_errors\n\n" }, { "alpha_fraction": 0.6137787103652954, "alphanum_fraction": 0.6137787103652954, "avg_line_length": 33.21428680419922, "blob_id": "7c361a35df9af55566b68554f118663daae764ba", "content_id": "7c8780c330c58143cd34235920bd7bb1e9d22a62", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1916, "license_type": "permissive", "max_line_length": 70, "num_lines": 56, "path": "/quasar/cio_import_scratch_records.py", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "from .sa_database import Database\nfrom .utils import Duration, log\n\ndb = Database()\nduration = Duration()\n\n\ndef import_records_event(table):\n # Import records from cio staging tables that are populated\n # by the cio consumer into primary queried tables that have\n # event_type based primary key.\n scratch = table + '_scratch'\n query = ''.join((\"INSERT INTO {} SELECT * FROM {} \"\n \"ON CONFLICT (email_id, customer_id, timestamp, \"\n \"event_type) DO NOTHING\"\n \"\")).format(table, scratch)\n db.query(query)\n\n\ndef import_records(table):\n # Import records from cio staging tables that are populated\n # by the cio consumer into primary queried tables.\n scratch = table + '_scratch'\n query = ''.join((\"INSERT INTO {} SELECT * FROM {} \"\n \"ON CONFLICT (email_id, customer_id, timestamp) \"\n \"DO NOTHING\"\n \"\")).format(table, scratch)\n db.query(query)\n\n\ndef truncate_scratch(table):\n # Truncate staging tables so consumer can resume updating\n # tables after ingestion.\n scratch = table + '_scratch'\n query = \"TRUNCATE TABLE {}\".format(scratch)\n db.query(query)\n\n\ndef cio_import():\n # List of cio tables to process.\n tables = ['cio.email_sent', 'cio.email_bounced']\n event_tables = ['cio.customer_event', 'cio.email_event']\n for table in tables:\n log(\"Importing records for table {}.\".format(table))\n import_records(table)\n scratch = table + '_scratch'\n log(\"Truncating table {}.\".format(scratch))\n truncate_scratch(table)\n for table in event_tables:\n log(\"Importing records for table 
{}.\".format(table))\n import_records_event(table)\n scratch = table + '_scratch'\n log(\"Truncating table {}.\".format(scratch))\n truncate_scratch(table)\n db.disconnect()\n duration.duration()\n" }, { "alpha_fraction": 0.7435897588729858, "alphanum_fraction": 0.7435897588729858, "avg_line_length": 36.33333206176758, "blob_id": "0c78e2d26a861804ecb34e3dd09aeb73ce25bc39", "content_id": "b86a0af0eadd6f0c32f9666794b5a9b216d19cc2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 117, "license_type": "permissive", "max_line_length": 65, "num_lines": 3, "path": "/docs/compiled/ds_dbt/schema_test/not_null_phoenix_sessions_combined_num_pages_views.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "\n\n\n\nselect count(*)\nfrom \"quasar_prod_warehouse\".\"public\".\"phoenix_sessions_combined\"\nwhere num_pages_views is null\n\n" }, { "alpha_fraction": 0.5471349358558655, "alphanum_fraction": 0.5730129480361938, "avg_line_length": 14.45714282989502, "blob_id": "129e46cb0b50c917dc6cd00275a70061a5a8788f", "content_id": "6c39f92eae4c62f3aeed05c7b2a18618e09a6f98", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 541, "license_type": "permissive", "max_line_length": 32, "num_lines": 35, "path": "/Pipfile", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "[[source]]\nname = \"pypi\"\nurl = \"https://pypi.org/simple\"\nverify_ssl = true\n\n[dev-packages]\npycodestyle = \"*\"\npylint = \"*\"\n\n[packages]\nfixtures = \"*\"\nrequests = \"*\"\nlxml = \"*\"\ngnureadline = \"*\"\npsycopg2-binary = \"*\"\nawscli = \"*\"\noauthlib = \"*\"\nrequests-oauthlib = \"*\"\npika = \">=0.13.0,<1.0.0\"\naiofiles = \"*\"\naiohttp = \"*\"\nboto3 = \"*\"\npandas = \"*\"\nslackclient = \"*\"\nnumpy = \"*\"\nxmltodict = \"*\"\npydash = \"*\"\npython-dateutil = \"==2.6.1\"\nSQLAlchemy = \"*\"\ndbt = \"*\"\npipenv = \"*\"\nsnowflake-connector-python = \"*\"\n\n[requires]\npython_version = \"3.7\"\n" }, { "alpha_fraction": 0.7363636493682861, "alphanum_fraction": 0.7363636493682861, "avg_line_length": 34, "blob_id": "32fb26475f363f6686c1fcacb4882eeb8eea5f31", "content_id": "3870f5773ac09e839b45b4cc67f06aad8d561ee2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 110, "license_type": "permissive", "max_line_length": 59, "num_lines": 3, "path": "/docs/compiled/ds_dbt/schema_test/not_null_snowplow_raw_events_event_datetime.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "\n\n\n\nselect count(*)\nfrom \"quasar_prod_warehouse\".\"public\".\"snowplow_raw_events\"\nwhere event_datetime is null\n\n" }, { "alpha_fraction": 0.7530674934387207, "alphanum_fraction": 0.7530674934387207, "avg_line_length": 33.3684196472168, "blob_id": "69476698809e35bb7c5018a3703ebfb7f9a851ea", "content_id": "94339888b4ab84f2187fc040a2c399087b222a62", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 652, "license_type": "permissive", "max_line_length": 105, "num_lines": 19, "path": "/docs/compiled/ds_dbt/campaign_info/campaign_info.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "SELECT \n\tc.id AS campaign_id,\n\tc.campaign_run_id,\n\tc.internal_title AS campaign_name,\n\tc.cause AS campaign_cause,\n\tc.start_date AS campaign_run_start_date,\n\tc.end_date AS campaign_run_end_date,\n\tc.created_at AS 
campaign_created_date,\n\tCOALESCE(i.campaign_node_id, c.id) AS campaign_node_id,\n\ti.campaign_node_id_title,\n\ti.campaign_run_id_title,\n\ti.campaign_action_type,\n\ti.campaign_cause_type,\n\ti.campaign_noun,\n\ti.campaign_verb,\n\ti.campaign_cta\nFROM ft_dosomething_rogue.campaigns c\nLEFT JOIN \"quasar_prod_warehouse\".\"public\".\"campaign_info_all\" i ON i.campaign_run_id = c.campaign_run_id\nWHERE i.campaign_language = 'en' OR i.campaign_language IS NULL" }, { "alpha_fraction": 0.5212429761886597, "alphanum_fraction": 0.5237008333206177, "avg_line_length": 31.735631942749023, "blob_id": "035859e0bd6cb9165d363fd0d022e0879b8a956b", "content_id": "2ec1e7f16fa01b17c44e131e3f63d746df1f9afd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5696, "license_type": "permissive", "max_line_length": 78, "num_lines": 174, "path": "/quasar/database.py", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "import json\nimport os\nimport psycopg2\n\nfrom .utils import QuasarException, logerr\n\n# Psycopg2 vars.\nopts = {\n 'user': os.environ.get('PG_USER'),\n 'host': os.environ.get('PG_HOST'),\n 'port': os.environ.get('PG_PORT'),\n 'password': os.environ.get('PG_PASSWORD'),\n 'database': os.environ.get('PG_DATABASE'),\n 'sslmode': os.environ.get('PG_SSL')\n}\n\n\ndef _connect(opts):\n conn = None\n try:\n conn = psycopg2.connect(**opts)\n except psycopg2.InterfaceError as e:\n raise QuasarException(e)\n finally:\n return conn\n\n\nclass Database:\n\n def __init__(self, options={}):\n opts.update(options)\n self.connect()\n\n def connect(self):\n self.connection = _connect(opts)\n if self.connection is None:\n print(\"Error, couldn't connect to database with options:\", opts)\n else:\n self.cursor = self.connection.cursor()\n\n def disconnect(self):\n self.cursor.close()\n self.connection.close()\n return self.connection\n\n def roll_reconnect(self):\n self.connection.rollback()\n self.disconnect()\n self.connect()\n\n def query(self, query):\n \"\"\"Parse and run DB query.\n\n Return On error, raise exception and log why.\n \"\"\"\n try:\n self.cursor.execute(query)\n self.connection.commit()\n try:\n results = self.cursor.fetchall()\n return results\n except psycopg2.ProgrammingError:\n results = {}\n return results\n except psycopg2.DatabaseError as e:\n print(self.cursor.query)\n raise QuasarException(e)\n\n def query_str(self, query, string):\n \"\"\"Parse and run DB query.\n\n Return On error, raise exception and log why.\n \"\"\"\n try:\n self.cursor.execute(query, string)\n self.connection.commit()\n try:\n results = self.cursor.fetchall()\n return results\n except psycopg2.ProgrammingError:\n results = {}\n return results\n except psycopg2.DatabaseError as e:\n print(self.cursor.query)\n raise QuasarException(e)\n\n def query_str_rogue(self, query, string, record,\n event_id=None):\n \"\"\"Parse and run DB query, on failure backup data.\n\n On query failure, assuming a single column table with data type jsonb,\n with column name \"record\", backup entire JSON record.\n\n Optional event_id for logging provided.\n \"\"\"\n try:\n self.cursor.execute(query, string)\n self.connection.commit()\n try:\n results = self.cursor.fetchall()\n return results\n except psycopg2.ProgrammingError:\n results = {}\n return results\n except psycopg2.DatabaseError:\n logerr(\"The query: {} FAILED!\".format(self.cursor.query))\n self.disconnect()\n self.connect()\n logerr(\"Backing up message {}.\".format(event_id))\n 
self.cursor.execute(''.join((\"INSERT INTO \"\n \"rogue.error_message VALUES (%s)\")),\n (json.dumps(record),))\n\n\nclass NorthstarDatabase(Database):\n\n def __init__(self, options={}):\n super().__init__(options)\n\n def query(self, query, record):\n \"\"\"Parse and run DB query.\n\n Return On error, raise exception and log why.\n \"\"\"\n try:\n self.cursor.execute(query)\n self.connection.commit()\n try:\n results = self.cursor.fetchall()\n return results\n except psycopg2.ProgrammingError:\n results = {}\n return results\n except psycopg2.DatabaseError:\n print(self.cursor.query)\n self.connection = _connect(opts)\n if self.connection is None:\n print(\"Error, couldn't connect to database with opts:\", opts)\n else:\n self.cursor = self.connection.cursor()\n self.cursor.execute(''.join((\"INSERT INTO \"\n \"northstar.unprocessed_users \"\n \"(northstar_record) VALUES \"\n \"(%s)\")), (json.dumps(record),))\n self.connection.commit()\n print(\"ID {} not processed. Backing up.\".format(record['id']))\n\n def query_str(self, query, string, record):\n \"\"\"Parse and run DB query.\n\n Return On error, raise exception and log why.\n \"\"\"\n try:\n self.cursor.execute(query, string)\n self.connection.commit()\n try:\n results = self.cursor.fetchall()\n return results\n except psycopg2.ProgrammingError:\n results = {}\n return results\n except psycopg2.DatabaseError:\n print(self.cursor.query)\n self.connection = _connect(opts)\n if self.connection is None:\n print(\"Error, couldn't connect to database with opts:\", opts)\n else:\n self.cursor = self.connection.cursor()\n self.cursor.execute(''.join((\"INSERT INTO \"\n \"northstar.unprocessed_users \"\n \"(northstar_record) VALUES \"\n \"(%s)\")), (json.dumps(record),))\n self.connection.commit()\n print(\"ID {} not processed. 
Backing up.\".format(record['id']))\n" }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 20.5, "blob_id": "147e5b8bf984aea93f00d9b982258644637dd969", "content_id": "adad40fae922e973c93b004f925f91abedaf8af4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 42, "license_type": "permissive", "max_line_length": 33, "num_lines": 2, "path": "/docs/compiled/ds_dbt/post_actions/post_actions.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "SELECT *\nFROM ft_dosomething_rogue.actions" }, { "alpha_fraction": 0.7211538553237915, "alphanum_fraction": 0.7211538553237915, "avg_line_length": 32, "blob_id": "03f825b3f472f4b3a315f8a4f2df6c962ab203db", "content_id": "0237f6bcc0ec12b260272eecf41c9b85812850d1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 104, "license_type": "permissive", "max_line_length": 59, "num_lines": 3, "path": "/docs/compiled/ds_dbt/schema_test/not_null_snowplow_raw_events_event_id.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "\n\n\n\nselect count(*)\nfrom \"quasar_prod_warehouse\".\"public\".\"snowplow_raw_events\"\nwhere event_id is null\n\n" }, { "alpha_fraction": 0.6684587597846985, "alphanum_fraction": 0.6684587597846985, "avg_line_length": 17.600000381469727, "blob_id": "62ddf6ace5035e80b24390523e04f0d81586cc41", "content_id": "50850bc2b1fa9cf6a2a2e066a68b2b3b2169aae7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 558, "license_type": "permissive", "max_line_length": 50, "num_lines": 30, "path": "/quasar/dbt/models/phoenix_events/snowplow_raw_events.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "SELECT\n\tb.event_id,\n\tb.event_source,\n\tb.event_datetime,\n\tb.event_name,\n\tb.event_type,\n\tb.\"host\",\n\tb.\"path\",\n b.query_parameters,\n\tb.se_category,\n\tb.se_action,\n\tb.se_label,\n\tb.session_id,\n\tb.session_counter,\n\tb.browser_size,\n\tb.northstar_id,\n\tb.device_id,\n\tb.referrer_host,\n\tb.referrer_path,\n\tb.referrer_source,\n\tp.utm_source,\n\tp.utm_medium,\n\tp.utm_campaign,\n\tp.url AS clicked_link_url,\n\tp.campaign_id,\n\tp.modal_type,\n\tp.search_query\n FROM {{ ref('snowplow_base_event') }} b\n LEFT JOIN {{ ref('snowplow_payload_event') }} p \n ON b.event_id = p.event_id\n" }, { "alpha_fraction": 0.6778115630149841, "alphanum_fraction": 0.68953537940979, "avg_line_length": 56.57500076293945, "blob_id": "6b109c0bd57403e4e3214358d552fcd8996edd3a", "content_id": "af5b2ec1b3076310575fc94850ce1288a534f0af", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 2303, "license_type": "permissive", "max_line_length": 100, "num_lines": 40, "path": "/quasar/dbt/models/campaign_info/campaign_info_all.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "SELECT c.field_campaigns_target_id as campaign_node_id,\n n2.title as campaign_node_id_title,\n c.entity_id as campaign_run_id,\n n1.title as campaign_run_id_title,\n fdfct.field_campaign_type_value as campaign_type,\n c.language as campaign_language,\n fdfrd.field_run_date_value as campaign_run_start_date,\n fdfrd.field_run_date_value2 as campaign_run_end_date,\n to_timestamp(n1.created) as campaign_created_date,\n fdfrn.field_reportback_noun_value as campaign_noun,\n fdfrv.field_reportback_verb_value as 
campaign_verb,\n array_to_string(array_agg(distinct ttd2.name), ', ') as campaign_cause_type,\n array_to_string(array_agg(distinct fdfcta.field_call_to_action_value), ', ') as campaign_cta,\n array_to_string(array_agg(distinct ttd1.name), ', ') as campaign_action_type \nFROM {{ env_var('FIELD_DATA_FIELD_CAMPAIGNS') }} c \nLEFT JOIN {{ env_var('NODE') }} n1 \n ON n1.nid = c.entity_id \nLEFT JOIN {{ env_var('NODE') }} n2 \n ON n2.nid = c.field_campaigns_target_id \nLEFT JOIN {{ env_var('FIELD_DATA_FIELD_CAMPAIGN_TYPE') }} fdfct \n ON c.field_campaigns_target_id = fdfct.entity_id \nLEFT JOIN {{ env_var('FIELD_DATA_FIELD_RUN_DATE') }} fdfrd \n ON c.entity_id = fdfrd.entity_id and c.language = fdfrd.language \nLEFT JOIN {{ env_var('FIELD_DATA_FIELD_CALL_TO_ACTION') }} fdfcta \n ON c.field_campaigns_target_id = fdfcta.entity_id and c.language = fdfcta.language \nLEFT JOIN {{ env_var('FIELD_DATA_FIELD_REPORTBACK_NOUN') }} fdfrn \n ON c.field_campaigns_target_id = fdfrn.entity_id and c.language = fdfrn.language \nLEFT JOIN {{ env_var('FIELD_DATA_FIELD_REPORTBACK_VERB') }} fdfrv \n ON c.field_campaigns_target_id = fdfrv.entity_id and c.language = fdfrv.language \nLEFT JOIN {{ env_var('FIELD_DATA_FIELD_ACTION_TYPE') }} fdfat \n ON fdfat.entity_id = c.field_campaigns_target_id \nLEFT JOIN {{ env_var('TAXONOMY_TERM_DATA') }} ttd1 \n ON fdfat.field_action_type_tid = ttd1.tid \nLEFT JOIN {{ env_var('FIELD_DATA_FIELD_CAUSE') }} fdfc \n ON fdfc.entity_id = c.field_campaigns_target_id \nLEFT JOIN {{ env_var('TAXONOMY_TERM_DATA') }} ttd2 \n ON fdfc.field_cause_tid = ttd2.tid \nWHERE c.bundle = 'campaign_run' \nGROUP BY 1,2,3,4,5,6,7,8,9,10,11 \nORDER BY c.field_campaigns_target_id, fdfrd.field_run_date_value\n" }, { "alpha_fraction": 0.6498536467552185, "alphanum_fraction": 0.6520915627479553, "avg_line_length": 37.72666549682617, "blob_id": "c5c4a4e211b314ffe8401644787403d1fe87c7ee", "content_id": "fe033d28be08ac68fa3b087143348ba0efeac869", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 5809, "license_type": "permissive", "max_line_length": 216, "num_lines": 150, "path": "/quasar/dbt/models/member_event_log/member_event_log.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "SELECT\n MD5(concat(a.northstar_id, a.\"timestamp\", a.action_id, a.action_serial_id)) AS event_id,\n a.northstar_id AS northstar_id,\n a.\"timestamp\" AS \"timestamp\",\n a.\"action\" AS action_type,\n a.action_id AS action_id,\n a.\"source\" AS \"source\",\n a.action_serial_id AS action_serial_id,\n a.channel AS channel,\n CASE \n \tWHEN date_trunc('month', a.\"timestamp\") = date_trunc('month', u.created_at) \n \tTHEN 'New' \n \tELSE 'Returning' END \n \tAS \"type\",\n MIN(\"timestamp\") \n \tOVER \n \t(PARTITION BY a.northstar_id, date_trunc('month', a.\"timestamp\")) \n \tAS first_action_month\nFROM ( \nSELECT\n DISTINCT s.northstar_id AS northstar_id,\n s.created_at AS \"timestamp\",\n 'signup' AS \"action\",\n '1' AS action_id, \n s.\"source\" AS \"source\",\n s.id::varchar AS action_serial_id,\n(CASE WHEN s.\"source\" ILIKE '%%sms%%' THEN 'sms'\nWHEN s.\"source\" NOT LIKE '%%sms%%'AND s.\"source\" NOT LIKE '%%email%%' AND s.\"source\" NOT LIKE '%%niche%%' OR s.\"source\" IN ('rock-the-vote', 'turbovote') THEN 'web'\nWHEN s.\"source\" ILIKE '%%email%%' THEN 'email'\nWHEN s.\"source\" ILIKE '%%niche%%' THEN 'niche_coregistration'\nWHEN s.\"source\" NOT LIKE '%%sms%%'AND s.\"source\" NOT LIKE '%%email%%' AND s.\"source\" NOT LIKE '%%niche%%' 
AND s.\"source\" NOT IN ('rock-the-vote', 'turbovote') AND s.\"source\" IS NOT NULL THEN 'other' END) AS \"channel\"\nFROM {{ ref('signups') }} s\nWHERE s.\"source\" IS DISTINCT FROM 'importer-client'\nAND s.\"source\" IS DISTINCT FROM 'rock-the-vote'\nAND s.\"source\" IS DISTINCT FROM 'turbovote'\nUNION ALL\nSELECT\n DISTINCT p.northstar_id AS northstar_id,\n p.created_at AS \"timestamp\",\n 'post' AS \"action\",\n '2' AS action_id,\n p.\"source\" AS \"source\",\n p.id::varchar AS action_serial_id,\n(CASE WHEN p.\"source\" ILIKE '%%sms%%' THEN 'sms'\nWHEN p.\"source\" ILIKE '%%phoenix%%' OR p.\"source\" IS NULL OR p.\"source\" ILIKE '%%turbovote%%' THEN 'web'\nWHEN p.\"source\" ILIKE '%%app%%' THEN 'mobile_app'\nWHEN p.\"source\" NOT LIKE '%%phoenix%%' AND p.\"source\" NOT LIKE '%%sms%%' AND p.\"source\" IS NOT NULL AND p.\"source\" NOT LIKE '%%app%%' AND p.\"source\" NOT LIKE '%%turbovote%%' THEN 'other' END) AS \"channel\"\nFROM {{ ref('posts') }} p\nWHERE p.status IN ('accepted', 'confirmed', 'register-OVR', 'register-form', 'pending')\nUNION ALL\nSELECT DISTINCT \n u_access.id AS northstar_id,\n u_access.last_accessed_at AS \"timestamp\",\n 'site_access' AS \"action\",\n '3' AS action_id,\n NULL AS \"source\",\n '0' AS action_serial_id,\n 'web' AS channel\nFROM northstar.users u_access\nWHERE u_access.last_accessed_at IS NOT NULL\nAND u_access.\"source\" IS DISTINCT FROM 'runscope'\nAND u_access.\"source\" IS DISTINCT FROM 'runscope-client'\nAND u_access.email IS DISTINCT FROM '[email protected]'\nAND u_access.email IS DISTINCT FROM '[email protected]'\nAND (u_access.email NOT ILIKE '%%@example.org%%' OR u_access.email IS NULL) \nUNION ALL\nSELECT DISTINCT \n u_login.id AS northstar_id,\n u_login.last_authenticated_at AS \"timestamp\",\n 'site_login' AS \"action\",\n '4' AS action_id,\n NULL AS \"source\",\n '0' AS action_serial_id,\n 'web' AS channel\nFROM northstar.users u_login\nWHERE u_login.last_authenticated_at IS NOT NULL \nAND u_login.\"source\" IS DISTINCT FROM 'runscope'\nAND u_login.\"source\" IS DISTINCT FROM 'runscope-client'\nAND u_login.email IS DISTINCT FROM '[email protected]'\nAND u_login.email IS DISTINCT FROM '[email protected]'\nAND (u_login.email NOT ILIKE '%%@example.org%%' OR u_login.email IS NULL) \nUNION ALL \nSELECT\n DISTINCT u.id AS northstar_id,\n u.created_at AS \"timestamp\",\n 'account_creation' AS action, \n '5' AS action_id,\n u.\"source\" AS \"source\",\n '0' AS action_serial_id, \n (CASE WHEN u.\"source\" ILIKE '%%sms%%' THEN 'sms'\n WHEN u.\"source\" ILIKE '%%phoenix%%' OR u.\"source\" IS NULL THEN 'web'\n WHEN u.\"source\" ILIKE '%%niche%%' THEN 'niche_coregistration'\n WHEN u.\"source\" NOT LIKE '%%niche%%' AND u.\"source\" NOT LIKE '%%sms%%' AND u.\"source\" NOT LIKE '%%phoenix%%' AND u.\"source\" IS NOT NULL THEN 'other' END) AS \"channel\"\nFROM\n (SELECT \n u_create.id,\n max(u_create.\"source\") AS \"source\",\n min(u_create.created_at) AS created_at\n FROM northstar.users u_create\nWHERE u_create.\"source\" IS DISTINCT FROM 'importer-client'\nAND u_create.\"source\" IS DISTINCT FROM 'runscope'\nAND u_create.\"source\" IS DISTINCT FROM 'runscope-client'\nAND u_create.email IS DISTINCT FROM '[email protected]'\nAND u_create.email IS DISTINCT FROM '[email protected]'\nAND (u_create.email NOT ILIKE '%%@example.org%%' OR u_create.email IS NULL) \n GROUP BY u_create.id) u\nUNION ALL \nSELECT\n DISTINCT g.user_id AS northstar_id,\n g.created_at AS \"timestamp\",\n 'messaged_gambit' AS \"action\", \n '6' AS action_id,\n 'SMS' AS \"source\",\n 
g.message_id AS action_serial_id,\n 'sms' AS \"channel\"\nFROM\n {{ ref('gambit_messages_inbound') }} g\nWHERE \n\tg.user_id IS NOT NULL\n\tAND g.macro <> 'subscriptionStatusStop' \nUNION ALL \n SELECT\n DISTINCT cio.customer_id AS northstar_id,\n cio.\"timestamp\" AS \"timestamp\",\n 'clicked_link' AS \"action\",\n '7' AS action_id,\n cio.template_id::CHARACTER AS \"source\",\n cio.event_id AS action_serial_id, \n 'email' AS \"channel\"\n FROM\n cio.email_event cio\n WHERE \n cio.event_type = 'email_clicked'\n AND cio.customer_id IS NOT NULL\nUNION ALL \nSELECT DISTINCT\n b.northstar_id AS northstar_id,\n b.click_time AS \"timestamp\",\n CONCAT('bertly_link_', b.interaction_type) AS \"action\",\n '10' AS action_id,\n 'bertly' AS \"source\",\n b.click_id AS action_serial_id,\n b.\"source\" AS \"channel\"\nFROM {{ ref('bertly_clicks') }} b\nINNER JOIN {{ ref('users') }} u\nON b.northstar_id = u.northstar_id\nWHERE b.northstar_id IS NOT NULL\nAND b.interaction_type IS DISTINCT FROM 'preview'\n) AS a\nLEFT JOIN {{ ref('users') }} u ON u.northstar_id = a.northstar_id\n" }, { "alpha_fraction": 0.698305070400238, "alphanum_fraction": 0.698305070400238, "avg_line_length": 31.11111068725586, "blob_id": "fc119ade7dd941b0c71cf27140d9bad34b60e386", "content_id": "ef97caf166976b26c548b682790da091ba3e419b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 295, "license_type": "permissive", "max_line_length": 89, "num_lines": 9, "path": "/docs/compiled/ds_dbt/schema_test/relationships_snowplow_raw_events_northstar_id__id__ref_users_.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "\n\n\n\n\nselect count(*)\nfrom (\n select northstar_id as id from \"quasar_prod_warehouse\".\"public\".\"snowplow_raw_events\"\n) as child\nleft join (\n select id as id from \"quasar_prod_warehouse\".\"public\".\"users\"\n) as parent on parent.id = child.id\nwhere child.id is not null\n and parent.id is null\n\n" }, { "alpha_fraction": 0.6671949028968811, "alphanum_fraction": 0.6671949028968811, "avg_line_length": 24.200000762939453, "blob_id": "936aa54fb46adb5adbf6f891aff494a0bb69bb5f", "content_id": "a6ee79553d0da343357b041de463b647d534a75f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 631, "license_type": "permissive", "max_line_length": 86, "num_lines": 25, "path": "/quasar/dbt/models/phoenix_events/phoenix_sessions_combined.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "SELECT \n p.session_id,\n p.event_id,\n p.device_id,\n p.landing_datetime,\n p.end_datetime as ending_datetime,\n EXTRACT(EPOCH FROM (end_datetime - landing_datetime)) AS session_duration_seconds,\n NULL as num_pages_viewed,\n p.landing_page,\n NULL as exit_page,\n NULL as days_since_last_session\nFROM public.puck_phoenix_sessions p\nUNION ALL\nSELECT\n s.session_id,\n s.event_id,\n s.device_id,\n s.landing_datetime,\n s.ending_datetime,\n s.session_duration_seconds,\n s.num_pages_viewed,\n s.landing_page,\n s.exit_page,\n s.days_since_last_session\nFROM {{ ref('snowplow_sessions') }} s\n\n" }, { "alpha_fraction": 0.6412078142166138, "alphanum_fraction": 0.6447601914405823, "avg_line_length": 20.69230842590332, "blob_id": "4b621ab878b4da89da0077f0fcecca183f6372ba", "content_id": "570ffa24bb1c271fb0e991d4260c222c466b366c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 563, "license_type": "permissive", 
"max_line_length": 46, "num_lines": 26, "path": "/data/sql/misc/gdpr_northstar_removal.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "UPDATE :users\n\tSET birthdate = date_trunc('year', birthdate)\n\tWHERE id = ':nsid';\n\nUPDATE :users\n\tSET first_name = NULL,\n\t\tlast_name = NULL,\n\t\tlast_initial = NULL,\n\t\tphoto = NULL,\n\t\temail = NULL,\n\t\tmobile = NULL,\n\t\tfacebook_id = NULL,\n\t\tinterests = NULL,\n\t\taddr_street1 = NULL,\n\t\taddr_street2 = NULL,\n\t\taddr_source = NULL,\n\t\tslack_id = NULL,\n\t\tsms_status = NULL,\n\t\tsms_paused = NULL,\n\t\tdrupal_id = NULL,\n\t\t\"role\" = NULL,\n\t\tlast_accessed_at = NULL,\n\t\tlast_authenticated_at = NULL,\n\t\tlast_messaged_at = NULL,\n\t\temail_subscription_status = NULL \n\tWHERE id = ':nsid';" }, { "alpha_fraction": 0.6131078004837036, "alphanum_fraction": 0.6131078004837036, "avg_line_length": 21.5238094329834, "blob_id": "a1b23ef6986e1a0ec83cd9bc749ab8a092d9c26f", "content_id": "97b0b9b0f47dc3f3747847320d26d7b1e632e6ca", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 473, "license_type": "permissive", "max_line_length": 67, "num_lines": 21, "path": "/quasar/user_activity.py", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "from .sql_utils import run_sql_file_raw, refresh_materialized_view\n\n\ndef create():\n run_sql_file_raw('./data/sql/derived-tables/user_activity.sql')\n\n\ndef create_for_dbt_validation():\n run_sql_file_raw(''.join((\"./data/sql/derived-tables/\"\n \"user_activity_dbt_validation.sql\")))\n\n\ndef refresh():\n refresh_materialized_view('public.user_activity')\n\n\nif __name__ == \"__create__\":\n create()\n\nif __name__ == \"__refresh__\":\n refresh()\n" }, { "alpha_fraction": 0.6596082448959351, "alphanum_fraction": 0.6664901971817017, "avg_line_length": 26.77941131591797, "blob_id": "1cbe856638f39bde1d6df89562f3913f082cd258", "content_id": "ece791126357c07d686a822b2fde81e6c085a3ac", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1889, "license_type": "permissive", "max_line_length": 78, "num_lines": 68, "path": "/quasar/dbt/models/campaign_activity/posts.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "SELECT\n\tpd.northstar_id as northstar_id,\n\tpd.id AS id,\n\tpd.\"type\" AS \"type\",\n\ta.\"name\" AS \"action\",\n\tpd.status AS status,\n\tpd.quantity AS quantity,\n\tpd.campaign_id,\n\tCASE\n\t\tWHEN pd.id IS NULL THEN NULL\n\t\tWHEN a.\"name\" = 'voter-reg OTG'\n\t\tTHEN pd.quantity\n\t\tELSE 1 END AS reportback_volume,\n\tpd.\"source\" AS \"source\",\n\tCASE\n\t\tWHEN pd.\"source\" IS NULL THEN NULL\n\t\tWHEN pd.\"source\" ilike '%%sms%%' THEN 'sms'\n\t\tELSE 'web' END AS source_bucket,\n\tCASE\n\t\tWHEN pd.\"type\" = 'phone-call'\n\t\tTHEN (pd.details::json ->> 'call_timestamp')::timestamptz\n\t\tELSE COALESCE(rtv.created_at, tv.created_at, pd.created_at)\n\t\tEND AS created_at,\n\tpd.url AS url,\n\tpd.text,\n\tCASE\n\t\tWHEN s.\"source\" = 'importer-client'\n\t\tAND pd.\"type\" = 'share-social'\n\t\tAND pd.created_at < s.created_at\n\t\tTHEN -1\n\t\tELSE pd.signup_id END AS signup_id,\n\tCASE\n\t\tWHEN pd.id IS NULL\n\t\tTHEN NULL\n\t\tELSE CONCAT(pd.\"type\", ' - ', a.\"name\") END AS post_class,\n\tCASE WHEN pd.status IN ('accepted', 'pending')\n\t\tAND a.\"name\" NOT ILIKE '%%vote%%'\n\t\tTHEN 1\n\t\tWHEN pd.status IN ('accepted', 'confirmed', 'register-OVR', 
'register-form')\n\t\tAND a.\"name\" ILIKE '%%vote%%'\n\t\tTHEN 1\n\t\tELSE NULL END AS is_accepted,\n\tpd.action_id,\n\tpd.location,\n\tpd.postal_code,\n\ta.reportback AS is_reportback,\n\ta.civic_action,\n\ta.scholarship_entry\nFROM {{ env_var('FT_ROGUE') }}.posts pd\nINNER JOIN {{ ref('signups') }} s\n\tON pd.signup_id = s.id\nLEFT JOIN {{ ref('turbovote') }} tv\n\tON tv.post_id::bigint = pd.id::bigint\nLEFT JOIN\n\t(SELECT\n\t\tDISTINCT r.*,\n\t\tCASE\n\t\t\tWHEN r.started_registration < '2017-01-01'\n\t\t\tTHEN r.started_registration + interval '4 year'\n\t\t\tELSE r.started_registration END AS created_at\n\t\tFROM {{ ref('rock_the_vote') }} r\n\t) rtv\n\tON rtv.post_id::bigint = pd.id::bigint\nLEFT JOIN {{ env_var('FT_ROGUE') }}.actions a\n\tON pd.action_id = a.id\nWHERE pd.deleted_at IS NULL\nAND pd.\"text\" IS DISTINCT FROM 'test runscope upload'\nAND a.\"name\" IS NOT NULL\n" }, { "alpha_fraction": 0.7456140518188477, "alphanum_fraction": 0.7456140518188477, "avg_line_length": 35.33333206176758, "blob_id": "63f2b61d2310585d068368264f8989bb4e4d0984", "content_id": "79bc8d5e5cee1792aa467098867117789263060e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 114, "license_type": "permissive", "max_line_length": 63, "num_lines": 3, "path": "/docs/compiled/ds_dbt/schema_test/not_null_phoenix_events_combined_event_datetime.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "\n\n\n\nselect count(*)\nfrom \"quasar_prod_warehouse\".\"public\".\"phoenix_events_combined\"\nwhere event_datetime is null\n\n" }, { "alpha_fraction": 0.7086614370346069, "alphanum_fraction": 0.711614191532135, "avg_line_length": 38.07692337036133, "blob_id": "7d561c2e0e4eb9b70ca9aacd811fb83ff9727351", "content_id": "5c5a43946acad903becc3a4d59c5bd6ab1c188da", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1016, "license_type": "permissive", "max_line_length": 258, "num_lines": 26, "path": "/quasar/dbt/models/phoenix_events/snowplow_base_event.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "SELECT \n event_id AS event_id,\n app_id AS event_source,\n collector_tstamp AS event_datetime,\n se_property AS event_name,\n \"event\" AS event_type,\n page_urlhost AS host,\n page_urlpath AS \"path\",\n page_urlquery AS query_parameters,\n se_category,\n se_action,\n se_label,\n domain_sessionid AS session_id,\n domain_sessionidx AS session_counter,\n dvce_type AS browser_size,\n user_id AS northstar_id,\n domain_userid AS device_id,\n refr_urlhost AS referrer_host,\n refr_urlpath AS referrer_path,\n refr_source AS referrer_source\n FROM {{ env_var('FT_SNOWPLOW') }}.\"event\"\n WHERE event_id NOT IN\n (SELECT event_id\n FROM {{ env_var('FT_SNOWPLOW') }}.ua_parser_context u\n WHERE u.useragent_family SIMILAR TO\n '%%(bot|crawl|slurp|spider|archiv|spinn|sniff|seo|audit|survey|pingdom|worm|capture|(browser|screen)shots|analyz|index|thumb|check|facebook|YandexBot|Twitterbot|a_archiver|facebookexternalhit|Bingbot|Googlebot|Baiduspider|360(Spider|User-agent)|Ghost)%%')\n" }, { "alpha_fraction": 0.5785837769508362, "alphanum_fraction": 0.5820379853248596, "avg_line_length": 31.16666603088379, "blob_id": "e6675410b51bdd96bd8a0fb8bc0d45ba4ab7a528", "content_id": "85f9a0fd79dc444d1ceff1df52df035f0da14f0f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1158, "license_type": "permissive", 
"max_line_length": 68, "num_lines": 36, "path": "/quasar/gdpr_comply.py", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "import csv\nimport os\nimport sys\n\nfrom .sql_utils import run_sql_file\nfrom .utils import log\n\n\ndef remove_northstar(nsid):\n # Removes Northstar user data for GDPR compliance.\n data = {'users': os.getenv('NORTHSTAR_USERS'), 'nsid': nsid}\n run_sql_file('./data/sql/misc/gdpr_northstar_removal.sql', data)\n\n\ndef remove_cio(nsid):\n # Removes CIO user data for GDPR compliance.\n data = {'customer_event': os.getenv('CIO_CUSTOMER_EVENT'),\n 'email_bounced': os.getenv('CIO_EMAIL_BOUNCED'),\n 'email_event': os.getenv('CIO_EMAIL_EVENT'),\n 'email_sent': os.getenv('CIO_EMAIL_SENT'),\n 'event_log': os.getenv('CIO_EVENT_LOG'),\n 'nsid': nsid}\n run_sql_file('./data/sql/misc/gdpr_cio_removal.sql', data)\n\n\ndef gdpr_from_file():\n with open(sys.argv[1]) as csvfile:\n ids = csv.reader(csvfile, delimiter=',')\n for id in ids:\n # First line might contain \"id\" as column name\n if id[0] == 'id':\n pass\n else:\n log(\"Removing Northstar ID {}\".format(id))\n remove_northstar(id[0])\n remove_cio(id[0])\n" }, { "alpha_fraction": 0.7008196711540222, "alphanum_fraction": 0.7008196711540222, "avg_line_length": 33.85714340209961, "blob_id": "345e90a9e8d222f065b645bb00885d2efd40c3d5", "content_id": "15df0c29f48bd1f624c66ddab5b598b3b1eec1b7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 244, "license_type": "permissive", "max_line_length": 88, "num_lines": 7, "path": "/quasar/dbt/models/campaign_info/campaign_info_international.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "SELECT \n\tc.id AS campaign_id,\n\tc.internal_title AS campaign_name,\n\ti.*\nFROM {{ ref('campaign_info_all') }} i\nLEFT JOIN {{ env_var('FT_ROGUE') }}.campaigns c ON i.campaign_run_id = c.campaign_run_id\nWHERE campaign_language IS DISTINCT FROM 'en'\n" }, { "alpha_fraction": 0.6253520846366882, "alphanum_fraction": 0.6253520846366882, "avg_line_length": 25.259260177612305, "blob_id": "fd6f3d4378614c330b2aba7d714d4f9fda12b489", "content_id": "a5954d55626364bc4c081475a230ff5898c3b7dd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 710, "license_type": "permissive", "max_line_length": 79, "num_lines": 27, "path": "/quasar/dbt/models/phoenix_events/snowplow_phoenix_events.sql", "repo_name": "blisteringherb/quasar", "src_encoding": "UTF-8", "text": "SELECT\n e.event_id,\n e.event_datetime,\n CASE WHEN e.event_name IS NULL\n AND e.event_type = 'pv'\n THEN 'view' ELSE e.event_name END AS event_name,\n e.event_source,\n e.\"path\",\n e.\"host\",\n e.query_parameters,\n e.clicked_link_url,\n e.utm_source AS page_utm_source,\n e.utm_medium AS page_utm_medium,\n e.utm_campaign AS page_utm_campaign,\n e.referrer_host,\n e.referrer_path,\n e.referrer_source,\n e.campaign_id,\n i.campaign_name,\n e.modal_type,\n e.search_query,\n e.session_id,\n e.browser_size,\n e.northstar_id,\n e.device_id\nFROM {{ ref('snowplow_raw_events') }} e\nLEFT JOIN {{ ref('campaign_info') }} i ON i.campaign_id = e.campaign_id::bigint\n\n" } ]
59
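For illustration, a sketch of the query that quasar's import_records() (in cio_import_scratch_records.py above) assembles for one of the tables listed in cio_import(). The rendered SQL in the trailing comment is derived from the record's own format string; it is not a separate file in the repo:

```python
# Reproduces the query construction from cio_import_scratch_records.py
# for the 'cio.email_sent' table listed in cio_import().
table = 'cio.email_sent'
scratch = table + '_scratch'
query = ("INSERT INTO {} SELECT * FROM {} "
         "ON CONFLICT (email_id, customer_id, timestamp) "
         "DO NOTHING").format(table, scratch)
print(query)
# -> INSERT INTO cio.email_sent SELECT * FROM cio.email_sent_scratch
#    ON CONFLICT (email_id, customer_id, timestamp) DO NOTHING  (printed as one line)
```

The ON CONFLICT ... DO NOTHING clause makes the import idempotent: rows already present in the target table are skipped instead of raising duplicate-key errors, which is why truncate_scratch() can safely run afterwards.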
Xinkle/pyreportcard
https://github.com/Xinkle/pyreportcard
65bee3715f2f04fd4a88f9af513fbd99ac49b901
a6b024b533bb4fc5f382324e1d98409c79ab94ee
1b144a4b3be272530d776690a935aa3acc8eead7
refs/heads/master
2021-01-19T10:24:34.257530
2017-04-10T22:44:41
2017-04-10T22:44:41
87,863,708
0
0
null
2017-04-10T22:34:23
2017-04-07T14:07:04
2017-03-29T00:48:52
null
[ { "alpha_fraction": 0.699367105960846, "alphanum_fraction": 0.7056962251663208, "avg_line_length": 21.571428298950195, "blob_id": "33df6dd34a8316a1bc20d922e47c501e0ee4e0d2", "content_id": "8458c4dd0102f9f464ac9d3eacb353f63ac16bf9", "detected_licenses": [ "MIT", "CC-BY-3.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 316, "license_type": "permissive", "max_line_length": 44, "num_lines": 14, "path": "/config.py", "repo_name": "Xinkle/pyreportcard", "src_encoding": "UTF-8", "text": "from config_secret import SecretConfig\n\n\nclass Config:\n SECRET_KEY = SecretConfig.SECRET_KEY\n\n # MongoDB config\n MONGO_DBNAME = SecretConfig.MONGO_DBNAME\n MONGO_HOST = SecretConfig.MONGO_HOST\n MONGO_PORT = SecretConfig.MONGO_PORT\n\n # Cloning config\n CLONE_TMP_DIR = 'tmp'\n CLONE_TIMEOUT = 30\n" } ]
1
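The config.py above imports a config_secret module that is not part of this snapshot. Below is a minimal sketch of what it would need to provide, inferred from the attributes config.py reads; all values here are placeholders, not the project's real settings:

```python
# Hypothetical config_secret.py: attribute names taken from config.py,
# values are placeholders to be replaced with real credentials.
class SecretConfig:
    SECRET_KEY = 'change-me'
    MONGO_DBNAME = 'reportcard'
    MONGO_HOST = 'localhost'
    MONGO_PORT = 27017
```

Keeping these values in an untracked module like this is a common way to keep secrets out of version control.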
epfl-dlab/BT-eval
https://github.com/epfl-dlab/BT-eval
c0df920eed12cc7050d414c8cbb309c05b59f99f
0cf2d2d7d0e194a7df4e3b9237a7f0624d1450f0
dd29d87afd4d54542333ca4029dad4cb0fefc978
refs/heads/main
2023-05-03T17:24:20.352507
2021-06-01T12:43:59
2021-06-01T12:43:59
371,016,890
6
0
null
null
null
null
null
[ { "alpha_fraction": 0.6037898659706116, "alphanum_fraction": 0.611541748046875, "avg_line_length": 37.70000076293945, "blob_id": "8f55a2a9c09a18370e476d3bdf9de6798ef45499", "content_id": "0e2a19769e4e0956cc838d3448bfa8015776b90d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1161, "license_type": "permissive", "max_line_length": 112, "num_lines": 30, "path": "/TrueSkill.py", "repo_name": "epfl-dlab/BT-eval", "src_encoding": "UTF-8", "text": "import itertools\nimport trueskill as ts\nfrom trueskill import rate_1vs1\n\n\ndef TrueSkill(df, mu=ts.MU, sigma=ts.SIGMA,\n beta=ts.BETA, tau=ts.TAU,\n draw_prob=ts.DRAW_PROBABILITY):\n\n trueskill_env = ts.TrueSkill(mu=mu, sigma=sigma, beta=beta, tau=tau, draw_probability=draw_prob)\n competitors = df.columns\n\n system_ratings = {}\n for x in competitors:\n system_ratings[x] = trueskill_env.create_rating()\n\n for (sys_a, sys_b) in itertools.combinations(competitors, 2):\n scores_a, scores_b = df[sys_a], df[sys_b]\n for Xs_a, Xs_b in zip(scores_a, scores_b):\n if Xs_a > Xs_b:\n sys_a_rating, sys_b_rating = rate_1vs1(system_ratings[sys_a], system_ratings[sys_b])\n elif Xs_a < Xs_b:\n sys_b_rating, sys_a_rating = rate_1vs1(system_ratings[sys_b], system_ratings[sys_a])\n else:\n sys_b_rating, sys_a_rating = rate_1vs1(system_ratings[sys_b], system_ratings[sys_a], drawn=True)\n\n system_ratings[sys_a] = sys_a_rating\n system_ratings[sys_b] = sys_b_rating\n\n return [system_ratings[sys].mu for sys in df.columns]\n" }, { "alpha_fraction": 0.5612813234329224, "alphanum_fraction": 0.5807799696922302, "avg_line_length": 31.636363983154297, "blob_id": "5709cee1d81049ae169d3777ff7013237934b06b", "content_id": "f96c53b79ca56c09c47a460fa2254fbcb70a5242", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1436, "license_type": "permissive", "max_line_length": 104, "num_lines": 44, "path": "/Elo.py", "repo_name": "epfl-dlab/BT-eval", "src_encoding": "UTF-8", "text": "import itertools\n\n\nclass Elo:\n def __init__(self, k, g=1, homefield=0):\n self.ratingDict = {}\n self.k = k\n self.g = g\n self.homefield = homefield\n\n def add_player(self, name, rating=1500):\n self.ratingDict[name] = rating\n\n def game_over(self, winner, loser, winnerHome=False):\n if winnerHome:\n result = self.expectResult(self.ratingDict[winner] + self.homefield, self.ratingDict[loser])\n else:\n result = self.expectResult(self.ratingDict[winner], self.ratingDict[loser] + self.homefield)\n\n self.ratingDict[winner] = self.ratingDict[winner] + (self.k * self.g) * (1 - result)\n self.ratingDict[loser] = self.ratingDict[loser] + (self.k * self.g) * (0 - (1 - result))\n\n def expectResult(self, p1, p2):\n exp = (p2 - p1) / 400.0\n return 1 / ((10.0**(exp)) + 1)\n\n\ndef ELO(df, k=20, g=1, homefield=0):\n # n_competitors = df.shape[1]\n competitors = df.columns\n\n elo_eval = Elo(k, g, homefield)\n for x in competitors:\n elo_eval.add_player(x)\n\n for (sys_a, sys_b) in itertools.combinations(competitors, 2):\n scores_a, scores_b = df[sys_a], df[sys_b]\n for Xs_a, Xs_b in zip(scores_a, scores_b):\n if Xs_a > Xs_b:\n elo_eval.game_over(sys_a, sys_b)\n else:\n elo_eval.game_over(sys_b, sys_a)\n\n return [elo_eval.ratingDict[sys] for sys in df.columns]\n" }, { "alpha_fraction": 0.5728021860122681, "alphanum_fraction": 0.591934859752655, "avg_line_length": 47.075469970703125, "blob_id": "fa34fddbe04c854fc079a9946e513be77e713535", "content_id": 
"31762e774fdf4c13212f6f9b333cbddc65b0fd50", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10192, "license_type": "permissive", "max_line_length": 165, "num_lines": 212, "path": "/simulations.py", "repo_name": "epfl-dlab/BT-eval", "src_encoding": "UTF-8", "text": "from collections import defaultdict\nimport numpy as np\nimport scipy.stats as stats\nimport pandas as pd\nfrom bt import BT\nfrom Elo import ELO\nfrom TrueSkill import TrueSkill\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\n\n\ndef sample_dataset(n_regular_types, n_reverse_types, sys_strength, noise=1, n=100):\n systems_scores = defaultdict(list)\n for _ in range(n_regular_types):\n scale = 100 * np.random.rand()\n for sys, alpha in sys_strength.items():\n n_samples = int(n / n_regular_types)\n systems_scores[sys].extend(stats.norm.rvs(loc=alpha + scale, scale=noise, size=n_samples))\n\n # for _ in range(n_reverse_types):\n # scale = 100 * np.random.rand()\n # for sys, alpha in sys_strength.items():\n # inverse_perf = 100 - alpha\n # systems_scores[sys].extend(stats.norm.rvs(loc=inverse_perf + scale, scale=noise, size=n))\n\n return pd.DataFrame.from_dict(systems_scores)\n\n\ndef add_outliers(df, sys_strengths, percent_outliers):\n row_list = []\n if percent_outliers > 0:\n n_outliers = max(int(percent_outliers * df.shape[0]), 1)\n else:\n n_outliers = int(percent_outliers * df.shape[0])\n strengths = list(sys_strengths.values())\n while len(row_list) < n_outliers:\n observations = np.random.rand(len(sys_strengths.keys()))\n if stats.kendalltau(observations, strengths)[0] < 0.:\n row_list.append(dict(zip(sys_strengths.keys(), 100 * observations)))\n\n return pd.concat([df, pd.DataFrame(row_list)], axis=0)\n\n\ndef evaluate(df, sys_strength, method):\n mean_scores = dict(zip(df.columns, df.mean(axis=0).to_list()))\n median_scores = dict(zip(df.columns, df.median(axis=0).to_list()))\n if method['name'] == 'BT':\n bt_scores = dict(zip(df.columns, BT(df)))\n elif method['name'] == 'ELO':\n bt_scores = dict(zip(df.columns, ELO(df, method['k'])))\n else:\n bt_scores = dict(zip(df.columns, TrueSkill(df, method['mu'], method['sigma'], method['beta'])))\n\n bt, mean, median = [], [], []\n for s in sys_strength.keys():\n bt.append(bt_scores[s])\n median.append(median_scores[s])\n mean.append(mean_scores[s])\n strengths = list(sys_strength.values())\n\n return stats.kendalltau(strengths, mean)[0], stats.kendalltau(strengths, median)[0], stats.kendalltau(strengths, bt)[0]\n\n\ndef run_simulations(n_regular_list, percentage_reverse, percent_outliers_list, n_systems_list, n_samples_list, method='BT'):\n n_repeat = 10\n mean_perf, median_perf, bt_perf = [], [], []\n number_samples, number_regular_types, percent_outliers, number_reverse_types, noise, n_systems = [], [], [], [], [], []\n for n_reg in n_regular_list:\n for rev_percent in percentage_reverse:\n n_rev = int(rev_percent * n_reg) # + 1\n for outlier_percent in percent_outliers_list:\n for n_sys in n_systems_list:\n for n_samples in n_samples_list:\n for _ in range(n_repeat):\n strengths = np.random.rand(n_sys)\n strengths /= np.sum(strengths)\n sys_strengths = dict(zip(['sys_{}'.format(i) for i in range(n_sys)], 10 * strengths))\n dataset = sample_dataset(n_reg, n_rev, sys_strengths, n=n_samples)\n dataset = add_outliers(dataset, sys_strengths, outlier_percent)\n # print(dataset)\n # exit()\n res = evaluate(dataset, sys_strengths, method=method)\n mean, median, bt = res\n\n 
mean_perf.append(mean)\n median_perf.append(median)\n bt_perf.append(bt)\n percent_outliers.append(outlier_percent)\n number_samples.append(dataset.shape[0])\n number_regular_types.append(n_reg)\n number_reverse_types.append(rev_percent)\n noise.append(0.1)\n n_systems.append(n_sys)\n\n return pd.DataFrame.from_dict({'Mean': mean_perf, 'Median': median_perf, 'BT': bt_perf,\n 'n_samples': number_samples, 'n_regular': number_regular_types, 'n_outliers': percent_outliers, 'n_reverse': number_reverse_types,\n 'noise': noise, 'n_systems': n_systems})\n\n\ndef obtain_x_y_yerr(df, name_x, name_y):\n x = df.groupby(name_x).mean().index.to_list()\n y = df.groupby(name_x).mean()[name_y].to_list()\n # m = df.groupby(name_x).quantile(0.10)[name_y]\n # M = df.groupby(name_x).quantile(0.90)[name_y]\n # yerr = (M - m) / 2.\n yerr = 2 * 1.96 * df.groupby(name_x).sem()[name_y].to_numpy()\n return x, y, yerr\n\n\nif __name__ == '__main__':\n n_regular_list = [1, 3, 5, 10]\n percentage_reverse = [0.]\n n_systems_list = [2, 3, 5, 10, 25, 50]\n percent_outliers_list = [0., 0.01, 0.025, 0.05, 0.075]\n n_samples = [10, 30, 100, 200]\n res_df = run_simulations(n_regular_list, percentage_reverse, percent_outliers_list, n_systems_list, n_samples)\n\n print(res_df.mean(axis=0))\n easy_cases = res_df[(res_df['n_outliers'] == 0.) & (res_df['n_reverse'] == 0.)]\n easy_cases = easy_cases[easy_cases['n_regular'] == 1]\n\n fig, axes = plt.subplots(1, 6, figsize=(30, 5), sharey=True)\n ft = 15\n ax = axes[0]\n x_axis = 'n_systems'\n x, y, yerr = obtain_x_y_yerr(easy_cases, x_axis, 'Mean')\n ax.errorbar(x, y, yerr, elinewidth=2, linewidth=2, fmt='-o', ms=7, color='tab:blue')\n x, y, yerr = obtain_x_y_yerr(easy_cases, x_axis, 'Median')\n ax.errorbar(x, y, yerr, elinewidth=2, linewidth=2, fmt='-o', ms=7, color='tab:green')\n x, y, yerr = obtain_x_y_yerr(easy_cases, x_axis, 'BT')\n ax.errorbar(x, y, yerr, elinewidth=2, linewidth=2, fmt='-o', ms=7, color='tab:red')\n ax.set_xlabel('Number of systems (easy cases)', fontsize=ft)\n ax.set_ylabel('Kendall\\'s tau with true strengths', fontsize=ft)\n\n # ax.set_ylabel('Kendall\\'s tau with true strengths', fontsize=17)\n\n # res_df['difficulty'] = res_df['n_outliers'] # + res_df['n_reverse']\n ax = axes[1]\n outliers_df = res_df[res_df['n_regular'] == 1]\n x_axis = 'n_outliers'\n x, y, yerr = obtain_x_y_yerr(outliers_df, x_axis, 'Mean')\n ax.errorbar(x, y, yerr, elinewidth=2, linewidth=2, fmt='-o', ms=7, color='tab:blue')\n x, y, yerr = obtain_x_y_yerr(outliers_df, x_axis, 'Median')\n ax.errorbar(x, y, yerr, elinewidth=2, linewidth=2, fmt='-o', ms=7, color='tab:green')\n x, y, yerr = obtain_x_y_yerr(outliers_df, x_axis, 'BT')\n ax.errorbar(x, y, yerr, elinewidth=2, linewidth=2, fmt='-o', ms=7, color='tab:red')\n ax.set_xlabel('Percentage of outliers (1 test type)', fontsize=ft)\n # ax.set_ylabel('Kendall\\'s tau with true strengths', fontsize=18)\n\n ax = axes[2]\n regular_df = res_df[res_df['n_outliers'] == 0.]\n x_axis = 'n_regular'\n x, y, yerr = obtain_x_y_yerr(regular_df, x_axis, 'Mean')\n ax.errorbar(x, y, yerr, elinewidth=2, linewidth=2, fmt='-o', ms=7, color='tab:blue')\n x, y, yerr = obtain_x_y_yerr(regular_df, x_axis, 'Median')\n ax.errorbar(x, y, yerr, elinewidth=2, linewidth=2, fmt='-o', ms=7, color='tab:green')\n x, y, yerr = obtain_x_y_yerr(regular_df, x_axis, 'BT')\n ax.errorbar(x, y, yerr, elinewidth=2, linewidth=2, fmt='-o', ms=7, color='tab:red')\n ax.set_xlabel('Test instances types (no outliers)', fontsize=ft)\n # ax.set_ylabel('Kendall\\'s tau with true 
strengths', fontsize=18)\n\n ax = axes[3]\n # regular_df = res_df[res_df['n_outliers'] == 0.]\n x_axis = 'n_regular'\n x, y, yerr = obtain_x_y_yerr(res_df, x_axis, 'Mean')\n ax.errorbar(x, y, yerr, elinewidth=2, linewidth=2, fmt='-o', ms=7, color='tab:blue')\n x, y, yerr = obtain_x_y_yerr(res_df, x_axis, 'Median')\n ax.errorbar(x, y, yerr, elinewidth=2, linewidth=2, fmt='-o', ms=7, color='tab:green')\n x, y, yerr = obtain_x_y_yerr(res_df, x_axis, 'BT')\n ax.errorbar(x, y, yerr, elinewidth=2, linewidth=2, fmt='-o', ms=7, color='tab:red')\n ax.set_xlabel('Test instances types (with outliers)', fontsize=ft)\n\n ax = axes[4]\n # outliers_df = res_df[res_df['n_regular'] == 1]\n x_axis = 'n_outliers'\n x, y, yerr = obtain_x_y_yerr(res_df, x_axis, 'Mean')\n ax.errorbar(x, y, yerr, elinewidth=2, linewidth=2, fmt='-o', ms=7, color='tab:blue')\n x, y, yerr = obtain_x_y_yerr(res_df, x_axis, 'Median')\n ax.errorbar(x, y, yerr, elinewidth=2, linewidth=2, fmt='-o', ms=7, color='tab:green')\n x, y, yerr = obtain_x_y_yerr(res_df, x_axis, 'BT')\n ax.errorbar(x, y, yerr, elinewidth=2, linewidth=2, fmt='-o', ms=7, color='tab:red')\n ax.set_xlabel('Percentage of outliers (varying test types)', fontsize=ft)\n\n ax = axes[5]\n x_axis = 'n_systems'\n x, y, yerr = obtain_x_y_yerr(res_df, x_axis, 'Mean')\n ax.errorbar(x, y, yerr, elinewidth=2, linewidth=2, fmt='-o', ms=7, color='tab:blue')\n x, y, yerr = obtain_x_y_yerr(res_df, x_axis, 'Median')\n ax.errorbar(x, y, yerr, elinewidth=2, linewidth=2, fmt='-o', ms=7, color='tab:green')\n x, y, yerr = obtain_x_y_yerr(res_df, x_axis, 'BT')\n ax.errorbar(x, y, yerr, elinewidth=2, linewidth=2, fmt='-o', ms=7, color='tab:red')\n ax.set_xlabel('Number of systems (all cases)', fontsize=ft)\n\n legend_elem = [Line2D([0], [0], linestyle='-', linewidth=2, c='tab:blue', label='Mean'),\n Line2D([0], [0], linestyle='-', linewidth=2, c=\"tab:green\", label='Median'),\n Line2D([0], [0], linestyle='-', linewidth=2, c=\"tab:red\", label='BT')]\n\n fig.legend(handles=legend_elem, ncol=3, loc='upper center', frameon=False, fontsize=21, bbox_to_anchor=(0.55, 1.15))\n #fig.legend(handles=legend_elem, fontsize=13)\n fig.tight_layout(pad=1.1)\n # fig.savefig(\"simulations.pdf\", bbox_inches=\"tight\")\n\n plt.show()\n # Plot for increasing n_systems for easy cases\n\n # Plot with increasing difficulty: outliers + rev\n\n # Plot for increasing n_system all cases\n\n # Plot for increasing noise easy cases\n\n # Plot for increasing noise all cases\n" }, { "alpha_fraction": 0.7804877758026123, "alphanum_fraction": 0.8130081295967102, "avg_line_length": 60.5, "blob_id": "13d33e43731972e5c0014a35573ea5a4bccdd8a8", "content_id": "169514c2dc8eb98ced46efd3ee11b2227503731d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 123, "license_type": "permissive", "max_line_length": 112, "num_lines": 2, "path": "/README.md", "repo_name": "epfl-dlab/BT-eval", "src_encoding": "UTF-8", "text": "# BT-eval\nCode to reproduce experiments of the ACL 2021 publication on the evaluation of NLP systems with the BT mechanism\n" }, { "alpha_fraction": 0.5071967244148254, "alphanum_fraction": 0.5257025361061096, "avg_line_length": 27.60784339904785, "blob_id": "43a7ffbc6f06f35433984fa4816bee069c46c0aa", "content_id": "842c678ba70e6eedee685967b7a82b46e589f7aa", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1459, "license_type": "permissive", "max_line_length": 113, "num_lines": 
51, "path": "/bt.py", "repo_name": "epfl-dlab/BT-eval", "src_encoding": "UTF-8", "text": "import itertools\nimport numpy as np\nimport pandas as pd\n\n\ndef BT(df, epsilon=1e-9):\n n_competitors = df.shape[1]\n competitors = df.columns\n\n win_matrix = np.zeros((n_competitors, n_competitors))\n\n for pair in itertools.combinations(range(n_competitors), 2):\n idx_a, idx_b = pair\n competitor_a = competitors[idx_a]\n competitor_b = competitors[idx_b]\n\n win_ab = np.sum([int(score_a > score_b) for score_a, score_b in zip(df[competitor_a], df[competitor_b])])\n win_ba = df.shape[0] - win_ab\n\n win_matrix[idx_a][idx_b] = win_ab\n win_matrix[idx_b][idx_a] = win_ba\n\n W = np.sum(win_matrix, axis=1)\n p = [0.5] * n_competitors\n\n while True:\n new_p = [0.5] * n_competitors\n for i in range(n_competitors):\n summing_term = 0\n for j in range(n_competitors):\n if i == j:\n continue\n\n summing_term += (win_matrix[i][j] + win_matrix[j][i]) / (p[i] + p[j])\n\n new_p[i] = W[i] / summing_term\n\n new_p /= np.sum(new_p)\n diff = np.sum([(x - y) ** 2 for x, y in zip(p, new_p)])\n if diff < epsilon:\n return new_p\n p = new_p\n\n\nif __name__ == '__main__':\n player_a = [2, 5, 2, 3, 4]\n player_b = [1, 2, 3, 4, 1]\n player_c = [2, 4, 5, 2, 2]\n df = pd.DataFrame.from_dict({'player_a': player_a, 'player_b': player_b, 'player_c': player_c})\n res = BT(df)\n print(res)\n" } ]
5
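A minimal usage sketch tying the modules above together. It assumes bt.py and Elo.py are importable from the working directory and reuses the toy scores from bt.py's __main__ block:

```python
import pandas as pd

from bt import BT
from Elo import ELO

# Rows are test instances, columns are competing systems --
# the shape that BT(), ELO() and TrueSkill() all expect.
df = pd.DataFrame({'player_a': [2, 5, 2, 3, 4],
                   'player_b': [1, 2, 3, 4, 1],
                   'player_c': [2, 4, 5, 2, 2]})

print(BT(df))         # Bradley-Terry strengths, normalized to sum to 1
print(ELO(df, k=20))  # Elo ratings after all pairwise games
```

Note that BT() returns normalized strengths while ELO() returns raw ratings around the 1500 starting value, so only the orderings, not the scales, are directly comparable.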
bkang003/UNGC
https://github.com/bkang003/UNGC
6a0c234842d8d161c3e14bb10b96d389ec78f093
c14d0fe172d6a666c0990da12306b1a54fae8b1f
0bfe8cb2dd6a0dda69c81348330b3ffce17cc72f
refs/heads/master
2020-04-05T09:27:14.212805
2018-12-05T13:55:01
2018-12-05T13:55:01
156,757,371
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6135371327400208, "alphanum_fraction": 0.6324599981307983, "avg_line_length": 24.462963104248047, "blob_id": "bf3bf9365a0e66149e59b4479da6b89eab9257fd", "content_id": "77e240a6b91eaa27eaf50faf31001920c630fc50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1374, "license_type": "no_license", "max_line_length": 71, "num_lines": 54, "path": "/test.py", "repo_name": "bkang003/UNGC", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 1 11:44:04 2018\n\n@author: BryanK\n\"\"\"\nimport pickle\n\n# Initialize a country search by CountryCode\n\na_list = ('GBR','ARG','BRA')\n# a_list = ('GBR','')\nnews_list = []\n\npath = os.path.expanduser(\"/Users/BryanK//Documents/Github/UNGC/*.csv\")\nall_rec = iglob(path, recursive=True) \n#dataframes = (pd.read_csv(f) for f in all_rec)\n#df = pd.concat(dataframes, ignore_index=True)\n\n# Select corresponding country in the dataset.\n# Read in taxonomy from 'triggerlist.txt'\nfor f in all_rec:\n print('\\nProcessing',f + '\\n')\n news_list.extend(process(pd.read_csv(f),*a_list))\ntrig_dict = read_trigger_config('triggerlist.txt')\n\n# Filter News with the trigger in the trig_dict\nnum_line = 500\n#print(num_line)\nfilter_stories(news_list,trig_dict,num_line)\npickle.dump(news_list, open('2013_news.p','wb'))\n\n\n\"\"\"\nPrint first num_line sample text with '-------' as separator.\n\"\"\"\n#for i in range(num_line):\n# if news_list[i].get_taxonomy() !=[]:\n# print(news_list[i].get_countryCode(), news_list[i].get_url())\n# print(news_list[i].get_taxonomy())\n# print('--'*10)\n\n#def test_fun(*args):\n# result = 0\n# for x in args:\n# result += x \n# return result\n#\n#class Animal:\n# def __init__(self,num_legs):\n# self.num_legs = num_legs\n# self.name = \n#animal_list = [1,2,3]" }, { "alpha_fraction": 0.5787234306335449, "alphanum_fraction": 0.6063829660415649, "avg_line_length": 23.128204345703125, "blob_id": "4305fb35baad1e212ca4e066336703098124c9b0", "content_id": "28db38e1671d62d885b4bd07a90865be4fbeed54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 940, "license_type": "no_license", "max_line_length": 82, "num_lines": 39, "path": "/json_tryout.py", "repo_name": "bkang003/UNGC", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 4 22:06:05 2018\n\n@author: BryanK\n\"\"\"\nimport json\n\nsimple = dict(int_list=[1, 2, 3],text='string',number=3.44,boolean=True,none=None)\nfrom datetime import datetime\n\nclass A(object):\n def __init__(self, simple):\n self.simple = simple \n def __eq__(self, other):\n if not hasattr(other, 'simple'):\n return False\n return self.simple == other.simple\n def __ne__(self, other):\n if not hasattr(other, 'simple'):\n return True\n return self.simple != other.simple\n \ncomplex = dict(a=A(simple), when=datetime(2016, 3, 7))\n\nprint(json.dumps(simple,indent=4))\n\ndef jdefault(o):\n return o.__dict__\n\nclass User(object):\n def __init__(self, name, password):\n self.name = name\n self.password = password\nalice = User('Alice A. 
Adams', 'secret')\n\na_str = json.dumps(alice, default=jdefault)\nprint(a_str)" }, { "alpha_fraction": 0.5517494678497314, "alphanum_fraction": 0.5619035959243774, "avg_line_length": 30.921875, "blob_id": "eef8268e55265526ca79960cf1ed63ca81db7007", "content_id": "901687efb83faff09f514adecfb6629cef4204ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8174, "license_type": "no_license", "max_line_length": 95, "num_lines": 256, "path": "/classDef.py", "repo_name": "bkang003/UNGC", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 29 11:23:17 2018\n\n@author: BryanK\n\"\"\"\n\n#import feedparser\nimport string\nimport time\nimport pandas as pd\nfrom glob import iglob\nimport os\n#import threading\n# from project_util import translate_html\n#from mtTkinter import *\nfrom datetime import datetime\nimport newspaper\nfrom newspaper import Article\n# os.chdir('/home/yy2891/indicator+GKG_2013_2018_RAW')\n# b=[x for x in glob.glob('*.csv')]\n\n#import pytz\n\n#======================\n# Code for retrieving and parsing\n# Google and Yahoo News feeds\n# Do not change this code\n#====================== \\\n\n\ndef process(df, *args): #input filename\n \"\"\"\n Fetches news items from export.csv file\n Returns a list of News.\n \"\"\"\n# try:\n# pubdate = datetime.strptime(pubdate, \"%a, %d %b %Y %H:%M:%S %Z\")\n# pubdate.replace(tzinfo=pytz.timezone(\"GMT\"))\n# # pubdate = pubdate.astimezone(pytz.timezone('EST'))\n# # pubdate.replace(tzinfo=None)\n# except ValueError:\n# pubdate = datetime.strptime(pubdate, \"%a, %d %b %Y %H:%M:%S %z\")\n\n# df = pd.read_csv('20131126.csv')\n \n ret = []\n for index,row in df.iterrows():\n if str(row['Actor1CountryCode']).upper() in args:\n gevent_id = row['GLOBALEVENTID']\n country_Code = row['Actor1CountryCode']\n tone = row['AvgTone']\n dateAdded = row['DATEADDED']\n url = row['SOURCEURL']\n \n news = News(gevent_id,country_Code,tone,dateAdded,url)\n ret.append(news)\n print('\\nThere are %d items in News.'% len(ret))\n return ret\n\n \nclass News:\n def __init__(self,gevent_id,countryCode,tone,dateAdded,url):\n self.gevent_id = gevent_id\n self.countryCode = countryCode\n self.tone = tone\n self.dateAdded = dateAdded\n self.url = url\n self.text = None\n self.publish_date = None\n self.taxonomy = []\n\n def get_gevent_id(self):\n return self.gevent_id\n def get_countryCode(self):\n return self.countryCode\n def get_tone(self):\n return self.tone\n def get_dateAdded(self):\n return self.dateAdded\n def get_url(self):\n return self.url\n def get_text(self):\n return self.text\n def get_publish_date(self):\n return self.publish_date\n def get_taxonomy(self):\n return self.taxonomy\n \n def set_taxonomy(self,taxonomy):\n self.taxonomy.append(taxonomy)\n \n def clean_text(self):\n article = Article(self.url)\n try:\n article.download()\n article.parse()\n self.text,self.publish_date = article.text, article.publish_date\n print('Success.')\n except:\n self.text,self.publish_date = None, None\n print('No text found.')\n\nclass Trigger:\n def evaluate(self, story):\n \"\"\"\n Returns True if an alert should be generated\n for the given news item, or False otherwise.\n \"\"\"\n # DO NOT CHANGE THIS!\n raise NotImplementedError\n \nclass PhraseTrigger(Trigger):\n def __init__(self, phrase):\n self.phrase = phrase\n def get_phrase(self):\n return self.phrase\n def evaluate(self, story):\n return self.is_phrase_in(story)\n \n def is_phrase_in(self, text):\n 
raw_text = text.lower()\n raw_phrase = self.phrase.lower()\n \n for char in raw_text:\n if char in string.punctuation:\n raw_text = raw_text.replace(char,' ')\n raw_list = raw_text.split()\n phrase_list = raw_phrase.split()\n \n if phrase_list[0] not in raw_list:\n return False\n else:\n temp_index = raw_list.index(phrase_list[0])\n return phrase_list == raw_list[temp_index:temp_index + len(phrase_list)]\n\nclass TextTrigger(PhraseTrigger):\n def __init__(self, phrase):\n PhraseTrigger.__init__(self, phrase)\n def get_phrase(self):\n return self.phrase\n def evaluate(self, story):\n return self.is_phrase_in(story.get_text())\n\nclass AndTrigger(Trigger):\n def __init__(self, *args):\n self.args = args\n \n def get_args(self):\n phrase_list = [arg.get_phrase() for arg in self.args]\n return '+'.join(phrase_list)\n def evaluate(self, story):\n true_list = [T.evaluate(story) for T in self.args]\n result = (True, False)[False in true_list]\n return result\n# return self.T1.evaluate(story) and self.T2.evaluate(story) and self.T3.evaluate(story)\n\nclass OrTrigger(Trigger):\n def __init__(self,*args):\n self.args = args\n def get_args(self):\n phrase_list = [arg.get_phrase() for arg in self.args]\n return '+'.join(phrase_list)\n def evaluate(self, story):\n true_list = [T.evaluate(story) for T in self.args]\n result = (False,True)[True in true_list]\n return result\n \nclass NotTrigger(Trigger):\n def __init__(self, T):\n self.T = T\n def get_T(self):\n return self.T\n def evaluate(self, story):\n return not self.T.evaluate(story)\n\n \ndef read_trigger_config(filename):\n \"\"\"\n filename: the name of a trigger configuration file\n\n Returns: a list of trigger objects specified by the trigger configuration\n file.\n \"\"\"\n trigger_file = open(filename, 'r')\n lines = []\n for line in trigger_file:\n line = line.rstrip()\n if not (len(line) == 0 or line.startswith('//')):\n lines.append(line)\n \n # TODO: Problem 11\n # line is the list of lines that you need to parse and for which you need\n # to build triggers\n# trigger_list = []\n trigger_dict = {}\n \n for line in lines:\n l_item = line.split('+')\n# if l_item[0] == 'ADD':\n# for item in l_item[1:]:\n# trigger_list.append(trigger_dict[item])\n if l_item[1] == 'TEXT':\n trigger_dict[l_item[0]] = TextTrigger(l_item[2])\n elif l_item[1] == 'AND':\n arg_tuple = tuple(TextTrigger(item) for item in l_item[2:])\n trigger_dict[l_item[0]] = AndTrigger(*arg_tuple)\n \n# if l_item[1] == 'TITLE':\n# trigger_dict[l_item[0]] = TitleTrigger(l_item[2]) \n# elif l_item[1] == 'DESCRIPTION':\n# trigger_dict[l_item[0]] = DescriptionTrigger(l_item[2])\n# elif l_item[1] == 'AFTER':\n# trigger_dict[l_item[0]] = AfterTrigger(l_item[2])\n# elif l_item[1] == 'BEFORE':\n# trigger_dict[l_item[0]] = BeforeTrigger(l_item[2])\n# elif l_item[1] == 'NOT':\n# T_not = trigger_dict[l_item[2]]\n# trigger_dict[l_item[0]] = NotTrigger(T_not)\n# elif l_item[1] == 'AND':\n# T_and1 = trigger_dict[l_item[2]]\n# T_and2 = trigger_dict[l_item[3]]\n# trigger_dict[l_item[0]] = AndTrigger(T_and1,T_and2)\n# elif l_item[1] == 'OR':\n# T_or1 = trigger_dict[l_item[2]]\n# T_or2 = trigger_dict[l_item[3]]\n# trigger_dict[l_item[0]] = OrTrigger(T_or1,T_or2)\n \n #print(lines) # for now, print it so you see what it contains!\n return trigger_dict \n \ndef filter_stories(stories,trigger_dict,num_line):\n \"\"\"\n Takes in a list of News instances.\n\n Returns: a list of only the stories for which a trigger in triggerlist fires.\n \"\"\"\n# trig_story = []\n temp_stories = stories[:num_line]\n 
for index, story in enumerate(temp_stories):\n print('\\n'+str(index),end=' ')\n print(story.get_gevent_id(),story.get_dateAdded())\n story.clean_text()\n if story.get_text() == None:\n pass\n else:\n for key,trig in trigger_dict.items():\n try:\n story.set_taxonomy((key,trig.get_args())) if trig.evaluate(story) \\\n else print('False',end=' ')\n except AttributeError:\n pass\n# print('Error occured',end=' ')\n #trig_story.append(story.get_text())\n# return trig_story\n\n\n" } ]
3
xhhjin/greproxy
https://github.com/xhhjin/greproxy
4809744506e035fe872846b165bcfcb75ec7562c
8787fb675b17c2bc8f5fe6463044e0e9f9784666
ee4f01e23fef5e709342e3b7e75287fd6bb759c6
refs/heads/master
2021-01-22T20:19:18.721325
2015-09-13T13:59:09
2015-09-13T13:59:09
42,397,724
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7814598083496094, "alphanum_fraction": 0.7916395664215088, "avg_line_length": 101.53333282470703, "blob_id": "589f20107e058e2206e6903ed64683c6ab5f63db", "content_id": "8a50ab40d37fd230c738a5f1151e23bf0a1332b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5255, "license_type": "no_license", "max_line_length": 711, "num_lines": 45, "path": "/README.md", "repo_name": "xhhjin/greproxy", "src_encoding": "UTF-8", "text": "A reverse proxy run on Appengine, tiny and fast. Especially fits small personal blogs.\n\n# Features:\n\n1.Using memcache to speed up the request. About than 90% of the request hits the memcache and the response delay can be as short as 10 millisecond.(用memcache加速)\n\n2.Employed administration page for controlling the memcache and view accesslog in a convenience way.(管理员控制缓存)\n\n3.Cache the POST data when connection between GAE and the source webhost goes wrong. And then rePOST the data manually later. This would protect user data, like comments to blog.(连接出错可缓存POST数据)\n\n4.A small addition widget for abbreviating url is employed.(一个山寨的缩短网址功能)\n\n5.可以用于在教育网内的站点向全球发布,也可用于反向代理国外网站。但仍常常受限于ghs的可靠性。\n\n\n\n# Setup Instructions: \n\n1.Download the files (4 files and a folder) from downloads page.\n\n2.Modify edu.py and cachecontrol.py in the beginning.\n\n3.Upload it to your Google Appengine, Do not forget to modify app-id.\n\n(下载,修改edu.py文件开头的源代码设置区,上传GAE;修改DNS,将主域名绑定到GAE,辅助的被代理域名指向被代理站。)\n\nNOTICE:The reverse proxy is written based on my wordpress blog, which has low viewers and tipically static. So websites with high trafic or with a lot of dynamic pages (i.e. forums) would significantly slow down the performance.(静态页比较有用,论坛之类很慢。)\n\nTipically you should set the source website 2 domains TARGET_URL_SHORTER and PROXY_SER_URL in your virtual host cpanel, and then point your dns server TARGET_URL_SHORTER to your virtual host, point PROXY_SER_URL to ghs.google.com(or 216.239.36.21, any google host IP).Here my source domain is edu.lostriver.net, my GAE domain is www.lostriver.net.(典型情况虚拟主机上要帮定2个域名,只有一个TARGET_URL_SHORTER(被代理网站)用DNS指过去,GAE绑主域名). Set APP_ID to access your website throught http(s)://APP_ID.appspot.com. But all these settings are not necessary. You could modify it by your own.\n\nAbout memcache: Google Appengine memcache have a relatively high performance - within 10 ms delay and very fast speed. Theoretically memcache would be kept forever. while during my trial , it could keeps about 1 day or several days. The expire time is set as long as possible as default. You can also modify the expire time by add a argument to the memcache.set() function. for example: edu.py:193: if not memcache.set(item, to_be_cached, 2600): means keep the memcache item for 1 hour.(memcache如果不设置过期一般也只有1天或几天。)\n\nDemo: Due to GFW, I deleted the dns item pointing to GAE, you could visit http://lostriver-net.appspot.com/ to see the performance.\n\nAbout administration: Visit http://APP_ID.appspot.com/cachecontrol.py or http://PROXY_SER_URL/cachecontrol.py。then login. Administrators did not use memcache, instead, they fetch from the source every time.\n\n The first textarea you type /pageid to view or delete the memcache, returns None when cache does not exist. Do not forget the prefix'/'. 'Clear' means to clear ALL THE MEMCACHE including pending posts mentioned below.(要管理某个网页缓存,只需要输入/pageid) \n\n Accesslog gives a quick but not so accurate view to recent access, in nearly Apache accesslog. 
It uses memcache to store the data. It is not encoded into HTML, so it does not display very well in some browsers; try viewing the page source in your browser. (NOW REMOVED FOR FASTER PERFORMANCE.) \n\n Click ViewPendingPost to list the POSTs that did not complete due to network errors. Click a number to retry that POST. DeletePendingPost deletes ALL PENDING POSTS. In any case, storing pending posts in memcache is not very stable; if possible I would switch to the datastore. (Incomplete POSTs are kept in memcache; click an entry to complete it manually.) \n\nShortlink: in the first input area you enter something like jiql, then paste the URI, including the \"http://\" prefix, into the textarea on the right. Visiting http://SHORTLINK_URL/jiql will then return the URI via a 302 redirect. Here I have http://s.lostriver.net/ Visiting a shortlink that does not exist shows a page with your shortlink history. (The two boxes take the short name and the target address including the http:// prefix; if someone visits a nonexistent address, the 404 page is the shortlink history.)\n\nPROBLEMS AND BUGS: There are many, many bugs in cachecontrol.py; anyway, it is not very important. The primary problem is the fetch server error, mainly caused by 3 reasons: the delay between the source site and GAE is too long, the bandwidth is too low, and, in addition, Google limits the fetched content to 1MB. All these problems have solutions now, but I have not implemented them. Large files could be broken into small pieces through the HTTP Range header; an RPC fetch service could fetch all the pieces, combine them into one item, cache it and serve it. I may accomplish this later, or never. If you are interested in the reverse proxy, you can join in and contribute. (A slow connection causes errors, as do responses over 1MB; this could be solved with segmented RPC requests - a direction for improvement.)\n\n\n\n" }, { "alpha_fraction": 0.5298869013786316, "alphanum_fraction": 0.5365508794784546, "avg_line_length": 45.312103271484375, "blob_id": "240a94951979cfadf707658ab6ce132b1a50f49b", "content_id": "2128b5158724f507d55e0a6f070f902377cb3c06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14936, "license_type": "no_license", "max_line_length": 213, "num_lines": 314, "path": "/edu.py", "repo_name": "xhhjin/greproxy", "src_encoding": "UTF-8", "text": "from google.appengine.ext import webapp\r\nfrom google.appengine.ext.webapp.util import run_wsgi_app\r\nimport logging\r\nimport datetime\r\nimport pickle\r\nimport re\r\nimport wsgiref.handlers\r\n\r\nfrom google.appengine.api import memcache\r\nfrom google.appengine.api import urlfetch\r\nfrom google.appengine.ext import webapp\r\nfrom google.appengine.ext import db\r\nfrom google.appengine.ext.webapp import template\r\nfrom google.appengine.api import urlfetch_errors\r\nfrom google.appengine.runtime import apiproxy_errors\r\nfrom google.appengine.api import memcache\r\nfrom google.appengine.api import users\r\n\r\n######################## Setting area below ########################\r\n\r\nTARGET_URL_SHORTER = \"edu.lostriver.net\" #the domain which GAE fetches from\r\nPROXY_SER_URL = \"www.lostriver.net\" #domain of your GAE app\r\nAPP_ID = \"lostriver-net\" #the app-id of GAE\r\nSHORTLINK_URL = \"s.lostriver.net\" #the short-links domain\r\n\r\n#Set whether to use MEMCACHE or not\r\nIF_USE_MEMCACHE = True\r\n\r\n#Set whether to force https when visiting through *.appspot.com\r\nIF_FORCE_HTTPS = False\r\n\r\n#Set which HTTP response statuses are to be cached. 
\r\nTO_BE_CACHED_STATUS = frozenset([\r\n 200,\r\n 301,\r\n 302,\r\n 404\r\n])\r\n######################## Setting area above ########################\r\n\r\nTARGET_URL = \"http://\" + TARGET_URL_SHORTER\r\nAPP_ID_HOST = APP_ID + \".appspot.com\"\r\n\r\n#Below are HEADERS already sent by Google\r\nIGNORE_RESPONSE_HEADERS = frozenset([\r\n 'cache-control',\r\n 'content-type',\r\n 'set-cookie',\r\n])\r\n\r\nclass Bot_Rule(webapp.RequestHandler):\r\n def get(self):\r\n self.response.headers[\"Content-Type\"] = \"text/plain\"\r\n self.response.out.write(\"User-agent: *\\r\\nDisallow: /\")\r\n\r\nclass Short_links(db.Model):\r\n url_short = db.StringProperty(required=True)\r\n url_redirect_to = db.StringProperty(required=True)\r\n count = db.IntegerProperty(required=True)\r\n create_time = db.DateTimeProperty(required=True)\r\n\r\nclass MainPage(webapp.RequestHandler):\r\n\r\n def loggingreq(self, response_status, response_content_length, if_use_cache):\r\n header_referer = ''\r\n header_user_agent = ''\r\n for name, address in self.request.headers.items():\r\n if name.lower() == \"referer\":\r\n header_referer = address\r\n elif name.lower() == \"user-agent\":\r\n header_user_agent = address\r\n request_get = {'ip_addr' : self.request.remote_addr,\r\n 'time' : datetime.datetime.now(),\r\n 'req_method' : self.request.environ[\"REQUEST_METHOD\"],\r\n 'req_url' : self.request.url,\r\n 'req_PROTOCOL' : self.request.environ[\"SERVER_PROTOCOL\"],\r\n 'resp_status' : response_status,\r\n 'resp_length' : response_content_length,\r\n 'referer' : header_referer,\r\n 'user_agent' : header_user_agent,\r\n 'if_cache' : if_use_cache\r\n }\r\n count = memcache.get('AccessLogNo')\r\n if count is not None:\r\n memcache.incr('AccessLogNo')\r\n count = count + 1\r\n else:\r\n memcache.set('AccessLogNo', 1)\r\n count = 1\r\n memcache.set(\"AccessLogNo\" + repr(count), request_get)\r\n \r\n def myError(self, status, description):\r\n # header\r\n self.response.set_status(500, None)\r\n # body\r\n content = '<h1>Oops!</h1><p>Error Code: %d<p>Message: <br><br>%s' % (status, description)\r\n self.response.out.write(content)\r\n# self.loggingreq(500, 0, False)\r\n\r\n def get_cached_response(self, item):\r\n modified_response_content = {}\r\n modified_response_content = memcache.get(item)\r\n if modified_response_content is not None:\r\n if_content = True\r\n for name, address in self.request.headers.items():\r\n for name2, address2 in modified_response_content.items():\r\n if name.lower() == \"if-none-match\" \\\r\n and name2.lower() == \"etag\" \\\r\n and address == address2:\r\n if_content = False\r\n modified_response_content[\"code\"] = 304\r\n self.content_response(modified_response_content, if_content)\r\n '''self.loggingreq(modified_response_content[\"code\"], \\\r\n len(modified_response_content[\"main_content\"]), \\\r\n True)'''\r\n return True\r\n else:\r\n return False\r\n\r\n def fetch_content(self, fetch_method, item, modified_request_headers):\r\n url = TARGET_URL + item #self.request.path_qs\r\n for _ in range(3):\r\n try:\r\n result = urlfetch.fetch(url=url,\r\n payload=self.request.body,\r\n method=fetch_method,\r\n headers = modified_request_headers,\r\n allow_truncated = False,\r\n follow_redirects = False,\r\n deadline = 10)\r\n break\r\n except urlfetch_errors.ResponseTooLargeError:\r\n self.myError(500, 'Fetch server error. Sorry, this is Google\'s limit: file size up to 1MB.')\r\n return\r\n except Exception:\r\n continue\r\n else:\r\n self.myError(500, '''INTERNAL ERROR <br>There is something wrong 
at present. But you may just <A href=\"#\" onclick=\"window.location.reload()\">click here to refresh</a> the page and it would be OK!<br>\r\n <br>读取服务器失败<br>出问题了。。。可能由于网络繁忙所致,<A href=\"#\" onclick=\"window.location.reload()\">点此刷新</a>一下就好!<br>\r\n <br><img src=\"http://code.google.com/appengine/images/appengine-silver-120x30.gif\" alt=\"由 Google App Engine 提供支持\" />''')\r\n return\r\n modified_response_headers = result.headers\r\n modified_response_headers[\"code\"] = result.status_code\r\n modified_response_headers[\"main_content\"] = result.content \r\n return modified_response_headers\r\n\r\n\r\n def content_response(self, response_content, if_content):\r\n self.response.set_status(response_content[\"code\"], None)\r\n #Set-Cookie. Refer to Solrex.cn in solving the muti-cookie problem in GAppProxy\r\n for name, address in response_content.items():\r\n if name.lower() == \"set-cookie\":\r\n scs = re.sub(r',([^,;]+=)', r'\\n\\1', address).split('\\n')\r\n for sc in scs:\r\n if self.request.host == APP_ID_HOST:\r\n self.response.headers.add_header(\"Set-Cookie\", re.sub(PROXY_SER_URL, \\\r\n APP_ID_HOST, \\\r\n sc.strip()))\r\n else:\r\n self.response.headers.add_header(\"Set-Cookie\", sc.strip())\r\n #Modify and add HEADERS\r\n if name.lower()!=\"main_content\" and name.lower()!=\"code\":\r\n if name.lower()==\"location\":\r\n address = re.sub(TARGET_URL_SHORTER, \\\r\n self.request.environ[\"HTTP_HOST\"], address)\r\n if self.request.host == APP_ID_HOST:\r\n if self.request.environ[\"SERVER_PORT\"] == \"443\":\r\n address = re.sub(\"http://\" + PROXY_SER_URL, \\\r\n \"https://\" + APP_ID_HOST, \\\r\n address)\r\n address = re.sub(PROXY_SER_URL, \\\r\n APP_ID_HOST, address)\r\n else:\r\n address = address\r\n if name.lower() not in IGNORE_RESPONSE_HEADERS:\r\n self.response.headers.add_header(name, address)\r\n elif name.lower() != 'set-cookie':\r\n self.response.headers[name] = address\r\n #Main content is here\r\n if if_content:\r\n if self.request.host == APP_ID_HOST:\r\n if self.request.environ[\"SERVER_PORT\"] == \"443\":\r\n response_content[\"main_content\"] = re.sub(\"http://\" + PROXY_SER_URL, \\\r\n \"https://\" + APP_ID_HOST, \\\r\n response_content[\"main_content\"])\r\n response_content[\"main_content\"] = re.sub(PROXY_SER_URL, \\\r\n APP_ID_HOST, \\\r\n response_content[\"main_content\"])\r\n #Strip DRIP Host ADs\r\n '''response_content[\"main_content\"]=re.sub( \\\r\n r\"<CENTER>[\\s\\S]*?</CENTER>\", '', response_content[\"main_content\"])\r\n response_content[\"main_content\"]=re.sub( \\\r\n r\"<CENTER>[\\s\\S]*?</CENTER>\", '', response_content[\"main_content\"])'''\r\n self.response.out.write(response_content[\"main_content\"])\r\n\r\n def cache_content(self, item, to_be_cached):\r\n if to_be_cached[\"code\"] in TO_BE_CACHED_STATUS:\r\n if not memcache.set(item, to_be_cached):\r\n logging.error(\"Memcache set failed.\")\r\n\r\n def head(self, base_url):\r\n fetched_content = self.fetch_content(urlfetch.HEAD, self.request.path_qs, self.request.headers)\r\n if fetched_content is not None:\r\n self.content_response(fetched_content, 0)\r\n\r\n def post(self, base_url):\r\n fetched_content = self.fetch_content(urlfetch.POST, self.request.path_qs, self.request.headers)\r\n if fetched_content is not None:\r\n self.content_response(fetched_content, 1)\r\n if (fetched_content[\"code\"] is 200) and IF_USE_MEMCACHE:\r\n for name, address in self.request.headers.items():\r\n if name.lower() == \"referer\":\r\n item = re.sub(r'(^http://)' + PROXY_SER_URL + '/' , '/', address)\r\n 
#refresh the memcache of referer page\r\n #often do not work very well due to the structure of site.\r\n result = self.fetch_content(urlfetch.GET, item, {})\r\n if result is not None:\r\n self.cache_content(item ,result)\r\n# self.loggingreq(fetched_content[\"code\"], len(fetched_content[\"main_content\"]), False)\r\n else:\r\n count = memcache.get(\"pending_post_no\")\r\n if count is not None:\r\n memcache.incr('pending_post_no')\r\n count = count + 1\r\n else:\r\n #Store pending post(incomplete fetch due to network problems) in memcache\r\n memcache.set('pending_post_no', 1)\r\n count = 1\r\n pending_post = {'ip_addr' : self.request.remote_addr,\r\n 'time' : datetime.datetime.now(),\r\n 'req_url' : self.request.path_qs,\r\n 'content' : self.request.body\r\n }\r\n memcache.set(\"pending_post_no\" + repr(count) + \"info\", pending_post)\r\n pending_post_headers = {}\r\n for name, address in self.request.headers.items():\r\n pending_post_headers[name] = address\r\n memcache.set(\"pending_post_no\" + repr(count) + \"headers\", \\\r\n pending_post_headers)\r\n\r\n \r\n def get(self, base_url):\r\n if ((self.request.host == SHORTLINK_URL) or \\\r\n (self.request.host == \"s.\" + APP_ID + \".appspot.com\")):\r\n short_item = db.get(db.Key.from_path('Short_links', 'N:%s' % self.request.path_qs))\r\n if not short_item:\r\n self.response.set_status(404, None)\r\n not_found_error_cont = \\\r\n'''<html><head>\r\n<meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\">\r\n<title>ShortLinks - 404 - %s</title>\r\n<body>\r\n<h1>Shared by %s<br></h1>\r\n<h3>404 Not Found<br></h3>\r\nSorry but what you are looking for does not exist.<br>\r\nVisit home page <a href=\"%s\">%s</a><br>\r\n<hr>\r\n<center>History</center>\r\n<table width=\"800\" border=\"1\" align=\"center\">\r\n<tr><td>Share link</td><td>Total click</td><td>Create time(UTC)</td></tr>\r\n''' % (APP_ID, APP_ID, \"http://\" + PROXY_SER_URL, \"http://\" + PROXY_SER_URL)\r\n\t self.response.out.write(not_found_error_cont)\r\n q = db.GqlQuery(\"select * from Short_links order by create_time desc\")\r\n results = q.fetch(1000)\r\n for r in results:\r\n self.response.out.write('''<tr><td><a rel=\"nofollow\" target=\"_blank\" href=\"''' \\\r\n + r.url_short + '\">' + r.url_redirect_to + \\\r\n '</a></td>')\r\n self.response.out.write('<td>' + str(r.count) + '</td>')\r\n self.response.out.write('<td>' + str(r.create_time) + '</td></tr>')\r\n self.response.out.write('''</table><br><center>CopyRight2009 <font color=blue>lostriver.net</font><br>\r\n\t\t\t\t\tPowered by <a rel=\"nofollow\" target=\"_blank\" href=\"http://code.google.com/intl/zh-CN/appengine/\">Google AppEngine</a></center></body></html>''')\r\n else:\r\n self.response.out.write('''<html><head><title>Redirecting...</title></head>\r\n<body><h1>Redirecting...</h1><script type=\"text/javascript\"><!--\r\nwindow.parent.location = \"%s\"\r\n//--></script></body></html>''' % short_item.url_redirect_to)\r\n short_item.count +=1\r\n short_item.put()\r\n# self.loggingreq(302, 0, False)\r\n elif self.request.host == APP_ID_HOST and self.request.environ[\"SERVER_PORT\"] == \"80\" and IF_FORCE_HTTPS:\r\n self.redirect(\"https://\" + APP_ID_HOST + \"/\")\r\n else:\r\n item = self.request.path_qs\r\n# memcache.flush_all()\r\n if (not users.is_current_user_admin()) and IF_USE_MEMCACHE:\r\n if not self.get_cached_response(item):\r\n fetched_content = self.fetch_content(urlfetch.GET, item, {})\r\n if fetched_content is not None: \r\n self.content_response(fetched_content, True)\r\n 
self.cache_content(item, fetched_content)\r\n '''self.loggingreq(fetched_content[\"code\"], \\\r\n len(fetched_content[\"main_content\"]), \\\r\n False)'''\r\n else:\r\n fetched_content = self.fetch_content(urlfetch.GET ,item, self.request.headers)\r\n if fetched_content is not None:\r\n if fetched_content[\"code\"] == 304:\r\n self.content_response(fetched_content, False)\r\n else:\r\n self.content_response(fetched_content, True)\r\n '''self.loggingreq(fetched_content[\"code\"], \\\r\n len(fetched_content[\"main_content\"]), \\\r\n False)'''\r\n\r\napplication = webapp.WSGIApplication([\r\n (r\"([\\s\\S]*)\", MainPage)\r\n ], debug=True)\r\n\r\ndef main():\r\n run_wsgi_app(application)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n" }, { "alpha_fraction": 0.5561703443527222, "alphanum_fraction": 0.5600994229316711, "avg_line_length": 43.51459884643555, "blob_id": "1fd51f7855f826b7386aa34126adb106410b3f3f", "content_id": "d8cadf2aafb26aa8eff0f6b67530f6d85986b5f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12551, "license_type": "no_license", "max_line_length": 213, "num_lines": 274, "path": "/cachecontrol.py", "repo_name": "xhhjin/greproxy", "src_encoding": "UTF-8", "text": "from google.appengine.ext import webapp\r\nfrom google.appengine.ext.webapp.util import run_wsgi_app\r\nimport logging\r\nimport datetime\r\nimport pickle\r\nimport re\r\nimport wsgiref.handlers\r\n\r\nfrom google.appengine.api import memcache\r\nfrom google.appengine.api import urlfetch\r\nfrom google.appengine.ext import webapp\r\nfrom google.appengine.ext import db\r\nfrom google.appengine.ext.webapp import template\r\nfrom google.appengine.api import urlfetch_errors\r\nfrom google.appengine.runtime import apiproxy_errors\r\nfrom google.appengine.api import memcache\r\nfrom google.appengine.api import users\r\n\r\n#Modify below\r\n\r\nTARGET_URL_SHORTER = \"edu.lostriver.net\"\r\nTARGET_URL = \"http://\" + TARGET_URL_SHORTER\r\nPROXY_SER_URL = \"www.lostriver.net\"\r\nSHORTLINK_URL = \"http://s.lostriver.net/\"\r\n\r\n#Modify above\r\n\r\nFIXED_RESPONSE_HEADERS = frozenset([\r\n 'last-modified',\r\n 'content-type',\r\n 'x-pingback'\r\n])\r\nFIXED_REQUEST_HEADERS = frozenset([\r\n 'host',\r\n 'if-none-match',\r\n 'if-modified-since',\r\n])\r\n\r\n#Below are HEADERS already sent by Google\r\nIGNORE_RESPONSE_HEADERS = frozenset([\r\n 'cache-control',\r\n 'content-type',\r\n 'set-cookie',\r\n])\r\n\r\nclass Short_links(db.Model):\r\n url_short = db.StringProperty(required=True)\r\n url_redirect_to = db.StringProperty(required=True)\r\n count = db.IntegerProperty(required=True)\r\n create_time = db.DateTimeProperty(required=True)\r\n\r\nmodified_request_headers = {}\r\n\r\nclass MainPage(webapp.RequestHandler):\r\n\r\n def myError(self, status, description):\r\n # header\r\n self.response.set_status(500, None)\r\n # body\r\n content = '<h1>Oops!</h1><p>Error Code: %d<p>Message: <br><br>%s' % (status, description)\r\n self.response.out.write(content)\r\n\r\n def fetch_content(self, fetch_method, item, modified_request_headers, modified_request_body):\r\n url = TARGET_URL + item #self.request.path_qs\r\n for _ in range(3):\r\n try:\r\n result = urlfetch.fetch(url=url,\r\n payload=modified_request_body,\r\n method=fetch_method,\r\n headers = modified_request_headers,\r\n allow_truncated = False,\r\n follow_redirects = False)\r\n break\r\n except urlfetch_errors.ResponseTooLargeError:\r\n self.myError(500, 'Fetch server error. Sorry, this is Google\'s limit: file 
size up to 1MB.')\r\n return\r\n except Exception:\r\n continue\r\n else:\r\n self.myError(500, '''INTERNAL ERROR <br>There is something wrong at present. But you may just <A href=\"#\" onclick=\"window.location.reload()\">click here to refresh</a> the page and it would be OK!<br>\r\n <br>读取服务器失败<br>出问题了。。。可能由于网络繁忙所致,<A href=\"#\" onclick=\"window.location.reload()\">点此刷新</a>一下就好!<br>\r\n <br><img src=\"http://code.google.com/appengine/images/appengine-silver-120x30.gif\" alt=\"由 Google App Engine 提供支持\" />''')\r\n return\r\n modified_response_headers = result.headers\r\n modified_response_headers[\"code\"] = result.status_code\r\n modified_response_headers[\"main_content\"] = result.content \r\n return modified_response_headers\r\n\r\n def post(self, base_url):\r\n user = users.get_current_user()\r\n if users.is_current_user_admin():\r\n if \"cached_url\" in self.request.POST:\r\n url = TARGET_URL + self.request.POST[\"cached_url\"]\r\n \r\n modified_request_headers = {}\r\n if self.request.POST[\"op\"] == \"View\":\r\n self.response.out.write(\"<!-- content\")\r\n self.response.out.write(memcache.get(self.request.POST[\"cached_url\"]))\r\n self.response.out.write(\" -->\")\r\n \r\n elif self.request.POST[\"op\"] == \"Del\":\r\n for name in FIXED_RESPONSE_HEADERS:\r\n if not memcache.delete(self.request.POST[\"cached_url\"]):\r\n logging.error(\"Memcache delete failed.\")\r\n self.response.out.write(\"Memcache delete failed.\")\r\n if not memcache.delete(self.request.POST[\"cached_url\"] + \"content\"):\r\n logging.error(\"Memcache delete failed.\")\r\n self.response.out.write(\"Memcache delete failed.\")\r\n else:\r\n self.response.out.write(\"Memcache delete Success!\")\r\n \r\n elif self.request.POST[\"op\"] == \"Clear\":\r\n if not memcache.flush_all():\r\n logging.error(\"Memcache clear failed.\")\r\n self.response.out.write(\"Memcache clear failed.\")\r\n else:\r\n self.response.out.write(\"Memcache clear Success!\")\r\n \r\n elif self.request.POST[\"op\"] == \"ViewAccessLog\":\r\n self.response.headers.add_header(\"Content-Type\", \"text/plain\")\r\n total_count = memcache.get(\"AccessLogNo\")\r\n count_no = []\r\n for count in range(1, total_count + 1):\r\n count_no.append(str(count))\r\n access_all = memcache.get_multi(count_no, key_prefix='AccessLogNo')\r\n# self.response.out.write(access_all)\r\n for count in range(1, total_count + 1):\r\n try:\r\n access_entry = access_all[str(count)]\r\n# self.response.out.write(access_entry)\r\n for name, address in access_entry.items():\r\n self.response.out.write(' ')\r\n self.response.out.write(address)\r\n except:\r\n self.response.out.write(\"Accesslog Read Error\")\r\n self.response.out.write(\"\\r\\n\")\r\n \r\n elif self.request.POST[\"op\"] == \"DeleteAccessLog\":\r\n if memcache.delete(\"AccessLogNo\") == 2:\r\n self.response.out.write(\"AccessLog delete successful!\")\r\n else:\r\n self.response.out.write(\"AccessLog delete Error\")\r\n \r\n elif self.request.POST[\"op\"] == \"ViewPendingPost\":\r\n self.response.out.write('''<form action=\"cachecontrol.py\" method=\"post\">''')\r\n for count in range(1, memcache.get(\"pending_post_no\") + 1):\r\n self.response.out.write('''<input type=\"Submit\" name=\"op\" value=\"''')\r\n self.response.out.write(count)\r\n self.response.out.write('''\" style=\"width:40px;\"/><br>''')\r\n access_entry = memcache.get(\"pending_post_no\" + repr(count) + \"info\")\r\n if access_entry is not None:\r\n for name, address in access_entry.items():\r\n self.response.out.write(' ')\r\n 
self.response.out.write(address)\r\n self.response.out.write(\"<br>\\r\\n\")\r\n self.response.out.write('''</form>''')\r\n \r\n elif self.request.POST[\"op\"] == \"DeletePendingPost\":\r\n if memcache.delete(\"pending_post_no\") == 2:\r\n self.response.out.write(\"PendingPost delete successful!\")\r\n else:\r\n self.response.out.write(\"PendingPost delete Error\")\r\n \r\n elif self.request.POST[\"op\"] == \"ViewShortLinks\":\r\n q = db.GqlQuery(\"select * from Short_links order by create_time desc\")\r\n results = q.fetch(1000)\r\n for r in results:\r\n self.response.out.write(r.url_short + \" \" + r.url_redirect_to + \" \")\r\n self.response.out.write(r.count)\r\n self.response.out.write(\" \")\r\n self.response.out.write(r.create_time)\r\n self.response.out.write(\"<br>\")\r\n\r\n elif self.request.POST[\"op\"] == \"DeleteShortLinks\":\r\n short_item = db.get(db.Key.from_path('Short_links', 'N:/%s' % self.request.POST[\"url_short\"]))\r\n if short_item is not None:\r\n short_item.delete()\r\n self.response.out.write(\"DeleteShortLink Success!\")\r\n else:\r\n self.response.out.write(\"ShortLink Delete Error\")\r\n\r\n\r\n elif self.request.POST[\"op\"] == \"CreateShortLinks\":\r\n short_item = Short_links(url_short = '/' + self.request.POST[\"url_short\"], \\\r\n url_redirect_to = self.request.POST[\"url_redirect_to\"], \\\r\n count = 0, \\\r\n create_time = datetime.datetime.now(), \\\r\n key_name='N:/%s' % self.request.POST[\"url_short\"])\r\n short_item.put()\r\n self.response.out.write(\"Set shortlink success\")\r\n\r\n elif self.request.POST[\"op\"] == \"ViewAShortlink\":\r\n self.response.headers.add_header(\"Content-Type\", \"text/plain\")\r\n total_count = memcache.get(\"AccessLogNo\")\r\n count_no = []\r\n for count in range(1, total_count + 1):\r\n count_no.append(str(count))\r\n access_all = memcache.get_multi(count_no, key_prefix='AccessLogNo')\r\n# self.response.out.write(access_all)\r\n for count in range(1, total_count + 1):\r\n try:\r\n access_entry = access_all[str(count)]\r\n# self.response.out.write(access_entry)\r\n if access_entry[\"req_url\"] == SHORTLINK_URL + self.request.POST[\"url_short\"]:\r\n for name, address in access_entry.items():\r\n self.response.out.write(' ')\r\n self.response.out.write(address)\r\n except:\r\n self.response.out.write(\"Accesslog Read Error\")\r\n self.response.out.write(\"\\r\\n\")\r\n\r\n elif int(self.request.POST[\"op\"]) in range(100):\r\n count = self.request.POST[\"op\"]\r\n modified_request_content = memcache.get(\"pending_post_no\" + count + \"info\")\r\n modified_request_headers = memcache.get(\"pending_post_no\" + count + \"headers\")\r\n self.response.out.write(modified_request_content['req_url'] + \"<br>\")\r\n if modified_request_headers is not None:\r\n if modified_request_content is not None:\r\n post_result = self.fetch_content(urlfetch.POST, modified_request_content['req_url'], \\\r\n modified_request_headers, modified_request_content['content'])\r\n if post_result:\r\n memcache.delete(\"pending_post_no\" + count + \"info\")\r\n self.response.out.write(\"POST Successful!\")\r\n self.response.out.write(post_result)\r\n self.response.out.write(\"\\r\\n\")\r\n self.response.out.write(memcache.get_stats())\r\n \r\n else:\r\n self.redirect(\"/cachecontrol.py\")\r\n \r\n def get(self, base_url):\r\n user = users.get_current_user()\r\n if users.is_current_user_admin():\r\n self.response.out.write(\r\n '''<html><body>\r\n <form action=\"cachecontrol.py\" method=\"post\">\r\n <input type=\"text\" name=\"cached_url\" 
value=\"\" style=\"width:500px;\"/><br>\r\n <input type=\"submit\" name=\"op\" value=\"View\" />\r\n <input type=\"submit\" name=\"op\" value=\"Del\" />\r\n <input type=\"submit\" name=\"op\" value=\"Clear\" />\r\n <br>\r\n <input type=\"submit\" name=\"op\" value=\"ViewAccessLog\" />\r\n <input type=\"submit\" name=\"op\" value=\"DeleteAccessLog\" />\r\n <br>\r\n <input type=\"submit\" name=\"op\" value=\"ViewPendingPost\" />\r\n <input type=\"submit\" name=\"op\" value=\"DeletePendingPost\" />\r\n <br>\r\n <input type=\"text\" name=\"url_short\" value=\"\" style=\"width:100px;\"/>\r\n <input type=\"text\" name=\"url_redirect_to\" value=\"\" style=\"width:500px;\"/><br>\r\n <input type=\"submit\" name=\"op\" value=\"CreateShortLinks\" />\r\n <input type=\"submit\" name=\"op\" value=\"ViewShortLinks\" />\r\n <input type=\"submit\" name=\"op\" value=\"DeleteShortLinks\" />\r\n <input type=\"submit\" name=\"op\" value=\"ViewAShortlink\" />\r\n </form>\r\n ''')\r\n self.response.out.write(\"Welcome, %s! (<a href=\\\"%s\\\">sign out</a>)\" %\r\n (user.nickname(), users.create_logout_url(\"/\")))\r\n self.response.out.write(\"<br><a href=\\\"%s\\\">Sign in or register</a>.</body></html>\" %\r\n users.create_login_url(\"/cachecontrol.py\"))\r\n else:\r\n self.response.out.write(\"<a href=\\\"%s\\\">Sign in or register</a>.\" %\r\n users.create_login_url(\"/cachecontrol.py\"))\r\n\r\n \r\n\r\napplication = webapp.WSGIApplication([\r\n (r\"([\\s\\S]*)\", MainPage)\r\n ], debug=True)\r\n\r\ndef main():\r\n run_wsgi_app(application)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n" } ]
3
scaryrawr/vtt-generator
https://github.com/scaryrawr/vtt-generator
3882c475f4b3f613477778098d8f4c1269fbbb34
6e8f9d991575f780f39192148ca5f6bf4c4cafa2
917f875fbd372b7256e3c465dc90f9b26b747b7a
refs/heads/main
2021-08-29T05:21:26.025290
2020-04-12T04:24:19
2020-04-12T04:24:19
254,254,466
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.7278003692626953, "alphanum_fraction": 0.7527568340301514, "avg_line_length": 36.456520080566406, "blob_id": "3ce89da816e19e4035a0af775a75bf76cf8c3029", "content_id": "f9b6f22edb9c8831e11227dfe2b2793ac0807cf9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1723, "license_type": "permissive", "max_line_length": 186, "num_lines": 46, "path": "/README.md", "repo_name": "scaryrawr/vtt-generator", "src_encoding": "UTF-8", "text": "# VTT Generator\n\nGenerates VTT files for a video using FFMPEG and Azure Cognitive Services.\n\n## Requirements\n\n- [ffmpeg](https://www.ffmpeg.org)\n - Linux, use your package manager\n - Mac, can use [brew](https://formulae.brew.sh/formula/ffmpeg)\n - Windows, can use [chocolatey](https://chocolatey.org/packages/ffmpeg)\n- `pip3 install ffmpeg-python`\n- `pip3 install azure-cognitiveservices-speech`\n- [Azure Cognitive Services Speech](https://azure.microsoft.com/en-us/services/cognitive-services/speech-to-text/)\n\nCopy config.example.yml to config.yml and fill with your Azure Information:\n\n```yml\nkey: azurekey\nregion: azureregion\n```\n\n## Run\n\n`./vtt-gen.py --input path/to/video --output path/to/subtiles.vtt`\n\nVTT file tested with [VLC](https://www.videolan.org/vlc/index.html) and [GNOME Videos](https://wiki.gnome.org/Apps/Videos)\n\n## Validate\n\n- Generation is automatic, so please proofread the output\n- Check the file output is valid using [Live WebVTT Validator](https://quuz.org/webvtt/)\n\n## WebVTT Documentation\n\nThe output of this script is very basic, but can be customized, please see the following links for working with the vtt file.\n\n- [MDN WebVTT](https://developer.mozilla.org/en-US/docs/Web/API/WebVTT_API)\n- [W3 WebVTT](https://www.w3.org/TR/webvtt1/)\n\n## Video Services Caption Files\n\nLinks to web pages for helping upload caption files:\n\n- [Vimeo](https://vimeo.zendesk.com/hc/en-us/articles/224968828-Captions-and-subtitles)\n- [YouTube](https://support.google.com/youtube/answer/2734796?hl=en&ref_topic=7296214)\n- [Facebook](https://www.facebook.com/help/261764017354370) - Requires SRT file, can use [amorvincitomnia/vtt-to-srt.py](https://github.com/amorvincitomnia/vtt-to-srt.py) or another tool\n" }, { "alpha_fraction": 0.6570862531661987, "alphanum_fraction": 0.6724479794502258, "avg_line_length": 30.5390625, "blob_id": "51fe2862ad86ce1d8ae2eb899ad7b2f506cecc23", "content_id": "ab97e64c7d3d00f2a1010cc9b4fa83643b1c9167", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4036, "license_type": "permissive", "max_line_length": 104, "num_lines": 128, "path": "/vtt-gen.py", "repo_name": "scaryrawr/vtt-generator", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\nimport argparse\nimport contextlib\nimport json\nimport math\nimport os\nimport time\nimport wave\n\nimport azure.cognitiveservices.speech as azurespeech\nimport ffmpeg\nimport yaml\n\nparser = argparse.ArgumentParser(description='Generate VTT for video file')\nparser.add_argument('--input', type=str, help='input video file')\nparser.add_argument('--output', type=str, help='output file')\nparser.add_argument('--maxlinetime', type=float, help='max line time in seconds', default=2.5)\n\nargs = parser.parse_args()\n\n# Convert to PCM format\naudio_file = args.output + '.wav'\nffmpeg.input(args.input).output(audio_file).overwrite_output().run()\n\nwith contextlib.closing(wave.open(audio_file, 'r')) as wave_file:\n 
duration = wave_file.getnframes() / wave_file.getframerate()\n\nconfigPath = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.yml')\nwith open(configPath, 'r') as config_file:\n    config = yaml.load(config_file, Loader=yaml.SafeLoader)\n\noutfile = open(args.output, 'w')\n\n# Add file header\noutfile.write('WEBVTT\\n\\n')\ndone = False\n\ndef format_timestamp(ticks):\n    seconds = ticks / 10000000.0\n    hours = int(seconds // 60 // 60)\n    seconds -= hours * 60 * 60\n    minutes = int(seconds // 60)\n    seconds -= minutes * 60\n    return '{:02d}:{:02d}:{:06.3f}'.format(hours, minutes, seconds)\n\ndef stop_cb(evt):\n    global speech_recognizer\n    speech_recognizer.stop_continuous_recognition()\n\n    global outfile\n    outfile.close()\n    print()\n\n    global done\n    done = True\n\nsequence = 0\n\ndef write_chunk(chunk):\n    global sequence\n    global outfile    \n    sequence += 1\n    timeline = '{} --> {}'.format(format_timestamp(chunk['start']), format_timestamp(chunk['end']))\n    outfile.write('{}\\n{}\\n{}\\n\\n'.format(sequence, timeline, chunk['text'].strip()))\n\nmax_chunk_length = args.maxlinetime * 10000000.0\ndef recognized_cb(evt):\n    global max_chunk_length\n\n    # Need to load results from json to be able to get offset and duration\n    result = json.loads(evt.result.json)\n\n    # Offset is start time\n    chunk = {}\n    chunk['start'] = result['Offset']\n    chunk['text'] = ''\n    \n    confidences_in_nbest = [item['Confidence'] for item in result['NBest']]\n    best_index = confidences_in_nbest.index(max(confidences_in_nbest))\n\n    # Assuming words and display words length are the same...\n    words = result['NBest'][best_index]['Words']\n    display_words = result['DisplayText'].split(' ')\n\n    end_time = result['Duration'] + result['Offset']\n\n    # Min of half a second or the set max chunk length\n    min_chunk_trail = min(max_chunk_length, 10000000.0 / 2)\n    last_end = 0\n    for i in range(len(words)):\n        word = words[i]\n        chunk['end'] = word['Offset'] + word['Duration']\n        chunk['text'] = f\"{chunk['text']} {display_words[i]}\"\n        # If there are tiny bits of text at the end, just include them in the previous line\n        if ((chunk['end'] - chunk['start'] > max_chunk_length) and\n                (end_time - chunk['end'] >= min_chunk_trail)):\n            write_chunk(chunk)\n            chunk['start'] = chunk['end']\n            chunk['text'] = ''\n            last_end = chunk['end']\n\n    # the last bit of text might not be a \"full\" line\n    if (last_end != chunk['end']):\n        write_chunk(chunk)\n\n    # Add the duration to get the end time\n    end_pos = result['Offset'] + result['Duration']\n    global duration\n    print('\\rProgress: {:.2f}%'.format(end_pos / 100000.0 / duration), end='')\n\nspeech_config = azurespeech.SpeechConfig(subscription=config['key'], region=config['region'])\nspeech_config.request_word_level_timestamps()\n\naudio_config = azurespeech.audio.AudioConfig(filename=audio_file)\n\nspeech_recognizer = azurespeech.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config)\n\nspeech_recognizer.recognized.connect(recognized_cb)\nspeech_recognizer.canceled.connect(stop_cb)\nspeech_recognizer.speech_end_detected.connect(stop_cb)\n\nspeech_recognizer.start_continuous_recognition()\n\nwhile not done:\n    time.sleep(.5)\n\nos.remove(audio_file)" } ]
2
mikeskaug/ridges
https://github.com/mikeskaug/ridges
9558a3f9e8c327f4538dd53e93954d6e31b410a6
b31aa5a3a8e7ffdb0bd34356efb7b872c835b5bf
74be83cf15752ffb334a306b739714b120a70698
refs/heads/master
2021-07-13T11:51:35.040839
2020-07-22T03:35:13
2020-07-22T03:35:13
183,854,220
6
0
null
2019-04-28T04:08:33
2020-05-29T04:08:16
2020-05-29T04:08:52
Jupyter Notebook
[ { "alpha_fraction": 0.5916993021965027, "alphanum_fraction": 0.5993735194206238, "avg_line_length": 31.917526245117188, "blob_id": "507972b3ee9c8e5e8bc8932d8e324fa55585a581", "content_id": "63b73ec7f07f579e7ac5e1d38e7fc4af7ad6455b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6385, "license_type": "no_license", "max_line_length": 107, "num_lines": 194, "path": "/src/dataset.py", "repo_name": "mikeskaug/ridges", "src_encoding": "UTF-8", "text": "import os\n\nfrom PIL import Image\nimport numpy as np\nfrom tensorflow.keras.utils import Sequence\n\nfrom trowel import utils\n\nfrom data import url_to_path, download_tile, decode_elevation\nfrom config import TERRAIN_BASE_URL, ELEVATION_PNG_DIR, DEFAULT_ZOOM, IMAGE_SIZE\nfrom transforms import ridges\n\n\ndef harvest_tiles(bounds, zoom=DEFAULT_ZOOM, out_dir=ELEVATION_PNG_DIR):\n '''\n Download all the terrain tiles covering the bounding box defined by bounds and save to out_dir\n '''\n\n tiles = utils.bbox_to_tiles(*bounds, zoom)\n for tile in tiles:\n url = os.path.join(TERRAIN_BASE_URL, str(tile[0]), str(tile[1]), str(tile[2])) + '.png'\n download_tile(url, destination=out_dir)\n\n\ndef png_to_tif(png_dir, tif_dir):\n '''\n Convert the RGB elevation tiles into grayscale tif files\n '''\n for fl in os.listdir(png_dir):\n png = Image.open(os.path.join(png_dir, fl))\n\n elevation = decode_elevation(np.array(png))\n\n grayscale = Image.fromarray(elevation)\n grayscale.save(os.path.join(tif_dir, fl.replace('.png', '.tif')))\n\n\ndef create_masks(tif_dir, mask_dir):\n for fl in os.listdir(tif_dir):\n tif = Image.open(os.path.join(tif_dir, fl))\n elevation = np.array(tif)\n ridge_mask = ridges(elevation)\n\n mask = Image.fromarray(ridge_mask.astype(np.int8) * 256).convert('L')\n mask.save(os.path.join(mask_dir, fl))\n\n\ndef standardize_batch(featurewise_std, batch):\n batch -= batch.mean(axis=(1,2), keepdims=True)\n batch /= featurewise_std\n\n return batch\n \n\ndef load_subset(data_dir, N=None, frac=None, seed=1):\n '''\n Load a random subset of the images in a directory and return as an Nx256x256x1 numpy array\n '''\n np.random.seed(seed=seed)\n files = os.listdir(data_dir)\n if N:\n subset_files = np.random.choice(files, size=N, replace=False)\n elif frac:\n subset_files = np.random.choice(files, size=int(frac*len(files)), replace=False)\n \n data = []\n for fl in subset_files:\n im = Image.open(os.path.join(data_dir, fl))\n data.append(np.array(im))\n\n return np.expand_dims(np.stack(data), axis=3)\n\n\nclass CustomImageDataGenerator(Sequence):\n '''\n Custom image data generator that avoid some limitations of Keras ImageDataGenerator\n '''\n def __init__(self, image_path, mask_path, image_filenames,\n to_fit=True, batch_size=32, augment=True, standardize_batch=lambda x: x,\n seed=1, shuffle=True, rescale_x=1, rescale_y=1, n_outputs=1):\n '''Initialization\n :param list_IDs: list of all 'label' ids to use in the generator\n :param labels: list of image labels (file names)\n :param image_path: path to images location\n :param mask_path: path to masks location\n :param to_fit: True to return X and y, False to return X only\n :param batch_size: batch size at each iteration\n :param dim: tuple indicating image dimension\n :param n_channels: number of image channels\n :param n_classes: number of output masks\n :param shuffle: True to shuffle label indexes after every epoch\n '''\n np.random.seed(seed=seed)\n self.dim = IMAGE_SIZE\n self.augment = augment\n self.augment_factor = 8 if 
augment else 1\n self.image_files = image_filenames * self.augment_factor\n self.image_idxs = np.arange(len(self.image_files))\n self.image_path = image_path\n self.mask_path = mask_path\n self.to_fit = to_fit\n self.batch_size = batch_size\n self.seed = seed\n self.shuffle = shuffle\n self.n_channels = 1\n self.shape = (256, 256)\n self.rescale_x = rescale_x\n self.rescale_y = rescale_y\n self._standardize_batch = standardize_batch\n self.n_outputs = n_outputs\n self.on_epoch_end()\n\n def __len__(self):\n '''\n Return the number of batches per epoch\n '''\n return int(np.floor(len(self.image_idxs)/ self.batch_size))\n\n def __getitem__(self, index):\n '''\n Generate a batch of training samples and target masks\n '''\n indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]\n\n list_IDs_temp = [self.image_idxs[k] for k in indexes]\n\n X = self._generate_X(list_IDs_temp)\n X = self._standardize_batch(X)\n\n if self.to_fit:\n y = self._generate_y(list_IDs_temp)\n if self.augment:\n X, y = self._augment(X, y)\n return (X, (y,)*self.n_outputs)\n else:\n return X\n\n\n def on_epoch_end(self):\n '''Updates indexes after each epoch\n '''\n self.indexes = np.arange(len(self.image_idxs))\n if self.shuffle == True:\n np.random.shuffle(self.indexes)\n\n\n def _generate_X(self, list_IDs_temp):\n '''\n Return an rank 4 array containing a batch of image examples (batch, height, width, 1)\n '''\n X = np.empty((self.batch_size, *self.shape, self.n_channels))\n\n for i, ID in enumerate(list_IDs_temp):\n X[i,] = self._load_image(os.path.join(self.image_path, self.image_files[ID]))[:, :, np.newaxis]\n\n return X * self.rescale_x\n\n\n def _generate_y(self, list_IDs_temp):\n '''Generates data containing batch_size masks\n :param list_IDs_temp: list of label ids to load\n :return: batch if masks\n '''\n y = np.empty((self.batch_size, *self.shape, 1), dtype=int)\n\n for i, ID in enumerate(list_IDs_temp):\n y[i,] = self._load_image(os.path.join(self.mask_path, self.image_files[ID]))[:, :, np.newaxis]\n\n return y * self.rescale_y\n\n\n def _load_image(self, image_path):\n '''\n Return a grayscale image as a 2D numpy array\n '''\n im = Image.open(image_path)\n data = np.array(im)\n return data\n\n \n def _augment(self, X, y):\n '''\n Randomly flip and rotate each image and target in a batch\n '''\n for idx in np.arange(X.shape[0]):\n if np.random.rand() > 0.5:\n X[idx,] = np.flipud(X[idx,])\n y[idx,] = np.flipud(y[idx,])\n \n rot_steps = np.random.choice([0, 1, 2, 3])\n X[idx,] = np.rot90(X[idx,], k=rot_steps)\n y[idx,] = np.rot90(y[idx,], k=rot_steps)\n\n return X, y" }, { "alpha_fraction": 0.5703534483909607, "alphanum_fraction": 0.6251479983329773, "avg_line_length": 38.1556282043457, "blob_id": "a1fce2f29c86ac4a710d71a4dbd8eb664fa716cf", "content_id": "bd961de88499467e5189426bbc493f83d49cee16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11826, "license_type": "no_license", "max_line_length": 159, "num_lines": 302, "path": "/src/models.py", "repo_name": "mikeskaug/ridges", "src_encoding": "UTF-8", "text": "\nimport numpy as np\n\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import (\n BatchNormalization, Activation, Dense, Dropout,\n Conv2D, Conv2DTranspose, MaxPooling2D, concatenate,\n UpSampling2D\n)\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras.initializers import Constant\nfrom tensorflow.keras.applications import VGG16\n\n\ndef 
gradient_kernel(shape, dtype=K.floatx()):\n if shape[:2] != (3, 3):\n raise ValueError('Currently only supports kernels of shape (3, 3)')\n grad_x = np.array([\n [1, 0, -1],\n [2, 0, -2],\n [1, 0, -1]\n ])\n grad_x = grad_x / grad_x.std()\n\n grad_y = np.array([\n [1, 2, 1],\n [0, 0, 0],\n [-1, -2, -1]\n ])\n grad_y = grad_y / grad_y.std()\n\n output = np.zeros(shape)\n for i in range(shape[-1]):\n for j in range(shape[-2]):\n if np.random.uniform() < 0.5:\n output[:, :, j, i] = grad_x\n else:\n output[:, :, j, i] = grad_y\n \n return output\n\n\ndef conv2d_block(input_tensor, n_filters, kernel=gradient_kernel, kernel_size=3, batchnorm=True, name='Conv2D'):\n x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer=kernel,\n padding='same', name=name+'_1')(input_tensor)\n if batchnorm:\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer=kernel,\n padding='same', name=name+'_2')(x)\n if batchnorm:\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n return x\n\n\ndef unet_4x(input_img, n_filters=16, dropout=0.5, batchnorm=True, logits=False):\n c1 = conv2d_block(input_img, n_filters=n_filters*1, kernel_size=3, batchnorm=batchnorm)\n p1 = MaxPooling2D((2, 2))(c1)\n p1 = Dropout(dropout*0.5)(p1)\n\n c2 = conv2d_block(p1, n_filters=n_filters*2, kernel_size=3, batchnorm=batchnorm)\n p2 = MaxPooling2D((2, 2))(c2)\n p2 = Dropout(dropout)(p2)\n\n c3 = conv2d_block(p2, n_filters=n_filters*4, kernel_size=3, batchnorm=batchnorm)\n p3 = MaxPooling2D((2, 2))(c3)\n p3 = Dropout(dropout)(p3)\n\n c4 = conv2d_block(p3, n_filters=n_filters*8, kernel_size=3, batchnorm=batchnorm)\n p4 = MaxPooling2D((2, 2))(c4)\n p4 = Dropout(dropout)(p4)\n \n c5 = conv2d_block(p4, n_filters=n_filters*16, kernel_size=3, batchnorm=batchnorm)\n \n u6 = Conv2DTranspose(n_filters*8, (3, 3), strides=(2, 2), padding='same')(c5)\n u6 = concatenate([u6, c4])\n u6 = Dropout(dropout)(u6)\n c6 = conv2d_block(u6, n_filters=n_filters*8, kernel_size=3, batchnorm=batchnorm)\n\n u7 = Conv2DTranspose(n_filters*4, (3, 3), strides=(2, 2), padding='same')(c6)\n u7 = concatenate([u7, c3])\n u7 = Dropout(dropout)(u7)\n c7 = conv2d_block(u7, n_filters=n_filters*4, kernel_size=3, batchnorm=batchnorm)\n\n u8 = Conv2DTranspose(n_filters*2, (3, 3), strides=(2, 2), padding='same')(c7)\n u8 = concatenate([u8, c2])\n u8 = Dropout(dropout)(u8)\n c8 = conv2d_block(u8, n_filters=n_filters*2, kernel_size=3, batchnorm=batchnorm)\n\n u9 = Conv2DTranspose(n_filters*1, (3, 3), strides=(2, 2), padding='same')(c8)\n u9 = concatenate([u9, c1], axis=3)\n u9 = Dropout(dropout)(u9)\n c9 = conv2d_block(u9, n_filters=n_filters*1, kernel_size=3, batchnorm=batchnorm)\n \n if logits:\n outputs = Conv2D(1, (1, 1), activation=None)(c9)\n else:\n outputs = Conv2D(1, (1, 1), activation='sigmoid', bias_initializer=Constant(value=-np.log((1 - 0.01)/0.01)))(c9)\n\n model = Model(inputs=[input_img], outputs=[outputs])\n\n return model\n\n\ndef unet_2x(input_img, n_filters=16, dropout=0.5, batchnorm=True, logits=False):\n c1 = conv2d_block(input_img, n_filters=n_filters*1, kernel='he_normal', kernel_size=3, batchnorm=batchnorm, name='conv2d_block1')\n p1 = MaxPooling2D((2, 2))(c1)\n p1 = Dropout(dropout*0.5)(p1)\n\n c2 = conv2d_block(p1, n_filters=n_filters*2, kernel='he_normal', kernel_size=3, batchnorm=batchnorm, name='conv2d_block2')\n p2 = MaxPooling2D((2, 2))(c2)\n p2 = Dropout(dropout)(p2)\n \n c5 = conv2d_block(p2, n_filters=n_filters*16, 
kernel='he_normal', kernel_size=3, batchnorm=batchnorm, name='conv2d_block3')\n\n u8 = Conv2DTranspose(n_filters*2, (3, 3), strides=(2, 2), padding='same')(c5)\n u8 = concatenate([u8, c2])\n u8 = Dropout(dropout)(u8)\n c8 = conv2d_block(u8, n_filters=n_filters*2, kernel='he_normal', kernel_size=3, batchnorm=batchnorm, name='conv2d_block4')\n\n u9 = Conv2DTranspose(n_filters*1, (3, 3), strides=(2, 2), padding='same')(c8)\n u9 = concatenate([u9, c1], axis=3)\n u9 = Dropout(dropout)(u9)\n c9 = conv2d_block(u9, n_filters=n_filters*1, kernel='he_normal', kernel_size=3, batchnorm=batchnorm, name='conv2d_block5')\n \n if logits:\n outputs = Conv2D(1, (1, 1), activation=None)(c9)\n else:\n outputs = Conv2D(1, (1, 1), activation='sigmoid', kernel_initializer='truncated_normal', bias_initializer=Constant(value=-np.log((1 - 0.01)/0.01)))(c9)\n\n model = Model(inputs=[input_img], outputs=[outputs])\n\n return model\n\n\ndef dilated_2d(input_tensor, n_filters, kernel_size=3, dilation_rate=1, batchnorm=True, name='DilatedConv2D'):\n x = Conv2D(filters=n_filters, kernel_size=kernel_size, dilation_rate=dilation_rate, \n kernel_initializer='he_normal', padding='same', name=name+'_1')(input_tensor)\n if batchnorm:\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n x = Conv2D(filters=n_filters, kernel_size=kernel_size, dilation_rate=dilation_rate, \n kernel_initializer='he_normal', padding='same', name=name+'_2')(x)\n if batchnorm:\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n return x\n\n\ndef LFE(input_img, n_filters=16, batchnorm=True, logits=False):\n '''\n Based on \"Local Feature Extraction\", dilated convolutional architecture from\n \"Effective Use of Dilated Convolutions for Segmenting Small Object Instances in Remote Sensing Imagery\"\n https://arxiv.org/abs/1709.00179\n '''\n # Front-end module\n x = dilated_2d(input_img, n_filters=n_filters, dilation_rate=1, batchnorm=batchnorm, name='fe_1')\n x = dilated_2d(x, n_filters=2*n_filters, dilation_rate=2, batchnorm=batchnorm, name='fe_2')\n x = dilated_2d(x, n_filters=4*n_filters, dilation_rate=3, batchnorm=batchnorm, name='fe_3')\n \n # LFE module\n x = dilated_2d(x, n_filters=4*n_filters, dilation_rate=3, batchnorm=batchnorm, name='lfe_1')\n x = dilated_2d(x, n_filters=2*n_filters, dilation_rate=2, batchnorm=batchnorm, name='lfe_2')\n x = dilated_2d(x, n_filters=n_filters, dilation_rate=1, batchnorm=batchnorm, name='lfe_3')\n\n # Head module\n x = dilated_2d(x, n_filters=8*n_filters, dilation_rate=1, kernel_size=1, batchnorm=batchnorm, name='head_1')\n if logits:\n outputs = Conv2D(1, (1, 1), activation=None)(x)\n else:\n outputs = Conv2D(1, (1, 1), activation='sigmoid', kernel_initializer='truncated_normal', bias_initializer=Constant(value=-np.log((1 - 0.01)/0.01)))(x)\n\n model = Model(inputs=[input_img], outputs=[outputs])\n\n return model\n\n\ndef stacked_multi_scale(input_img, n_filters=16, batchnorm=True, logits=False):\n \n n1 = dilated_2d(input_img, n_filters=n_filters, dilation_rate=1, batchnorm=batchnorm, name='scale_1')\n n2 = dilated_2d(input_img, n_filters=n_filters, dilation_rate=2, batchnorm=batchnorm, name='scale_2')\n n4 = dilated_2d(input_img, n_filters=n_filters, dilation_rate=4, batchnorm=batchnorm, name='scale_4')\n n8 = dilated_2d(input_img, n_filters=n_filters, dilation_rate=8, batchnorm=batchnorm, name='scale_8')\n\n c = conv2d_block(concatenate([n1, n2, n4, n8]), n_filters=2*n_filters, kernel='he_normal', kernel_size=3, batchnorm=batchnorm, name='conv2d')\n \n if logits:\n outputs = Conv2D(1, (1, 
1), activation=None)(c)\n else:\n outputs = Conv2D(1, (1, 1), activation='sigmoid', kernel_initializer='truncated_normal', bias_initializer=Constant(value=-np.log((1 - 0.01)/0.01)))(c)\n\n model = Model(inputs=[input_img], outputs=[outputs])\n\n return model\n\n\ndef side_branch(x, factor):\n x = Conv2D(1, (1, 1), activation=None, padding='same')(x)\n x = UpSampling2D(size=(factor, factor), interpolation='bilinear')(x)\n\n return x\n\n\ndef HED(input_img):\n '''\n Implementation of holistically-nested edge detection\n\n See: https://arxiv.org/abs/1504.06375\n '''\n # Block 1\n x = Conv2D(64, (3, 3), padding='same', name='block1_conv1')(input_img)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = Conv2D(64, (3, 3), padding='same', name='block1_conv2')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n b1= side_branch(x, 1) # 256 256 1\n x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block1_pool')(x) # 128 128 64\n\n # Block 2\n x = Conv2D(128, (3, 3), padding='same', name='block2_conv1')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = Conv2D(128, (3, 3), padding='same', name='block2_conv2')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n b2= side_branch(x, 2) # 256 256 1\n x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block2_pool')(x) # 64 64 128\n\n # Block 3\n x = Conv2D(256, (3, 3), padding='same', name='block3_conv1')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = Conv2D(256, (3, 3), padding='same', name='block3_conv2')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = Conv2D(256, (3, 3), padding='same', name='block3_conv3')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n b3= side_branch(x, 4) # 256 256 1\n x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block3_pool')(x) # 32 32 256\n\n # Block 4\n x = Conv2D(512, (3, 3), padding='same', name='block4_conv1')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = Conv2D(512, (3, 3), padding='same', name='block4_conv2')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = Conv2D(512, (3, 3), padding='same', name='block4_conv3')(x) # 32 32 512\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n b4= side_branch(x, 8) # 256 256 1\n\n # fuse\n fuse = concatenate([b1, b2, b3, b4], axis=-1)\n fuse = Conv2D(1, (1, 1), \n padding='same', \n use_bias=False, \n activation=None, \n kernel_initializer=Constant(value=1/5), \n kernel_regularizer=regularizers.l2(0.0002)\n )(fuse) # 256 256 1\n\n # outputs\n o1 = Activation('sigmoid', name='o1')(b1)\n o2 = Activation('sigmoid', name='o2')(b2)\n o3 = Activation('sigmoid', name='o3')(b3)\n o4 = Activation('sigmoid', name='o4')(b4)\n ofuse = Activation('sigmoid', name='ofuse')(fuse)\n\n model = Model(inputs=[input_img], outputs=[o1, o2, o3, o4, ofuse])\n\n # layers which will have weights set using pretrained VGG16 model\n transfer_layers = [\n 'block1_conv1',\n 'block1_conv2',\n 'block2_conv1',\n 'block2_conv2',\n 'block3_conv1',\n 'block3_conv2',\n 'block3_conv3',\n 'block4_conv1',\n 'block4_conv2',\n 'block4_conv3',\n ]\n vgg16 = VGG16()\n for layer_name in transfer_layers:\n weights = vgg16.get_layer(layer_name).get_weights()\n if layer_name == 'block1_conv1':\n # vgg16 is built for 3 channel RGB input images, \n # so we'll average across the channel axis in the first layer to match our 1 channel input\n weights[0] = weights[0].mean(axis=2, keepdims=True)\n \n model.get_layer(layer_name).set_weights(weights)\n # 
model.get_layer(layer_name).trainable = False\n\n return model\n" }, { "alpha_fraction": 0.5961751937866211, "alphanum_fraction": 0.6111419200897217, "avg_line_length": 31.513513565063477, "blob_id": "1462ab7941179d9935bf7fb900d662827911359a", "content_id": "f9a135e247ed08e5a820c69132b0dfb19ee58e41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3608, "license_type": "no_license", "max_line_length": 114, "num_lines": 111, "path": "/src/train.py", "repo_name": "mikeskaug/ridges", "src_encoding": "UTF-8", "text": "import os\nfrom functools import partial\nfrom datetime import datetime\n\nimport numpy as np\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau\n\nfrom config import *\nfrom models import unet_2x, LFE, stacked_multi_scale, HED\nfrom losses import focal_loss, balanced_cross_entropy, dice_loss, bce_plus_dice, per_sample_balanced_cross_entropy\nfrom metrics import iou, dice_coefficient\nfrom dataset import load_subset, CustomImageDataGenerator, standardize_batch\n\n\ndef compile_callbacks(\n logs_dir=os.path.join(LOGS_DIR, datetime.now().isoformat(timespec='minutes')), \n checkpoint_dir=os.path.join(CHECKPOINT_DIR, datetime.now().isoformat(timespec='minutes'))\n ):\n if not os.path.isdir(logs_dir):\n os.mkdir(logs_dir)\n if not os.path.isdir(checkpoint_dir):\n os.mkdir(checkpoint_dir)\n return [\n TensorBoard(\n log_dir=os.path.join(logs_dir),\n histogram_freq=1,\n update_freq='epoch',\n write_graph=False,\n write_images=False,\n profile_batch=0\n ),\n # EarlyStopping(patience=10, verbose=1),\n # ReduceLROnPlateau(\n # monitor='loss',\n # factor=0.1, \n # patience=5, \n # min_lr=0.00001, \n # verbose=1,\n # cooldown=10\n # ),\n ModelCheckpoint(\n os.path.join(checkpoint_dir, 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'), \n verbose=1,\n save_freq='epoch',\n save_best_only=False, \n save_weights_only=True\n )\n \n ]\n\n\ndef train(model):\n validation_fraction = 0.05\n batch_size = 8\n subset = load_subset(ELEVATION_TIF_DIR, frac=0.2)\n featurewise_std = subset.std()\n\n files = os.listdir(ELEVATION_TIF_DIR)\n np.random.shuffle(files)\n train_files = files[:int(len(files)*(1-validation_fraction))]\n validation_files = files[int(len(files)*(1-validation_fraction)):]\n\n training_generator = CustomImageDataGenerator(\n ELEVATION_TIF_DIR, \n MASK_TIF_DIR, \n train_files, \n batch_size=batch_size, \n standardize_batch=partial(standardize_batch, featurewise_std), \n rescale_y=1/255,\n n_outputs=5\n )\n\n validation_generator = CustomImageDataGenerator(\n ELEVATION_TIF_DIR, \n MASK_TIF_DIR, \n validation_files, \n batch_size=len(validation_files)*8, \n standardize_batch=partial(standardize_batch, featurewise_std), \n rescale_y=1/255,\n n_outputs=5\n )\n validation_data = validation_generator.__getitem__(0)\n\n training_history = model.fit(\n training_generator,\n steps_per_epoch=len(training_generator),\n epochs=50,\n callbacks=compile_callbacks(),\n shuffle=False,\n validation_data=validation_data\n )\n\n return model, training_history\n\n\nif __name__ == \"__main__\":\n input_img = Input((*IMAGE_SIZE, 1), name='img')\n \n model = HED(input_img)\n model.compile(loss={'o1': balanced_cross_entropy,\n 'o2': balanced_cross_entropy,\n 'o3': balanced_cross_entropy,\n 'o4': balanced_cross_entropy,\n 'ofuse': balanced_cross_entropy,\n },\n 
metrics={'ofuse': ['accuracy', dice_coefficient]},\n optimizer=Adam(learning_rate=0.001))\n model, history = train(model)" }, { "alpha_fraction": 0.5986911654472351, "alphanum_fraction": 0.6227449774742126, "avg_line_length": 38.26388931274414, "blob_id": "56650dd3821daca9af8520ea094fc0a42749304f", "content_id": "e0ca282e01a8ed549ae0bfb7e3ea969e0a3b6ce6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5655, "license_type": "no_license", "max_line_length": 138, "num_lines": 144, "path": "/src/losses.py", "repo_name": "mikeskaug/ridges", "src_encoding": "UTF-8", "text": "\nfrom tensorflow.keras import backend as K\nimport tensorflow as tf\n\n\ndef dice_loss(y_true, y_pred):\n intersection = 2 * tf.reduce_sum(y_true * y_pred, axis=(1,2,3))\n union = tf.reduce_sum(y_true + y_pred, axis=(1,2,3))\n\n # return shape = (batch,)\n # the +1 terms ensure loss=0 when y_true = 0, or a tile contains no ridges\n return 1 - (intersection + 1) / (union + 1)\n\n\ndef keras_lovasz_hinge(labels, logits):\n return lovasz_hinge(logits, labels, per_image=True, ignore=None)\n\n\ndef lovasz_hinge(logits, labels, per_image=True, ignore=None):\n '''\n from https://github.com/bermanmaxim/LovaszSoftmax/blob/master/tensorflow/lovasz_losses_tf.py\n See The Lovász-Softmax loss... https://arxiv.org/abs/1705.08790\n Binary Lovasz hinge loss\n logits: [B, H, W] Variable, logits at each pixel (between -\\infty and +\\infty)\n labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)\n per_image: compute the loss per image instead of per batch\n ignore: void class id\n '''\n if per_image:\n def treat_image(log_lab):\n log, lab = log_lab\n log, lab = tf.expand_dims(log, 0), tf.expand_dims(lab, 0)\n log, lab = flatten_binary_scores(log, lab, ignore)\n return lovasz_hinge_flat(log, lab)\n losses = tf.map_fn(treat_image, (logits, labels), dtype=tf.float32)\n loss = tf.reduce_mean(losses)\n else:\n loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))\n return loss\n\n\ndef lovasz_hinge_flat(logits, labels):\n '''\n Binary Lovasz hinge loss\n logits: [P] Variable, logits at each prediction (between -\\infty and +\\infty)\n labels: [P] Tensor, binary ground truth labels (0 or 1)\n ignore: label to ignore\n '''\n\n def compute_loss():\n labelsf = tf.cast(labels, logits.dtype)\n signs = 2. * labelsf - 1.\n errors = 1. 
- logits * tf.stop_gradient(signs)\n errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name='descending_sort')\n gt_sorted = tf.gather(labelsf, perm)\n grad = lovasz_grad(gt_sorted)\n loss = tf.tensordot(tf.nn.relu(errors_sorted), tf.stop_gradient(grad), 1, name='loss_non_void')\n return loss\n\n # deal with the void prediction case (only void pixels)\n loss = tf.cond(tf.equal(tf.shape(logits)[0], 0),\n lambda: tf.reduce_sum(logits) * 0.,\n compute_loss,\n strict=True,\n name='loss'\n )\n return loss\n\n\ndef flatten_binary_scores(scores, labels, ignore=None):\n '''\n Flattens predictions in the batch (binary case)\n Remove labels equal to 'ignore'\n '''\n scores = tf.reshape(scores, (-1,))\n labels = tf.reshape(labels, (-1,))\n if ignore is None:\n return scores, labels\n valid = tf.not_equal(labels, ignore)\n vscores = tf.boolean_mask(scores, valid, name='valid_scores')\n vlabels = tf.boolean_mask(labels, valid, name='valid_labels')\n return vscores, vlabels\n\n\ndef focal_loss(y_true, y_pred):\n '''\n See \"Focal Loss for Dense Object Detection\" https://arxiv.org/abs/1708.02002\n '''\n gamma=2\n alpha=0.9\n pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))\n pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))\n\n eps = 1e-4\n pt_1 = K.clip(pt_1, eps, 1 - eps)\n pt_0 = K.clip(pt_0, eps, 1 - eps)\n\n return -K.mean(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) - K.mean((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))\n\n\ndef balanced_cross_entropy(y_true, y_pred):\n '''\n Compute the balanced cross entropy. Good for cases of large class imbalance.\n\n To more heavily penalize false negatives, set alpha > 0.5.\n To decrease false positivees, set alpha < 0.5\n '''\n alpha = 0.92\n # setting false locations to ones_like or zeros_like will result in log(pt_1) or log(1-pt_0) going to zero\n pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))\n pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))\n\n # I've seen this other places. Assume it's to avoid possibly log(0).\n eps = 1e-4\n pt_1 = tf.clip_by_value(pt_1, eps, 1 - eps)\n pt_0 = tf.clip_by_value(pt_0, eps, 1 - eps)\n\n # reduce_sum eliminates the size=1 channel axis so the result is [batch, width, height]\n return -tf.reduce_sum(alpha * K.log(pt_1), axis=-1) - tf.reduce_sum((1 - alpha) * K.log(1. - pt_0), axis=-1)\n\n\ndef per_sample_balanced_cross_entropy(y_true, y_pred):\n '''\n Compute the balanced cross entropy. Good for cases of large class imbalance.\n\n Weighting is computed per sample/image as alpha = num_non_ridge / num_pixels\n '''\n # setting false locations to ones_like or zeros_like will result in log(pt_1) or log(1-pt_0) going to zero\n pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))\n pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))\n\n # I've seen this other places. Assume it's to avoid possibly log(0).\n eps = 1e-4\n pt_1 = tf.clip_by_value(pt_1, eps, 1 - eps)\n pt_0 = tf.clip_by_value(pt_0, eps, 1 - eps)\n\n # compute per sample weight\n alpha = 1 - tf.reduce_sum(y_true, axis=[1,2,3]) / tf.cast(tf.shape(y_true)[1] * tf.shape(y_true)[2] * tf.shape(y_true)[3], tf.float32)\n\n # reduce_sum eliminates the size=1 channel axis so the result is [batch, width, height]\n return -tf.reduce_sum(alpha * K.log(pt_1), axis=-1) - tf.reduce_sum((1 - alpha) * K.log(1. 
- pt_0), axis=-1)\n\n\ndef bce_plus_dice(y_true, y_pred):\n return balanced_cross_entropy(y_true, y_pred) + tf.reshape(dice_loss(y_true, y_pred), (-1, 1, 1))" }, { "alpha_fraction": 0.6753782629966736, "alphanum_fraction": 0.7235212922096252, "avg_line_length": 35.29999923706055, "blob_id": "1957ac780f81dba72ce015727c2825104a961c60", "content_id": "500d64373dad9f0f24680275c59bb12cba9e6087", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 727, "license_type": "no_license", "max_line_length": 97, "num_lines": 20, "path": "/src/config.py", "repo_name": "mikeskaug/ridges", "src_encoding": "UTF-8", "text": "import os\nimport sys\n\n\nmodule_path = os.path.abspath(os.path.join('..'))\nif module_path not in sys.path:\n sys.path.append(module_path)\n\nTERRAIN_BASE_URL = 'https://s3.amazonaws.com/elevation-tiles-prod/terrarium'\nDEFAULT_ZOOM = 12\n\nCOLORADO_BOUNDS = (-109.00634, 37.02886, -102.09594, 41.0)\n\nELEVATION_PNG_DIR = f'{os.environ[\"RIDGES_ROOT\"]}/data/high_ridge_terrain/Colorado/train/png'\nELEVATION_TIF_DIR = f'{os.environ[\"RIDGES_ROOT\"]}/data/high_ridge_terrain/Colorado/train/tif/sub'\nMASK_TIF_DIR = f'{os.environ[\"RIDGES_ROOT\"]}/data/high_ridge_terrain/Colorado/train/mask/sub'\nLOGS_DIR = f'{os.environ[\"RIDGES_ROOT\"]}/output/logs'\nCHECKPOINT_DIR = f'{os.environ[\"RIDGES_ROOT\"]}/output/checkpoint'\n\nIMAGE_SIZE = (256, 256)\n\n" }, { "alpha_fraction": 0.5428465008735657, "alphanum_fraction": 0.5890461802482605, "avg_line_length": 26.95833396911621, "blob_id": "fe3da7b0d9d1fbd5279169ef2722477213dc4907", "content_id": "815235bb6981359c2e1ebfa46c171ca425fb027c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2684, "license_type": "no_license", "max_line_length": 93, "num_lines": 96, "path": "/src/transforms.py", "repo_name": "mikeskaug/ridges", "src_encoding": "UTF-8", "text": "\nimport numpy as np\nfrom scipy import signal\nfrom PIL import Image, ImageFilter\n\nGRADIENT_THRESHOLD = 50\nCURVATURE_THRESHOLD = 10e3 \n\n\ndef resize(image, size):\n im = Image.fromarray(image)\n im_new = im.resize(size=size, resample=Image.BILINEAR)\n return np.array(im_new)\n\ndef erode(image):\n im = Image.fromarray(image)\n im_new = im.filter(ImageFilter.MinFilter(3))\n return np.array(im_new)\n\ndef local_maxima(image):\n kernel = np.array([\n [-1, 0, -1],\n [0, 1, 0],\n [-1, 0, -1]\n ])\n kernel = kernel - kernel.mean()\n maxima = signal.convolve2d(image, kernel, boundary='symm', mode='same')\n return maxima - image\n\ndef gradient_x(image):\n kernel = np.array([\n [1, 0, -1],\n [2, 0, -2],\n [1, 0, -1]\n ])\n grad_x = signal.convolve2d(image, kernel, boundary='symm', mode='same')\n return grad_x\n\ndef gradient_y(image):\n kernel = np.array([\n [1, 2, 1],\n [0, 0, 0],\n [-1, -2, -1]\n ])\n grad_y = signal.convolve2d(image, kernel, boundary='symm', mode='same')\n return grad_y\n\ndef gradient(image):\n grad_x = gradient_x(image)\n grad_y = gradient_y(image)\n return np.sqrt(grad_x**2 + grad_y**2)\n\ndef curvature(image):\n return gradient(gradient(image))\n\ndef scharr(image):\n kernel = np.array([\n [ -3-3j, 0-10j, +3 -3j],\n [-10+0j, 0+ 0j, +10 +0j],\n [ -3+3j, 0+10j, +3 +3j]\n ]) # Gx + j*Gy\n \n grad = signal.convolve2d(image, kernel, boundary='symm', mode='same')\n return np.absolute(grad), np.angle(grad)\n\ndef scharr_curvature(image):\n kernel = np.array([\n [ -3-3j, 0-10j, +3 -3j],\n [-10+0j, 0+ 0j, +10 +0j],\n [ -3+3j, 0+10j, +3 +3j]\n ]) # Gx + j*Gy\n \n grad = 
signal.convolve2d(image, kernel, boundary='symm', mode='same')\n curvature = signal.convolve2d(grad, kernel, boundary='symm', mode='same')\n return np.absolute(curvature)\n\ndef ridges(elevation):\n candidate_ridges = []\n total_gradient = gradient(elevation)\n curvature = resize(scharr_curvature(resize(elevation, size=(64, 64))), size=(256, 256))\n\n for size in [(256, 256), (128, 128), (64, 64)]:\n elevation_reduced = resize(elevation, size=size)\n\n grad_x = gradient_x(elevation_reduced)\n curv_x = gradient_x(grad_x > 0)\n\n grad_y = gradient_y(elevation_reduced)\n curv_y = gradient_y(grad_y > 0)\n\n curv_xy = (curv_x<0) + (curv_y<0)\n\n original_size = resize(curv_xy.astype(np.int8), (256, 256))\n candidate_ridges.append(original_size)\n\n candidate_ridges = np.stack(candidate_ridges + [curvature > CURVATURE_THRESHOLD], axis=2)\n return np.all(candidate_ridges, axis=2)" }, { "alpha_fraction": 0.6331058144569397, "alphanum_fraction": 0.6535836458206177, "avg_line_length": 26.904762268066406, "blob_id": "a2198f5fb0d43513e8e3800d353cf9ca1fe3509f", "content_id": "6f1144870d5426f425ec0580dc797e011b4c6ae8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 586, "license_type": "no_license", "max_line_length": 77, "num_lines": 21, "path": "/src/metrics.py", "repo_name": "mikeskaug/ridges", "src_encoding": "UTF-8", "text": "\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\nimport numpy as np\n\n\ndef iou(y_true, y_pred):\n intersection = tf.reduce_sum(y_true * y_pred)\n union = tf.reduce_sum(tf.cast((y_true + y_pred) >= 1, tf.float64))\n\n return intersection / union\n\n\ndef dice_coefficient(y_true, y_pred):\n intersection = 2 * tf.reduce_sum(y_true * y_pred, axis=(1,2,3))\n union = tf.reduce_sum(y_true + y_pred, axis=(1,2,3))\n\n return (intersection + 1) / (union + 1)\n\n\ndef accuracy(y_true, y_pred):\n return tf.math.reduce_sum((y_true == y_pred).astype(float)) / y_true.size" }, { "alpha_fraction": 0.6167545914649963, "alphanum_fraction": 0.6306068897247314, "avg_line_length": 29.299999237060547, "blob_id": "ed1d868e98feeef3081294ff75735ca682cf4601", "content_id": "ca3a011e210849ff084e9741c3e9e1712cd7a2f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1516, "license_type": "no_license", "max_line_length": 81, "num_lines": 50, "path": "/src/data.py", "repo_name": "mikeskaug/ridges", "src_encoding": "UTF-8", "text": "import shutil\nimport os\nfrom urllib.parse import urlparse\n\nimport requests\nfrom PIL import Image\n\nfrom trowel import utils as tile_utils\nfrom config import TERRAIN_BASE_URL\n\n\ndef terrain_tile_url(lon, lat, zoom):\n (X, Y) = tile_utils.lonlat_to_tile(lon, lat, zoom)\n return os.path.join(TERRAIN_BASE_URL, str(zoom), str(X), str(Y)) + '.png'\n\n\ndef url_to_path(root, url, ending):\n path = urlparse(url).path\n no_ending = path.split('.')[-2]\n [z, y, x] = no_ending.split('/')[-3:]\n destination_file = z + '_' + y + '_' + x + ending\n return os.path.join(root, destination_file)\n\n\ndef download_tile(url, destination=None):\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n if destination:\n destination_file = url_to_path(destination, url, '.png')\n with open(destination_file, 'wb') as out_file:\n shutil.copyfileobj(r.raw, out_file)\n else:\n return Image.open(r.raw)\n else:\n print('problem downloading file: {}'.format(url))\n\n\ndef decode_elevation(data):\n '''\n decode the elevation from the RGB channels into a 
single grayscale array\n see https://mapzen.com/documentation/terrain-tiles/formats/\n\n Arguments:\n data - an NxNx3 numpy array where the last dimension contains the RGB layers,\n i.e. data[:, :, 0] are the red values, etc.\n\n Returns:\n an NxN numpy float array containing elevation in meters\n '''\n return (data[:, :, 0] * 256 + data[:, :, 1] + data[:, :, 2] / 256) - 32768\n\n" }, { "alpha_fraction": 0.7623762488365173, "alphanum_fraction": 0.7623762488365173, "avg_line_length": 56.130435943603516, "blob_id": "044e6db3d140d609e2649d60138545bf2de32a0b", "content_id": "aae963a44188bdbcfb2178ae3f25613a3a238d3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1313, "license_type": "no_license", "max_line_length": 910, "num_lines": 23, "path": "/README.md", "repo_name": "mikeskaug/ridges", "src_encoding": "UTF-8", "text": "# Ridges\n\nThe goal is to develop a deep learning model for identifying ridges in topography. The approach is to develop a deep semantic segmentation model which takes a [digital elevation model](https://www.wikiwand.com/en/Digital_elevation_model) and labels which pixels correspond to ridge lines and which do not. There are two reasons why using a deep learning model for finding ridges might make sense. Number one is that traditional image processing approaches to this problem, or at least the ones I've tried, don't work very well. The second is that it's difficult to define what ridge is. I've failed to come up with a precise definition of \"ridge\" which matches my own intuition about what is a ridge and what is not. And it is in cases like this of ill defined categories where deep learning is most valuable. For example, it's impossible to define a set of rules that distinguish an image of a cat from a dog.\n\n![overview diagram](./overview_diagram.png)\n\n### Install requirements\n\n $ pip install -r requirements.txt\n $ export RIDGES_ROOT=/path/to/this/repo\n\n### Download training data\n\n from dataset import *\n from config import COLORADO_BOUNDS\n\n harvest_tiles(COLORADO_BOUNDS)\n png_to_tif(png_dir, tif_dir)\n create_masks(tif_dir, mask_dir)\n\n### Train model\n\n $ python train.py" } ]
9
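The `decode_elevation` helper in this record's data.py recovers elevation from the Terrarium RGB encoding: elevation in meters is (R * 256 + G + B / 256) - 32768. A minimal runnable sketch of that decoding; the sample pixel values are illustrative, not taken from any real tile:

```python
# Minimal sketch of Terrarium decoding, as in data.py's decode_elevation.
# The 1x1 "tile" below is illustrative only.
import numpy as np

def decode_elevation(data):
    # data[:, :, 0..2] are the R, G, B channels of a Terrarium tile;
    # float dtype avoids uint8 overflow in the R * 256 term
    return (data[:, :, 0] * 256 + data[:, :, 1] + data[:, :, 2] / 256) - 32768

# a pixel encoding 0 m: R=128, G=0, B=0 -> 128*256 - 32768 = 0
tile = np.array([[[128, 0, 0]]], dtype=np.float64)
print(decode_elevation(tile))  # [[0.]]
```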
james-ngo/my-spotify-sound
https://github.com/james-ngo/my-spotify-sound
aa70d42ab7fd64098c3393aceed0466ef5da7787
a21a84d4a17a69329853be02a323cdc7d94c3d3a
e5187156d56d450bc5512a0d052567bbb501cb2f
refs/heads/master
2022-12-17T08:42:50.419578
2020-09-23T19:42:35
2020-09-23T19:42:35
292253383
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6060048937797546, "alphanum_fraction": 0.6164215803146362, "avg_line_length": 37.85714340209961, "blob_id": "ad9cbfbe2f10d63d2efd44d27ce0b1c2b9303e41", "content_id": "ddd2dab877cfd07a760f98aabf80cd123636ed91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1632, "license_type": "no_license", "max_line_length": 188, "num_lines": 42, "path": "/scripts/get_recommended.py", "repo_name": "james-ngo/my-spotify-sound", "src_encoding": "UTF-8", "text": "import requests\nimport json\nimport sys\nfrom pandas import json_normalize\n\naccess_token = sys.argv[1]\nartist_id_pairs = []\nrec_artists = {\"artists\": []}\ngenres = {}\ntime_range = sys.argv[2]\nif time_range == \"undefined\":\n time_range = \"long_term\"\nurl = \"https://api.spotify.com/v1/me/top/artists?time_range=\" + time_range + \"&limit=50\"\nresponse = requests.get(\n url,\n headers = {\"Authorization\": \"Bearer \" + access_token}\n)\nfor artist in json_normalize(response.json())[\"items\"][0]:\n artist_id_pairs.append((artist[\"name\"], artist[\"id\"]))\n for genre in artist[\"genres\"]:\n if not genre in genres.keys():\n genres[genre] = 1\n else:\n genres[genre] += 1\nres = []\nfor key, val in genres.items():\n res.append([key] + [val])\nres.sort(key=lambda x: x[1], reverse=True)\nres = res[:10]\ntop_genres = [x[0] for x in res]\nfor artist_name, id in artist_id_pairs:\n response = requests.get(\n \"https://api.spotify.com/v1/artists/%s/related-artists\" % (id),\n headers = {\"Authorization\": \"Bearer \" + access_token}\n )\n for artist in json_normalize(response.json())[\"artists\"][0]:\n if not artist[\"id\"] in [x[1] for x in artist_id_pairs] and not artist[\"id\"] in [x[\"id\"] for x in rec_artists[\"artists\"]] and len(list(set(artist[\"genres\"]) & set(top_genres))) > 3:\n artist[\"common_genres\"] = list(set(artist[\"genres\"]) & set(top_genres))\n artist = {key: artist[key] for key in [\"id\", \"images\", \"name\", \"common_genres\"]}\n artist[\"images\"] = artist[\"images\"][0]\n rec_artists[\"artists\"].append(artist)\nprint(json.dumps(rec_artists))\n" }, { "alpha_fraction": 0.8012048006057739, "alphanum_fraction": 0.8012048006057739, "avg_line_length": 32.20000076293945, "blob_id": "8e9c4af29ff5a685e1cf96cbb57430b27599a2b6", "content_id": "8ad441bf213a803d57c39b4d0572cb153ac595e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 166, "license_type": "no_license", "max_line_length": 105, "num_lines": 5, "path": "/README.md", "repo_name": "james-ngo/my-spotify-sound", "src_encoding": "UTF-8", "text": "# My Spotify Sound\n\nA web application to view most listened to Spotify artists and genres and recommendations based on those.\n\nhttps://my-spotify-sound.herokuapp.com\n" } ]
2
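The genre tally in this record's get_recommended.py builds a dict by hand and then sorts key/value pairs to find the ten most common genres. A sketch of the same logic using collections.Counter; `artists` stands in for the parsed Spotify response, and the two entries shown are made up for illustration:

```python
# Sketch of the top-genre computation from get_recommended.py using Counter.
from collections import Counter

artists = [
    {"name": "A", "genres": ["indie rock", "shoegaze"]},
    {"name": "B", "genres": ["indie rock", "dream pop"]},
]

# count every genre occurrence across all top artists
genre_counts = Counter(g for artist in artists for g in artist["genres"])
# keep the ten most common genre names, as the script's sort-and-slice does
top_genres = [genre for genre, _ in genre_counts.most_common(10)]
print(top_genres)
```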
viraatdas/Deep-tic-tac-toe
https://github.com/viraatdas/Deep-tic-tac-toe
0097e6e9c92f835251457b5a1cb5befb41e9b29f
caa1653664c2b9564ce6d7adc4e99c46c4db88b9
427569a0e3e2358b987c35005919f5ea075c8095
refs/heads/master
2020-12-01T15:42:38.272203
2019-12-29T00:57:20
2019-12-29T00:57:20
230685639
0
0
MIT
2019-12-29T00:54:36
2019-10-05T10:14:11
2018-08-12T00:55:23
null
[ { "alpha_fraction": 0.41372862458229065, "alphanum_fraction": 0.43242520093917847, "avg_line_length": 27.25, "blob_id": "d1b33183dceb4a953822ff503a8e35b8dfe40598", "content_id": "b5eae5057139bc1f87727b6a30f5b8323ccbd3e2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3744, "license_type": "permissive", "max_line_length": 72, "num_lines": 128, "path": "/Games.py", "repo_name": "viraatdas/Deep-tic-tac-toe", "src_encoding": "UTF-8", "text": "import numpy as np\r\n\r\nclass Game:\r\n\r\n def __init__(self,num_rows, num_cols, action_space, obs_space):\r\n self.num_rows = num_rows\r\n self.num_cols = num_cols\r\n self.action_space = action_space\r\n self.obs_space = obs_space\r\n \r\n def board2array(self):\r\n new_board = np.zeros(self.obs_space)\r\n for row in range(self.num_rows):\r\n for col in range(self.num_cols):\r\n val = self.board[row][col]\r\n new_board[(3 * self.num_cols) * row + 3 * col + val] = 1\r\n return(new_board)\r\n\r\nclass TicTacToe(Game):\r\n\r\n def __init__(self):\r\n self.board = np.zeros((3,3),dtype=\"int\")\r\n self.terminal = False\r\n super().__init__(3,3,9,27)\r\n\r\n def restart(self):\r\n self.board = np.zeros((3,3),dtype=\"int\")\r\n self.terminal = False\r\n\r\n def is_valid(self, action):\r\n if self.board[int(np.floor(action / 3))][action % 3] != 0:\r\n return False\r\n else:\r\n return True\r\n\r\n def invert_board(self):\r\n for row in range(3):\r\n for col in range(3):\r\n if(self.board[row][col] == 1):\r\n self.board[row][col] = 2\r\n elif(self.board[row][col] == 2):\r\n self.board[row][col] = 1\r\n\r\n def step(self,action):\r\n \"\"\"\r\n PARAMS: a valid action (int 0 to 8)\r\n RETURN: reward (-1,0,1)\r\n\r\n self.board is updated in the process\r\n self.terminal is updated in the process\r\n \"\"\"\r\n\r\n # insert\r\n row_index = int(np.floor(action / 3))\r\n col_index = action % 3\r\n self.board[row_index][col_index] = 1\r\n\r\n # undecided\r\n terminal = 1\r\n\r\n # to check for 3 in a row horizontal\r\n for row in range(3):\r\n for col in range(3):\r\n if(self.board[row][col] != 1):\r\n terminal = 0\r\n if(terminal == 1):\r\n self.terminal = True\r\n return +1\r\n else:\r\n terminal = 1\r\n\r\n # to check for 3 in a row vertical\r\n for col in range(3):\r\n for row in range(3):\r\n if(self.board[row][col] != 1):\r\n terminal = 0\r\n if(terminal == 1):\r\n self.terminal = True\r\n return +1\r\n else:\r\n terminal = 1\r\n\r\n # diagonal top-left to bottom-right\r\n for diag in range(3):\r\n if(self.board[diag][diag] != 1):\r\n terminal = 0\r\n if(terminal == 1):\r\n self.terminal = True\r\n return +1\r\n else:\r\n terminal = 1\r\n\r\n # diagonal bottom-left to top-right\r\n for diag in range(3):\r\n if(self.board[2 - diag][diag] != 1):\r\n terminal = 0\r\n if(terminal == 1):\r\n self.terminal = True\r\n return +1\r\n else:\r\n terminal = 1\r\n\r\n # checks if board is filled completely\r\n for row in range(3):\r\n for col in range(3):\r\n if(self.board[row][col] == 0):\r\n terminal = 0\r\n break\r\n if terminal == 1:\r\n self.terminal = True\r\n \r\n return 0\r\n\r\n def render(self):\r\n \"\"\"\r\n print to screen the full board nicely\r\n \"\"\"\r\n \r\n for i in range(3):\r\n print('\\n|', end=\"\")\r\n for j in range(3):\r\n if self.board[i][j] == 1:\r\n print(' X |' , end=\"\")\r\n elif self.board[i][j] == 0:\r\n print(' |' , end=\"\")\r\n else:\r\n print(' O |' , end=\"\")\r\n print('\\n')\r\n" }, { "alpha_fraction": 0.47517919540405273, "alphanum_fraction": 
0.4863286316394806, "avg_line_length": 29.872880935668945, "blob_id": "d46f3d8c976b3c33f5c07856b42cc79f7007a992", "content_id": "7089fb6a81e5038c525567fb636194c5efc7092d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3767, "license_type": "permissive", "max_line_length": 72, "num_lines": 118, "path": "/gui.py", "repo_name": "viraatdas/Deep-tic-tac-toe", "src_encoding": "UTF-8", "text": "from tkinter import *\r\nfrom main import *\r\nimport const\r\n\r\nclass tile(Button):\r\n def __init__(self,id,mainframe):\r\n self.name = str(id)\r\n\r\n super().__init__(mainframe,\r\n text = \" \",\r\n height = 3,\r\n width = 7,\r\n borderwidth = 0,\r\n bg = \"Lightgray\",\r\n font= (\"Roboto\", 22),\r\n command = lambda: self.make_move(id))\r\n def update(self,player):\r\n # update the tile according to the player who is playing\r\n if player == 1:\r\n self[\"text\"] = \"X\"\r\n else:\r\n self[\"text\"] = \"O\"\r\n\r\n def make_move(self,action):\r\n global current_node\r\n global buttons\r\n if const.game.is_valid(action) == True:\r\n self.update(1)\r\n const.game.invert_board()\r\n r = - const.game.step(action)\r\n const.game.invert_board()\r\n current_node = const.mct[current_node.Child_nodes[action]]\r\n player = 0\r\n \r\n if const.game.terminal == False:\r\n #player 2 plays\r\n a = choose_move(current_node)\r\n buttons[a].update(0)\r\n r = const.game.step(a)\r\n current_node = const.mct[current_node.Child_nodes[a]]\r\n player = 1\r\n \r\n if const.game.terminal == True:\r\n global bottom_label\r\n for b in buttons:\r\n b[\"state\"] = \"disabled\"\r\n if r == 0:\r\n bottom_label[\"text\"] = \"Tie\"\r\n elif r == -1:\r\n bottom_label[\"text\"] = \"You won\"\r\n else:\r\n bottom_label[\"text\"] = \"You lost\"\r\n\r\ndef generate_buttons(num_rows, num_cols,mainframe):\r\n # generate as many buttons as the number of tiles in the board\r\n buttons = []\r\n for r in range(num_rows):\r\n for c in range(num_cols):\r\n new_button = tile(r*num_cols + c,mainframe)\r\n buttons.append(new_button)\r\n new_button.grid(row = r, column = c, padx=(1,1), pady=(1,1))\r\n return buttons\r\n\r\ndef restart():\r\n # restart the game\r\n global buttons\r\n global player\r\n global current_node\r\n player = random.randint(0,1) # choose player\r\n const.game.restart() # empty board\r\n current_node = const.mct[0] # root of the tree is current node\r\n\r\n if buttons != []:\r\n for b in buttons:\r\n b[\"state\"] = \"normal\"\r\n b[\"text\"] = \" \"\r\n global bottom_label\r\n bottom_label[\"text\"] = \"Make your move!\"\r\n\r\n# initialize\r\nbuttons = []\r\nrestart()\r\n\r\n# display\r\nroot = Tk()\r\n# root.attributes(\"-fullscreen\", True) -- this is not a nice fullscreen\r\ntopframe = Frame(root)\r\ntopframe.pack()\r\nmainframe = Frame(root)\r\nmainframe.pack()\r\nbottomframe = Frame(root)\r\nbottomframe.pack()\r\n\r\ntitle = Label(topframe,\r\n text=\"Tic-Tac-Toe Zero\",\r\n font= (\"Roboto\", 30))\r\ntitle.pack(pady = (32,32))\r\nbuttons = generate_buttons(3,3,mainframe)\r\nbottom_label = Label(bottomframe,\r\n text = \"make your move!\",\r\n font = (\"Roboto\", 14))\r\nbottom_label.pack(pady = (32,32))\r\nrestart_button = Button(bottomframe,\r\n text = \"Restart\",\r\n bg = \"DarkGray\",\r\n borderwidth = 0,\r\n font = (\"Roboto\",14),\r\n command = restart)\r\nrestart_button.pack(pady = (0,32))\r\n\r\n#choose move\r\nif player == 0:\r\n #if player 1 not random\r\n a = choose_move(current_node)\r\n buttons[a].update(player)\r\n r = 
const.game.step(a)\r\n\r\nroot.mainloop()\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.583190381526947, "alphanum_fraction": 0.583190381526947, "avg_line_length": 27.149999618530273, "blob_id": "041b80426f56408a4e7a87563177daaf2a280c01", "content_id": "d4a0ad12e894b32021abc021743cfbda51c5a270", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 583, "license_type": "permissive", "max_line_length": 65, "num_lines": 20, "path": "/File_storage.py", "repo_name": "viraatdas/Deep-tic-tac-toe", "src_encoding": "UTF-8", "text": "import os\r\nimport pickle\r\n\r\ndef save_mct(mct):\r\n mct_filename = \"mct.txt\"\r\n with open(mct_filename, \"wb\") as fp: #Pickling\r\n pickle.dump(mct, fp)\r\n print(\"Monte Carlo Tree saved correctly\")\r\n\r\ndef load_mct():\r\n mct_filename = \"mct.txt\"\r\n if os.path.isfile(mct_filename):\r\n # load existing file\r\n print(\"Found existing Monte Carlo Tree file. Opening it\")\r\n with open(mct_filename, \"rb\") as fp: # Unpickling\r\n mct = pickle.load(fp)\r\n else:\r\n print(\"Creating new Monte Carlo Tree.\")\r\n mct = []\r\n return mct\r\n" }, { "alpha_fraction": 0.48363161087036133, "alphanum_fraction": 0.5080750584602356, "avg_line_length": 33.24615478515625, "blob_id": "efb21420aa755c76df49e4e2cfe080bea13aa1d3", "content_id": "ea00cebe4ec39013ffa684455e6280781a931753", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4582, "license_type": "permissive", "max_line_length": 88, "num_lines": 130, "path": "/nn.py", "repo_name": "viraatdas/Deep-tic-tac-toe", "src_encoding": "UTF-8", "text": "\"\"\"\r\nNeural network\r\nMade by Lorenzo Mambretti\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nprint(\"tensorflow: \",tf.__version__)\r\nimport numpy as np\r\nimport random\r\nimport const\r\n\r\nclass NN:\r\n \"\"\" \r\n INPUT LAYER\r\n 27 neurons\r\n \"\"\"\r\n x = tf.placeholder(tf.float32,[None, 27], name='x')\r\n \r\n \"\"\"\r\n HIDDEN LAYER 1\r\n 21 neurons\r\n tanh\r\n \"\"\"\r\n with tf.name_scope('Hidden_layer_1') as scope:\r\n W1 = tf.Variable(tf.random_uniform([27,27], minval = -1, maxval = 1), name='W1')\r\n b1 = tf.Variable(tf.random_uniform([27], minval = -1, maxval = 1), name='b1')\r\n h1 = tf.tanh(tf.matmul(x, W1) + b1)\r\n \"\"\"\r\n HIDDEN LAYER 2\r\n 15 neurons\r\n tanh\r\n \"\"\"\r\n with tf.name_scope('Hidden_layer_2') as scope:\r\n W2 = tf.Variable(tf.random_uniform([27,21], minval = -1, maxval = 1), name='W2')\r\n b2 = tf.Variable(tf.random_uniform([21], minval = -1, maxval = 1), name='b2')\r\n h2 = tf.tanh(tf.matmul(h1, W2) + b2)\r\n \"\"\"\r\n HIDDEN LAYER 3\r\n 15 neurons\r\n tanh\r\n \"\"\"\r\n with tf.name_scope('Hidden_layer_3') as scope:\r\n W3 = tf.Variable(tf.random_uniform([21,15], minval = -1, maxval = 1), name='W3')\r\n b3 = tf.Variable(tf.random_uniform([15], minval = -1, maxval = 1), name='b3')\r\n h3 = tf.tanh(tf.matmul(h2, W3) + b3)\r\n \"\"\"\r\n OUTPUT LAYER\r\n 9 neurons\r\n tanh\r\n \"\"\"\r\n with tf.name_scope('Output_layer') as scope:\r\n W4 = tf.Variable(tf.random_uniform([15,9], minval = -1, maxval = 1))\r\n b4 = tf.Variable(tf.random_uniform([9], minval = -1, maxval = 1))\r\n y_ = tf.tanh(tf.matmul(h3, W4) + b4)\r\n\r\n y = tf.placeholder(tf.float32,[None, 9], name=\"y\")\r\n\r\n \"\"\"\r\n loss = mean squared error\r\n optimizer: adam\r\n or try the fastai library optimizers\r\n \"\"\"\r\n loss = tf.losses.mean_squared_error(y_,y)\r\n\r\n def __init__(self, lr = 0.00025, 
batch_size = 64):\r\n self.batch_size = batch_size\r\n self.train_step = tf.train.AdamOptimizer(lr).minimize(self.loss)\r\n \r\n # summaries\r\n tf.summary.scalar('loss', self.loss)\r\n self.summaries = tf.summary.merge_all()\r\n\r\n # start training session\r\n self.sess = tf.InteractiveSession()\r\n self.train_writer = tf.summary.FileWriter(const.cwd, self.sess.graph)\r\n tf.global_variables_initializer().run()\r\n self.training_mode = False\r\n\r\n def train(self, mct, iterations, training_steps):\r\n\r\n self.training_mode = True\r\n\r\n # create batches\r\n input_batch = np.zeros((self.batch_size, 27))\r\n output_batch = np.zeros((self.batch_size, 9))\r\n action_matrix = np.zeros(9, dtype=\"int\")\r\n \r\n for i in range(iterations):\r\n seed = random.randint(0, len(mct) - self.batch_size - 1)\r\n for b in range(self.batch_size):\r\n\r\n if not mct[seed + b].Child_nodes:\r\n # this node is not useful for training if it's not visited\r\n # don't count it and advance by 1 in the list\r\n b = b - 1\r\n # generate new point from where to look in the list\r\n seed = random.randint(0, len(mct) - self.batch_size - 1)\r\n \r\n else: \r\n input_batch[b] = mct[seed + b].board\r\n for a in range(9):\r\n if mct[seed + b].Child_nodes[a] != None:\r\n action_matrix[a] = mct[mct[seed + b].Child_nodes[a]].Q()\r\n else:\r\n action_matrix[a] = -1\r\n output_batch[b] = action_matrix\r\n\r\n for j in range(training_steps):\r\n summary, _ = self.sess.run([self.summaries, self.train_step],\r\n feed_dict={ self.x: input_batch,\r\n self.y: output_batch})\r\n self.train_writer.add_summary(summary, i)\r\n print(\"loss: \",self.sess.run(self.loss, feed_dict={self.x: input_batch,\r\n self.y: output_batch}))\r\n\r\n def run(self,input_data):\r\n \"\"\"\r\n PARAMS\r\n input_data a 27d representation of a single board\r\n\r\n RETURN\r\n v a 9d float array with the q values of all the actions\r\n \"\"\"\r\n\r\n if self.training_mode == True:\r\n v = self.sess.run(self.y_, feed_dict={ self.x: [input_data]})\r\n else:\r\n v = np.zeros(9,dtype=int)\r\n return v\r\n" }, { "alpha_fraction": 0.5780822038650513, "alphanum_fraction": 0.5835616588592529, "avg_line_length": 16.894737243652344, "blob_id": "7922535d2c991b4cf46cab2979ada6fe7d0f478f", "content_id": "81d59644fe8d02c72e761f3b19995c5bdbbe38fa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 365, "license_type": "permissive", "max_line_length": 33, "num_lines": 19, "path": "/const.py", "repo_name": "viraatdas/Deep-tic-tac-toe", "src_encoding": "UTF-8", "text": "import os\r\nfrom Games import TicTacToe\r\nfrom File_storage import *\r\n\r\nEPSILON = 0.1\r\nSANITY_CHECK = True\r\ncwd = os.getcwd()\r\ncwd = cwd + '\\\\tensorflow_logs'\r\n\r\ndef init():\r\n # create game\r\n global game\r\n game = TicTacToe()\r\n\r\n # initialize Monte Carlo tree\r\n global mct\r\n mct = load_mct()\r\n if mct == []:\r\n mct.append(Node(game))\r\n \r\n" }, { "alpha_fraction": 0.48233237862586975, "alphanum_fraction": 0.5035603642463684, "avg_line_length": 34.52744674682617, "blob_id": "ec398dd2b78ba86e801e4edfa07ae644e650e4f4", "content_id": "5768dac0eb9b1b0549ec9d7dd258a91b33cf61fe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14886, "license_type": "permissive", "max_line_length": 146, "num_lines": 419, "path": "/dd_ttt.py", "repo_name": "viraatdas/Deep-tic-tac-toe", "src_encoding": "UTF-8", "text": "\"\"\"\nSelf-learning Tic Tac Toe\nMade by Lorenzo 
Mambretti and Hariharan Sezhiyan\n\"\"\"\nimport random\nimport numpy as np\nimport tensorflow as tf\nimport time\nimport datetime\n\nclass State:\n board = np.zeros((3,3))\n terminal = False\n\ndef is_valid(action, state):\n if state.board[int(np.floor(action / 3))][action % 3] != 0:\n return False\n else:\n return True\n\ndef step(state, action):\n\n # insert\n state_ = State()\n state_.board = np.copy(state.board)\n row_index = int(np.floor(action / 3))\n col_index = action % 3\n state_.board[row_index][col_index] = 1\n\n # undecided\n terminal = 1\n\n # to check for 3 in a row horizontal\n for row in range(3):\n for col in range(3):\n if(state_.board[row][col] != 1):\n terminal = 0\n if(terminal == 1):\n state_.terminal = True\n return +1, state_\n else:\n terminal = 1\n\n # to check for 3 in a row vertical\n for col in range(3):\n for row in range(3):\n if(state_.board[row][col] != 1):\n terminal = 0\n if(terminal == 1):\n state_.terminal = True\n return +1, state_\n else:\n terminal = 1\n\n # diagonal top-left to bottom-right\n for diag in range(3):\n if(state_.board[diag][diag] != 1):\n terminal = 0\n if(terminal == 1):\n state_.terminal = True\n return +1, state_\n else:\n terminal = 1\n\n # diagonal bottom-left to top-right\n for diag in range(3):\n if(state_.board[2 - diag][diag] != 1):\n terminal = 0\n if(terminal == 1):\n state_.terminal = True\n return +1, state_\n else:\n terminal = 1\n\n # checks if board is filled completely\n for row in range(3):\n for col in range(3):\n if(state_.board[row][col] == 0):\n terminal = 0\n break\n if terminal == 1:\n state_.terminal = True\n\n return 0, state_\n\ndef save(W1, W2, B1, B2):\n ts = time.time()\n st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n filename = \"weights \"+ str(st)+\".npz\"\n np.savez(filename, W1, W2, B1, B2)\n print(\"The file has beeen saved successfully\")\n\ndef load():\n npzfile = np.load(\"weights.npz\")\n W1 = np.reshape(npzfile['arr_0'], (27, 18))\n W2 = np.reshape(npzfile['arr_1'], (18,9))\n b1 = np.reshape(npzfile['arr_2'], (18))\n b2 = np.reshape(npzfile['arr_3'], (9))\n return w1, w2, b1, b2\n\n \ndef invert_board(state):\n state_ = State()\n state_.board = np.copy(state.board)\n state_.terminal = state.terminal\n for row in range(3):\n for col in range(3):\n if(state.board[row][col] == 1):\n state_.board[row][col] = 2\n elif(state.board[row][col] == 2):\n state_.board[row][col] = 1\n return state_\n\ndef play_game():\n while(True):\n start_nb = input(\"If you would like to move first, enter 1. Otherwise, enter 2. 
\")\n start = int(start_nb)\n state = State()\n state.board = np.zeros((3,3))\n\n while not state.terminal:\n if start == 1:\n action = int(input(\"Please enter your move: \"))\n while(is_valid(action, state) == False):\n action = int(input(\"Please enter a correct move: \"))\n start = 0\n r, state = step(state, action)\n else:\n state = invert_board(state)\n action = player1.extract_policy(state)\n start = 1\n r, state = step(state, action)\n r = -r\n state = invert_board(state)\n\n print(state.board)\n \n if r == 0:\n print (\"Tie\")\n elif r == 1:\n print (\"You won\")\n else:\n print (\"You lost\")\n\ndef convert_state_representation(state):\n new_board = np.zeros(27)\n for row in range(3):\n for col in range(3):\n if(state[row][col] == 0):\n new_board[9 * row + 3 * col] = 1\n elif(state[row][col] == 1):\n new_board[9 * row + 3 * col + 1] = 1\n else:\n new_board[9 * row + 3 * col + 2] = 1\n\n return(new_board)\n\nclass DDQN(object):\n\n def __init__(self):\n self.x = tf.placeholder(tf.float32, [None, 27], name='x')\n self.x_ = tf.placeholder(tf.float32, [None, 27], name='x_')\n\n # for testing against random play\n self.tie_rate_value = 0.0\n self.win_rate_value = 0.0\n self.loss_rate_value = 0.0\n\n xavier = tf.contrib.layers.xavier_initializer(uniform=True,seed=None,dtype=tf.float32)\n\n # Q learner\n with tf.name_scope('Q-learner') as scope:\n with tf.name_scope('hidden_layer') as scope:\n self.W1 = tf.Variable(xavier([27, 18]))\n self.b1 = tf.Variable(xavier([18]))\n self.h1 = tf.tanh(tf.matmul(self.x, self.W1) + self.b1)\n self.h1_alt = tf.tanh(tf.matmul(self.x_, self.W1) + self.b1)\n with tf.name_scope('output_layer') as scope:\n self.W2 = tf.Variable(xavier([18,9]))\n self.b2 = tf.Variable(xavier([9]))\n self.y = tf.tanh(tf.matmul(self.h1, self.W2) + self.b2)\n self.y_alt = tf.stop_gradient(tf.tanh(tf.matmul(self.h1_alt, self.W2) + self.b2))\n self.action_t = tf.placeholder(tf.int32, [None, 2])\n self.q_learner = tf.gather_nd(self.y, self.action_t)\n\n # Q target\n with tf.name_scope('Q-target') as scope:\n with tf.name_scope('hidden_layer') as scope:\n self.W1_old = tf.placeholder(tf.float32, [27, 18], name = 'W1_old')\n self.b1_old = tf.placeholder(tf.float32, [18], name = 'b1_old')\n self.h1_old = tf.tanh(tf.matmul(self.x_, self.W1_old) + self.b1_old, name ='h1')\n with tf.name_scope('output_layer') as scope:\n self.W2_old =tf.placeholder(tf.float32, [18, 9], name='W2_old')\n self.b2_old =tf.placeholder(tf.float32, [9], name='b2_old')\n self.y_old = tf.tanh(tf.matmul(self.h1_old, self.W2_old) + self.b2_old, name='y_old')\n\n self.l_done = tf.placeholder(tf.bool, [None], name='done')\n self.reward = tf.placeholder(tf.float32, [None], name='reward')\n self.gamma = tf.constant(0.99, name='gamma')\n self.qt_best_action = tf.argmax(self.y_alt, axis = 1, name='qt_best_action')\n self.qt_selected_action_onehot = tf.one_hot(indices = self.qt_best_action, depth = 9)\n self.qt= tf.reduce_sum( tf.multiply( self.y_old, self.qt_selected_action_onehot ) , reduction_indices=[1,] )\n self.q_target = tf.where(self.l_done, self.reward, self.reward + (self.gamma * self.qt), name='selected_max_qt')\n\n self.loss = tf.losses.mean_squared_error(self.q_target, self.q_learner)\n self.tie_rate = tf.placeholder(tf.float32, name='tie_rate')\n self.win_rate = tf.placeholder(tf.float32, name='win_rate')\n self.loss_rate = tf.placeholder(tf.float32, name='loss_rate')\n self.train_step = tf.train.RMSPropOptimizer(0.00020, momentum=0.95, use_locking=False, centered=False, 
name='RMSProp').minimize(self.loss)\n tf.summary.scalar('loss', self.loss)\n tf.summary.scalar('tie_rate', self.tie_rate)\n tf.summary.scalar('win_rate', self.win_rate)\n tf.summary.scalar('loss_rate', self.loss_rate)\n self.merged = tf.summary.merge_all()\n \n tf.global_variables_initializer().run()\n\n def update_old_weights(self):\n self.saved_W1 = self.W1.eval()\n self.saved_W2 = self.W2.eval()\n self.saved_b1 = self.b1.eval()\n self.saved_b2 = self.b2.eval()\n\n def compute_Q_values(self,state):\n # computes associated Q value based on NN function approximator\n q_board = [np.copy(convert_state_representation(state.board))]\n\n #NN forward propogation\n q_values = sess.run(self.y, {self.x: q_board})\n q_values = np.reshape(q_values, 9)\n return (q_values)\n\n def extract_policy(self,state):\n policy = None\n q_values = self.compute_Q_values(state)\n for action in range(9):\n if is_valid(action,state):\n if policy == None:\n policy = action\n best_q = q_values[action]\n else:\n new_q = q_values[action]\n if new_q > best_q:\n policy = action\n best_q = new_q\n return policy\n\n def train(self):\n for _ in range(4):\n # take a random mini_batch\n mini_batch = experience_replay[np.random.choice(experience_replay.shape[0], batch_size), :]\n\n # select state, state_, action, and reward from the mini batch\n state = np.concatenate(mini_batch[:,0]).reshape((batch_size, -1))\n a = np.transpose(np.append([np.arange(batch_size)],[np.array(mini_batch[:,1])], axis = 0))\n r = mini_batch[:,2]\n state_ = np.concatenate(mini_batch[:,3]).reshape((batch_size, -1))\n done = mini_batch[:,4]\n \n # is the list of all rewards within the mini_batch\n summary, _= sess.run([self.merged, self.train_step], { self.x: state,\n self.x_ : state_,\n self.W1_old : self.saved_W1,\n self.W2_old : self.saved_W2,\n self.b1_old : self.saved_b1,\n self.b2_old : self.saved_b2,\n self.l_done : done,\n self.reward : r, \n self.action_t : a,\n self.tie_rate : self.tie_rate_value,\n self.win_rate : self.win_rate_value,\n self.loss_rate : self.loss_rate_value})\n train_writer.add_summary(summary, e)\n\n def random_play_test(self):\n numTests = 100\n numWins = 0\n numLosses = 0\n numTies = 0\n state = State()\n\n for _ in range(numTests):\n state.board = np.zeros((3,3))\n state.terminal = False\n turn = 1\n while not state.terminal:\n if turn == 1:\n action = self.extract_policy(state) # agent action\n r, state = step(state, action)\n turn = 0\n else:\n state = invert_board(state)\n action = np.random.randint(9)\n while(is_valid(action, state) == False):\n action = np.random.randint(9)\n r, state = step(state, action)\n r = -r\n state = invert_board(state)\n turn = 1\n\n if r == 0:\n numTies += 1\n elif r == 1:\n numWins += 1\n else:\n numLosses += 1\n\n self.tie_rate_value = numTies\n self.win_rate_value = numWins\n self.loss_rate_value = numLosses\n\nsess = tf.InteractiveSession()\ntrain_writer = tf.summary.FileWriter('tensorflow_logs', sess.graph)\n\n\n# Global variables\nglobal experience_replay\nglobal batch_size\nglobal e\n\n# Hyperparameters\nbatch_size = 64\nepisodes = 100000\nepsilon_minimum = 0.1\nn0 = 100\nstart_size = 500\nupdate_target_rate = 50\n\n# Create experience_replay\nexperience_replay = np.zeros((0,5))\n\nprint(\"All set. 
Start playing\")\n\n# Create players\nplayer1 = DDQN()\n#player2 = DDQN() not used yet *** future improvements coming\n\nfor e in range(episodes):\n # print(\"episode \",e)\n state = State()\n if e >= start_size:\n epsilon = max(n0 / (n0 + (e - start_size)), epsilon_minimum)\n else:\n epsilon = 1\n \n if e % 2 == 1:\n # this is player 2's turn\n state = invert_board(state)\n if random.random() < epsilon:\n # take random action\n action_pool = np.random.choice(9,9, replace = False)\n for a in action_pool:\n if is_valid(a, state):\n action = a\n break\n else:\n # take greedy action\n action = player1.extract_policy(state)\n\n r, state = step(state, action)\n state = invert_board(state)\n r = -r \n \n while not state.terminal:\n # this section is player 1's turn\n # select epsilon-greedy action\n if random.random() < epsilon:\n # take random action\n action_pool = np.random.choice(9,9, replace = False)\n for a in action_pool:\n if is_valid(a, state):\n action = a\n break\n else:\n # take greedy action\n action = player1.extract_policy(state)\n\n r, state_ = step(state, action)\n\n if not state_.terminal:\n # this is player 2's turn\n state_ = invert_board(state_)\n if random.random() < epsilon:\n # take random action\n action_pool = np.random.choice(9,9, replace = False)\n for a in action_pool:\n if is_valid(a, state_):\n action2 = a\n break\n else:\n # take greedy action\n action2 = player1.extract_policy(state_) # in the future, it will be player2\n\n r, state_ = step(state_, action2)\n state_ = invert_board(state_)\n r = -r \n\n s = convert_state_representation(np.copy(state.board))\n s_ = convert_state_representation(np.copy(state_.board))\n done = state_.terminal\n D = (s, action, r, s_, done)\n experience_replay = np.append(experience_replay, [D], axis = 0)\n state.board = np.copy(state_.board)\n state.terminal = state_.terminal\n\n if e == start_size: print(\"Start Training\")\n if e >= start_size:\n if (e % update_target_rate == 0):\n print(e)\n # here save the W1,W2,b1,B2\n player1.update_old_weights()\n player1.random_play_test()\n\n player1.train()\n\n\nprint(\"Training completed\")\nsave(player1.W1.eval(), player1.W2.eval(), player1.b1.eval(), player1.b2.eval())\nplay_game()\n" }, { "alpha_fraction": 0.5123236775398254, "alphanum_fraction": 0.5267400145530701, "avg_line_length": 28.86602783203125, "blob_id": "99bed0d46dff419d15a1b8fd024ebb7167fac383", "content_id": "d39ee62ec5c004041e8b488f67bce31aa50568db", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6451, "license_type": "permissive", "max_line_length": 129, "num_lines": 209, "path": "/main.py", "repo_name": "viraatdas/Deep-tic-tac-toe", "src_encoding": "UTF-8", "text": "\"\"\"\r\nSelf-learning Tic Tac Toe\r\nMade by Lorenzo Mambretti\r\n\r\nLast Update: 8/10/2018 11:38 AM (Lorenzo)\r\n\"\"\"\r\nimport random\r\nimport numpy as np\r\nimport progressbar\r\nfrom File_storage import *\r\nfrom nn import NN\r\nfrom Games import *\r\nimport const\r\nimport math\r\nimport tensorflow as tf\r\nimport argparse\r\n\r\nclass Node:\r\n global nnet\r\n def __init__(self):\r\n self.N = 0\r\n self.V = 0\r\n self.Child_nodes = []\r\n self.board = const.game.board2array()\r\n \r\n def update(self,r):\r\n self.V = self.V + r\r\n self.N = self.N + 1\r\n\r\n def Q(self):\r\n c_puct = 0.2 #hyperparameter\r\n P = np.max(nnet.run(self.board))\r\n \r\n if self.N == 0:\r\n return c_puct * P * math.sqrt(self.N)/(1 + self.N)\r\n else:\r\n if self.Child_nodes == []:\r\n Q = self.V\r\n 
else:\r\n Q = ((self.V * self.N) + P)/(self.N + 1)\r\n return Q / self.N\r\n\r\ndef check_new_node(current_node):\r\n \"\"\"\r\n this function check if it is the first time the player visited the current node\r\n if it is the first time, create all the child nodes\r\n and append them in the Monte Carlo Tree (const.mct)\r\n \"\"\"\r\n if current_node.N == 0:\r\n # generate child nodes\r\n for a in range(9):\r\n if const.game.is_valid(a) == True:\r\n current_node.Child_nodes.append(len(const.mct))\r\n const.mct.append(Node())\r\n else:\r\n current_node.Child_nodes.append(None)\r\n\r\n \r\n\r\ndef random_move(current_node):\r\n\r\n check_new_node(current_node) #check if it is a new node\r\n\r\n #random action\r\n a = random.randint(0,const.game.action_space - 1)\r\n while const.game.is_valid(a) == False:\r\n a = (a + 1) % const.game.action_space\r\n return a\r\n\r\ndef choose_move(current_node):\r\n \r\n # if is the first time you visit this node\r\n if current_node.N == 0:\r\n # generate child nodes\r\n for a in range(9):\r\n if const.game.is_valid(a) == True:\r\n current_node.Child_nodes.append(len(const.mct))\r\n const.mct.append(Node())\r\n else:\r\n current_node.Child_nodes.append(None) \r\n\r\n #random action\r\n a = random.randint(0,8)\r\n while const.game.is_valid(a) == False:\r\n a = (a + 1) % 9\r\n return a\r\n\r\n # if you already visited this node\r\n else:\r\n best_a = 0\r\n best_q = -2\r\n for c in current_node.Child_nodes:\r\n if c != None:\r\n if const.mct[c].Q() > best_q:\r\n best_q = const.mct[c].Q()\r\n best_a = current_node.Child_nodes.index(c)\r\n #print(const.mct[c].Q())\r\n #else:\r\n #print(\"None\")\r\n return best_a\r\n\r\ndef simulation(episodes, TRAINING = False):\r\n node_list = [[]]\r\n\r\n # progressbar\r\n bar = progressbar.ProgressBar(maxval=episodes, \\\r\n widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])\r\n bar.start()\r\n \r\n for e in range(episodes):\r\n\r\n if (e + 1) % (episodes/100) == 0:\r\n bar.update(e)\r\n \r\n player = e % 2 # choose player\r\n const.game.restart() # empty board\r\n node_list.clear()\r\n current_node = const.mct[0] # root of the tree is current node\r\n node_list.append([0,player])\r\n\r\n # while state not terminal\r\n while const.game.terminal == False:\r\n #choose move\r\n if player == 0:\r\n #if player 1 not random\r\n a = choose_move(current_node)\r\n r = const.game.step(a)\r\n else:\r\n #if player 2 epsilon-greedy\r\n if random.random() < const.EPSILON:\r\n a = random_move(current_node)\r\n else:\r\n a = choose_move(current_node)\r\n const.game.invert_board()\r\n r = - const.game.step(a)\r\n const.game.invert_board()\r\n\r\n current_node = const.mct[current_node.Child_nodes[a]]\r\n player = (player + 1) % 2\r\n node_list.append([const.mct.index(current_node),player])\r\n #save state in node list\r\n\r\n #update all nodes\r\n for node in node_list:\r\n if node[1] == 0:\r\n const.mct[node[0]].update(-r)\r\n else:\r\n const.mct[node[0]].update(r)\r\n\r\n # train neural network\r\n nnet.train(const.mct, 100, 2)\r\n \r\n bar.finish()\r\n\r\ndef play():\r\n import gui\r\n\r\ndef train():\r\n global nnet\r\n \r\n if const.SANITY_CHECK == True:\r\n if len(const.mct) > 1000:\r\n # sanity check\r\n print(\"Single batch overfit.\")\r\n nnet.train(const.mct, 1, 10000)\r\n # SIMULATION: playing and updating Monte Carlo Tree\r\n print(\"Simulating episodes\")\r\n if len(const.mct) < 30000:\r\n # const.mct is small, make a lot of simulations\r\n print(\"Simulation without neural network\")\r\n simulation(95000)\r\n 
# TRAINING: neural network is trained while keeping playing\r\n print(\"Neural network training\")\r\n simulation(5000, TRAINING = True)\r\n else:\r\n # TRAINING: neural network is trained on the Monte Carlo Tree\r\n print(\"Neural network training. This will take a while\")\r\n for _ in range(10):\r\n nnet.train(const.mct,10000,2)\r\n print(\"Simulation terminated.\")\r\n # SAVE FILE\r\n try:\r\n saver.save(nnet.sess, \"/tmp/model.ckpt\")\r\n print(\"/tmp/model.ckpt saved correctly.\")\r\n except:\r\n print(\"ERROR: an error has occured while saving the weights. The session will not be available when closing the program\")\r\n\r\n save_mct(const.mct)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n const.init()\r\n\r\n # create neural network\r\n nnet = NN(0.0001, 64)\r\n saver = tf.train.Saver()\r\n try:\r\n saver.restore(nnet.sess, \"/tmp/model.ckpt\")\r\n nnet.training_mode = True\r\n except:\r\n print(\"/tmp/model.ckpt not found. Training new session (nnet.sess)\")\r\n\r\n parser = argparse.ArgumentParser(description='Train or play.')\r\n parser.add_argument('--play', dest='accumulate', action='store_const',\r\n const=train, default=play,\r\n help='play a const.game (default: train)')\r\n\r\n args = parser.parse_args()\r\n print(args.accumulate())\r\n" }, { "alpha_fraction": 0.492044061422348, "alphanum_fraction": 0.516086757183075, "avg_line_length": 32.153621673583984, "blob_id": "b73f574908e20570c974f27aa3a4f6282f79f780", "content_id": "5531aeadd76a0f1f6b54326f55a7eaae87d4cc2c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11438, "license_type": "permissive", "max_line_length": 128, "num_lines": 345, "path": "/ttt.py", "repo_name": "viraatdas/Deep-tic-tac-toe", "src_encoding": "UTF-8", "text": "\"\"\"\nSelf-learning Tic Tac Toe\nMade by Lorenzo Mambretti and Hariharan Sezhiyan\n\n\"\"\"\n\nimport random\nimport numpy as np\nimport tensorflow as tf\n\nclass State:\n board = np.zeros((3,3))\n terminal = False\n\ndef is_valid(action, state):\n if state.board[int(np.floor(action / 3))][action % 3] != 0:\n return False\n else:\n return True\n\ndef step(state, action):\n\n # insert\n state_ = State()\n state_.board = np.copy(state.board)\n row_index = int(np.floor(action / 3))\n col_index = action % 3\n state_.board[row_index][col_index] = 1\n\n # undecided\n terminal = 1\n\n # to check for 3 in a row horizontal\n for row in range(3):\n for col in range(3):\n if(state_.board[row][col] != 1):\n terminal = 0\n if(terminal == 1):\n state_.terminal = True\n return +1, state_\n else:\n terminal = 1\n\n # to check for 3 in a row vertical\n for col in range(3):\n for row in range(3):\n if(state_.board[row][col] != 1):\n terminal = 0\n if(terminal == 1):\n state_.terminal = True\n return +1, state_\n else:\n terminal = 1\n\n # diagonal top-left to bottom-right\n for diag in range(3):\n if(state_.board[diag][diag] != 1):\n terminal = 0\n if(terminal == 1):\n state_.terminal = True\n return +1, state_\n else:\n terminal = 1\n\n # diagonal bottom-left to top-right\n for diag in range(3):\n if(state_.board[2 - diag][diag] != 1):\n terminal = 0\n if(terminal == 1):\n state_.terminal = True\n return +1, state_\n else:\n terminal = 1\n\n # checks if board is filled completely\n for row in range(3):\n for col in range(3):\n if(state_.board[row][col] == 0):\n terminal = 0\n break\n if terminal == 1:\n state_.terminal = True\n\n return 0, state_\n\ndef save(W1, W2, B1, B2):\n np.savez(\"weights.npz\", W1, W2, B1, B2)\n print(\"file weights.txt 
has beeen updated successfully\")\n\ndef load():\n npzfile = np.load(\"weights.npz\")\n W1 = np.reshape(npzfile['arr_0'], (27, 18))\n W2 = np.reshape(npzfile['arr_1'], (18,9))\n b1 = np.reshape(npzfile['arr_2'], (18))\n b2 = np.reshape(npzfile['arr_3'], (9))\n return w1, w2, b1, b2\n\ndef extract_policy(state):\n policy = None\n q_values = compute_Q_values(state)\n for action in range(9):\n if is_valid(action,state):\n if policy == None:\n policy = action\n best_q = q_values[action]\n else:\n new_q = q_values[action]\n if new_q > best_q:\n policy = action\n best_q = new_q\n return policy\n \ndef invert_board(state):\n state_ = State()\n state_.board = np.copy(state.board)\n state_.terminal = state.terminal\n for row in range(3):\n for col in range(3):\n if(state.board[row][col] == 1):\n state_.board[row][col] = 2\n elif(state.board[row][col] == 2):\n state_.board[row][col] = 1\n\n return state_\n\ndef play_game():\n while(True):\n start_nb = input(\"If you would like to move first, enter 1. Otherwise, enter 2. \")\n start = int(start_nb)\n state = State()\n state.board = np.zeros((3,3))\n\n while not state.terminal:\n if start == 1:\n action = int(input(\"Please enter your move: \"))\n while(is_valid(action, state) == False):\n action = int(input(\"Please enter a correct move: \"))\n start = 0\n r, state = step(state, action)\n else:\n state = invert_board(state)\n action = extract_policy(state)\n start = 1\n r, state = step(state, action)\n r = -r\n state = invert_board(state)\n\n print(state.board)\n \n if r == 0:\n print (\"Tie\")\n elif r == 1:\n print (\"You won\")\n else:\n print (\"You lost\")\n\ndef convert_state_representation(state):\n new_board = np.zeros(27)\n for row in range(3):\n for col in range(3):\n if(state[row][col] == 0):\n new_board[9 * row + 3 * col] = 1\n elif(state[row][col] == 1):\n new_board[9 * row + 3 * col + 1] = 1\n else:\n new_board[9 * row + 3 * col + 2] = 1\n\n return(new_board)\n\ndef compute_Q_values(state):\n # computes associated Q value based on NN function approximator\n q_board = np.zeros((1,27))\n q_board = [np.copy(convert_state_representation(state.board))]\n\n #NN forward propogation\n q_values = sess.run(y, feed_dict = {x: q_board})\n q_values = np.reshape(q_values, 9)\n return (q_values)\n\ndef train(experience_replay, saved_W1, saved_W2, saved_b1, saved_b2):\n # can modify batch size here\n batch_size = 32\n\n # take a random mini_batch\n mini_batch = experience_replay[np.random.choice(experience_replay.shape[0], batch_size), :]\n\n # select state, state_, action, and reward from the mini batch\n state = np.concatenate(mini_batch[:,0]).reshape((batch_size, -1))\n act = np.array(mini_batch[:,1])\n act = np.append([np.arange(batch_size)],[act], axis = 0)\n act = np.transpose(act)\n r = mini_batch[:,2]\n state_ = np.concatenate(mini_batch[:,3]).reshape((batch_size, -1))\n done = mini_batch[:,4]\n \n # is the list of all rewards within the mini_batch\n \n summary, _= sess.run([merged, train_step], feed_dict={ x: state,\n x_old : state_,\n W1_old : saved_W1,\n W2_old : saved_W2,\n b1_old : saved_b1,\n b2_old : saved_b2,\n l_done : done,\n reward : r, \n action_t : act\n })\n train_writer.add_summary(summary)\n\n# Q learner neural network\nwith tf.name_scope('Q-learner') as scope:\n x = tf.placeholder(tf.float32, [None, 27], name='x')\n with tf.name_scope('hidden_layer') as scope:\n W1 = tf.get_variable(\"W1\", shape=[27, 18],\n initializer=tf.contrib.layers.xavier_initializer())\n b1 = tf.get_variable(\"b1\", shape=[18],\n 
initializer=tf.contrib.layers.xavier_initializer())\n h1 = tf.tanh(tf.matmul(x, W1) + b1)\n with tf.name_scope('output_layer') as scope:\n W2 = tf.get_variable(\"W2\", shape=[18, 9],\n initializer=tf.contrib.layers.xavier_initializer())\n b2 = tf.get_variable(\"b2\", shape=[9],\n initializer=tf.contrib.layers.xavier_initializer())\n y = tf.tanh(tf.matmul(h1, W2) + b2)\n action_t = tf.placeholder(tf.int32, [None, 2])\n\n q_learner = tf.gather_nd(y, action_t)\n\n# Q target neural network\nwith tf.name_scope('Q-target') as scope:\n x_old = tf.placeholder(tf.float32, [None, 27], name='x_old')\n with tf.name_scope('hidden_layer') as scope:\n W1_old = tf.placeholder(tf.float32, [27, 18], name='W1_old')\n b1_old = tf.placeholder(tf.float32, [18], name='b1_old')\n h1_old = tf.tanh(tf.matmul(x_old, W1_old) + b1_old, name='h1')\n with tf.name_scope('output_layer') as scope:\n W2_old =tf.placeholder(tf.float32, [18, 9], name='W2_old')\n b2_old =tf.placeholder(tf.float32, [9], name='b2_old')\n y_old = tf.tanh(tf.matmul(h1_old, W2_old) + b2_old, name='y_old')\n\n l_done = tf.placeholder(tf.bool, [None])\n reward = tf.placeholder(tf.float32, [None])\n gamma = tf.constant(0.99, name='gamma')\n qt = tf.reduce_max(y_old, axis = 1, name='maximum_qt')\n q_target = tf.where(l_done, reward, reward + (gamma * qt), name='selected_max_qt')\n\nwith tf.name_scope('loss') as scope:\n loss = tf.losses.mean_squared_error(q_target, q_learner)\n \n#train_step = tf.train.GradientDescentOptimizer(0.03).minimize(loss)\ntrain_step = tf.train.RMSPropOptimizer(0.00025, momentum=0.95, use_locking=False, centered=False, name='RMSProp').minimize(loss)\n\ntf.summary.scalar('loss', loss)\nmerged = tf.summary.merge_all()\n\nsess = tf.InteractiveSession()\ntrain_writer = tf.summary.FileWriter('tensorflow_logs', sess.graph)\ntf.global_variables_initializer().run()\n\nepisodes = 100000\nn0 = 100.0\nstart_size = 500\nexperience_replay = np.zeros((0,5))\n\nprint(\"All set. 
Start epoch\")\n\nfor e in range(episodes):\n # print(\"episode \",e)\n state = State()\n if e >= start_size:\n epsilon = max(n0 / (n0 + (e- start_size)), 0.1)\n else: epsilon = 1\n \n if e % 2 == 1:\n # this is player 2's turn\n state = invert_board(state)\n if random.random() < epsilon:\n # take random action\n action_pool = np.random.choice(9,9, replace = False)\n for a in action_pool:\n if is_valid(a, state):\n action = a\n break\n else:\n # take greedy action\n action = extract_policy(state)\n\n r, state = step(state, action)\n state = invert_board(state)\n r = -r \n \n while not state.terminal:\n # this section is player 1's turn\n # select epsilon-greedy action\n if random.random() < epsilon:\n # take random action\n action_pool = np.random.choice(9,9, replace = False)\n for a in action_pool:\n if is_valid(a, state):\n action = a\n break\n else:\n # take greedy action\n action = extract_policy(state)\n\n r, state_ = step(state, action)\n\n if not state_.terminal:\n # this is player 2's turn\n state_ = invert_board(state_)\n if random.random() < epsilon:\n # take random action\n action_pool = np.random.choice(9,9, replace = False)\n for a in action_pool:\n if is_valid(a, state_):\n action2 = a\n break\n else:\n # take greedy action\n action2 = extract_policy(state_)\n\n r, state_ = step(state_, action2)\n state_ = invert_board(state_)\n r = -r \n\n s = convert_state_representation(np.copy(state.board))\n s_ = convert_state_representation(np.copy(state_.board))\n done = state_.terminal\n D = (s, action, r, s_, done)\n experience_replay = np.append(experience_replay, [D], axis = 0)\n state.board = np.copy(state_.board)\n state.terminal = state_.terminal\n\n if e == start_size: print(\"Start Training\")\n if e >= start_size:\n if((e % 50) == 0):\n print(\"Episode:\",e)\n # here save the W1,W2,b1,B2\n saved_W1 = W1.eval()\n saved_W2 = W2.eval()\n saved_b1 = b1.eval()\n saved_b2 = b2.eval()\n train(experience_replay, saved_W1, saved_W2, saved_b1, saved_b2)\nprint(\"Training completed\")\nplay_game()\n" } ]
8
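The Q-learning script in the record above builds its training target with tf.where(l_done, reward, reward + gamma * max Q_old). A minimal NumPy sketch of that same target rule, with synthetic stand-in inputs rather than the script's tensors:

import numpy as np

def q_targets(rewards, done, q_old_values, gamma=0.99):
    # Terminal transitions keep the bare reward; non-terminal ones bootstrap
    # from the frozen target network's best action value, as in the script.
    max_qt = q_old_values.max(axis=1)
    return np.where(done, rewards, rewards + gamma * max_qt)

# Synthetic batch of two transitions with 9 actions each.
print(q_targets(np.array([1.0, 0.0]),
                np.array([True, False]),
                np.arange(18, dtype=float).reshape(2, 9)))
# -> [ 1.   16.83]  (second entry is 0 + 0.99 * 17)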
alefaggravo/quackers
https://github.com/alefaggravo/quackers
9faf10820442a4e1a31dcba04a9195685a44e0b0
7e5d93b0075a4b69a6f7e556ec62e9a31f31e1d0
5534ed9eab07060163c6fa6cf0d5e0a377abb22e
refs/heads/master
2023-03-18T17:22:54.414427
2020-07-30T19:41:43
2020-07-30T19:41:43
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6068655252456665, "alphanum_fraction": 0.6097261905670166, "avg_line_length": 33.627357482910156, "blob_id": "8851733ae78cd42cb9c22f02ba81b2342d90cf60", "content_id": "45ce961947cc218bd221b8782f839a83be7bc899", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7341, "license_type": "no_license", "max_line_length": 87, "num_lines": 212, "path": "/quackers/core.py", "repo_name": "alefaggravo/quackers", "src_encoding": "UTF-8", "text": "import json\nimport logging\nimport os\nimport random\nfrom copy import deepcopy\nfrom datetime import datetime\n\nimport dotenv\nimport slack\nfrom airtable import Airtable\n\nfrom quackers.data import error_modal, start_modal\nfrom quackers.helpers import fire_and_forget\n\nDEBUG = False\n\nif DEBUG:\n dotenv.load_dotenv(\".env.testing\")\nelse:\n dotenv.load_dotenv(\".env\")\n\nclient = slack.WebClient(token=os.environ[\"BOT_USER_OAUTH_ACCESS_TOKEN\"])\n\nse_students = Airtable(os.environ.get('SE_AIRTABLE_BASE_ID'), 'Students')\nse_instructors = Airtable(os.environ.get('SE_AIRTABLE_BASE_ID'), 'Instructors')\nse_questions = Airtable(os.environ.get('SE_AIRTABLE_BASE_ID'), 'Quackers Questions')\n\nux_students = Airtable(os.environ.get('UX_AIRTABLE_BASE_ID'), 'Students')\nux_instructors = Airtable(os.environ.get('UX_AIRTABLE_BASE_ID'), 'Instructors')\nux_questions = Airtable(os.environ.get('UX_AIRTABLE_BASE_ID'), 'Quackers Questions')\n\nlogger = logging.getLogger('gunicorn.error')\n\n\ndef post_message_to_coaches(user, channel, question, info, client, channel_map):\n logger.info(f'Posting question from {user} to {channel}!')\n ch = channel_map.get_coach_channel(channel)\n message = (\n f\"Received request for help from @{user} with the following info:\\n\\n\"\n f\"Question: {question}\\n\"\n f\"Additional info: {info}\"\n )\n\n client.chat_postMessage(\n channel=ch,\n blocks=[\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": message\n }\n }\n ],\n icon_emoji=\":quackers:\"\n )\n\n\ndef post_to_airtable(user_id, slack_username, channel, channel_map, question, info):\n # We want to log both student interactions and instructor interactions.\n # We'll check the student table first (because it's most likely that a\n # student is the one using the system), then check for an instructor.\n # The mapping in main.py is used to select which set of airtable instances\n # we check, mostly just to save time.\n #\n # ...and if we don't get a positive response from AirTable? 
Send it to\n # Unresolved User.\n\n # make pycharm happy\n person_id = None\n option = None\n\n base = channel_map.get_base(channel).lower() # .lower() == safety check\n\n if base == \"se\":\n airtable_target = se_questions\n search_options = [\n {'table': se_students, 'is_student': True},\n {'table': se_instructors, 'is_student': False},\n ]\n elif base == \"ux\":\n airtable_target = ux_questions\n search_options = [\n {'table': ux_students, 'is_student': True},\n {'table': ux_instructors, 'is_student': False},\n ]\n else:\n raise Exception(f\"No search options found for Airtable base {base}\")\n\n for option in search_options:\n if person := option['table'].search('Slack ID', user_id):\n person_id = person[0]['id']\n break\n\n if not person_id:\n # we didn't find anyone with the right Slack ID in Airtable, so we'll force\n # the next set of checks to return None for each of the questions.\n option = {}\n\n student_id, instructor_id, unresolved_user_id = (\n person_id if option.get(\"is_student\") else None,\n person_id if not option.get(\"is_student\") else None,\n slack_username if not person_id else \"\"\n )\n\n data = {\n 'Question': question,\n 'Additional Info': info,\n 'Channel': channel,\n 'Student': [student_id] if student_id else None,\n 'Instructor': [instructor_id] if instructor_id else None,\n 'Unresolved User': unresolved_user_id if unresolved_user_id else None,\n 'Date': datetime.now().isoformat()\n }\n\n airtable_target.insert(data)\n\n\ndef post_message_to_user(user_id, channel, channel_map, question, emoji_list, client):\n channel = channel_map.get_channel_id(channel)\n client.chat_postEphemeral(\n user=user_id,\n channel=channel,\n text=(\n \"Thanks for reaching out! One of the coaches or facilitators will be\"\n \" with you shortly! :{}: Your question was: {}\".format(\n random.choice(emoji_list), question\n )\n )\n )\n\n\n@fire_and_forget\ndef process_question_followup(data, channel_map, emoji_list):\n # the payload is a dict... as a string.\n data['payload'] = json.loads(data['payload'])\n\n # TODO: add example response from slack\n # slack randomizes the block names. That means the location that the response will\n # be in won't always be the same. We need to pull the ID out of the rest of the\n # response before we go hunting for the data we need.\n # Bonus: every block will have an ID! Just... 
only one of them will be right.\n channel = None\n original_q = None\n addnl_info_block_id = None\n user_id = None\n\n for block in data['payload']['view']['blocks']:\n if block.get('type') == \"input\":\n addnl_info_block_id = block.get('block_id')\n if block.get('type') == \"section\":\n previous_data = block['text']['text'].split(\"\\n\")\n original_q = previous_data[0][previous_data[0].index(\":\") + 2:]\n channel = previous_data[1][previous_data[1].index(\":\") + 2:]\n if block.get('type') == \"context\":\n user_id = block['elements'][0]['text'].split(':')[2].strip()\n\n dv = data['payload']['view']\n\n additional_info = dv['state']['values'][addnl_info_block_id]['ml_input']['value']\n username = data['payload']['user']['username']\n\n post_message_to_coaches(\n user=username,\n channel=channel,\n question=original_q,\n info=additional_info,\n client=client,\n channel_map=channel_map\n )\n post_to_airtable(\n user_id, username, channel, channel_map, original_q, additional_info\n )\n post_message_to_user(\n user_id=user_id,\n channel=channel,\n channel_map=channel_map,\n question=original_q,\n emoji_list=emoji_list,\n client=client\n )\n\n\ndef process_question(data, channel_map):\n if trigger_id := data.get('trigger_id'):\n # first we need to verify that we're being called in the right place\n if data.get('channel_name') not in channel_map.keys():\n client.views_open(\n trigger_id=trigger_id,\n view=error_modal\n )\n return (\"\", 200)\n\n # copy the modal so that we don't accidentally modify the version in memory.\n # the garbage collector will take care of the copies later.\n start_modal_copy = deepcopy(start_modal)\n # stick the original question they asked and the channel they asked from\n # into the modal so we can retrieve it in the next section\n start_modal_copy['blocks'][0]['text']['text'] = \\\n start_modal['blocks'][0]['text']['text'].format(\n data.get('text'), data.get('channel_name')\n )\n\n start_modal_copy['blocks'][4]['elements'][0]['text'] = \\\n start_modal['blocks'][4]['elements'][0]['text'].format(data.get('user_id'))\n\n client.views_open(\n trigger_id=trigger_id,\n view=start_modal_copy\n )\n # return an empty string as fast as possible per slack docs\n return (\"\", 200)\n" }, { "alpha_fraction": 0.7241379022598267, "alphanum_fraction": 0.7241379022598267, "avg_line_length": 17.66666603088379, "blob_id": "21e4c653c7469fb6fe49d74df85af9e8a730ba04", "content_id": "8a0c79a65c0d059b442872308797054364f9b266", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 58, "license_type": "no_license", "max_line_length": 35, "num_lines": 3, "path": "/README.md", "repo_name": "alefaggravo/quackers", "src_encoding": "UTF-8", "text": "Gunicorn stuff: /etc/systemd/system\n\nnginx: /etc/nginx/\n\n\n" }, { "alpha_fraction": 0.5704131722450256, "alphanum_fraction": 0.5917370319366455, "avg_line_length": 26.790122985839844, "blob_id": "92cf62c20b09cc76720f44e5ef9177725d3ad83f", "content_id": "d0c34d7f77d981852b8336125af05335a16e98ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2251, "license_type": "no_license", "max_line_length": 82, "num_lines": 81, "path": "/main.py", "repo_name": "alefaggravo/quackers", "src_encoding": "UTF-8", "text": "import logging\n\nfrom flask import Flask, request\n\nfrom quackers.core import process_question, process_question_followup, client\nfrom quackers.helpers import ChannelMap\n\n# *********************************************\n# 
EDIT HERE\n# *********************************************\n\n# map is in the following format:\n# (channel-to-listen-to, coach-channel, program-this-channel-set-belongs-to)\n\nUX = 'ux'\nSE = 'se'\n\nchannel_map = ChannelMap(slack_conn=client)\n\nchannels = [\n    (\"joe-slackbot-testing\", \"joe-slackbot-coaches\", SE),\n    # software engineering channels\n    (\"se-july-2020\", \"se-july-2020-coaches\", SE),\n    (\"se-april-2020\", \"se-april-2020-coaches\", SE),\n    (\"se-october-2019\", \"se-q4-staff\", SE),\n    (\"se-january-2020\", \"se-q3-staff\", SE),\n    # user experience channels\n    (\"ux-5\", \"ux-triage-uie\", UX),\n    (\"ux-6\", \"ux-triage-uxd\", UX),\n    (\"ux-7\", \"ux-triage-uxd\", UX),\n    # old maps\n    (\"ux-4-indy\", \"ux-triage-uie\", UX),\n    (\"ux-4-remote\", \"ux-triage-uie\", UX)\n]\nfor channel in channels:\n    channel_map.add_channel(\n        listen_to=channel[0], post_to=channel[1], airtable=channel[2]\n    )\n\n# for responses returned to the student\nemoji_list = [\n    'party',\n    'thepuff',\n    'carlton',\n    'fire',\n    'spinning',\n    'party-parrot',\n    'heykirbyhey',\n    'capemario'\n]\n# *********************************************\n# DO NOT EDIT BEYOND THIS POINT\n# *********************************************\n\napp = Flask(__name__)\n\nif __name__ != \"__main__\":\n    gunicorn_logger = logging.getLogger(\"gunicorn.error\")\n    app.logger.handlers = gunicorn_logger.handlers\n    app.logger.setLevel(gunicorn_logger.level)\nelse:\n    app.logger.setLevel(logging.INFO)\n\n\[email protected]('/questionfollowup/', methods=['POST'])\ndef questionfollowup():\n    with app.app_context():\n        process_question_followup(request.form.to_dict(), channel_map, emoji_list)\n    # this endpoint spawns another thread to do its dirty work, so we need to\n    # return the 200 OK ASAP so that Slack will be happy.\n    return (\"\", 200)\n\n\[email protected]('/question/', methods=['POST'])\ndef question():\n    with app.app_context():\n        return process_question(request.form.to_dict(), channel_map)\n\n\nif __name__ == \"__main__\":\n    app.run(host='0.0.0.0', port=8000)\n" }, { "alpha_fraction": 0.6112558841705322, "alphanum_fraction": 0.6164669394493103, "avg_line_length": 30.983333587646484, "blob_id": "457249ba81b82cf26f394a1ff60432ebc3da5ca7", "content_id": "4fa9ba2935ca1365505718e8ec5b4bd212ba127b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1919, "license_type": "no_license", "max_line_length": 94, "num_lines": 60, "path": "/quackers/helpers.py", "repo_name": "alefaggravo/quackers", "src_encoding": "UTF-8", "text": "import threading\nimport logging\n\nlogger = logging.getLogger('gunicorn.error')\n\n\n# https://stackoverflow.com/a/59043636\ndef fire_and_forget(f, *args, **kwargs):\n    def wrapped(*args, **kwargs):\n        threading.Thread(target=f, args=args, kwargs=kwargs).start()\n\n    return wrapped\n\n\nclass ChannelMap(object):\n\n    def __init__(self, slack_conn):\n        self.client = slack_conn\n        self.mapping = {}\n\n    def add_channel(self, listen_to: str=None, post_to: str=None, airtable: str=None):\n        if not listen_to or not post_to or not airtable:\n            raise ValueError(\"Must pass in all three variables!\")\n        self.mapping.update({listen_to: {'target': post_to, 'airtable': airtable}})\n        logger.info(f\"Registered {listen_to} -> {post_to} for the {airtable.upper()} program\")\n\n    def get_coach_channel(self, c):\n        result = self.mapping[c]\n        if not result:\n            raise Exception(\"No matching channel found!\")\n        result = result['target']\n        if not result.startswith(\"#\"):\n            result = \"#{}\".format(result)\n\n        
return result\n\n def get_channel_id(self, channel_name):\n # reference: https://github.com/KenzieAcademy/quackers/issues/8\n # https://github.com/KenzieAcademy/quackers/issues/7\n channels = self.client.users_conversations(\n types=\"public_channel,private_channel\"\n ).data['channels']\n for c in channels:\n if c.get('name') == channel_name:\n return c['id']\n logger.error(f'Unable to resolve channel {channel_name}!')\n\n def get_base(self, channel):\n result = self.mapping[channel]\n if not result:\n raise Exception(\"No matching channel found!\")\n return result['airtable']\n\n def get(self, item):\n return self.mapping.get(item)\n\n def keys(self):\n return self.mapping.keys()\n\n def items(self):\n return self.mapping.items()\n" }, { "alpha_fraction": 0.557692289352417, "alphanum_fraction": 0.6431623697280884, "avg_line_length": 20.272727966308594, "blob_id": "673f9ccb78432b74c544e91540afc405f2c62c81", "content_id": "d8a6dc403b39b7ec267dee27312ffa83aa426c2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 468, "license_type": "no_license", "max_line_length": 49, "num_lines": 22, "path": "/pyproject.toml", "repo_name": "alefaggravo/quackers", "src_encoding": "UTF-8", "text": "[tool.poetry]\nname = \"quackers\"\nversion = \"1.0.0\"\ndescription = \"A Q&A slackbot to assist with Q1.\"\nauthors = [\"Joe Kaufeld <[email protected]>\"]\n\n[tool.poetry.dependencies]\npython = \"^3.8\"\nslackclient = \"^2.5.0\"\npython-dotenv = \"^0.10.5\"\nflask = \"^1.1.1\"\nrequests = \"^2.22.0\"\nairtable-python-wrapper = \"^0.12.0\"\ngunicorn = \"^20.0.4\"\n\n[tool.poetry.dev-dependencies]\nblack = \"^19.10b0\"\nipdb = \"^0.12.3\"\n\n[build-system]\nrequires = [\"poetry>=0.12\"]\nbuild-backend = \"poetry.masonry.api\"\n" }, { "alpha_fraction": 0.38142549991607666, "alphanum_fraction": 0.39395248889923096, "avg_line_length": 25.609195709228516, "blob_id": "4a87fdf27349270dbf2b0418a807f9a1881c201d", "content_id": "c650108683bfa46272a88f58d9ba6d5088f92f8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2318, "license_type": "no_license", "max_line_length": 146, "num_lines": 87, "path": "/quackers/data.py", "repo_name": "alefaggravo/quackers", "src_encoding": "UTF-8", "text": "start_modal = {\n \"type\": \"modal\",\n \"title\": {\n \"type\": \"plain_text\",\n \"text\": \"Quackers!\",\n \"emoji\": True\n },\n \"submit\": {\n \"type\": \"plain_text\",\n \"text\": \"Submit\",\n \"emoji\": True\n },\n \"close\": {\n \"type\": \"plain_text\",\n \"text\": \"Cancel\",\n \"emoji\": True\n },\n \"blocks\": [\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"plain_text\",\n \"text\": \"The question was: {0}\\nYour channel: {1}\",\n \"emoji\": True\n }\n },\n {\n \"type\": \"divider\"\n },\n {\n \"type\": \"input\",\n \"element\": {\n \"type\": \"plain_text_input\",\n \"action_id\": \"ml_input\",\n \"multiline\": True\n },\n \"label\": {\n \"type\": \"plain_text\",\n \"text\": \"What else should we know about the problem you're facing?\"\n },\n \"hint\": {\n \"type\": \"plain_text\",\n \"text\": \"Any context you can provide will help!\"\n }\n },\n {\n \"type\": \"divider\"\n },\n {\n \"type\": \"context\",\n \"elements\": [\n {\n \"type\": \"mrkdwn\",\n \"text\": \"*NOTE*: Your question won't get sent to the coaches until you click submit!\\nID: {}\"\n }\n ]\n }\n ]\n}\n\nerror_modal = {\n \"type\": \"modal\",\n \"title\": {\n \"type\": \"plain_text\",\n \"text\": \"Hey! Listen! 
🌟\",\n \"emoji\": True\n },\n \"close\": {\n \"type\": \"plain_text\",\n \"text\": \"OK\",\n \"emoji\": True\n },\n \"blocks\": [\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": \"I'm not set up to run in this channel; you'll have to call me from your cohort channel. Sorry!\"\n }\n },\n {\n \"type\": \"image\",\n \"image_url\": \"https://gamepedia.cursecdn.com/zelda_gamepedia_en/0/08/OoT3D_Navi_Artwork.png?version=61b243ef9637615abdf7534b17361c7a\",\n \"alt_text\": \"Navi from The Legend of Zelda - a blue glowing orb with fairy wings. Artwork from the Ocarina of Time 3D.\"\n }\n ]\n}\n" }, { "alpha_fraction": 0.5769084692001343, "alphanum_fraction": 0.5818458199501038, "avg_line_length": 30.722890853881836, "blob_id": "f5550b7a4f4577f423fd8c76eb647e47b3c8f91e", "content_id": "e9d4be3a04a4a2d1b5034a29ffd05edc56aed3ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2633, "license_type": "no_license", "max_line_length": 88, "num_lines": 83, "path": "/scripts/add_slack_ids_to_airtable.py", "repo_name": "alefaggravo/quackers", "src_encoding": "UTF-8", "text": "import dotenv\nimport os\nimport slack\nfrom airtable import Airtable\nimport json\n\ndotenv.load_dotenv()\nclient = slack.WebClient(token=os.environ[\"BOT_USER_OAUTH_ACCESS_TOKEN\"])\n\na = Airtable(os.environ.get('SE_AIRTABLE_BASE_ID'), 'Students')\n# a = Airtable(os.environ.get('CT_AIRTABLE_BASE_ID'), 'Students')\n# a = Airtable(os.environ.get('UX_AIRTABLE_BASE_ID'), 'Students')\nstudents = a.get_all()\n\nresult = client.users_list()\nusers = [u for u in result.data['members'] if u['deleted'] is False]\nprocessed_results = [\n [\n u['real_name'], u['profile']['display_name'], u['profile'].get('email'), u['id']\n ] for u in users\n]\n\nfor record in students:\n student_email = record['fields'].get('Email')\n if record['fields'].get('Slack ID'):\n try:\n fname = record['fields']['Name']\n print(f\"Record {fname} is up to date!\")\n except KeyError:\n pass\n if not student_email:\n continue\n for i in processed_results:\n if i[2]:\n i[2] = i[2].lower()\n if student_email.lower() == i[2]:\n try:\n # SE airtable\n print('Updating {}'.format(record['fields']['Name']))\n except KeyError:\n # UX airtable\n print('Updating {}'.format(record['fields']['Name']))\n a.update(record['id'], {'Slack ID': i[3]})\n i.append('PROCESSED')\n\nunprocessed = [u for u in processed_results if len(u) == 4]\nprocessed = [u for u in processed_results if len(u) == 5]\nprint(\"Unprocessed Slack IDs: \", len(unprocessed))\nprint(\"Number of students in Airtable: \", len(students))\nprint(\"Updated Slack IDs: \", len(processed))\n\nstudents = a.get_all()\nno_slack_id = [u for u in students if u['fields'].get('Slack ID') == None]\nif len(no_slack_id) == 0:\n print(\n \"Everyone present and accounted for! All student records in Airtable\"\n \" have a Slack ID.\"\n )\nelse:\n print(\"Found {} students in Airtable with no Slack ID.\".format(len(no_slack_id)))\n print(\"This will require manual intervention.\")\n print()\n print(\"Accounts that need attention:\")\n for i in no_slack_id:\n try:\n print(i['fields']['Name'])\n except KeyError:\n print(i)\n\nprint()\nprint('The full unprocessed results from Slack are found in slack_data.json')\n\nwith open('slack_data.json', 'w') as f:\n data = {'data': []}\n [\n data['data'].append({\n 'Real name': u[0],\n 'Display name': u[1],\n 'Email': u[2],\n 'Slack ID': u[3]\n }) for u in unprocessed\n ]\n f.write(json.dumps(data, indent=2))\n" } ]
7
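helpers.py above wraps handlers in a fire_and_forget decorator so the Flask endpoints can return their 200 immediately. A stripped-down, runnable version of that pattern (do_work is a stand-in task, not part of the repository):

import threading
import time

def fire_and_forget(f):
    def wrapped(*args, **kwargs):
        # Run the real work on a background thread and return at once.
        threading.Thread(target=f, args=args, kwargs=kwargs).start()
    return wrapped

@fire_and_forget
def do_work(label):
    time.sleep(0.1)
    print("finished", label)

do_work("demo")
print("returned before the work completed")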
MehmetUstek/HomeSecurity_VideoProcessing
https://github.com/MehmetUstek/HomeSecurity_VideoProcessing
998b30ae47aff82d4e287a3c6ca28c0e0b426b60
4017f85b3666e6ba6dd9cbd46f9d4438898d2bac
32e686c3be0af2299b7a9e89513f053c7c2020c2
refs/heads/master
2023-06-05T10:44:57.938635
2021-06-15T15:07:03
2021-06-15T15:07:03
377,204,234
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5566316246986389, "alphanum_fraction": 0.609159529209137, "avg_line_length": 34.829410552978516, "blob_id": "770ea11c6b213db6f8ebdd27dac110a577be45fe", "content_id": "cf6aa05385e8873fd5a9e482af5964dd236100db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6094, "license_type": "no_license", "max_line_length": 121, "num_lines": 170, "path": "/humandetect.py", "repo_name": "MehmetUstek/HomeSecurity_VideoProcessing", "src_encoding": "UTF-8", "text": "# import the necessary packages\nimport numpy as np\nimport cv2\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport time\n\n#PIXEL aralıkları\nTEKNE_PIXEL_X1 = 20\nTEKNE_PIXEL_X2 = 250\nTEKNE_PIXEL_Y1 = 37\n# TEKNE_PIXEL_Y2 = 320\nTEKNE_PIXEL_Y2 = 400\nRESIZE_X = 320\nRESIZE_Y = 240\nMAIL = \"mailname\"\nPASS = \"password\"\n\nFRAME_INCLUDED_AREA_X1= 400\nFRAME_INCLUDED_AREA_X2= 1200\nFRAME_INCLUDED_AREA_Y1= 700\nFRAME_INCLUDED_AREA_Y2= 1800\n\n# initialize the HOG descriptor/person detector\nhog = cv2.HOGDescriptor()\nhog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())\n\ndef sendMail():\n msg = MIMEMultipart() # create a message\n\n # add in the actual person name to the message template\n message = \"Mail test\"\n\n # setup the parameters of the message\n msg['From'] = MAIL\n msg['To'] = \"mailto\"\n msg['Subject'] = \"This is TEST\"\n\n # add in the message body\n msg.attach(MIMEText(message, 'plain'))\n\n # send the message via the server set up earlier.\n s.send_message(msg)\n\n del msg\n#mail\ns = smtplib.SMTP(host='smtp.gmail.com', port=587)\ns.starttls()\ns.login(MAIL, PASS)\n\n\n\ncv2.startWindowThread()\n\n# open webcam video stream\ncap = cv2.VideoCapture('rtsp://usr:pass@ip')\nret, frame1 = cap.read()\nret, frame2 = cap.read()\n\n# frame1 = cv2.resize(frame1, (RESIZE_X, RESIZE_Y))\n# frame2 = cv2.resize(frame2, (RESIZE_X, RESIZE_Y))\nframe1= frame1[FRAME_INCLUDED_AREA_X1:FRAME_INCLUDED_AREA_X2, FRAME_INCLUDED_AREA_Y1:FRAME_INCLUDED_AREA_Y2]\nframe1 = cv2.resize(frame1, (RESIZE_X, RESIZE_Y))\nframe2= frame2[FRAME_INCLUDED_AREA_X1:FRAME_INCLUDED_AREA_X2, FRAME_INCLUDED_AREA_Y1:FRAME_INCLUDED_AREA_Y2]\nframe2 = cv2.resize(frame2, (RESIZE_X, RESIZE_Y))\n# the output will be written to output.avi\n\n\ncounter = 0\ntemp_now = 0\nwhile (True):\n ret, frame1 = cap.read()\n # frame1 = frame1[FRAME_INCLUDED_AREA_X1:FRAME_INCLUDED_AREA_X2, FRAME_INCLUDED_AREA_Y1:FRAME_INCLUDED_AREA_Y2]\n # frame1 = cv2.resize(frame1, (RESIZE_X, RESIZE_Y))\n frame1 = frame1[FRAME_INCLUDED_AREA_X1:FRAME_INCLUDED_AREA_X2, FRAME_INCLUDED_AREA_Y1:FRAME_INCLUDED_AREA_Y2]\n frame1 = cv2.resize(frame1, (RESIZE_X, RESIZE_Y))\n # Capture frame-by-frame\n # ret, frame = cap.read()\n cv2.rectangle(frame1, (TEKNE_PIXEL_X1, TEKNE_PIXEL_Y1), (TEKNE_PIXEL_X2, TEKNE_PIXEL_Y2),\n (255, 0, 0), 2)\n diff = cv2.absdiff(frame1, frame2)\n\n # resizing for faster detection\n # frame = cv2.resize(frame, (640, 480))\n # using a greyscale picture, also for faster detection\n # gray = cv2.cvtColor(diff, cv2.COLOR_RGB2GRAY)\n gray = cv2.cvtColor(diff, cv2.COLOR_RGB2GRAY)\n # for contour in contours:\n # (x, y, w, h) = cv2.boundingRect(contour)\n #\n # if cv2.contourArea(contour) < 50000:\n # continue\n # cv2.rectangle(frame1,(x,y),(x+w, y+h), (0,255,0), 2)\n # cv2.putText(frame1, \"Status: {}\".format('Movement'), (10,20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)\n\n # detect people in the image\n # returns the bounding boxes for the 
detected objects\n # boxes, weights = hog.detectMultiScale(frame1, winStride=(4,4),scale=1.03,useMeanshiftGrouping=True)\n #\n # boxes = np.array([[x, y, x + w, y + h] for (x, y, w, h) in boxes])\n #\n # for (xA, yA, xB, yB) in boxes:\n # # display the detected boxes in the colour picture\n # cv2.rectangle(frame1, (xA, yA), (xB, yB),\n # (0, 255, 0), 2)\n # if TEKNE_PIXEL_X1 < xA and TEKNE_PIXEL_X2 > xB and TEKNE_PIXEL_Y2 > yB and TEKNE_PIXEL_Y1 < yA:\n # print(\"detected person\")\n # sendMail()\n # cv2.rectangle(frame1, (xA, yA), (xB, yB),\n # (0, 0, 255), 5)\n blur = cv2.GaussianBlur(gray, (5, 5), 0)\n _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)\n dilated = cv2.dilate(thresh, None, iterations=3)\n contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n for contour in contours:\n (x1, y1, w1, h1) = cv2.boundingRect(contour)\n\n if cv2.contourArea(contour) < 10000:\n continue\n counter += 1\n\n if counter > 100:\n cv2.rectangle(frame1,(x1,y1),(x1+w1, y1+h1), (0,255,0), 2)\n cv2.putText(frame1, \"Status: {}\".format('Movement'), (10,20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)\n print(\"detected person\")\n sendMail()\n cv2.rectangle(frame1, (x1, y1), (x1+w1, y1+h1),\n (0, 0, 255), 5)\n img_name = \"opencv_frame_{}.png\".format(counter)\n cv2.imwrite(img_name, frame1)\n # cv2.rectangle(frame1, (xA, yA), (xB, yB), (0, 255, 0), 2)\n # cv2.putText(frame1, \"Status: {}\".format('Movement'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)\n # counter += 1\n # temp_now = time.perf_counter()\n # if counter > 5:\n # print(\"detected person\")\n # sendMail()\n # cv2.rectangle(frame1, (xA, yA), (xB, yB),\n # (0, 0, 255), 5)\n # img_name = \"opencv_frame_{}.png\".format(counter)\n # cv2.imwrite(img_name, frame1)\n # else:\n # if time.perf_counter() - temp_now > 5:\n # print(counter)\n # counter = 0\n if time.perf_counter() - temp_now > 10:\n print(counter)\n temp_now = time.perf_counter()\n counter = 0\n\n\n # Display the resulting frame\n cv2.imshow('frame', frame1)\n frame1 = frame2\n ret, frame2 = cap.read()\n # frame2= frame2[FRAME_INCLUDED_AREA_X1:FRAME_INCLUDED_AREA_X2, FRAME_INCLUDED_AREA_Y1:FRAME_INCLUDED_AREA_Y2]\n\n\n frame2 = frame2[FRAME_INCLUDED_AREA_X1:FRAME_INCLUDED_AREA_X2, FRAME_INCLUDED_AREA_Y1:FRAME_INCLUDED_AREA_Y2]\n frame2 = cv2.resize(frame2, (RESIZE_X, RESIZE_Y))\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything done, release the capture\ncap.release()\n# finally, close the window\ncv2.destroyAllWindows()\ncv2.waitKey(1)\n\n" } ]
1
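The detection loop in the record above reduces to a frame-differencing pipeline: absdiff, grayscale, blur, threshold, dilate, contours. The same chain in isolation, fed random frames so the sketch runs without the RTSP camera:

import cv2
import numpy as np

frame1 = np.random.randint(0, 255, (240, 320, 3), dtype=np.uint8)
frame2 = np.random.randint(0, 255, (240, 320, 3), dtype=np.uint8)

diff = cv2.absdiff(frame1, frame2)                 # pixel-wise change
gray = cv2.cvtColor(diff, cv2.COLOR_RGB2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)           # suppress noise
_, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
dilated = cv2.dilate(thresh, None, iterations=3)   # merge nearby blobs
contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
moving = [c for c in contours if cv2.contourArea(c) >= 10000]
print(len(moving), "large moving regions")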
ZephSibley/agrimetrics_challenge
https://github.com/ZephSibley/agrimetrics_challenge
4ec8e4487ea96ce2250ef29ad27fdc17b52e1182
ce169ef984974ee87dcad0f66679060489128c59
6f678f18b87027129ab4de407c37a3ce793ceb8a
refs/heads/master
2022-12-04T20:58:59.587541
2020-08-27T16:56:36
2020-08-27T16:56:36
290,831,371
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6210407018661499, "alphanum_fraction": 0.6300904750823975, "avg_line_length": 24.257143020629883, "blob_id": "f2d46be948fa4205a5505f6552d671d564ad77fd", "content_id": "d9631ec3f05da0cb8ecc93ca52ce25b290c14b4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 884, "license_type": "no_license", "max_line_length": 75, "num_lines": 35, "path": "/main.py", "repo_name": "ZephSibley/agrimetrics_challenge", "src_encoding": "UTF-8", "text": "from fastapi import FastAPI\n\napp = FastAPI()\n\nSCHEDULE = []\n\[email protected](\"/\")\nasync def root():\n return {\"message\": \"Hello World\"}\n\n\[email protected](\"/order\")\nasync def order():\n from datetime import datetime, timedelta\n current_time = datetime.now()\n make_time = current_time\n\n if SCHEDULE:\n latest_scheduled_time = SCHEDULE[-1][0]\n if current_time < latest_scheduled_time:\n # The latest entry is always 'Take a break'\n SCHEDULE.pop()\n time_diff = latest_scheduled_time - current_time\n make_time += time_diff\n\n SCHEDULE.append((make_time, 'Make sandwich'))\n SCHEDULE.append((make_time + timedelta(seconds=150), 'Serve sandwich'))\n SCHEDULE.append((make_time + timedelta(seconds=210), 'Take a break'))\n\n return {\"message\": \"OK\"}\n\n\[email protected](\"/schedule\")\nasync def schedule():\n return {\"schedule\": SCHEDULE}\n" }, { "alpha_fraction": 0.6172608137130737, "alphanum_fraction": 0.6522826552391052, "avg_line_length": 30.352941513061523, "blob_id": "5b7c53bedba9217164593b200e2a9ade9bccce91", "content_id": "a7bdbb0d2f4ace88e1e9b7ac065823b3a9dcbaf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1599, "license_type": "no_license", "max_line_length": 72, "num_lines": 51, "path": "/test_main.py", "repo_name": "ZephSibley/agrimetrics_challenge", "src_encoding": "UTF-8", "text": "from fastapi.testclient import TestClient\nfrom datetime import timedelta\n\nfrom main import app\nfrom main import SCHEDULE\n\nclient = TestClient(app)\n\n# TODO: Mock out the SCHEDULE list out so we're not testing global state\n\n\ndef test_read_main():\n response = client.get(\"/\")\n assert response.status_code == 200\n assert response.json() == {\"message\": \"Hello World\"}\n\n\ndef test_read_schedule():\n response = client.get(\"/schedule\")\n assert response.status_code == 200\n assert response.json() == {\"schedule\": []}\n\n\ndef test_read_order():\n response = client.get(\"/order\")\n assert response.status_code == 200\n assert response.json() == {\"message\": \"OK\"}\n assert len(SCHEDULE) == 3\n assert SCHEDULE[-1][1] == 'Take a break'\n assert SCHEDULE[-1][0] - SCHEDULE[-2][0] == timedelta(seconds=60)\n assert SCHEDULE[-2][0] - SCHEDULE[-3][0] == timedelta(seconds=150)\n\n\ndef test_read_order_again():\n response = client.get(\"/order\")\n assert response.status_code == 200\n assert response.json() == {\"message\": \"OK\"}\n assert len(SCHEDULE) == 5\n assert SCHEDULE[-1][1] == 'Take a break'\n assert SCHEDULE[-1][0] - SCHEDULE[-2][0] == timedelta(seconds=60)\n assert SCHEDULE[-2][0] - SCHEDULE[-3][0] == timedelta(seconds=150)\n assert SCHEDULE[-4][1] != 'Take a break'\n assert SCHEDULE[-3][0] - SCHEDULE[-4][0] == timedelta(seconds=60)\n\n\ndef test_read_schedule_with_orders():\n response = client.get(\"/schedule\")\n assert response.status_code == 200\n schedule = response.json()['schedule']\n assert type(schedule) == list\n assert len(schedule) == 5\n" }, { "alpha_fraction": 
0.7547169923782349, "alphanum_fraction": 0.7547169923782349, "avg_line_length": 34.33333206176758, "blob_id": "f8e3b8f36a6028dc3c1e08b8242610a48b93cf43", "content_id": "3577727bab26ba20e3c2139427b3c13fd73f4149", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 106, "license_type": "no_license", "max_line_length": 57, "num_lines": 3, "path": "/README.md", "repo_name": "ZephSibley/agrimetrics_challenge", "src_encoding": "UTF-8", "text": "### Running the app\nTo run the app execute the following from this directory:\n`uvicorn main:app --reload`\n" } ]
3
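The /order endpoint above keeps a flat list of (time, label) tuples and queues a new sandwich behind any still-pending break. The same arithmetic, framework-free, reproducing the len == 5 behaviour the record's tests assert:

from datetime import datetime, timedelta

schedule = []

def order(now):
    make_time = now
    if schedule:
        latest = schedule[-1][0]          # tail entry is always 'Take a break'
        if now < latest:
            schedule.pop()
            make_time += latest - now
    schedule.append((make_time, "Make sandwich"))
    schedule.append((make_time + timedelta(seconds=150), "Serve sandwich"))
    schedule.append((make_time + timedelta(seconds=210), "Take a break"))

order(datetime.now())
order(datetime.now())   # queued behind the first order's break slot
print(len(schedule))    # -> 5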
smokeinside/game
https://github.com/smokeinside/game
5a953391b5d833dfe4f564c7dfd9df05ed0f3cee
b61b09c486acd60c9d0396bb9b65eb6e2e723caf
4c230c956af61d36caa3942e723667e9c1c1e9ee
refs/heads/master
2023-04-09T10:09:57.257703
2021-04-25T09:59:18
2021-04-25T09:59:18
361,387,133
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6291532516479492, "alphanum_fraction": 0.633976399898529, "avg_line_length": 28.15625, "blob_id": "9312627fd4ceaa6f2c7222aaec17d1381a875681", "content_id": "490a00cbd6f771170e9da06cc19ac2f83dfc8115", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1866, "license_type": "no_license", "max_line_length": 80, "num_lines": 64, "path": "/moves.py", "repo_name": "smokeinside/game", "src_encoding": "UTF-8", "text": "import logging\nimport random\n\nfrom players import Player\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(name=__name__)\n\n\nclass Move:\n def __init__(self, name, is_self_action=False):\n self.name = name\n self.is_self_action = is_self_action\n\n def execute(self, target: Player):\n raise NotImplementedError\n\n def __str__(self):\n return self.name\n\n\nclass MoveWithRange(Move):\n def __init__(self, minimum, maximum, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.minimum = minimum\n self.maximum = maximum\n\n\nclass Attack(MoveWithRange):\n def execute(self, target: Player):\n damage = random.randrange(self.minimum, self.maximum + 1)\n logger.info(f\"{target} received {damage} damage\")\n target.health = 0 if damage >= target.health else target.health - damage\n\n\nclass Heal(MoveWithRange):\n def execute(self, target: Player):\n heal = random.randrange(self.minimum, self.maximum + 1)\n logger.info(f\"{target} received {heal} heal\")\n after_heal = target.health + heal\n target.health = (\n target.max_health if after_heal > target.max_health else after_heal\n )\n\n\nclass MoveSelector:\n def select(self, player, moves):\n move = random.choice(moves)\n logger.info(f\"{move} was chosen for {player}\")\n return move\n\n\nclass IncreasedHealMoveSelector(MoveSelector):\n def __init__(self, critical_health):\n self.critical_health = critical_health\n\n def select(self, player, moves):\n if player.health <= self.critical_health:\n weights = [2.0 if isinstance(move, Heal) else 1.0 for move in moves]\n else:\n weights = [1.0 for _ in moves]\n [move] = random.choices(moves, weights)\n logger.info(f\"{move} was chosen for {player}\")\n return move\n" }, { "alpha_fraction": 0.5720984935760498, "alphanum_fraction": 0.5744431614875793, "avg_line_length": 29.464284896850586, "blob_id": "69411e6949017cf619fa655dc3fb658f6478857a", "content_id": "0842e7f6bdfeba87c29705fe194786e2986d5ec2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 853, "license_type": "no_license", "max_line_length": 113, "num_lines": 28, "path": "/games.py", "repo_name": "smokeinside/game", "src_encoding": "UTF-8", "text": "import logging\nimport random\nfrom typing import List\n\nfrom players import Player\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(name=__name__)\n\n\nclass Game:\n def __init__(self, players: List[Player]):\n self.players = players\n\n def play(self) -> Player:\n executor, target = random.sample(self.players, 2)\n while True:\n executor.make_move(target)\n if target.health == 0:\n logger.info(\n f\"{executor} has won in the battle with {target} and has {executor.health} health left\"\n )\n return executor\n else:\n logger.info(\n f\"Round result: {executor} has {executor.health} health; {target} has {target.health} health\"\n )\n executor, target = target, executor\n" }, { "alpha_fraction": 0.5755919814109802, "alphanum_fraction": 0.5816636085510254, "avg_line_length": 27.39655113220215, "blob_id": 
"226c7d918b1ce15c52d2c796c87304bd43e98bfe", "content_id": "cc9b4e97e956aafefefee4e0f7d17aa7cb4eb571", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1647, "license_type": "no_license", "max_line_length": 82, "num_lines": 58, "path": "/players.py", "repo_name": "smokeinside/game", "src_encoding": "UTF-8", "text": "import logging\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(name=__name__)\n\n\nclass ValidationError(Exception):\n pass\n\n\nclass Player:\n def __init__(self, name, moves, move_selector, health=100, max_health=100):\n self.name = name\n self.moves = moves\n self.move_selector = move_selector\n self.max_health = max_health\n self.health = health\n\n @property\n def moves(self):\n return self._moves\n\n @moves.setter\n def moves(self, value):\n if not isinstance(value, list) or len(value) < 1:\n raise ValidationError(\n \"Moves has to be list of length greater or equal to 1\"\n )\n self._moves = value\n\n @property\n def health(self):\n return self._health\n\n @health.setter\n def health(self, value):\n if value > self.max_health:\n raise ValidationError(\n f\"Health value is {value} which is greater then {self.max_health}\"\n )\n if value < 0:\n raise ValidationError(\n f\"Health value is {value.health} which is lower then 0\"\n )\n if hasattr(self, \"_health\"):\n logger.info(f\"{self} health changed from {self._health} to {value}\")\n self._health = value\n else:\n self._health = value\n logger.info(f\"{self} health was initialized as {self._health}\")\n\n def make_move(self, opponent):\n move = self.move_selector.select(self, self.moves)\n target = self if move.is_self_action else opponent\n move.execute(target)\n\n def __str__(self):\n return self.name\n" }, { "alpha_fraction": 0.707317054271698, "alphanum_fraction": 0.727642297744751, "avg_line_length": 21.363636016845703, "blob_id": "6656c7ac0cff0ee25a26589bebea97583f772c13", "content_id": "0f394a099121272c9a4770696787087a8f003af8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 246, "license_type": "no_license", "max_line_length": 74, "num_lines": 11, "path": "/README.md", "repo_name": "smokeinside/game", "src_encoding": "UTF-8", "text": "# Fighting Game project\n\n- Project requirements and dependencies:\n\n 1. Python 3.6+ is required\n\n 2. Only build-in modules are used, no additional requirements required\n\n- To run this project:\n\n 1. From project folder run: python play.py\n" }, { "alpha_fraction": 0.626719057559967, "alphanum_fraction": 0.6542239785194397, "avg_line_length": 35.35714340209961, "blob_id": "278dd1fe941488c9c2bcc2a3340a18250a2f24e1", "content_id": "7a5fd98629f2873b7d81168ba4a077766b627b13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 509, "license_type": "no_license", "max_line_length": 85, "num_lines": 14, "path": "/play.py", "repo_name": "smokeinside/game", "src_encoding": "UTF-8", "text": "import moves\nfrom games import Game\nfrom players import Player\n\nif __name__ == \"__main__\":\n defalut_moves = [\n moves.Attack(18, 25, name=\"Simple attack\"),\n moves.Attack(10, 35, name=\"Wide range attack\"),\n moves.Heal(18, 25, name=\"Simple self heal\", is_self_action=True),\n ]\n computer = Player(\"Computer\", defalut_moves, moves.IncreasedHealMoveSelector(35))\n player = Player(\"Alexander\", defalut_moves, moves.MoveSelector())\n game = Game([computer, player])\n game.play()\n" } ]
5
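IncreasedHealMoveSelector in the record above doubles the weight of Heal moves once health drops to the critical threshold. The core of that selection with plain strings standing in for the Move classes:

import random

moves = ["attack", "wide attack", "heal"]
health, critical_health = 20, 35

if health <= critical_health:
    weights = [2.0 if m == "heal" else 1.0 for m in moves]
else:
    weights = [1.0 for _ in moves]

[move] = random.choices(moves, weights)   # heal is picked half the time here
print(move)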
elaugier/aiohttp_samples
https://github.com/elaugier/aiohttp_samples
9a3370c64638e63755f9b606ffff1a3f765c760f
2af552ddbbb1761e4d19c6a22daad50e73517666
3b8da679d694c35238ac773030b05d92e4ba2ca2
refs/heads/master
2020-04-02T11:32:10.152977
2018-10-23T22:40:21
2018-10-23T22:40:21
154,394,091
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5584825873374939, "alphanum_fraction": 0.5605900883674622, "avg_line_length": 28.3125, "blob_id": "61bd89e432e83ad188a61b13d0df9244c76de344", "content_id": "e81a787adf63d3062e07fd61cb95b0b1f6f88620", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 949, "license_type": "no_license", "max_line_length": 65, "num_lines": 32, "path": "/controllers/sendfile.py", "repo_name": "elaugier/aiohttp_samples", "src_encoding": "UTF-8", "text": "\nfrom aiohttp import web\nimport mimetypes\nimport os\nimport tempfile\n\nclass sendfile():\n def __init__(self):\n pass\n\n async def get(self, request):\n fh = tempfile.NamedTemporaryFile(delete=False)\n fh.write(bytes(\"test;test2\" + os.linesep, 'UTF-8'))\n fh.close()\n filename = os.path.abspath(fh.name)\n with open(filename, \"rb\") as f:\n resp = web.StreamResponse()\n resp.content_type, _ = mimetypes.guess_type(filename)\n\n disposition = 'filename=\"{}\"'.format(filename)\n if 'text' not in resp.content_type:\n disposition = 'attachment; ' + disposition\n\n resp.headers['CONTENT-DISPOSITION'] = disposition\n\n data = f.read()\n f.close()\n os.remove(fh.name)\n resp.content_length = len(data)\n await resp.prepare(request)\n\n await resp.write(data)\n return resp\n\n \n" }, { "alpha_fraction": 0.747474730014801, "alphanum_fraction": 0.7676767706871033, "avg_line_length": 18.700000762939453, "blob_id": "4ca9731f3360574ad303b2fd76cb2b2c8b1dbdeb", "content_id": "db94d669f1bb5bd128314df0e2d68225a4f3dde6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 198, "license_type": "no_license", "max_line_length": 49, "num_lines": 10, "path": "/app.py", "repo_name": "elaugier/aiohttp_samples", "src_encoding": "UTF-8", "text": "from aiohttp import web\nfrom controllers.sendfile import sendfile\n\napp = web.Application()\n\nsendfileCtrl = sendfile()\n\napp.router.add_get(\"/sendfile\", sendfileCtrl.get)\n\nweb.run_app(app,port=4563)\n\n" } ]
2
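sendfile.py above streams a temp file back through an aiohttp StreamResponse. The bare prepare-then-write pattern it relies on, sketched with an in-memory payload instead of the temp file:

from aiohttp import web

async def handler(request):
    resp = web.StreamResponse()
    resp.content_type = "text/plain"
    data = b"test;test2\n"
    resp.content_length = len(data)
    await resp.prepare(request)   # headers go out here
    await resp.write(data)        # then the body
    return resp

app = web.Application()
app.router.add_get("/sendfile", handler)
# web.run_app(app, port=8080) would serve it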
meteFANS/metview-python
https://github.com/meteFANS/metview-python
30f480a83a967725f7e82e388fdcf4af9bd12867
9178bbabeb47b539ad3e673ebedf74e4dccd7841
67412a4036b6284edda2f9108423061e84208e38
refs/heads/master
2020-05-06T12:14:30.626198
2019-03-05T16:26:21
2019-03-05T16:26:21
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4488603174686432, "alphanum_fraction": 0.46171829104423523, "avg_line_length": 32.54901885986328, "blob_id": "ea4dd2be54d5eea3fcadbefade7d81f7c4641b74", "content_id": "98d6f596551470c98ced5ac9b1f37b54e2b43c53", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1711, "license_type": "permissive", "max_line_length": 86, "num_lines": 51, "path": "/examples/UC-07-bufr-pandas.py", "repo_name": "meteFANS/metview-python", "src_encoding": "UTF-8", "text": "\"\"\"\nMetview Python use case\n\nUC-07-pandas. The Analyst compute simple differences between observations and analysis\nand use pandas to perform further computations\n\nBUFR version - BUFR is not tabular or gridded, but we can use Metview Python\nframework to extract a particular parameter to a tabular format (geopoints)\n\n--------------------------------------------------------------------------------\n1. Analyst retrieves the analysis from a gridded data file\n--------------------------------------------------------------------------------\n\n--------------------------------------------------------------------------------\n2. Analyst retrieves an observational parameter from a tabular or a gridded file\n--------------------------------------------------------------------------------\n\n--------------------------------------------------------------------------------\n3. Analyst calculates the difference between the observational data and the\n analysis\n--------------------------------------------------------------------------------\n\n--------------------------------------------------------------------------------\n4. Analyst converts this data to a pandas dataframe and computes the number\n of outliers based on the zscore\n--------------------------------------------------------------------------------\n\"\"\"\n\nimport metview as mv\nimport numpy as np\nfrom scipy import stats\n\nt2m_grib = mv.read('./t2m_grib.grib')\n\nobs_3day = mv.read('./obs_3day.bufr')\n\nt2m_gpt = mv.obsfilter(\n parameter = '012004',\n output = 'geopoints',\n data = obs_3day\n)\n\ndiff = t2m_grib - t2m_gpt\n\ndf = diff.to_dataframe()\n\nprint(df)\n\noutliers = np.abs(stats.zscore(df['value'])) > 1.5\n\nprint('# of outliers:', outliers.sum())\n" }, { "alpha_fraction": 0.5989222526550293, "alphanum_fraction": 0.6096997857093811, "avg_line_length": 25.510204315185547, "blob_id": "b18d03f68a7f669b0c1c0a59930137f7f4de42dd", "content_id": "d2ee62729eb8d2c4c60a85f3bc98646949142ebf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1299, "license_type": "permissive", "max_line_length": 80, "num_lines": 49, "path": "/examples/UC-01.py", "repo_name": "meteFANS/metview-python", "src_encoding": "UTF-8", "text": "\"\"\"\nMetview Python use case\n\nUC-01. The Analyst produces plots and files for the Product user\n\n--------------------------------------------------------------------------------\n1. Analyst creates plots and files thanks to his Python applications and scripts\n that benefits from the underlying tools of the framework\n--------------------------------------------------------------------------------\n\nAnalyst reads data from a GRIB file and derives another quantity from it. Then,\nAnalyst saves his data as a GRIB file and creates a plot in PNG format. 
\n\"\"\"\n\nimport metview as mv\n\n\nmydata = mv.read('../tests/test.grib')\n\nderived = mydata * 2 + 5\n\nmv.write('derived_data.grib', derived)\n\ngrid_shade = mv.mcont(\n legend = True,\n contour = False,\n contour_highlight = True,\n contour_shade = True,\n contour_shade_technique = 'grid_shading',\n contour_shade_max_level_colour = 'red',\n contour_shade_min_level_colour = 'blue',\n contour_shade_colour_direction = 'clockwise',\n)\n\n\n# Macro-like PNG creation:\npng = mv.png_output(output_width = 1200, output_name = './myplot')\n\nmv.plot(png, derived, grid_shade)\n\n\n# Using a different notation:\npng_output = {\n 'output_type': 'png',\n 'output_width': 1200,\n 'output_name': './myplot2'\n}\n\nmv.plot(derived, grid_shade, **png_output)\n" }, { "alpha_fraction": 0.5485140085220337, "alphanum_fraction": 0.560751736164093, "avg_line_length": 35.90322494506836, "blob_id": "20e9e83cafffe6eb4ddac16028958fd0fd5196ee", "content_id": "49f0420edcdd71b30352957a1c914f23b4dc9ca8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2288, "license_type": "permissive", "max_line_length": 82, "num_lines": 62, "path": "/examples/UC-04-grib.py", "repo_name": "meteFANS/metview-python", "src_encoding": "UTF-8", "text": "\"\"\"\nMetview Python use case\n\nUC-04. The Analyst retrieves, for a given time interval, the values of\ntwo parameters and combines their values on the same map\n\n--------------------------------------------------------------------------------\n1. Analyst retrieves, for a given time interval, the values of two chosen\n parameters (e.g. temperature, and geopotential) from a given source (i.e. MARS,\n files, observation databases)\n--------------------------------------------------------------------------------\n\n--------------------------------------------------------------------------------\n2. Analyst customises many features of his map for each field he wants to plot\n (e.g. temperature field as shaded areas and geopotenti2. al field as isolines)\n--------------------------------------------------------------------------------\n\n--------------------------------------------------------------------------------\n3. 
Analyst plots the data\n--------------------------------------------------------------------------------\nAnalyst plots data variable t2 with contouring definition t_shade_c, and data\nvariable z with contouring definition mslp_isolines.\nThe fields will be plotted in the order they appear in the mv.plot() command,\nwith the shaded temperature at the bottom, and the geopotential on top.\n\"\"\"\n\nimport metview as mv\n\n\n# read 2m temperature\nt2 = mv.read('./t2_for_UC-04.grib')\n\n# read geopotential\nz = mv.read('./z_for_UC-04.grib')\n\nt_shade_c = mv.mcont(\n legend = True,\n contour_highlight = False,\n contour_level_selection_type = \"interval\",\n contour_interval = 10,\n contour_shade = True,\n contour_shade_max_level = 60,\n contour_shade_min_level = -60,\n contour_shade_method = \"area_fill\",\n contour_shade_max_level_colour = \"red\",\n contour_shade_min_level_colour = \"blue\",\n contour_shade_colour_direction = \"clockwise\"\n )\n\nz_isolines = mv.mcont(\n legend = True,\n contour_line_thickness = 2,\n contour_line_colour = 'black',\n contour_highlight_colour = 'black',\n contour_highlight_thickness = 4,\n contour_level_selection_type = 'interval',\n contour_interval = 5,\n contour_legend_text = 'Geopotential',\n)\n\nmv.setoutput(mv.png_output(output_width = 1000, output_name = './gribplot'))\nmv.plot(t2, t_shade_c, z, z_isolines)\n" }, { "alpha_fraction": 0.5650450587272644, "alphanum_fraction": 0.5855855941772461, "avg_line_length": 30.89655113220215, "blob_id": "b79796c1905d742169b6b06c18c1717f2e795600", "content_id": "3283a5e19f8328a28fda8f0eac13fd0f9873d306", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2775, "license_type": "permissive", "max_line_length": 89, "num_lines": 87, "path": "/examples/UC-03-bufr.py", "repo_name": "meteFANS/metview-python", "src_encoding": "UTF-8", "text": "\"\"\"\nMetview Python use case\n\nThe Python analyst reads some BUFR data and plots it in various ways\n\n--------------------------------------------------------------------------------\n1. Python analyst reads BUFR data and plots it using the default style\n--------------------------------------------------------------------------------\n\n--------------------------------------------------------------------------------\n2. Python analyst reads BUFR data and applies a visual definition\n to alter its plotting style\n--------------------------------------------------------------------------------\n\n--------------------------------------------------------------------------------\n3. 
Python analyst reads BUFR data and filters a single parameter from it\n and plots it with a colour scale\n--------------------------------------------------------------------------------\n\n\"\"\"\nimport metview as mv\n\n\n# define a view over the area of interest and set land shading on\n\nland_shade = mv.mcoast(\n map_coastline_land_shade = True,\n map_coastline_land_shade_colour = \"RGB(0.98,0.95,0.82)\",\n map_coastline_sea_shade = False,\n map_coastline_sea_shade_colour = \"RGB(0.85,0.93,1)\"\n)\n\narea_view = mv.geoview(\n map_area_definition = 'corners',\n area = [45.83,-13.87,62.03,8.92],\n coastlines = land_shade\n)\n\n\n# Simplest plot:\n# NOTE that when plotting a 'raw' BUFR file, Magics will plot synop symbols as shown in\n# https://software.ecmwf.int/wiki/display/METV/Data+Part+1 \"Plotting BUFR Data\"\n\nobs = mv.read('../tests/obs_3day.bufr')\n\nmv.setoutput(mv.png_output(output_width = 1200, output_name = './obsplot1'))\nmv.plot(area_view, obs)\n\n\n# ALTERNATIVELY, add an Observations Plotting visual definition\n\nobs_plotting = mv.mobs(\n obs_temperature = False,\n obs_cloud = False,\n obs_low_cloud = False,\n obs_dewpoint_colour = 'purple'\n)\n\n\nmv.setoutput(mv.png_output(output_width = 1200, output_name = './obsplot2'))\nmv.plot(area_view, obs, obs_plotting)\n\n\n# ALTERNATIVELY, if we don't want to plot the whole observations, but instead want to\n# extract a specific parameter from the BUFR messages, then we use the Observation Filter\n# as shown here:\n\n# dewpoint_t is a 'geopoints' variable\ndewpoint_t = mv.obsfilter(\n output = \"geopoints\",\n parameter = '012006',\n data = obs\n)\n\n# add an optional Symbol Plotting definition to get nice coloured circles\n# at each point\nsymb_visdef = mv.msymb(\n legend = True,\n symbol_type = 'marker',\n symbol_table_mode = 'advanced',\n symbol_advanced_table_max_level_colour = 'red',\n symbol_advanced_table_min_level_colour = 'blue',\n symbol_advanced_table_colour_direction = 'clockwise'\n)\n\nmv.setoutput(mv.png_output(output_width = 1200, output_name = './obsplot3'))\nmv.plot(area_view, dewpoint_t, symb_visdef)\n" }, { "alpha_fraction": 0.631155788898468, "alphanum_fraction": 0.6412060260772705, "avg_line_length": 33.91228103637695, "blob_id": "fbb1aa39d434f2956a92e1788ade874666db80b3", "content_id": "fc858c8bdddb7e303a87dc9985f5a428b0bf92d0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1990, "license_type": "permissive", "max_line_length": 94, "num_lines": 57, "path": "/metview/__main__.py", "repo_name": "meteFANS/metview-python", "src_encoding": "UTF-8", "text": "#\n# Copyright 2017-2019 B-Open Solutions srl.\n# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport sys\n\n\ndef main(argv=None):\n parser = argparse.ArgumentParser()\n parser.add_argument('command')\n args = parser.parse_args(args=argv)\n if args.command == 'selfcheck':\n sys.argv 
= []\n print('Trying to connect to a Metview installation...')\n try:\n from . import bindings as _bindings\n except Exception as exp:\n print('Could not find a valid Metview installation')\n raise(exp)\n mv = dict()\n _bindings.bind_functions(mv, module_name='mv')\n del _bindings\n\n try:\n mv['print']('Hello world - printed from Metview!')\n except Exception as exp:\n print('Could not print a greeting from Metview')\n raise(exp)\n\n mv_version_f = mv['version_info']\n mv_version = mv_version_f()\n mv_maj = str(int(mv_version['metview_major']))\n mv_min = str(int(mv_version['metview_minor']))\n mv_rev = str(int(mv_version['metview_revision']))\n mv_version_string = mv_maj + '.' + mv_min + '.' + mv_rev\n print('Metview version', mv_version_string, 'found')\n\n print(\"Your system is ready.\")\n else:\n raise RuntimeError(\"Command not recognised %r. See usage with --help.\" % args.command)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5072357654571533, "alphanum_fraction": 0.5317823886871338, "avg_line_length": 38.84671401977539, "blob_id": "07444ac6c4d0ae4612ba64cdfd52c04e87141d04", "content_id": "d46cec31ef591213dcdbf9dd5ee329b7d1f35ea1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5459, "license_type": "permissive", "max_line_length": 122, "num_lines": 137, "path": "/examples/seaIce_CO2_correlation.py", "repo_name": "meteFANS/metview-python", "src_encoding": "UTF-8", "text": "# ==============================================================================\n# Authors: ralf mueller, stephan siemen\n#\n#\n# Plan is to create a plot similar to the scatter plot for co2 concentration and\n# september minimum of sea ice extent\n#\n# reference:\n# https://www.mpg.de/10579957/W004_Environment_climate_062-069.pdf, p. 
7\n#\n# ==============================================================================\nimport os\nfrom ecmwfapi import ECMWFDataServer\nfrom cdo import Cdo\nfrom multiprocessing import Pool\nfrom tarfile import TarFile\nimport matplotlib.pyplot as plt\nimport matplotlib.transforms as mtrans\n# basic setup {{{ ===========================================================\nserver = ECMWFDataServer()\ncdo = Cdo()\ncdo.debug = True\ntasks = 4\nstartYear = 1980\nendYear = 2014\n# }}} ==========================================================================\n# helper methods {{{ ===========================================================\ndef getDataFromTarfile(tarfile):\n tf = TarFile(tarfile)\n members = [ m.name for m in tf.getmembers()] \n if (list(set([os.path.exists(x) for x in members])) != [True]):\n tf.extractall()\n tf.close\n return members\n\ndef computeTimeSeries(file,varname,useCellArea=False):\n if (useCellArea):\n ofile = cdo.mul(input = '-fldmean -selname,%s %s -fldsum -gridarea %s'%(varname,file,file),\n options = '-b F32',output = '_'+os.path.basename(file),force=False)\n else:\n ofile = cdo.fldmean(input = '-selname,%s %s'%(varname,file),\n options = '-b F32',output = '_'+os.path.basename(file),force=False)\n return ofile\n\ndef computeTimeSeriesOfFilelist(pool,files,varname,ofile,useCellArea=False):\n results = dict()\n for file in files:\n rfile = pool.apply_async(computeTimeSeries,(file,varname,False))\n results[file] = rfile\n pool.close()\n pool.join()\n\n for k,v in results.items():\n results[k] = v.get()\n\n cdo.yearmean(input = '-cat %s'%(' '.join([results[x] for x in files])),\n output = ofile, force=False,\n options = '-f nc')\n return ofile\n# }}} ==========================================================================\n# Sea Ice Cover retrival + processing {{{\niceCover_file = \"ci_interim_%s-%s-NH.grb\"%(startYear, endYear)\nif ( not os.path.exists(iceCover_file) ):\n server.retrieve({\n 'stream' : \"oper\",\n 'levtype' : \"sfc\",\n 'param' : \"31.128\",\n 'dataset' : \"interim\",\n 'step' : \"0\",\n 'grid' : \"0.5/0.5\",\n 'time' : \"12\",\n 'date' : \"%s-01-01/to/%s-01-01\"%(startYear,endYear),\n 'type' : \"an\",\n 'class' : \"ei\",\n 'area' : \"90/-180/0/180\",\n 'target' : iceCover_file\n })\nelse:\n print(\"use existing file '%s'\"%(iceCover_file))\n# compute the nh ice extent: minimum usually happens in September\niceExtent = 'ice_extent_%s-%s-daymean-SeptMin.nc'%(startYear,endYear)\ncdo.setattribute('sea_ice_extent@unit=m2,sea_ice_extent@standard_name=sea_ice_extent',\n input = '-setname,sea_ice_extent -yearmin -fldsum -mul -selmon,9 %s -gridarea %s'%(iceCover_file,iceCover_file),\n output = iceExtent,force=False,\n options = '-f nc')\niceExtent_ds = cdo.readXDataset(iceExtent)\n# }}} ==========================================================================\n# {{{ CO2 retrieval + processing ===========================================================\n# cams return tarballs of netcdf files\nco2_tarball = \"co2_totalColumn_%s-%s.tar\"%(startYear, endYear)\nif ( not os.path.exists(co2_tarball) ):\n server.retrieve({ #CO2\n \"dataset\" : \"cams_ghg_inversions\",\n \"datatype\" : \"ra\",\n \"date\" : \"%s-01-01/to/%s-01-01\"%(startYear,endYear),\n \"frequency\" : \"3h\",\n \"param\" : \"co2\",\n \"quantity\" : \"total_column\",\n \"version\" : \"v16r2\",\n \"target\" : co2_tarball\n })\nelse:\n print(\"use existing file '%s'\"%(co2_tarball))\nco2_files = getDataFromTarfile(co2_tarball)\nco2_timeSeries = 
'co2_timeseries_%s-%s.nc'%(startYear,endYear)\ncomputeTimeSeriesOfFilelist(Pool(tasks),co2_files,'XCO2',co2_timeSeries,False)\nco2_ds = cdo.readXDataset(co2_timeSeries)\n\n# }}} ==========================================================================\n# scatter plot {{{ =============================================================\n# some debugging output\niceExtent_ds.info()\nco2_ds.info()\n# shaping the data for plotting it\nxSelection = co2_ds.sel(time=slice('%s-01-01'%(startYear), '%s-01-01'%(endYear)))\nySelection = iceExtent_ds.sel(time=slice('%s-01-01'%(startYear), '%s-01-01'%(endYear)))\n# create the final scatter plot\nfig = plt.figure(figsize=(10, 7))\nax = plt.subplot(1, 1, 1)\ntrans_offset = mtrans.offset_copy(ax.transData, fig=fig,\n x=0.05, y=-0.20, units='inches') # inches because we are in UK\n\nx = xSelection.to_array()[1,:,0,0,0]\ny = ySelection.to_array()[1,:,0,0,0]\nplt.scatter( x , y)\n# put years as labels\nyears = xSelection.time.dt.year\nfor _x,_y,_year in zip(x,y,years):\n plt.text(_x, _y, '%d'%(_year), transform=trans_offset)\n\nplt.grid(True)\nplt.ylabel('sea ice extent [m2]')\nplt.xlabel('co2 concentration [ppm]')\nplt.title('Correlation of NH Sea Ice extent minimum and CO2 emissions')\nplt.savefig('seaIce_CO2_correlation.png')\n# }}} ==========================================================================\n# vim:fdm=marker\n" }, { "alpha_fraction": 0.6638705730438232, "alphanum_fraction": 0.6926302909851074, "avg_line_length": 49.54545593261719, "blob_id": "2a521526d5b0352720917e43ff0733c87b5ba633", "content_id": "db3c5447424038d50d3b276f3770705fbd754f1c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1669, "license_type": "permissive", "max_line_length": 407, "num_lines": 33, "path": "/CHANGELOG.rst", "repo_name": "meteFANS/metview-python", "src_encoding": "UTF-8", "text": "\nChangelog for metview\n=====================\n\n1.1.0 (2019-03-04)\n------------------\n\n- added equality (``==``) and non-equality (``!=``) operators for Fieldset and Geopoints objects, e.g. 
``same = (a == b)`` will produce a new Fieldset with 1s where the values are the same, and 0s elsewhere.\n- added new thermodynamic, gradient and utility functions: 'thermo_data_info', 'thermo_parcel_path', 'thermo_parcel_area', 'xy_curve', 'potential_temperature', 'temperature_from_potential_temperature', 'saturation_mixing_ratio', 'mixing_ratio', 'vapour_pressure', 'saturation_vapour_pressure', 'lifted_condensation_level', 'divergence', 'vorticity', 'laplacian', 'geostrophic_wind_pl', 'geostrophic_wind_ml'\n- improved conversion from geopoints to pandas dataframe to cope with new NCOLS subformat\n- make conversion from Fieldset to xarray dataset compatible with latest versions of cfgrib\n\n\n1.0.0 (2018-12-20)\n------------------\n\n- code cleanup so that tox and pyflakes pass the tests\n\n\n0.9.1 (2018-11-24)\n------------------\n\n- fixed issue where creating ``Fieldset`` slices of more than 10 fields or so did not work\n- allow the creation of a ``Fieldset`` object, either empty ``Fieldsest()`` or with a path to GRIB ``Fieldset('/path/to/grib')``\n- added ``append()`` method to a ``Fieldset`` to append ``Fieldset``s to ``Fieldset``s\n- the ``dataset_to_fieldset`` function that converts an xarray dataset to a Metview ``Fieldset`` now accepts the ``no_warn=True`` argument to suppress warnings while the xarray GRIB writer is pre-beta\n- ignore errors on exit from a data examiner\n- added more example Jupyter notebooks\n\n\n0.9.0 (2018-10-29)\n------------------\n\n- Beta release.\n" }, { "alpha_fraction": 0.700386106967926, "alphanum_fraction": 0.7212355136871338, "avg_line_length": 36, "blob_id": "6381b110df6c984402e25c2192f6649aefb4bf66", "content_id": "ea365ca8c0b1e9bdde57fe492d2cc375c97a3bf8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1295, "license_type": "permissive", "max_line_length": 96, "num_lines": 35, "path": "/metview/__init__.py", "repo_name": "meteFANS/metview-python", "src_encoding": "UTF-8", "text": "#\n# Copyright 2017-2019 B-Open Solutions srl.\n# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# requires a Python 3 interpreter\nimport sys\nif sys.version_info[0] < 3:\n raise EnvironmentError(\"Metview's Python interface requires Python 3. You are using Python \"\n + repr(sys.version_info))\n\n\n# if the user has started via \"python -m metview selfcheck\"\n# then we do not want to import anything yet because we want to\n# catch errors differently\n\nif len(sys.argv) != 2 or sys.argv[0] != \"-m\" or sys.argv[1] != \"selfcheck\":\n\n from . 
import bindings as _bindings\n\n _bindings.bind_functions(globals(), module_name=__name__)\n\n # Remove \"_bindings\" from the public API.\n del _bindings\n" }, { "alpha_fraction": 0.5747151970863342, "alphanum_fraction": 0.5786070823669434, "avg_line_length": 32.57323455810547, "blob_id": "0b5dfac20582cfb57f7cdf16f817cea4d8a7039f", "content_id": "55da22bcf44db6e071f4b6f1a73caccb723ac91c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31861, "license_type": "permissive", "max_line_length": 99, "num_lines": 949, "path": "/metview/bindings.py", "repo_name": "meteFANS/metview-python", "src_encoding": "UTF-8", "text": "#\n# Copyright 2017-2019 B-Open Solutions srl.\n# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport keyword\nimport os\nimport pkgutil\nimport signal\nimport tempfile\nimport builtins\nfrom enum import Enum\n\nimport cffi\nimport numpy as np\n\n\ndef string_from_ffi(s):\n return ffi.string(s).decode('utf-8')\n\n\n# -----------------------------------------------------------------------------\n# Startup\n# -----------------------------------------------------------------------------\n\n\nclass MetviewInvoker:\n \"\"\"Starts a new Metview session on construction and terminates it on program exit\"\"\"\n\n def __init__(self):\n \"\"\"\n Constructor - starts a Metview session and reads its environment information\n Raises an exception if Metview does not respond within 5 seconds\n \"\"\"\n\n self.debug = (os.environ.get(\"METVIEW_PYTHON_DEBUG\", '0') == '1')\n\n # check whether we're in a running Metview session\n if 'METVIEW_TITLE_PROD' in os.environ:\n self.persistent_session = True\n self.info_section = {'METVIEW_LIB': os.environ['METVIEW_LIB']}\n return\n\n import atexit\n import time\n import subprocess\n\n if self.debug:\n print('MetviewInvoker: Invoking Metview')\n self.persistent_session = False\n self.metview_replied = False\n self.metview_startup_timeout = 5 # seconds\n\n # start Metview with command-line parameters that will let it communicate back to us\n env_file = tempfile.NamedTemporaryFile(mode='rt')\n pid = os.getpid()\n # print('PYTHON:', pid, ' ', env_file.name, ' ', repr(signal.SIGUSR1))\n signal.signal(signal.SIGUSR1, self.signal_from_metview)\n # p = subprocess.Popen(['metview', '-edbg', 'tv8 -a', '-slog', '-python-serve',\n # env_file.name, str(pid)], stdout=subprocess.PIPE)\n metview_startup_cmd = os.environ.get(\"METVIEW_PYTHON_START_CMD\", 'metview')\n metview_flags = [metview_startup_cmd, '-nocreatehome', '-python-serve',\n env_file.name, str(pid)]\n if self.debug:\n metview_flags.insert(2, '-slog')\n print('Starting Metview using these command args:')\n print(metview_flags)\n\n try:\n subprocess.Popen(metview_flags)\n except Exception as exp:\n print(\"Could not run the Metview executable ('\" + metview_startup_cmd + \"'); \"\n \"check that the binaries for Metview (version 5 at 
least) are installed \"\n \"and are in the PATH.\")\n raise exp\n\n # wait for Metview to respond...\n wait_start = time.time()\n while (not(self.metview_replied) and\n (time.time() - wait_start < self.metview_startup_timeout)):\n time.sleep(0.001)\n\n if not(self.metview_replied):\n raise Exception('Command \"metview\" did not respond within '\n + str(self.metview_startup_timeout) + ' seconds. '\n 'At least Metview 5 is required, so please ensure it is in your PATH, '\n 'as earlier versions will not work with the Python interface.')\n\n self.read_metview_settings(env_file.name)\n\n # when the Python session terminates, we should destroy this object so that the Metview\n # session is properly cleaned up. We can also do this in a __del__ function, but there can\n # be problems with the order of cleanup - e.g. the 'os' module might be deleted before\n # this destructor is called.\n atexit.register(self.destroy)\n\n def destroy(self):\n \"\"\"Kills the Metview session. Raises an exception if it could not do it.\"\"\"\n\n if self.persistent_session:\n return\n\n if self.metview_replied:\n if self.debug:\n print('MetviewInvoker: Closing Metview')\n metview_pid = self.info('EVENT_PID')\n try:\n os.kill(int(metview_pid), signal.SIGUSR1)\n except Exception as exp:\n print(\"Could not terminate the Metview process pid=\" + metview_pid)\n raise exp\n\n def signal_from_metview(self, *args):\n \"\"\"Called when Metview sends a signal back to Python to say that it's started\"\"\"\n # print ('PYTHON: GOT SIGNAL BACK FROM METVIEW!')\n self.metview_replied = True\n\n def read_metview_settings(self, settings_file):\n \"\"\"Parses the settings file generated by Metview and sets the corresponding env vars\"\"\"\n import configparser\n\n cf = configparser.ConfigParser()\n cf.read(settings_file)\n env_section = cf['Environment']\n for envar in env_section:\n # print('set ', envar.upper(), ' = ', env_section[envar])\n os.environ[envar.upper()] = env_section[envar]\n self.info_section = cf['Info']\n\n def info(self, key):\n \"\"\"Returns a piece of Metview information that was not set as an env var\"\"\"\n return self.info_section[key]\n\n def store_signal_handlers(self):\n \"\"\"Stores the set of signal handlers that Metview will override\"\"\"\n self.sigint = signal.getsignal(signal.SIGINT)\n self.sighup = signal.getsignal(signal.SIGHUP)\n self.sighquit = signal.getsignal(signal.SIGQUIT)\n self.sigterm = signal.getsignal(signal.SIGTERM)\n self.sigalarm = signal.getsignal(signal.SIGALRM)\n\n def restore_signal_handlers(self):\n \"\"\"Restores the set of signal handlers that Metview has overridden\"\"\"\n signal.signal(signal.SIGINT, self.sigint)\n signal.signal(signal.SIGHUP, self.sighup)\n signal.signal(signal.SIGQUIT, self.sighquit)\n signal.signal(signal.SIGTERM, self.sigterm)\n signal.signal(signal.SIGALRM, self.sigalarm)\n\n\nmi = MetviewInvoker()\n\ntry:\n ffi = cffi.FFI()\n ffi.cdef(pkgutil.get_data('metview', 'metview.h').decode('ascii'))\n mv_lib = mi.info('METVIEW_LIB')\n # is there a more general way to add to a path to a list of paths?\n os.environ[\"LD_LIBRARY_PATH\"] = mv_lib + ':' + os.environ.get(\"LD_LIBRARY_PATH\", '')\n\n try:\n # Linux / Unix systems\n lib = ffi.dlopen(os.path.join(mv_lib, 'libMvMacro.so'))\n except OSError:\n # MacOS systems\n lib = ffi.dlopen(os.path.join(mv_lib, 'libMvMacro'))\n\nexcept Exception as exp:\n print('Error loading Metview/libMvMacro. 
LD_LIBRARY_PATH='\n + os.environ.get(\"LD_LIBRARY_PATH\", ''))\n raise exp\n\n\n# The C/C++ code behind lib.p_init() will call marsinit(), which overrides various signal\n# handlers. We don't necessarily want this when running a Python script - we should use\n# the default Python behaviour for handling signals, so we save the current signals\n# before calling p_init() and restore them after.\nmi.store_signal_handlers()\nlib.p_init()\nmi.restore_signal_handlers()\n\n\n# -----------------------------------------------------------------------------\n# Classes to handle complex Macro types\n# -----------------------------------------------------------------------------\n\n\nclass Value:\n\n def __init__(self, val_pointer):\n self.val_pointer = val_pointer\n\n def push(self):\n if self.val_pointer is None:\n lib.p_push_nil()\n else:\n lib.p_push_value(self.val_pointer)\n\n # if we steal a value pointer from a temporary Value object, we need to\n # ensure that the Metview Value is not destroyed when the temporary object\n # is destroyed by setting its pointer to None\n def steal_val_pointer(self, other):\n self.val_pointer = other.val_pointer\n other.val_pointer = None\n\n # enable a more object-oriented interface, e.g. a = fs.interpolate(10, 29.4)\n def __getattr__(self, fname):\n def call_func_with_self(*args, **kwargs):\n return call(fname, self, *args, **kwargs)\n return call_func_with_self\n\n # on destruction, ensure that the Macro Value is also destroyed\n def __del__(self):\n try:\n if self.val_pointer is not None and lib is not None:\n lib.p_destroy_value(self.val_pointer)\n self.val_pointer = None\n except Exception as exp:\n print(\"Could not destroy Metview variable \", self)\n raise exp\n\n\nclass Request(dict, Value):\n verb = \"UNKNOWN\"\n\n def __init__(self, req):\n self.val_pointer = None\n\n # initialise from Python object (dict/Request)\n if isinstance(req, dict):\n self.update(req)\n self.to_metview_style()\n if isinstance(req, Request):\n self.verb = req.verb\n self.val_pointer = req.val_pointer\n\n # initialise from a Macro pointer\n else:\n Value.__init__(self, req)\n self.verb = string_from_ffi(lib.p_get_req_verb(req))\n n = lib.p_get_req_num_params(req)\n for i in range(0, n):\n param = string_from_ffi(lib.p_get_req_param(req, i))\n raw_val = lib.p_get_req_value(req, param.encode('utf-8'))\n if raw_val != ffi.NULL:\n val = string_from_ffi(raw_val)\n self[param] = val\n # self['_MACRO'] = 'BLANK'\n # self['_PATH'] = 'BLANK'\n\n def __str__(self):\n return \"VERB: \" + self.verb + super().__str__()\n\n # translate Python classes into Metview ones where needed\n def to_metview_style(self):\n for k, v in self.items():\n\n # bool -> on/off\n if isinstance(v, bool):\n conversion_dict = {True: 'on', False: 'off'}\n self[k] = conversion_dict[v]\n\n # class_ -> class (because 'class' is a Python keyword and cannot be\n # used as a named parameter)\n elif k == 'class_':\n self['class'] = v\n del self['class_']\n\n def push(self):\n # if we have a pointer to a Metview Value, then use that because it's more\n # complete than the dict\n if self.val_pointer:\n Value.push(self)\n else:\n r = lib.p_new_request(self.verb.encode('utf-8'))\n\n # to populate a request on the Macro side, we push each\n # value onto its stack, and then tell it to create a new\n # parameter with that name for the request. 
This allows us to\n # use Macro to handle the addition of complex data types to\n # a request\n for k, v in self.items():\n push_arg(v)\n lib.p_set_request_value_from_pop(r, k.encode('utf-8'))\n\n lib.p_push_request(r)\n\n def __getitem__(self, index):\n return subset(self, index)\n\n\ndef push_bytes(b):\n lib.p_push_string(b)\n\n\ndef push_str(s):\n push_bytes(s.encode('utf-8'))\n\n\ndef push_list(lst):\n # ask Metview to create a new list, then add each element by\n # pushing it onto the stack and asking Metview to pop it off\n # and add it to the list\n mlist = lib.p_new_list(len(lst))\n for i, val in enumerate(lst):\n push_arg(val)\n lib.p_add_value_from_pop_to_list(mlist, i)\n lib.p_push_list(mlist)\n\n\ndef push_date(d):\n lib.p_push_datestring(np.datetime_as_string(d).encode('utf-8'))\n\n\ndef push_datetime(d):\n lib.p_push_datestring(d.isoformat().encode('utf-8'))\n\n\ndef push_datetime_date(d):\n s = d.isoformat() + 'T00:00:00'\n lib.p_push_datestring(s.encode('utf-8'))\n\n\ndef push_vector(npa):\n\n # convert numpy array to CData\n if npa.dtype == np.float64:\n cffi_buffer = ffi.cast('double*', npa.ctypes.data)\n lib.p_push_vector_from_double_array(cffi_buffer, len(npa), np.nan)\n elif npa.dtype == np.float32:\n cffi_buffer = ffi.cast('float*', npa.ctypes.data)\n lib.p_push_vector_from_float32_array(cffi_buffer, len(npa), np.nan)\n else:\n raise Exception('Only float32 and float64 NumPy arrays can be passed to Metview, not ',\n npa.dtype)\n\n\nclass FileBackedValue(Value):\n\n def __init__(self, val_pointer):\n Value.__init__(self, val_pointer)\n\n def url(self):\n # ask Metview for the file relating to this data (Metview will write it if necessary)\n return string_from_ffi(lib.p_data_path(self.val_pointer))\n\n\nclass FileBackedValueWithOperators(FileBackedValue):\n\n def __init__(self, val_pointer):\n FileBackedValue.__init__(self, val_pointer)\n\n def __add__(self, other):\n return add(self, other)\n\n def __sub__(self, other):\n return sub(self, other)\n\n def __mul__(self, other):\n return prod(self, other)\n\n def __truediv__(self, other):\n return div(self, other)\n\n def __pow__(self, other):\n return power(self, other)\n\n def __ge__(self, other):\n return greater_equal_than(self, other)\n\n def __gt__(self, other):\n return greater_than(self, other)\n\n def __le__(self, other):\n return lower_equal_than(self, other)\n\n def __lt__(self, other):\n return lower_than(self, other)\n\n def __eq__(self, other):\n return equal(self, other)\n\n def __ne__(self, other):\n return met_not_eq(self, other)\n\n\nclass ContainerValue(Value):\n def __init__(self, val_pointer, macro_index_base, element_type, support_slicing):\n Value.__init__(self, val_pointer)\n self.idx = 0\n self.macro_index_base = macro_index_base\n self.element_type = element_type # the type of elements that the container contains\n self.support_slicing = support_slicing\n\n def __len__(self):\n if self.val_pointer is None:\n return 0\n else:\n return int(count(self))\n\n def __getitem__(self, index):\n if isinstance(index, slice):\n if self.support_slicing:\n indices = index.indices(len(self))\n fields = [self[i] for i in range(*indices)]\n if len(fields) == 0:\n return None\n else:\n f = fields[0]\n for i in range(1, len(fields)):\n f = merge(f, fields[i])\n return f\n else:\n raise Exception('This object does not support extended slicing: ' + str(self))\n else: # normal index\n if isinstance(index, str): # can have a string as an index\n return subset(self, index)\n else:\n return subset(self, index + 
self.macro_index_base) # numeric index: 0->1-based\n\n def __setitem__(self, index, value):\n if (isinstance(value, self.element_type)):\n lib.p_set_subvalue(self.val_pointer, index + self.macro_index_base, value.val_pointer)\n else:\n raise Exception('Cannot assign ', value, ' as element of ', self)\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.idx >= self.__len__():\n self.idx = 0\n raise StopIteration\n else:\n self.idx += 1\n return self.__getitem__(self.idx - 1)\n\n\nclass Fieldset(FileBackedValueWithOperators, ContainerValue):\n\n def __init__(self, val_pointer=None, path=None):\n FileBackedValue.__init__(self, val_pointer)\n ContainerValue.__init__(self, val_pointer, 1, Fieldset, True)\n if path is not None:\n temp = read(path)\n self.steal_val_pointer(temp)\n\n def append(self, other):\n temp = merge(self, other)\n self.steal_val_pointer(temp)\n\n def to_dataset(self):\n # soft dependency on cfgrib\n try:\n from cfgrib import xarray_store\n except ImportError:\n print(\"Package cfgrib/xarray_store not found. Try running 'pip install cfgrib'.\")\n raise\n dataset = xarray_store.open_dataset(self.url())\n return dataset\n\n\nclass Bufr(FileBackedValue):\n\n def __init__(self, val_pointer):\n FileBackedValue.__init__(self, val_pointer)\n\n\nclass Geopoints(FileBackedValueWithOperators, ContainerValue):\n\n def __init__(self, val_pointer):\n FileBackedValueWithOperators.__init__(self, val_pointer)\n ContainerValue.__init__(self, val_pointer, 0, None, False)\n\n def to_dataframe(self):\n try:\n import pandas as pd\n except ImportError:\n print(\"Package pandas not found. Try running 'pip install pandas'.\")\n raise\n\n # create a dictionary of columns (note that we do not include 'time'\n # because it is incorporated into 'date')\n cols = self.columns()\n if 'time' in cols:\n cols.remove('time')\n\n pddict = {}\n for c in cols:\n pddict[c] = self[c]\n\n df = pd.DataFrame(pddict)\n return df\n\n\nclass NetCDF(FileBackedValueWithOperators):\n def __init__(self, val_pointer):\n FileBackedValueWithOperators.__init__(self, val_pointer)\n\n def to_dataset(self):\n # soft dependency on xarray\n try:\n import xarray as xr\n except ImportError:\n print(\"Package xarray not found. Try running 'pip install xarray'.\")\n raise\n dataset = xr.open_dataset(self.url())\n return dataset\n\n\nclass Odb(FileBackedValue):\n\n def __init__(self, val_pointer):\n FileBackedValue.__init__(self, val_pointer)\n\n def to_dataframe(self):\n try:\n import pandas as pd\n except ImportError:\n print(\"Package pandas not found. Try running 'pip install pandas'.\")\n raise\n\n cols = self.columns()\n pddict = {}\n\n for col in cols:\n pddict[col] = self.values(col)\n\n df = pd.DataFrame(pddict)\n return df\n\n\nclass Table(FileBackedValue):\n\n def __init__(self, val_pointer):\n FileBackedValue.__init__(self, val_pointer)\n\n def to_dataframe(self):\n try:\n import pandas as pd\n except ImportError:\n print(\"Package pandas not found. 
Try running 'pip install pandas'.\")\n raise\n\n df = pd.read_csv(self.url())\n return df\n\n\nclass GeopointSet(FileBackedValueWithOperators, ContainerValue):\n\n def __init__(self, val_pointer):\n FileBackedValueWithOperators.__init__(self, val_pointer)\n ContainerValue.__init__(self, val_pointer, 1, Geopoints, False)\n\n\n# -----------------------------------------------------------------------------\n# Pushing data types to Macro\n# -----------------------------------------------------------------------------\n\n\ndef dataset_to_fieldset(val, **kwarg):\n # we try to import xarray as locally as possible to reduce startup time\n # try to write the xarray as a GRIB file, then read into a fieldset\n import xarray as xr\n import cfgrib\n\n if not isinstance(val, xr.core.dataset.Dataset):\n raise TypeError('dataset_to_fieldset requires a variable of type xr.core.dataset.Dataset;'\n ' was supplied with ', builtins.type(val))\n\n f, tmp = tempfile.mkstemp(\".grib\")\n os.close(f)\n\n try:\n # could add keys, e.g. grib_keys={'centre': 'ecmf'})\n cfgrib.to_grib(val, tmp, **kwarg)\n except:\n print(\"Error trying to write xarray dataset to GRIB for conversion to Metview Fieldset\")\n raise\n\n # TODO: tell Metview that this is a temporary file that should be deleted when no longer needed\n fs = read(tmp)\n return fs\n\n\ndef push_xarray_dataset(val):\n fs = dataset_to_fieldset(val)\n fs.push()\n\n\n# try_to_push_complex_type exists as a separate function so that we don't have\n# to import xarray at the top of the module - this saves some time on startup\ndef try_to_push_complex_type(val):\n import xarray as xr\n if isinstance(val, xr.core.dataset.Dataset):\n push_xarray_dataset(val)\n else:\n raise TypeError('Cannot push this type of argument to Metview: ', builtins.type(val))\n\n\nclass ValuePusher():\n \"\"\"Class to handle pushing values to the Macro library\"\"\"\n\n def __init__(self):\n # a set of pairs linking value types with functions to push them to Macro\n # note that Request must come before dict, because a Request inherits from dict;\n # this ordering requirement also means we should use list or tuple instead of a dict\n self.funcs = (\n (float, lambda n : lib.p_push_number(n)),\n ((int, np.number), lambda n : lib.p_push_number(float(n))),\n (str, lambda n : push_str(n)),\n (Request, lambda n : n.push()),\n (dict, lambda n : Request(n).push()),\n ((list, tuple), lambda n : push_list(n)),\n (type(None), lambda n : lib.p_push_nil()),\n (FileBackedValue, lambda n : n.push()),\n (np.datetime64, lambda n : push_date(n)),\n (datetime.datetime, lambda n : push_datetime(n)),\n (datetime.date, lambda n : push_datetime_date(n)),\n (np.ndarray, lambda n : push_vector(n)),\n )\n\n def push_value(self, val):\n for typekey, typefunc in self.funcs:\n if isinstance(val, typekey):\n typefunc(val)\n return 1\n\n # if we haven't returned yet, then try the more complex types\n try_to_push_complex_type(val)\n return 1\n\nvp = ValuePusher()\n\n\ndef push_arg(n):\n return vp.push_value(n)\n\n\ndef dict_to_pushed_args(d):\n\n # push each key and value onto the argument stack\n for k, v in d.items():\n push_str(k)\n push_arg(v)\n\n return 2 * len(d) # return the number of arguments generated\n\n\n# -----------------------------------------------------------------------------\n# Returning data types from Macro\n# -----------------------------------------------------------------------------\n\n\ndef list_from_metview(val):\n\n mlist = lib.p_value_as_list(val)\n result = []\n n = lib.p_list_count(mlist)\n 
all_vectors = True\n for i in range(0, n):\n mval = lib.p_list_element_as_value(mlist, i)\n v = value_from_metview(mval)\n if all_vectors and not isinstance(v, np.ndarray):\n all_vectors = False\n result.append(v)\n\n # if this is a list of vectors, then create a 2-D numPy array\n if all_vectors and n > 0:\n result = np.stack(result, axis=0)\n\n return result\n\n\ndef datestring_from_metview(val):\n\n mdate = string_from_ffi(lib.p_value_as_datestring(val))\n dt = datetime.datetime.strptime(mdate, \"%Y-%m-%dT%H:%M:%S\")\n return dt\n\n\ndef vector_from_metview(val):\n\n vec = lib.p_value_as_vector(val, np.nan)\n\n n = lib.p_vector_count(vec)\n s = lib.p_vector_elem_size(vec)\n\n if s == 4:\n nptype = np.float32\n b = lib.p_vector_float32_array(vec)\n elif s == 8:\n nptype = np.float64\n b = lib.p_vector_double_array(vec)\n else:\n raise Exception('Metview vector data type cannot be handled: ', s)\n\n bsize = n * s\n c_buffer = ffi.buffer(b, bsize)\n np_array = np.frombuffer(c_buffer, dtype=nptype)\n return np_array\n\n\ndef handle_error(val):\n msg = string_from_ffi(lib.p_error_message(val))\n if \"Service\" in msg and \"Examiner\" in msg:\n return None\n else:\n return Exception('Metview error: ' + (msg))\n\n\ndef string_from_metview(val):\n return string_from_ffi(lib.p_value_as_string(val))\n\n\nclass MvRetVal(Enum):\n tnumber = 0\n tstring = 1\n tgrib = 2\n trequest = 3\n tbufr = 4\n tgeopts = 5\n tlist = 6\n tnetcdf = 7\n tnil = 8\n terror = 9\n tdate = 10\n tvector = 11\n todb = 12\n ttable = 13\n tgptset = 14\n tunknown = 99\n\n\nclass ValueReturner():\n \"\"\"Class to handle return values from the Macro library\"\"\"\n def __init__(self):\n self.funcs = {}\n self.funcs[MvRetVal.tnumber.value] = lambda val : lib.p_value_as_number(val)\n self.funcs[MvRetVal.tstring.value] = lambda val : string_from_metview(val)\n self.funcs[MvRetVal.tgrib.value] = lambda val : Fieldset(val)\n self.funcs[MvRetVal.trequest.value] = lambda val : Request(val)\n self.funcs[MvRetVal.tbufr.value] = lambda val : Bufr(val)\n self.funcs[MvRetVal.tgeopts.value] = lambda val : Geopoints(val)\n self.funcs[MvRetVal.tlist.value] = lambda val : list_from_metview(val)\n self.funcs[MvRetVal.tnetcdf.value] = lambda val : NetCDF(val)\n self.funcs[MvRetVal.tnil.value] = lambda val : None\n self.funcs[MvRetVal.terror.value] = lambda val : handle_error(val)\n self.funcs[MvRetVal.tdate.value] = lambda val : datestring_from_metview(val)\n self.funcs[MvRetVal.tvector.value] = lambda val : vector_from_metview(val)\n self.funcs[MvRetVal.todb.value] = lambda val : Odb(val)\n self.funcs[MvRetVal.ttable.value] = lambda val : Table(val)\n self.funcs[MvRetVal.tgptset.value] = lambda val : GeopointSet(val)\n\n def translate_return_val(self, val):\n rt = lib.p_value_type(val)\n try:\n return self.funcs[rt](val)\n except Exception:\n raise Exception('value_from_metview got an unhandled return type: ' + str(rt))\n\n\nvr = ValueReturner()\n\n\ndef value_from_metview(val):\n retval = vr.translate_return_val(val)\n if isinstance(retval, Exception):\n raise retval\n return retval\n\n\n# -----------------------------------------------------------------------------\n# Creating and calling Macro functions\n# -----------------------------------------------------------------------------\n\ndef _call_function(mfname, *args, **kwargs):\n\n nargs = 0\n\n for n in args:\n actual_n_args = push_arg(n)\n nargs += actual_n_args\n\n merged_dict = {}\n merged_dict.update(kwargs)\n if len(merged_dict) > 0:\n dn = dict_to_pushed_args(Request(merged_dict))\n 
nargs += dn\n\n lib.p_call_function(mfname.encode('utf-8'), nargs)\n\n\ndef make(mfname):\n\n def wrapped(*args, **kwargs):\n err = _call_function(mfname, *args, **kwargs)\n if err:\n pass # throw Exceception\n\n val = lib.p_result_as_value()\n return value_from_metview(val)\n\n return wrapped\n\n\ndef bind_functions(namespace, module_name=None):\n \"\"\"Add to the module globals all metview functions except operators like: +, &, etc.\"\"\"\n for metview_name in make('dictionary')():\n if metview_name.isidentifier():\n python_name = metview_name\n # NOTE: we append a '_' to metview functions that clash with python reserved keywords\n # as they cannot be used as identifiers, for example: 'in' -> 'in_'\n if keyword.iskeyword(metview_name):\n python_name += '_'\n python_func = make(metview_name)\n python_func.__name__ = python_name\n python_func.__qualname__ = python_name\n if module_name:\n python_func.__module__ = module_name\n namespace[python_name] = python_func\n # else:\n # print('metview function %r not bound to python' % metview_name)\n # add the 'mvl' functions, which are written in Macro and therefore not\n # listed by the dictionary() function\n for f in ['mvl_ml2hPa', 'mvl_create_netcdf_2d', 'mvl_flextra_etadot', 'mvl_geocircle',\n 'mvl_geoline', 'mvl_geopotential_on_ml', 'mvl_mxn_subframes', 'mvl_plot_scm_data',\n 'mvl_regular_layout', 'mvl_regular_layout_area', 'thermo_data_info',\n 'thermo_parcel_path', 'thermo_parcel_area', 'xy_curve', 'potential_temperature',\n 'temperature_from_potential_temperature', 'saturation_mixing_ratio', 'mixing_ratio',\n 'vapour_pressure', 'saturation_vapour_pressure',\n 'lifted_condensation_level', 'divergence', 'vorticity', 'laplacian',\n 'geostrophic_wind_pl', 'geostrophic_wind_ml']:\n namespace[f] = make(f)\n\n # HACK: some fuctions are missing from the 'dictionary' call.\n namespace['neg'] = make('neg')\n namespace['nil'] = make('nil')\n # override some functions that need special treatment\n # FIXME: this needs to be more structured\n namespace['plot'] = plot\n namespace['setoutput'] = setoutput\n namespace['dataset_to_fieldset'] = dataset_to_fieldset\n\n namespace['Fieldset'] = Fieldset\n\n\n# some explicit bindings are used here\nadd = make('+')\ncall = make('call')\ncount = make('count')\ndiv = make('/')\nequal = make('=')\nfilter = make('filter')\ngreater_equal_than = make('>=')\ngreater_than = make('>')\nlower_equal_than = make('<=')\nlower_than = make('<')\nmerge = make('&')\nmet_not_eq = make('<>')\nmet_plot = make('plot')\nnil = make('nil')\npng_output = make('png_output')\npower = make('^')\nprod = make('*')\nps_output = make('ps_output')\nread = make('read')\nmet_setoutput = make('setoutput')\nsub = make('-')\nsubset = make('[]')\n\n\n# -----------------------------------------------------------------------------\n# Particular code for calling the plot() command\n# -----------------------------------------------------------------------------\n\nclass Plot():\n\n def __init__(self):\n self.plot_to_jupyter = False\n\n def __call__(self, *args, **kwargs):\n if self.plot_to_jupyter:\n f, tmp = tempfile.mkstemp(\".png\")\n os.close(f)\n\n base, ext = os.path.splitext(tmp)\n\n met_setoutput(png_output(output_name=base, output_name_first_page_number='off'))\n met_plot(*args)\n\n image = Image(tmp)\n os.unlink(tmp)\n return image\n else:\n map_outputs = {\n 'png': png_output,\n 'ps': ps_output,\n }\n if 'output_type' in kwargs:\n output_function = map_outputs[kwargs['output_type'].lower()]\n kwargs.pop('output_type')\n 
met_plot(output_function(kwargs), *args)\n else:\n met_plot(*args)\n # the Macro plot command returns an empty definition, but\n # None is better for Python\n return None\n\n\nplot = Plot()\n\n\n# On a test system, importing IPython took approx 0.5 seconds, so to avoid that hit\n# under most circumstances, we only import it when the user asks for Jupyter\n# functionality. Since this occurs within a function, we need a little trickery to\n# get the IPython functions into the global namespace so that the plot object can use them\ndef setoutput(*args):\n if 'jupyter' in args:\n try:\n global Image\n global get_ipython\n IPython = __import__('IPython', globals(), locals())\n Image = IPython.display.Image\n get_ipython = IPython.get_ipython\n except ImportError as imperr:\n print('Could not import IPython module - plotting to Jupyter will not work')\n raise imperr\n\n # test whether we're in the Jupyter environment\n if get_ipython() is not None:\n plot.plot_to_jupyter = True\n else:\n print(\"ERROR: setoutput('jupyter') was set, but we are not in a Jupyter environment\")\n raise(Exception('Could not set output to jupyter'))\n else:\n plot.plot_to_jupyter = False\n met_setoutput(*args)\n" }, { "alpha_fraction": 0.4862692654132843, "alphanum_fraction": 0.5053583383560181, "avg_line_length": 30.43157958984375, "blob_id": "e49a2727c1ac19cce6901d8c515938c77914ac17", "content_id": "c76886d41240fb772c5c7a9a9dfee52de22a5ec8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2986, "license_type": "permissive", "max_line_length": 80, "num_lines": 95, "path": "/examples/UC-07-bufr.py", "repo_name": "meteFANS/metview-python", "src_encoding": "UTF-8", "text": "\"\"\"\nMetview Python use case\n\nUC-07. The Analyst compute simple differences between observations and analysis\nand plot the values\n\nBUFR version - BUFR is not tabular or gridded, but we can use Metview Python\nframework to extract a particular parameter to a tabular format (geopoints)\n\n--------------------------------------------------------------------------------\n1. Analyst retrieves the analysis from a gridded data file\n--------------------------------------------------------------------------------\n\n--------------------------------------------------------------------------------\n2. Analyst retrieves an observational parameter from a tabular or a gridded file\n--------------------------------------------------------------------------------\n\n--------------------------------------------------------------------------------\n3. Analyst calculates the difference between the observational data and the\n analysis and classified the field values according to the magnitude of the\n difference\n--------------------------------------------------------------------------------\n\n--------------------------------------------------------------------------------\n4. Analyst customises many features of his graph in order to create\n publication-quality plots\n--------------------------------------------------------------------------------\n\n--------------------------------------------------------------------------------\n5. 
Analyst plots the data\n--------------------------------------------------------------------------------\n\"\"\"\n\nimport metview as mv\n\n# define a view over the area of interest\n\narea_view = mv.geoview(\n map_area_definition = 'corners',\n area = [45.83,-13.87,62.03,8.92]\n)\n\nt2m_grib = mv.read('./t2m_grib.grib')\n\nobs_3day = mv.read('./obs_3day.bufr')\n\nt2m_gpt = mv.obsfilter(\n parameter = '012004',\n output = 'geopoints',\n data = obs_3day\n)\n\ndiff = t2m_grib - t2m_gpt\n\ndiff_symb = mv.msymb(\n legend = True,\n symbol_type = 'marker',\n symbol_table_mode = 'advanced',\n)\n\nmv.setoutput(mv.png_output(output_width = 1000, output_name = './obsdiff1'))\nmv.plot(area_view, diff, diff_symb)\n\n\n# Extract geopoints that are hotter by 1 deg or more\n#hotter = mv.filter(diff, diff >= 1)\nhotter = diff.filter(diff >= 1)\n\n# Extract geopoints that are colder by 1 deg or more\n#colder = mv.filter(diff, diff <= -1)\ncolder = diff.filter(diff <= -1)\n\n# Get geopoints that are within +/-1\n#exact = mv.filter(diff, (diff > -1) * (diff < 1))\nexact = diff.filter((diff > -1) * (diff < 1))\n\n# Symbol visdefs for each classification\nred = mv.msymb(\n symbol_type = 'marker',\n symbol_colour = 'red'\n)\n\nblue = mv.msymb(\n symbol_type = 'marker',\n symbol_colour = 'blue'\n)\n\ngrey = mv.msymb(\n symbol_type = 'marker',\n symbol_colour = 'grey'\n)\n\n# plot the 'exact' data set with visdef 'grey', 'hotter' with 'red', etc.\nmv.setoutput(mv.png_output(output_width = 1000, output_name = './obsdiff2'))\nmv.plot(area_view, exact, grey, hotter, red, colder, blue)\n" } ]
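A minimal usage sketch for the metview-python bindings recorded above, assuming Metview 5 and this package are installed; the input file name 't2m.grib' is hypothetical. It exercises the Fieldset constructor from bindings.py, the comparison and equality operators noted in the 1.1.0 CHANGELOG entry, and the cfgrib-backed to_dataset() conversion.

import metview as mv

fs = mv.Fieldset(path='t2m.grib')   # read a GRIB file into a Fieldset
warm = fs > 273.15                  # comparison operators yield 1/0 fields
same = (fs == fs)                   # equality operator added in 1.1.0
print(len(fs))                      # ContainerValue.__len__ -> count()
ds = fs.to_dataset()                # soft dependency on cfgrib/xarray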
10
dera1992/biosec
https://github.com/dera1992/biosec
ba78e459289e2f4929a30927ab37ee9c6561741f
2037f313527283cf9627c71fbfa992bef0ccc738
1644428539db03518bb37fdf4a60fec129b18f3b
refs/heads/main
2023-04-14T21:29:08.784208
2021-04-07T12:53:15
2021-04-07T12:53:15
355,472,678
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7121879458427429, "alphanum_fraction": 0.7342143654823303, "avg_line_length": 33.099998474121094, "blob_id": "7ac1c26e7aa6d202656a03b0a3a828d5fa08059d", "content_id": "3f044a65d38470d323a2b89fb7f0bfebbef67dd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 681, "license_type": "no_license", "max_line_length": 67, "num_lines": 20, "path": "/home/models.py", "repo_name": "dera1992/biosec", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom simple_history.models import HistoricalRecords\n\n# Create your models here.\n# the model for the operation is created here.\n\nclass Employee(models.Model):\n id=models.AutoField(primary_key=True)\n name=models.CharField(max_length=255)\n phone=models.CharField(max_length=255)\n age = models.CharField(max_length=255)\n address=models.CharField(max_length=255)\n email = models.CharField(max_length=255, null=True, blank=True)\n added_on=models.DateTimeField(auto_now_add=True)\n active = models.BooleanField(default=True)\n history = HistoricalRecords()\n objects=models.Manager()\n\n def __str__(self):\n return self.name" }, { "alpha_fraction": 0.7337278127670288, "alphanum_fraction": 0.7337278127670288, "avg_line_length": 21.600000381469727, "blob_id": "25ac26d2c02e16a33e7a2ae1ba95fa0f0904575a", "content_id": "67ced7e2c17125970b8089fc204c7d79b95d52f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 338, "license_type": "no_license", "max_line_length": 69, "num_lines": 15, "path": "/home/urls.py", "repo_name": "dera1992/biosec", "src_encoding": "UTF-8", "text": "from django.urls import path, include\nfrom rest_framework import routers\nfrom . import views\n\napp_name = 'home'\n\nrouter=routers.DefaultRouter()\nrouter.register(\"employee\",views.EmployeeViewset,basename=\"employee\")\n\n\nurlpatterns = [\n path('api/',include(router.urls)),\n path('api/archive/<str:pk>', views.archive, name=\"archive\"),\n\n]" }, { "alpha_fraction": 0.5477386713027954, "alphanum_fraction": 0.7236180901527405, "avg_line_length": 18.899999618530273, "blob_id": "6c0882a5e524ecc241b2aebd61bc03c012dda519", "content_id": "ec03ac64309953e7f3df6e06a74a9e5eaac86728", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 199, "license_type": "no_license", "max_line_length": 29, "num_lines": 10, "path": "/requirements.txt", "repo_name": "dera1992/biosec", "src_encoding": "UTF-8", "text": "asgiref==3.3.2\nDjango==3.2\ndjango-cors-headers==3.6.0\ndjango-simple-history==2.12.0\ndjangorestframework==3.12.2\ndrf-history==0.0.9\npytz==2021.1\nsix==1.15.0\nsqlparse==0.4.1\ntyping-extensions==3.7.4.3\n" }, { "alpha_fraction": 0.7031098008155823, "alphanum_fraction": 0.7074829936027527, "avg_line_length": 37.8301887512207, "blob_id": "a5c4163114e357bf9ce753bea888dd693f1053e6", "content_id": "95f3caaed15aab8370e81fa857b11ca15428db73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2058, "license_type": "no_license", "max_line_length": 95, "num_lines": 53, "path": "/home/views.py", "repo_name": "dera1992/biosec", "src_encoding": "UTF-8", "text": "from rest_framework import viewsets, generics\n\nfrom rest_framework.response import Response\nfrom rest_framework.generics import get_object_or_404\n\nfrom home.models import Employee\n\nfrom home.serializers import EmployeeSerializer\nfrom rest_framework.decorators import 
api_view\n\n\n# Employee class-based ViewSet for creating, retrieving, listing and updating records\nclass EmployeeViewset(viewsets.ViewSet):\n\n def create(self,request):\n try:\n serializer=EmployeeSerializer(data=request.data,context={\"request\":request})\n serializer.is_valid(raise_exception=True)\n serializer.save()\n dict_response={\"error\":False,\"message\":\"Employee Data Saved Successfully\"}\n except Exception:\n dict_response={\"error\":True,\"message\":\"Error During Saving Employee Data\"}\n return Response(dict_response)\n\n def list(self,request):\n employee=Employee.objects.filter(active=True)\n serializer=EmployeeSerializer(employee,many=True,context={\"request\":request})\n response_dict={\"error\":False,\"message\":\"All Employee List Data\",\"data\":serializer.data}\n return Response(response_dict)\n\n def retrieve(self,request,pk=None):\n queryset=Employee.objects.all()\n employee=get_object_or_404(queryset,pk=pk)\n serializer=EmployeeSerializer(employee,context={\"request\":request})\n return Response({\"error\":False,\"message\":\"Single Data Fetch\",\"data\":serializer.data})\n\n def update(self,request,pk=None):\n queryset=Employee.objects.all()\n employee=get_object_or_404(queryset,pk=pk)\n serializer=EmployeeSerializer(employee,data=request.data,context={\"request\":request})\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response({\"error\":False,\"message\":\"Data Has Been Updated\"})\n\n\n\n# Function-based view of Django REST framework for archiving a record\n@api_view(['DELETE'])\ndef archive(request, pk=None):\n employee = Employee.objects.get(id=pk)\n employee.active = False\n employee.save()\n return Response(data='delete success')\n" }, { "alpha_fraction": 0.5423728823661804, "alphanum_fraction": 0.5830508470535278, "avg_line_length": 25.65217399597168, "blob_id": "79be0ba4201c5c1d76341b6ec4efcf5e59434fe4", "content_id": "0fb6e95723f07ca43736ee96033a0a706a8e7f4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 590, "license_type": "no_license", "max_line_length": 74, "num_lines": 23, "path": "/home/migrations/0004_auto_20210407_0410.py", "repo_name": "dera1992/biosec", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2 on 2021-04-07 03:10\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('home', '0003_historicalemployee'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='employee',\n name='email',\n field=models.CharField(blank=True, max_length=255, null=True),\n ),\n migrations.AddField(\n model_name='historicalemployee',\n name='email',\n field=models.CharField(blank=True, max_length=255, null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.7539682388305664, "alphanum_fraction": 0.7539682388305664, "avg_line_length": 24.200000762939453, "blob_id": "bb2aeba2e9d87864df6e2d2ad49c9870fdf5905a", "content_id": "b2d3740f92ccb45b96daaaba5e893a03c14738fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 252, "license_type": "no_license", "max_line_length": 57, "num_lines": 10, "path": "/home/serializers.py", "repo_name": "dera1992/biosec", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\n\nfrom home.models import Employee\n\n# The employee serializer is defined here for JSON generation\n\nclass EmployeeSerializer(serializers.ModelSerializer):\n class Meta:\n model=Employee\n fields=\"__all__\"\n" }, { "alpha_fraction": 0.7529923915863037, "alphanum_fraction": 
0.77366703748703, "avg_line_length": 56.4375, "blob_id": "5493555c7eb439d4fe3f2cf4fff8968dc1da89df", "content_id": "a80d3c5af90111d5b0c1f216eb11d534735180fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 919, "license_type": "no_license", "max_line_length": 98, "num_lines": 16, "path": "/README.md", "repo_name": "dera1992/biosec", "src_encoding": "UTF-8", "text": "REST API\nThe API is built with Django, a Python web framework.\nTo run the API on your system, do the following\n(1) Install Python 3.6 or above if it is not already installed in your environment\n(2) Confirm that Python is installed by typing python on your command prompt or terminal\n(3) cd to the folder where you want to run the application\n(4) Clone the application with this command 'git clone https://github.com/dera1992/biosec.git'\n(5) cd to the cloned directory where requirements.txt is located using your command prompt or terminal\n(6) Activate your virtualenv (optional)\n(7) run pip install -r requirements.txt to install all the dependencies including the Django framework\n(8) run python manage.py runserver to start the application\n(9) To access the admin site: http://localhost:8000/admin\n Username = biosec\n Password = adminbiosec\n \n NB: You can view the logs when you log in to the admin portal\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5663716793060303, "avg_line_length": 20.5238094329834, "blob_id": "a2f200c679f0a20d54f638b82eabeb4c4e8c0862", "content_id": "5f46f645792d5398ec0fc755d57aa50093e9764d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 452, "license_type": "no_license", "max_line_length": 45, "num_lines": 21, "path": "/home/migrations/0005_auto_20210407_0427.py", "repo_name": "dera1992/biosec", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2 on 2021-04-07 03:27\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('home', '0004_auto_20210407_0410'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='employee',\n name='joining_date',\n ),\n migrations.RemoveField(\n model_name='historicalemployee',\n name='joining_date',\n ),\n ]\n" } ]
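A hypothetical client sketch for the biosec endpoints registered in home/urls.py above. The host, port and URL prefix are assumptions, since the project-level urls.py is not part of this record; the payload fields follow the Employee model.

import requests

BASE = 'http://localhost:8000/api'  # assumed mount point for home.urls
payload = {'name': 'Jane Doe', 'phone': '08012345678', 'age': '30',
           'address': 'Lagos', 'email': 'jane@example.com'}
print(requests.post(BASE + '/employee/', json=payload).json())  # EmployeeViewset.create
print(requests.get(BASE + '/employee/').json())                 # list only active employees
requests.delete(BASE + '/archive/1')                            # soft-delete via archive()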
8
pranavg000/Social-Network-Django
https://github.com/pranavg000/Social-Network-Django
5a27fab75ef5b050d00ea30228a3620f4fb11205
172a9726f0138344e0df4ad4300033b4efa569ef
2df7eafb478b9d81cd2fa77d91438c53f6e5ba50
refs/heads/master
2021-06-16T01:10:53.052735
2020-08-05T19:05:46
2020-08-05T19:05:46
166,949,146
9
7
null
2019-01-22T07:35:29
2021-03-27T18:41:12
2021-01-11T20:55:11
CSS
[ { "alpha_fraction": 0.724304735660553, "alphanum_fraction": 0.7315598726272583, "avg_line_length": 21.351350784301758, "blob_id": "c42535a98570b870291199dcdedcb47c4ff34539", "content_id": "1ced9c09d24430d0ebb53569d72bfaa4f8432d82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 827, "license_type": "no_license", "max_line_length": 69, "num_lines": 37, "path": "/Social Network/core/forms.py", "repo_name": "pranavg000/Social-Network-Django", "src_encoding": "UTF-8", "text": "from django.contrib.auth.models import User\nfrom django import forms\nfrom .models import Profile,Post,Comment\n\nclass UserForm(forms.ModelForm):\n\tpassword = forms.CharField(widget=forms.PasswordInput)\n\temail = forms.EmailField(max_length=254, help_text='Required field')\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = ['username','email','password']\n\n\nclass UpdateUserForm(forms.ModelForm):\n\temail = forms.EmailField(max_length=254, help_text='Required field')\n\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = ['email']\n\n\nclass UpdateProfileForm(forms.ModelForm):\n\n\tclass Meta:\n\t\tmodel = Profile\n\t\tfields = ['status_info','profile_photo']\n\nclass CreatePost(forms.ModelForm):\n\t\n\tclass Meta:\n\t\tmodel = Post\n\t\tfields = ['post_text','post_picture']\n\nclass CreateComment(forms.ModelForm):\n\t\n\tclass Meta:\n\t\tmodel = Comment\n\t\tfields = ['comment_text']\n" }, { "alpha_fraction": 0.7099999785423279, "alphanum_fraction": 0.7107999920845032, "avg_line_length": 26.4780216217041, "blob_id": "15e1d425ac855a05bdabb2a5f7f69376a446cca3", "content_id": "f9a0c1fdf027c109022bb4433902a06e39dcc9f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5000, "license_type": "no_license", "max_line_length": 88, "num_lines": 182, "path": "/Social Network/core/views.py", "repo_name": "pranavg000/Social-Network-Django", "src_encoding": "UTF-8", "text": "from django.shortcuts import render,redirect\nfrom django.views import generic\nfrom django.views.generic import View\nfrom django.views.generic.edit import CreateView,UpdateView,DeleteView\nfrom .forms import UserForm,UpdateUserForm,UpdateProfileForm,CreatePost,CreateComment\nfrom django.http import HttpResponse \nfrom django.contrib.auth import authenticate,login\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom .models import User,Following,Follower,Post\nfrom django.urls import reverse\nfrom django.http import HttpResponseRedirect\n\n\n\n\ndef index(request):\n\treturn render(request,'core/index.html')\n\n\n\n@login_required\ndef profile(request, username):\n\t\n\tif request.method == 'POST':\n\t\tu_form = UpdateUserForm(request.POST,instance=request.user)\n\t\tp_form = UpdateProfileForm(request.POST, request.FILES, instance=request.user.profile)\n\t\t\n\n\t\tif u_form.is_valid() and p_form.is_valid():\n\n\t\t\tu_form.save()\n\t\t\tp_form.save()\n\t\t\t\n\n\t\t\tmessages.success(request,f'Your Profile has been updated!')\n\t\t\turl = reverse('profile', kwargs = {'username' : username})\n\t\t\treturn redirect(url)\n\n\telse:\n\t\tif username == request.user.username:\n\t\t\tu_form = UpdateUserForm(instance=request.user)\n\t\t\tp_form = UpdateProfileForm(instance=request.user.profile)\n\t\t\tpost_form = CreatePost()\n\t\t\tperson = User.objects.get(username = username)\n\n\t\t\tcontext = 
{\n\t\t\t\t\t'u_form':u_form,\n\t\t\t\t\t'p_form':p_form,\n\t\t\t\t\t'post_form':post_form,\n\t\t\t\t\t'person':person,\n\t\t\t\t\t\n\t\t\t}\n\t\telse:\n\t\t\tperson = User.objects.get(username = username)\n\t\t\talready_a_follower=False\n\t\t\tfor followers in person.follower_set.all():\n\t\t\t\tif (followers.follower_user == request.user.username):\n\t\t\t\t\talready_a_follower=True\n\t\t\t\t\tbreak\n\n\t\t\tif already_a_follower:\n\t\t\t\tcontext = {\n\t\t\t\t\t\t'person':person,\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t}\n\t\t\telse:\n\t\t\t\tcontext = {\n\t\t\t\t\t\t'person':person,\n\t\t\t\t\t\t'f':1,\n\t\t\t\t\t\t\n\t\t\t\t\t}\n\t\tcomment_form = CreateComment()\n\t\tcontext.update({'comment_form':comment_form})\n\n\treturn render(request, 'core/profile.html', context)\n\n\n\nclass UserFormView(View):\n\tform_class = UserForm\n\ttemplate_name = 'core/registration_form.html'\n\n\tdef get(self, request):\n\t\tform = self.form_class(None)\n\t\treturn render(request,self.template_name,{'form':form})\n\n\tdef post(self, request):\n\t\tform = self.form_class(request.POST)\n\n\t\tif form.is_valid():\n\t\t\tuser = form.save(commit=False)\n\t\t\tusername = form.cleaned_data['username']\n\t\t\tpassword = form.cleaned_data['password']\n\t\t\tuser.set_password(password)\n\t\t\tuser.save()\n\n\t\t\tuser = authenticate(username=username,password=password)\n\t\t\tif user is not None:\n\t\t\t\tif user.is_active:\n\t\t\t\t\tlogin(request, user)\n\t\t\t\t\tmessages.success(request,f'Account created for {username}!')\n\t\t\t\t\treturn redirect('core:index')\n\n\n\t\treturn render(request,self.template_name, {'form':form}) \n\n\n\ndef followweb(request, username):\n\tif request.user.username != username and request.method == 'POST':\n\t\tdisciple = User.objects.get(username=request.user.username)\n\t\tleader = User.objects.get(username=username)\n\n\t\tleader.follower_set.create(follower_user = disciple)\n\t\tdisciple.following_set.create(following_user = leader)\n\turl = reverse('profile', kwargs = {'username' : username})\n\treturn redirect(url)\n\n\ndef unfollowweb(request, username):\n\n\tif request.method == 'POST':\n\t\tdisciple = User.objects.get(username=request.user.username)\n\t\tleader = User.objects.get(username=username)\n\n\t\tleader.follower_set.get(follower_user = disciple).delete()\n\t\tdisciple.following_set.get(following_user = leader).delete()\n\turl = reverse('profile', kwargs = {'username' : username})\n\treturn redirect(url)\n\n\n\ndef welcome(request):\n\turl = reverse('profile', kwargs = {'username' : request.user.username})\n\treturn redirect(url)\n\n\n\ndef postweb(request, username):\n\tif request.method == 'POST':\n\n\t\tpost_form = CreatePost(request.POST,request.FILES)\n\t\tif post_form.is_valid():\n\t\t\tpost_text = post_form.cleaned_data['post_text']\n\t\t\tpost_picture = request.FILES.get('post_picture')\n\t\t\trequest.user.post_set.create(post_text=post_text, post_picture = post_picture)\n\t\t\tmessages.success(request,f'You have successfully posted!')\n\t\t\t\n\turl = reverse('profile', kwargs = {'username' : username})\n\treturn redirect(url)\n\n\ndef commentweb(request, username, post_id):\n\tif request.method == 'POST':\n\n\t\tcomment_form = CreateComment(request.POST)\n\t\tif comment_form.is_valid():\n\t\t\tcomment_text = comment_form.cleaned_data['comment_text']\n\t\t\tuser = User.objects.get(username=username)\n\t\t\tpost = 
user.post_set.get(pk=post_id)\n\t\t\tpost.comment_set.create(user=request.user,comment_text=comment_text)\n\n\t\t\tmessages.success(request,f'Your Comment has been posted')\n\t\t\t\n\turl = reverse('profile', kwargs = {'username' : username})\n\treturn redirect(url)\n\t\n\ndef feed(request):\n\n\tpost_all = Post.objects.order_by('-created_at')\n\n\tcomment_form = CreateComment()\n\tcontext = {\n\t'post_all' : post_all,\n\t'comment_form':comment_form,\n\t}\n\treturn render(request,'core/feed.html',context)" }, { "alpha_fraction": 0.742222249507904, "alphanum_fraction": 0.7555555701255798, "avg_line_length": 31.119047164916992, "blob_id": "1a27744820dfc5dc7381e4c3c43ab47e9e4ddfe8", "content_id": "c020dce9dbe5e5c6fd49afaa164672674cde7398", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1350, "license_type": "no_license", "max_line_length": 84, "num_lines": 42, "path": "/Social Network/core/models.py", "repo_name": "pranavg000/Social-Network-Django", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\n\nclass Profile(models.Model):\n\n\tuser = models.OneToOneField(User, on_delete= models.CASCADE)\n\tprofile_photo = models.FileField(default='default.jpg', upload_to='profile_photos')\n\tstatus_info = models.CharField(default=\"Enter status\", max_length=1000) \n\n\n\tdef __str__(self):\n\t\treturn f'{self.user.username} Profile'\n\nclass Post(models.Model):\n\tuser = models.ForeignKey(User,on_delete = models.CASCADE,null=True)\n\tcreated_at = models.DateTimeField(auto_now_add=True)\n\tpost_text = models.CharField(max_length=2000)\n\tpost_picture = models.FileField(default=\"default.jpg\",upload_to='post_picture')\n\n\n\nclass Following(models.Model):\n\tuser = models.ForeignKey(User,on_delete = models.CASCADE,null=True)\n\tfollowing_user = models.CharField(max_length=100,null=True)\n\t\n\n\tdef __str__(self):\n\t\treturn self.following_user\n\n\nclass Follower(models.Model):\n\tuser = models.ForeignKey(User,on_delete = models.CASCADE,null=True)\n\tfollower_user = models.CharField(max_length=100,null=True) \n\n\tdef __str__(self):\n\t\treturn self.follower_user\n\n\nclass Comment(models.Model):\n\tpost = models.ForeignKey(Post,on_delete = models.CASCADE)\n\tuser = models.ForeignKey(User,on_delete = models.CASCADE,null=True)\n\tcomment_text = models.CharField(default=\"Enter Comment Here\",max_length=2000)\n\t" }, { "alpha_fraction": 0.8291666507720947, "alphanum_fraction": 0.8291666507720947, "avg_line_length": 28.875, "blob_id": "4e60ce1b6c724f2f114720479799df5832e912a9", "content_id": "6abf18025c05cc6abd8e78e86f5bb0f335f219a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 240, "license_type": "no_license", "max_line_length": 59, "num_lines": 8, "path": "/Social Network/core/admin.py", "repo_name": "pranavg000/Social-Network-Django", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Profile,Following,Follower,Post,Comment\n\nadmin.site.register(Profile)\nadmin.site.register(Follower)\nadmin.site.register(Following)\nadmin.site.register(Post)\nadmin.site.register(Comment)\n\n" }, { "alpha_fraction": 0.7056478261947632, "alphanum_fraction": 0.7056478261947632, "avg_line_length": 49.20000076293945, "blob_id": "1f8fbd01f74283c478a2f1bc6cd21e6db59b2acf", "content_id": "b738e91418273cb58405135c27a81bddb694a939", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 1505, "license_type": "no_license", "max_line_length": 116, "num_lines": 30, "path": "/Social Network/SocialNetwork/urls.py", "repo_name": "pranavg000/Social-Network-Django", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.urls import path,include\nfrom django.contrib.auth import views as auth_views\nfrom core import views as user_views\nfrom django.contrib.auth.decorators import login_required\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.conf.urls import url\n\n\nurlpatterns = [\n path('', user_views.feed, name='home'),\n path('admin/', admin.site.urls),\n path('core/',include('core.urls')),\n path('login/',auth_views.LoginView.as_view(template_name='core/login.html'),name='login'),\n path('welcome/',user_views.welcome,name=\"welcome\"),\n path('logout/',auth_views.LogoutView.as_view(template_name='core/logout.html'),name='logout'),\n path('register/',user_views.UserFormView.as_view(template_name='core/registration_form.html'),name='register'),\n url(r'^profile/(?P<username>\\w+)/$',user_views.profile,name='profile'),\n url(r'^followweb/(?P<username>\\w+)/$',user_views.followweb,name=\"followweb\"),\n url(r'^unfollowweb/(?P<username>\\w+)/$',user_views.unfollowweb,name=\"unfollowweb\"),\n url(r'^postweb/(?P<username>\\w+)/$',user_views.postweb,name=\"postweb\"),\n url(r'^commentweb/(?P<username>\\w+)/(?P<post_id>\\d+)/$', user_views.commentweb,name = \"commentweb\"),\n path('feed/',user_views.feed,name=\"feed\"),\n \n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)" }, { "alpha_fraction": 0.7347418069839478, "alphanum_fraction": 0.7464788556098938, "avg_line_length": 18.363636016845703, "blob_id": "3ef321f55bf436c884d3cb034e013379029181c5", "content_id": "0ba9ce189002494e936e112e6ae3518ae8e92b65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 426, "license_type": "no_license", "max_line_length": 64, "num_lines": 22, "path": "/README.md", "repo_name": "pranavg000/Social-Network-Django", "src_encoding": "UTF-8", "text": "# Social Network\n\nSocial Network is a social networking website made using Django.\n\n### Screenshots\n![](Social%20Network/screenshots/social2.png)\n\n![](Social%20Network/screenshots/social.png)\n\n### Installation\n\n```sh\n$ pip install django\n$ pip install --upgrade django-crispy-forms\n```\n\nMake migrations and start the server.\n```sh\n$ python manage.py makemigrations\n$ python manage.py migrate\n$ python manage.py runserver\n```\n" }, { "alpha_fraction": 0.5751463770866394, "alphanum_fraction": 0.5858815908432007, "avg_line_length": 43.550724029541016, "blob_id": "36127739dff188292a9913717272f09d2f0b43e0", "content_id": "0d2eec859bfa412c227512102c85f8d7153856a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3074, "license_type": "no_license", "max_line_length": 129, "num_lines": 69, "path": "/Social Network/core/migrations/0001_initial.py", "repo_name": "pranavg000/Social-Network-Django", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.5 on 2019-03-03 06:29\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n 
migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('comment_text', models.CharField(default='Enter Comment Here', max_length=2000)),\n ],\n ),\n migrations.CreateModel(\n name='Follower',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('follower_user', models.CharField(max_length=100, null=True)),\n ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Following',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('following_user', models.CharField(max_length=100, null=True)),\n ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Post',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('post_text', models.CharField(max_length=2000)),\n ('post_picture', models.FileField(default='default.jpg', upload_to='post_picture')),\n ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Profile',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('profile_photo', models.FileField(default='default.jpg', upload_to='profile_photos')),\n ('status_info', models.CharField(default='Enter status', max_length=1000)),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AddField(\n model_name='comment',\n name='post',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Post'),\n ),\n migrations.AddField(\n model_name='comment',\n name='user',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),\n ),\n ]\n" } ]
7
jivankandel/Rename-automatically-files-in-a-folder
https://github.com/jivankandel/Rename-automatically-files-in-a-folder
e30629f873988b20c525ed7b1db686232c43f9db
604b2efeacf0274ae2b86fbb92d779dbfbdc366e
c8bd4b309b346550fb56d2705dba8cc3b53c2636
refs/heads/master
2020-04-08T03:15:02.802406
2018-11-07T05:03:16
2018-11-07T05:03:16
158,968,163
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4027649760246277, "alphanum_fraction": 0.40645161271095276, "avg_line_length": 49.095237731933594, "blob_id": "4df4a460d56a0657350b5363b5e2d69c78f68452", "content_id": "84fa2fb8eb9baf4f93344212211e702c2a678b54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1085, "license_type": "no_license", "max_line_length": 100, "num_lines": 21, "path": "/main.py", "repo_name": "jivankandel/Rename-automatically-files-in-a-folder", "src_encoding": "UTF-8", "text": "import os\nfrom tkinter import messagebox\nimport tkinter\nos.chdir('c:\\\\users\\\\jivan\\\\desktop\\\\test')\nwhile True: \n for f in os.listdir() :\n if not f.startswith('.'):\n file_object=open(\"\\\\Users\\\\jivan\\\\Desktop\\\\Renamer\\\\number.txt\",\"r\")\n number=file_object.read()\n file_object.close()\n file_name,file_ext=os.path.splitext(f)\n if file_name[0]!='m':\n new_name='m '+ str(number)\n os.rename(f,new_name + \".jpg\")\n number1=int(number)+1\n file_object=open(\"\\\\Users\\\\jivan\\\\Desktop\\\\Renamer\\\\number.txt\",\"w\")\n file_object.write(str(number1))\n root = tkinter.Tk()\n root.withdraw()\n messagebox.showinfo('Note This',new_name)\n file_object.close()\n \n" } ]
1
daritter/DEPFETReader
https://github.com/daritter/DEPFETReader
ce5aaf0135d72c3b77bc1a61ba5d561de642f11b
513d24f482806bda40500899166f5fef624307e5
ca6b39c30679fbc8e83a7de7f254f1a87140bb27
refs/heads/master
2021-01-19T18:55:58.438800
2013-04-03T15:13:52
2013-04-03T15:16:43
2,749,193
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4865044951438904, "alphanum_fraction": 0.49583470821380615, "avg_line_length": 30.589473724365234, "blob_id": "17a06802633fae141c537e49d302deb24291ba9f", "content_id": "fcb60aeba90dcbfe0a2c28cbbecc79e5f343ed90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3001, "license_type": "no_license", "max_line_length": 85, "num_lines": 95, "path": "/include/AdaptivePedestals.h", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "/**************************************************************************\n * BASF2 (Belle Analysis Framework 2) *\n * Copyright(C) 2010 - Belle II Collaboration *\n * *\n * Author: The Belle II Collaboration *\n * Contributors: Martin Ritter *\n * *\n * This software is provided \"as is\" without any warranty. *\n **************************************************************************/\n\n#ifndef ADAPTIVEPEDESTALS_H\n#define ADAPTIVEPEDESTALS_H\n\n#include <vector>\n#include <cmath>\n#include <cstring>\n#include <iostream>\n\nnamespace DEPFET {\n template<class T> class RingBuffer {\n public:\n typedef T value_type;\n RingBuffer(size_t size): m_pos(0), m_size(size) {\n m_buffer.reserve(size);\n }\n size_t capacity() const { return m_size; }\n size_t size() const { return m_buffer.size(); }\n size_t pos() const { return m_pos; }\n T& operator[](size_t i) { return m_buffer[(m_pos + i) % m_size]; }\n T operator[](size_t i) const { return m_buffer[(m_pos + i) % m_size]; }\n void add(const T& element) {\n if (size() < capacity()) {\n m_buffer.push_back(element);\n } else {\n m_buffer[(m_pos++ + m_size) % m_size] = element;\n if (m_pos >= m_size) m_pos %= m_size;\n }\n }\n protected:\n size_t m_pos;\n size_t m_size;\n std::vector<T> m_buffer;\n };\n\n\n class AdaptivePedestal {\n public:\n enum { NSIGMA = 4 };\n AdaptivePedestal(int interval = 100, int events = 200):\n m_mean(0), m_sigma(1e5), m_interval(interval), m_pos(0), m_buffer(events) {}\n operator double() {\n return getMean();\n }\n double getMean() const { return m_mean; }\n double getSigma() const { return m_sigma; }\n\n void addRaw(int adu) {\n m_buffer.add(adu);\n if (++m_pos > m_interval) calculate();\n }\n\n void calculate() {\n m_pos = 0;\n calculateMeanSigma(m_mean, m_sigma);\n if (NSIGMA > 0) calculateMeanSigma(m_mean, m_sigma, NSIGMA * m_sigma);\n }\n\n void setMean(double mean, double sigma = 1e5) { m_mean = mean; m_sigma = sigma; }\n\n protected:\n void calculateMeanSigma(double& mean, double& sigma, double cutValue = 0) {\n int entries(0);\n double newMean(0), variance(0);\n for (size_t i = 0; i < m_buffer.size(); ++i) {\n const int x = m_buffer[i];\n if (cutValue > 0 && std::fabs(x - mean) > cutValue) continue;\n entries++;\n const double oldMean = newMean;\n newMean += (x - oldMean) / entries;\n variance += (x - oldMean) * (x - newMean);\n }\n mean = newMean;\n sigma = std::sqrt(variance / entries);\n }\n\n double m_mean;\n double m_sigma;\n int m_interval;\n int m_pos;\n RingBuffer<int> m_buffer;\n };\n\n\n} //DEPFET namespace\n#endif\n" }, { "alpha_fraction": 0.6114212274551392, "alphanum_fraction": 0.6148561835289001, "avg_line_length": 35.96825408935547, "blob_id": "48b0d4d9c91c8b5e5c1318b865afe0dea7914ccb", "content_id": "3b297ce47ce3eb97921e60937c3f2f06fe3a2ccb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2329, "license_type": "no_license", "max_line_length": 89, "num_lines": 63, "path": "/include/DataView.h", "repo_name": 
"daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "#ifndef DEPFET_DATAVIEW_H\n#define DEPFET_DATAVIEW_H\n\n#include <DEPFETReader/Exception.h>\n\nnamespace DEPFET {\n\n /** Class to provide and indexed view to a given data array. This class is\n * used to provide a readonly view of a chunk of data, probably as a\n * different type. This can be used to e.g. treat the memory of an array of\n * integers as a matrix of doubles or unsigned short.\n *\n * @tparam VIEWTYPE type of the view\n * @tparam STORAGETYPE type of the array\n */\n template < class VIEWTYPE, class STORAGETYPE = unsigned int > class DataView {\n public:\n /** Create a view of a given array.\n * The parameters nX and nY specify the dimensions of the view. If one is\n * zero, the other will be calculated from the size of the array. If both\n * are zero nY will be assumed to be 1\n *\n * @param data pointer to the array to use\n * @param size size of the array\n * @param nX size of the resulting view along X\n * @param nY size of the resulting view along Y\n */\n DataView(const STORAGETYPE* data, size_t size, size_t nX, size_t nY) {\n if (nX == 0 && nY == 0) { nY = 1; }\n if (nY == 0 && nX > 0) {\n nY = size * sizeof(STORAGETYPE) / (nX * sizeof(VIEWTYPE));\n } else if (nX == 0 && nY > 0) {\n nX = size * sizeof(STORAGETYPE) / (nY * sizeof(VIEWTYPE));\n } else if (nX * nY * sizeof(VIEWTYPE) > size * sizeof(STORAGETYPE)) {\n throw Exception(\"Data buffer not large enough\");\n }\n m_data = (const VIEWTYPE*) data;\n m_nX = nX;\n m_nY = nY;\n }\n /** Return an element of the view\n * @param x X coordinate of the element\n * @param y Y coordinate of the element\n */\n const VIEWTYPE& operator()(size_t x, size_t y) const { return m_data[x * m_nY + y]; }\n /** Return an element from the flat view\n * @param index index of the element\n */\n const VIEWTYPE& operator[](size_t index) const { return m_data[index]; }\n /** Return the size of the view along X */\n size_t getSizeX() const { return m_nX; }\n /** Return the size of the view along Y */\n size_t getSizeY() const { return m_nY; }\n protected:\n /** Pointer to the data */\n const VIEWTYPE* m_data;\n /** Size of the view along X */\n size_t m_nX;\n /** Size of the view along Y */\n size_t m_nY;\n };\n}\n#endif\n" }, { "alpha_fraction": 0.5942549109458923, "alphanum_fraction": 0.6409335732460022, "avg_line_length": 31.764705657958984, "blob_id": "ae1bf66915cd95f63cc5ddb83d74db0e321cad5d", "content_id": "2b6f3a948249334617c9b643f5c1ba2835d1ea37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 557, "license_type": "no_license", "max_line_length": 121, "num_lines": 17, "path": "/src/S3AConverter.cc", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "#include <DEPFETReader/S3AConverter.h>\n\nnamespace DEPFET {\n\n size_t S3AConverter::operator()(const RawData& rawData, ADCValues& adcValues)\n {\n adcValues.setSize(64, 128);\n DataView<unsigned int> data = rawData.getView<unsigned int>();\n for (size_t ipix = 0; ipix < adcValues.getSizeX()*adcValues.getSizeY(); ++ipix) { //-- raspakowka daty ---- loop 8000\n int x = data[ipix] >> 16 & 0x3F;\n int y = data[ipix] >> 22 & 0x7F;\n adcValues(x, y) = data[ipix] & 0xffff;\n }\n return rawData.getFrameSize<unsigned int>(64, 128);\n }\n\n}\n" }, { "alpha_fraction": 0.604938268661499, "alphanum_fraction": 0.604938268661499, "avg_line_length": 15.199999809265137, "blob_id": "53eac2a59779d08f5cf5b5ae6cf9d7eb9c544dfe", "content_id": 
"59b35b4a879349841c9b1996bd0d6006f10538bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 81, "license_type": "no_license", "max_line_length": 50, "num_lines": 5, "path": "/modules/SConscript", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "Import('env')\n\nenv['LIBS'] = ['pxd', 'framework', 'DEPFETReader']\n\nReturn('env')\n" }, { "alpha_fraction": 0.5305052399635315, "alphanum_fraction": 0.5367016196250916, "avg_line_length": 27.351350784301758, "blob_id": "cf8bd5e6f0d5ef6d47850cf610baf5b3b7857a80", "content_id": "5e0460c95e9abf486c955528ccdd0c6a2aabf906", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2098, "license_type": "no_license", "max_line_length": 76, "num_lines": 74, "path": "/modules/include/DEPFETReaderModule.h", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "/**************************************************************************\n * BASF2 (Belle Analysis Framework 2) *\n * Copyright(C) 2010-2011 Belle II Collaboration *\n * *\n * Author: The Belle II Collaboration *\n * Contributors: Martin Ritter *\n * *\n * This software is provided \"as is\" without any warranty. *\n **************************************************************************/\n\n#ifndef DEPFETREADERMODULE_H\n#define DEPFETREADERMODULE_H\n\n#include <framework/core/Module.h>\n#include <string>\n\n#include <DEPFETReader/DataReader.h>\n#include <DEPFETReader/CommonMode.h>\n#include <DEPFETReader/IncrementalMean.h>\nnamespace DEPFET {\n typedef ValueMatrix<double> Pedestals;\n typedef ValueMatrix<double> Noise;\n}\n\nnamespace Belle2 {\n\n /** The DEPFETReader module.\n * Loads events from a HepEvt file and stores the content\n * into the MCParticle class.\n */\n class DEPFETReaderModule : public Module {\n\n public:\n\n /**\n * Constructor.\n * Sets the module parameters.\n */\n DEPFETReaderModule();\n\n /** Destructor. */\n virtual ~DEPFETReaderModule() {}\n\n /** Initializes the module. */\n virtual void initialize();\n\n /** Method is called for each event. 
*/\n virtual void event();\n\n protected:\n void progress(int event, int maxOrder = 4);\n void calculatePedestals();\n void calculateNoise();\n\n std::vector<std::string> m_inputFiles;\n std::string m_calibrationFile;\n int m_readoutFold;\n int m_calibrationEvents;\n double m_sigmaCut;\n int m_skipEvents;\n int m_dcd;\n int m_trailingFrames;\n int m_currentFrame;\n\n DEPFET::DataReader m_reader;\n DEPFET::Pedestals m_pedestals;\n DEPFET::Noise m_noise;\n DEPFET::PixelMask m_mask;\n DEPFET::CommonMode m_commonMode;\n };\n\n} // end namespace Belle2\n\n#endif // DEPFETREADERMODULE_H\n" }, { "alpha_fraction": 0.6009730100631714, "alphanum_fraction": 0.6162282228469849, "avg_line_length": 32.87430191040039, "blob_id": "804d3996614295083745ab6e763225efa6f88276", "content_id": "31413c0f6dfeca4f0402d020988e7d691ef6ed10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 12127, "license_type": "no_license", "max_line_length": 138, "num_lines": 358, "path": "/tools/depfetCalibration.cc", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "#include <DEPFETReader/DataReader.h>\n#include <DEPFETReader/CommonMode.h>\n#include <DEPFETReader/IncrementalMean.h>\n\n#include <cmath>\n#include <iostream>\n#include <iomanip>\n#include <boost/program_options.hpp>\n#include <boost/foreach.hpp>\n#include <boost/format.hpp>\n#include <boost/iostreams/char_traits.hpp> // EOF, WOULD_BLOCK\n#include <boost/iostreams/concepts.hpp> // input_filter\n#include <boost/iostreams/operations.hpp> // get\n#include <boost/iostreams/device/file.hpp>\n#include <boost/iostreams/filtering_stream.hpp>\n\n#include <TH1D.h>\n#include <TFile.h>\n#include <TCanvas.h>\n#include <TStyle.h>\n#include <TF1.h>\n#include <TGraph.h>\n#include <TMath.h>\n\nusing namespace std;\nnamespace po = boost::program_options;\nnamespace io = boost::iostreams;\n\nclass CommentFilter : public boost::iostreams::input_filter {\npublic:\n explicit CommentFilter(char commentChar = '#'): m_commentChar(commentChar), m_skip(false)\n {}\n\n template<typename Source>\n int get(Source& src) {\n int c;\n while (true) {\n c = boost::iostreams::get(src);\n if (c == EOF || c == boost::iostreams::WOULD_BLOCK) {\n break;\n }\n if (c == m_commentChar) {\n m_skip = true;\n } else if (c == '\\n') {\n m_skip = false;\n }\n\n if (!m_skip) break;\n }\n return c;\n }\n\n template<typename Source>\n void close(Source&) { m_skip = false; }\nprivate:\n char m_commentChar;\n bool m_skip;\n};\n\ndouble setRangeFraction(TH1* hist, double fraction = 0.9)\n{\n double mean = hist->GetMean();\n double maxEntries = hist->GetEntries() * fraction;\n if (maxEntries < 1) {\n cerr << hist->GetName() << \": Not enough Entries (\" << hist->GetEntries() << \") for RangeFraction (\" << fraction << \")\\n\";\n return 0;\n }\n int binLeft = hist->FindBin(mean);\n int binRight = binLeft;\n int binMax = hist->GetNbinsX();\n double entries = hist->GetBinContent(binLeft);\n while (entries < maxEntries) {\n if (--binLeft > 0) {\n entries += hist->GetBinContent(binLeft);\n }\n if (++binRight <= binMax) {\n entries += hist->GetBinContent(binRight);\n }\n if (binLeft <= 0 && binRight > binMax) {\n cerr << \"Did not reach \" << fraction << \" with \" << hist->GetName() << std::endl;\n break;\n }\n }\n hist->GetXaxis()->SetRange(binLeft, binRight);\n return entries / hist->GetEntries();\n}\n\n\nbool showProgress(int event, int minOrder = 1, int maxOrder = 3)\n{\n int order = (event == 0) ? 
1 : max(min((int)log10(event), maxOrder), minOrder);\n int interval = static_cast<int>(pow(10., order));\n return (event % interval == 0);\n}\n\ntypedef DEPFET::ValueMatrix<DEPFET::IncrementalMean> PixelMean;\ntypedef DEPFET::ValueMatrix<TH1D*> HistGrid;\ntypedef DEPFET::ValueMatrix<TGraph*> GraphGrid;\n\n//Output a single value to file\ninline void dumpValue(ostream& output, double value, double scale)\n{\n if (isnan(value)) value = 0;\n if (output) output << setprecision(2) << setw(8) << fixed << (value * scale) << \" \";\n}\n\n//Calculate the pedestals: Determine mean and sigma of every pixel, optionally\n//applying a cut using mean and sigma of a previous run\nvoid calculatePedestals(DEPFET::DataReader& reader, PixelMean& pedestals, double sigmaCut, DEPFET::PixelMask& masked, int frameNr)\n{\n PixelMean newPedestals;\n int eventNr(1);\n while (reader.next()) {\n DEPFET::Event& event = reader.getEvent();\n BOOST_FOREACH(DEPFET::ADCValues & data, event) {\n if (frameNr >= 0 && data.getFrameNr() != frameNr) continue;\n if (!newPedestals) newPedestals.setSize(data);\n\n for (size_t x = 0; x < data.getSizeX(); ++x) {\n for (size_t y = 0; y < data.getSizeY(); ++y) {\n if (masked(x, y)) continue;\n if (sigmaCut > 0) {\n const double mean = pedestals(x, y).getMean();\n const double width = pedestals(x, y).getSigma() * sigmaCut;\n if (std::fabs(data(x, y) - mean) > width) continue;\n }\n newPedestals(x, y).add(data(x, y));\n }\n }\n }\n if (showProgress(eventNr)) {\n cout << \"Pedestal calculation (\" << sigmaCut << \" sigma cut): \" << eventNr << \" events read\" << endl;\n }\n ++eventNr;\n }\n swap(newPedestals, pedestals);\n}\n\nint main(int argc, char* argv[])\n{\n int skipEvents(0);\n int maxEvents(-1);\n double sigmaCut(5.0);\n double scaleFactor(1.0);\n vector<string> inputFiles;\n string outputFile;\n string maskFile(\"MaskCh-Mod%1%.txt\");\n int frameNr(-1);\n\n //Parse program arguments\n po::options_description desc(\"Allowed options\");\n desc.add_options()\n (\"help,h\", \"Show help message\")\n (\"sigma\", po::value<double>(&sigmaCut)->default_value(5.0), \"sigma cut\")\n (\"skip,s\", po::value<int>(&skipEvents)->default_value(0), \"Number of events to skip before reading\")\n (\"nevents,n\", po::value<int>(&maxEvents)->default_value(5000), \"Max. 
number of output events\")\n    (\"mask\", po::value<string>(&maskFile)->default_value(maskFile), \"Filename for reading pixel masks\")\n    (\"input,i\", po::value< vector<string> >(&inputFiles)->composing(), \"Input files\")\n    (\"output,o\", po::value<string>(&outputFile)->default_value(\"output.dat\"), \"Output file\")\n    (\"scale\", po::value<double>(&scaleFactor)->default_value(1.0), \"Scaling factor for ADC values\")\n    (\"4fold\", \"If set, data is read out in 4fold mode, otherwise 2fold\")\n    (\"dcd\", \"If set, common mode correction is set to DCD mode (4 full rows), otherwise curo topology is used (two half rows)\")\n    (\"frame,f\", po::value<int>(&frameNr)->default_value(frameNr), \"Set the frame number to be used: -1=all, 0=original, 1=1st tailing, ...\")\n    ;\n\n  po::variables_map vm;\n  po::positional_options_description p;\n  p.add(\"input\", -1);\n\n  po::store(po::command_line_parser(argc, argv).options(desc).positional(p).run(), vm);\n  po::notify(vm);\n  if (vm.count(\"help\")) {\n    cout << desc << \"\\n\";\n    return 1;\n  }\n\n  //check program arguments\n  if (inputFiles.empty()) {\n    cerr << \"No input files given\" << endl;\n    return 2;\n  }\n\n  DEPFET::DataReader reader;\n  reader.setReadoutFold(2);\n  reader.setUseDCDBMapping(true);\n  //Common mode correction: row wise correction using two half rows and one column\n  DEPFET::CommonMode commonMode(2, 1, 2, 1);\n  if (vm.count(\"4fold\")) {\n    reader.setReadoutFold(4);\n  }\n  if (vm.count(\"dcd\")) {\n    commonMode = DEPFET::CommonMode(4, 0, 1, 1);\n  }\n\n  PixelMean pedestals;\n  HistGrid noise;\n  GraphGrid raw;\n  DEPFET::PixelMask masked;\n\n  reader.open(inputFiles, maxEvents);\n  if (!reader.next()) {\n    cerr << \"Could not read a single event from the file\" << endl;\n    return 5;\n  }\n  DEPFET::Event& event = reader.getEvent();\n  boost::format maskFileFormat(maskFile);\n  DEPFET::ADCValues& data = event[0];\n  noise.setSize(data);\n  raw.setSize(data);\n  masked.setSize(data);\n  if (!maskFile.empty()) {\n    io::filtering_istream maskStream;\n    maskStream.push(CommentFilter());\n    maskStream.push(io::file_source((maskFileFormat % data.getModuleNr()).str()));\n    std::cout << data.getModuleNr() << std::endl;\n    //ifstream maskStream((maskFileFormat%data.getModuleNr()).str().c_str());\n    if (!maskStream) {\n      cerr << \"Could not open mask file for module \" << data.getModuleNr() << \", not masking any pixels\" << endl;\n    }\n    //Skip first line\n    if (maskStream.peek() == '*') {\n      maskStream.ignore(1000, '\\n');\n    }\n    //Use the rest for masking\n    while (maskStream) {\n      int col, row;\n      maskStream >> col >> row;\n      if (!maskStream) break;\n      if (col < 0) for (col = 0; col < (int)masked.getSizeX(); ++col) {\n          masked(col, row) = 1;\n        } else if (row < 0) for (row = 0; row < (int)masked.getSizeY(); ++row) {\n          masked(col, row) = 1;\n        } else\n        masked(col, row) = 1;\n    }\n  }\n\n  gStyle->SetOptFit(11111);\n  boost::format name(\"noise-%02dx%02d\");\n  for (unsigned int col = 0; col < noise.getSizeX(); ++col) {\n    for (unsigned int row = 0; row < noise.getSizeY(); ++row) {\n      noise(col, row) = new TH1D((name % col % row).str().c_str(), \"\", 80, 0, -1);\n      noise(col, row)->SetBuffer(1000);\n      //raw(col, row) = new TGraph(1000);\n    }\n  }\n\n  //Calibration: Calculate pedestals, first run: determine mean and sigma for each pixel\n  reader.open(inputFiles, maxEvents);\n  reader.skip(skipEvents);\n  calculatePedestals(reader, pedestals, 0, masked, frameNr);\n\n  //Second run, this time exclude values outside of sigmaCut * pedestal spread\n  reader.open(inputFiles, maxEvents);\n  
reader.skip(skipEvents);\n calculatePedestals(reader, pedestals, sigmaCut, masked, frameNr);\n\n //Third run to determine noise level of pixels\n reader.open(inputFiles, maxEvents);\n reader.skip(skipEvents);\n int eventNr(1);\n TH1D* noiseFitProb = new TH1D(\"noiseFitPval\", \"NoiseFit p-value;p-value,#\", 100, 0, 1);\n TH1D* cMRHist = new TH1D(\"commonModeR\", \"common mode, row wise\", 160, 0, -1);\n TH1D* cMCHist = new TH1D(\"commonModeC\", \"common mode, column wise\", 160, 0, -1);\n TH1D* rawHist = new TH1D(\"raw\", \"Raw adc values\", 256, 0, -1);\n TH1D* adcHist = new TH1D(\"adc\", \"Corrected adc values\", 256, 0, -1);\n commonMode.setMask(&masked);\n while (reader.next()) {\n DEPFET::Event& event = reader.getEvent();\n BOOST_FOREACH(DEPFET::ADCValues & data, event) {\n if (frameNr >= 0 && data.getFrameNr() != frameNr) continue;\n for (size_t x = 0; x < data.getSizeX(); ++x) {\n for (size_t y = 0; y < data.getSizeY(); ++y) {\n //raw(x, y)->SetPoint(eventNr - 1, eventNr, data(x, y));\n rawHist->Fill(data(x, y));\n }\n }\n //Pedestal substraction\n data.substract(pedestals);\n //Common Mode correction\n commonMode.apply(data);\n BOOST_FOREACH(double c, commonMode.getCommonModesRow()) {\n cMRHist->Fill(c);\n }\n BOOST_FOREACH(double c, commonMode.getCommonModesCol()) {\n cMCHist->Fill(c);\n }\n\n for (size_t x = 0; x < data.getSizeX(); ++x) {\n for (size_t y = 0; y < data.getSizeY(); ++y) {\n if (masked(x, y)) continue;\n double signal = data(x, y);\n //Add signal to noise map if it is below nSigma*(sigma of pedestal)\n adcHist->Fill(signal);\n if (std::fabs(signal) > sigmaCut * pedestals(x, y).getSigma()) continue;\n noise(x, y)->Fill(signal);\n }\n }\n }\n if (showProgress(eventNr)) {\n cout << \"Calculating Noise: \" << eventNr << \" events read\" << endl;\n }\n ++eventNr;\n }\n\n ofstream output(outputFile.c_str());\n if (!output) {\n cerr << \"Could not open output file \" << outputFile << endl;\n return 3;\n }\n TFile* rootFile = new TFile(\"noise.root\", \"RECREATE\");\n TCanvas* c1 = new TCanvas(\"c1\", \"c1\");\n TF1* func = new TF1(\"f1\", \"gaus\");\n c1->cd();\n TH1D* pedHist = new TH1D(\"pedestals\", \"Pedestals\", 256, 0, -1);\n pedHist->SetBuffer(5000);\n boost::format canvasName(\"noiseFit-%02dx%02d\");\n for (unsigned int col = 0; col < pedestals.getSizeX(); ++col) {\n for (unsigned int row = 0; row < pedestals.getSizeY(); ++row) {\n output << setw(6) << col << setw(6) << row << setw(2) << (int)masked(col, row) << \" \";\n dumpValue(output, pedestals(col, row).getMean(), scaleFactor);\n if (!masked(col, row)) pedHist->Fill(pedestals(col, row).getMean());\n noise(col, row)->Draw();\n if (masked(col, row)) {\n dumpValue(output, 0, 0);\n } else {\n noise(col, row)->BufferEmpty();\n setRangeFraction(noise(col, row));\n noise(col, row)->Fit(func, \"Q\");\n noiseFitProb->Fill(TMath::Prob(func->GetChisquare(), func->GetNDF()));\n dumpValue(output, func->GetParameter(2), scaleFactor);\n //noise(col, row)->GetXaxis()->UnZoom();\n //func->Draw(\"same\");\n //c1->Update();\n //c1->Write((canvasName % col % row).str().c_str());\n }\n output << endl;\n }\n }\n pedHist->BufferEmpty();\n output.close();\n noiseFitProb->Write();\n cMRHist->Write();\n cMCHist->Write();\n rawHist->Write();\n adcHist->Write();\n pedHist->Write();\n\n boost::format rawName(\"raw-%02dx%02d\");\n for (size_t x = 0; x < raw.getSizeX(); ++x) {\n for (size_t y = 0; y < raw.getSizeY(); ++y) {\n //raw(x, y)->Write((rawName % x % y).str().c_str());\n }\n }\n\n rootFile->Write();\n rootFile->Close();\n}\n" }, { 
"alpha_fraction": 0.6172019243240356, "alphanum_fraction": 0.6223698854446411, "avg_line_length": 41, "blob_id": "89088c34da06d24182525d50e056ba653da17658", "content_id": "4037ba990162f1b5ecd1ade983e9d53cbd6755f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5418, "license_type": "no_license", "max_line_length": 114, "num_lines": 129, "path": "/include/ADCValues.h", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "#ifndef DEPFET_ADCVALUES_H\n#define DEPFET_ADCVALUES_H\n\n#include <stdexcept>\n\nnamespace DEPFET {\n /** Class to represent a matrix of values.\n * Offers flat or 2D access to the values and to add and substract other matrices */\n template<class T = double> class ValueMatrix {\n public:\n /** Datatype for the matrix */\n typedef typename std::vector<T>::value_type value_type;\n /** Construct an empty matrix with no elements and zero size */\n ValueMatrix(): m_sizeX(0), m_sizeY(0) {}\n /** Construct a matrix with a given size */\n ValueMatrix(size_t sizeX, size_t sizeY): m_sizeX(sizeX), m_sizeY(sizeY), m_data(sizeX* sizeY) {}\n\n /** resize the matrix to the given dimensions */\n void setSize(size_t sizeX, size_t sizeY) { m_sizeX = sizeX; m_sizeY = sizeY; clear(); }\n /** resize the matrix to match the size of another matrix */\n template<class T2> void setSize(const ValueMatrix<T2>& other) { setSize(other.getSizeX(), other.getSizeY()); }\n /** clear all elements */\n void clear() { m_data.clear(); m_data.resize(m_sizeX * m_sizeY); }\n\n /** get size in x */\n size_t getSizeX() const { return m_sizeX; }\n /** get size in y */\n size_t getSizeY() const { return m_sizeY; }\n /** get total number of elements */\n size_t getSize() const { return m_data.size(); }\n /** check if the matrix has a nonzero size */\n bool operator!() const { return m_data.empty(); }\n\n /** return value of a given position, no boundary check */\n value_type operator()(size_t x, size_t y) const { return m_data[x * m_sizeY + y]; }\n /** return value of a given position with boundary check */\n value_type at(size_t x, size_t y) const {\n if (0 > x || x >= m_sizeX || 0 > y || y >= m_sizeY)\n throw std::runtime_error(\"index out of bounds\");\n return m_data[x * m_sizeY + y];\n }\n /** return value of an element of the flat array, no boundary check */\n value_type operator[](size_t index) const { return m_data[index]; }\n\n /** return reference to a given position, no boundary check */\n value_type& operator()(size_t x, size_t y) { return m_data[x * m_sizeY + y]; }\n /** return reference to a given position with boundary check */\n value_type& operator[](size_t index) { return m_data[index]; }\n /** return reference to an element of the flat array, no boundary check */\n value_type& at(size_t x, size_t y) {\n if (0 > x || x >= m_sizeX || 0 > y || y >= m_sizeY)\n throw std::runtime_error(\"index out of bounds\");\n return m_data[x * m_sizeY + y];\n }\n\n /** substract another matrix */\n template<class T2> void substract(const ValueMatrix<T2>& other, double scale = 1) {\n if (other.getSizeX() != m_sizeX || other.getSizeY() != m_sizeY) {\n throw std::runtime_error(\"Dimensions do not match\");\n }\n for (size_t i = 0; i < m_data.size(); ++i) m_data[i] -= scale * (value_type) other[i];\n }\n\n /** add another matrix */\n template<class T2> void add(const ValueMatrix<T2>& other, double scale = 1) {\n if (other.getSizeX() != m_sizeX || other.getSizeY() != m_sizeY) {\n throw std::runtime_error(\"Dimensions do not match\");\n }\n for 
(size_t i = 0; i < m_data.size(); ++i) m_data[i] += scale * (value_type) other[i];\n    }\n\n    /** set matrix from given matrix */\n    template<class T2> void set(const ValueMatrix<T2>& other, double scale = 1) {\n      if (other.getSizeX() != m_sizeX || other.getSizeY() != m_sizeY) {\n        throw std::runtime_error(\"Dimensions do not match\");\n      }\n      for (size_t i = 0; i < m_data.size(); ++i) m_data[i] = scale * (value_type) other[i];\n    }\n\n  protected:\n    /** size in X */\n    size_t m_sizeX;\n    /** size in Y */\n    size_t m_sizeY;\n    /** vector containing the data */\n    std::vector<T> m_data;\n  };\n\n  /** Typedef used for a Pixel Mask. Normally we would use bool but\n   * std::vector<bool> is specialized so unsigned char is more suitable */\n  typedef ValueMatrix<unsigned char> PixelMask;\n  /** Typedef used for the pixel noise map */\n  typedef ValueMatrix<double> PixelNoise;\n\n  /** Class for adc values from a matrix, including some additional information */\n  class ADCValues: public ValueMatrix<double> {\n  public:\n    /** default constructor */\n    ADCValues(): ValueMatrix<double>(), m_moduleNr(0), m_triggerNr(0), m_startGate(-1), m_frameNr(0) {}\n    /** get the number of the module */\n    int getModuleNr() const { return m_moduleNr; }\n    /** get the trigger number */\n    int getTriggerNr() const { return m_triggerNr; }\n    /** get the start gate where readout starts */\n    int getStartGate() const { return m_startGate; }\n    /** get the frame number. 0 for the normal frame, 1..n for trailing frames */\n    int getFrameNr() const { return m_frameNr; }\n    /** set the number of the module */\n    void setModuleNr(int moduleNr) { m_moduleNr = moduleNr; }\n    /** set the trigger number */\n    void setTriggerNr(int triggerNr) { m_triggerNr = triggerNr; }\n    /** set the start gate where readout starts */\n    void setStartGate(int startGate) { m_startGate = startGate; }\n    /** set the frame number */\n    void setFrameNr(int frameNr) { m_frameNr = frameNr; }\n  protected:\n    /** module number */\n    int m_moduleNr;\n    /** trigger number */\n    int m_triggerNr;\n    /** start gate */\n    int m_startGate;\n    /** frame number, 0 for the normal frame, 1..n for trailing frames */\n    int m_frameNr;\n  };\n\n}\n\n#endif\n" }, { "alpha_fraction": 0.7421602606773376, "alphanum_fraction": 0.7491289377212524, "avg_line_length": 26.33333396911621, "blob_id": "3e27bfd74cfb424972db8d1aaa859cba84f0ac83", "content_id": "f831288bb3fa22e99e8930d8e63458a87c3497a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 574, "license_type": "no_license", "max_line_length": 76, "num_lines": 21, "path": "/include/DCDConverter.h", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "#ifndef DEPFET_DCDCONVERTER_H\n#define DEPFET_DCDCONVERTER_H\n\n#include <DEPFETReader/RawData.h>\n#include <DEPFETReader/ADCValues.h>\n\nnamespace DEPFET {\n\n  struct DCDConverter2Fold {\n    DCDConverter2Fold(bool useDCDMapping): m_useDCDMapping(useDCDMapping) {}\n    bool m_useDCDMapping;\n    size_t operator()(const RawData& rawData, ADCValues& adcValues);\n  };\n\n  struct DCDConverter4Fold {\n    DCDConverter4Fold(bool useDCDMapping): m_useDCDMapping(useDCDMapping) {}\n    bool m_useDCDMapping;\n    size_t operator()(const RawData& rawData, ADCValues& adcValues);\n  };\n}\n#endif\n" }, { "alpha_fraction": 0.6283034682273865, "alphanum_fraction": 0.6368286609649658, "avg_line_length": 29.076923370361328, "blob_id": "4f64ac732074e7131477ab5424b98e85d3d8a760", "content_id": "12644bc47c7e2769db03489e98c92770f47fa31d", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 1173, "license_type": "no_license", "max_line_length": 89, "num_lines": 39, "path": "/examples/hitmap.py", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "import sys\nimport numpy as np\nimport matplotlib\nmatplotlib.use(\"Agg\")\nfrom matplotlib import pyplot as pl\nfrom matplotlib.backends.backend_pdf import PdfPages\n\ndef save_all(basename,pdf=True,png=True,close=True):\n \"\"\"Save all figures\"\"\"\n if pdf: pp = PdfPages(basename+\".pdf\")\n for i in pl.get_fignums():\n fig = pl.figure(i)\n if pdf: pp.savefig(fig)\n if png:\n fig.patch.set_alpha(0.0)\n fig.savefig(basename+\"-%02d.png\" % i)\n if pdf: pp.close()\n if close: pl.close(\"all\")\n\nfor filename in sys.argv[1:]:\n datafile = open(filename)\n line = datafile.readline()\n cols,rows = map(int,line.split())\n data = np.fromfile(datafile, count=cols*rows, sep=\" \")\n data.shape = (cols,rows)\n data = np.ma.masked_less(data,0)\n\n fig = pl.figure()\n ax = fig.add_subplot(111)\n img = ax.imshow(data.T,interpolation=\"nearest\",origin=\"lower\", aspect=\"auto\", vmin=0)\n ax.set_title(filename)\n ax.set_xlabel(\"column\")\n ax.set_ylabel(\"row\")\n print \"Average ADC count:\", filename, data.mean()\n print \"Total ADC sum:\", filename, data.sum()\n fig.colorbar(img)\n\nsave_all(\"hitmap\",png=False)\n#pl.show()\n" }, { "alpha_fraction": 0.7076271176338196, "alphanum_fraction": 0.7076271176338196, "avg_line_length": 17.153846740722656, "blob_id": "30ea5e6cc767af4ce1f6f85c6d3575aadc6d3f05", "content_id": "2fec25c1fd43f128b12a5b7183361517964e9e55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 236, "license_type": "no_license", "max_line_length": 67, "num_lines": 13, "path": "/include/Exception.h", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "#ifndef DEPFET_EXCEPTION_H\n#define DEPFET_EXCEPTION_H\n\n#include <stdexcept>\n\nnamespace DEPFET {\n class Exception: public std::runtime_error {\n public:\n Exception(const std::string& what): std::runtime_error(what) {}\n };\n}\n\n#endif\n" }, { "alpha_fraction": 0.4767259359359741, "alphanum_fraction": 0.5721757411956787, "avg_line_length": 40.11827850341797, "blob_id": "22b9f94fd219febba80887550b73bc1fd665e3b6", "content_id": "1692d4cf5072be01f908c4c4051a4bf202b68393", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3824, "license_type": "no_license", "max_line_length": 104, "num_lines": 93, "path": "/src/DCDConverter.cc", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "#include <DEPFETReader/DCDConverter.h>\nnamespace DEPFET {\n\n int SWBChannelMap[16] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};\n\n int FPGAToDrainMap[128] = {\n 0, 4, 8, 12, 2, 6, 10, 14, 124, 120,\n 116, 112, 126, 122, 118, 114, 16, 20, 24, 28,\n 18, 22, 26, 30, 108, 104, 100, 96, 110, 106,\n 102, 98, 36, 35, 37, 34, 52, 51, 53, 50,\n 68, 67, 69, 66, 84, 83, 85, 82, 38, 33,\n 39, 32, 54, 49, 55, 48, 70, 65, 71, 64,\n 86, 81, 87, 80, 1, 5, 9, 13, 3, 7,\n 11, 15, 125, 121, 117, 113, 127, 123, 119, 115, 17,\n 21, 25, 29, 19, 23, 27, 31, 109, 105,\n 101, 97, 111, 107, 103, 99, 44, 43, 45,\n 42, 60, 59, 61, 58, 76, 75, 77, 74, 92,\n 91, 93, 90, 46, 41, 47, 40, 62, 57, 63,\n 56, 78, 73, 79, 72, 94, 89, 95, 88\n };\n\n size_t DCDConverter2Fold::operator()(const RawData& rawData, ADCValues& adcValues)\n {\n adcValues.setSize(64, 32);\n DataView<signed char> v4data = 
rawData.getView<signed char>();\n if (m_useDCDMapping) {\n // printf(\"=> try with internal maps \\n\");\n // do not touch maps above!! and cross fingers\n\n int iData = -1; // pointer to raw data\n int noOfDCDBChannels = adcValues.getSizeX() * 2; // used channels only\n int noOfSWBChannels = adcValues.getSizeY() / 2; // used channels only\n\n for (int offset = 0; offset < noOfSWBChannels; ++offset) { // loop over SWB channels\n // this is the SWB channel/pad switched on\n int iSWB = (rawData.getStartGate() + offset) % (noOfSWBChannels);\n // which is bonded to PXD5 double row number\n int iDoubleRow = SWBChannelMap[iSWB];\n\n for (int iFPGA = 0; iFPGA < noOfDCDBChannels; ++iFPGA) { // loop over DCDB channels\n // position iFPGA in serial output is mapped to drain line number\n int iDrain = FPGAToDrainMap[iFPGA];\n // irow and icol are referring to physical pixels on sensor\n int icol, irow;\n icol = iDrain / 2;\n\n // double rows are counted 0,1,2,... starting from DCDB and first\n // connected double row is even!!\n if (iDoubleRow % 2 == 0) {\n // ok, this double row is even\n if (iDrain % 2 == 0) irow = 2 * iDoubleRow;\n else irow = 2 * iDoubleRow + 1;\n } else {\n // ok, this double row is odd\n if (iDrain % 2 == 0) irow = 2 * iDoubleRow + 1 ;\n else irow = 2 * iDoubleRow;\n }\n\n adcValues(icol, irow) = (signed short) v4data[++iData];\n }\n }\n } else {\n // all encodings done on daq (only bonn laser data)\n int ipix = -1;\n for (size_t offset = 0; offset < adcValues.getSizeY(); ++offset) { // loop over Switcher channels\n int igate = (rawData.getStartGate() + offset) % adcValues.getSizeY();\n for (size_t idrain = 0; idrain < adcValues.getSizeX(); ++idrain) { // loop over DCD channels\n adcValues(idrain, igate) = (signed short) v4data[++ipix];\n }\n }\n }\n return rawData.getFrameSize<signed char>(64, 32);\n }\n\n size_t DCDConverter4Fold::operator()(const RawData& rawData, ADCValues& adcValues)\n {\n adcValues.setSize(32, 64);\n DataView<signed char> v4data = rawData.getView<signed char>(32 * 64, 1);\n int iPix(-1);\n int nGates = adcValues.getSizeY() / 4;\n int nColDCD = adcValues.getSizeX() * 4;\n for (int gate = 0; gate < nGates; ++gate) {\n int iRowD1 = (rawData.getStartGate() + gate) % nGates;\n for (int colDCD = 0; colDCD < nColDCD; ++colDCD) {\n int icolD = m_useDCDMapping ? FPGAToDrainMap[colDCD] : colDCD;\n int col = (icolD / 4) % adcValues.getSizeX();\n int row = (iRowD1 * 4 + 3 - icolD % 4) % adcValues.getSizeY();\n adcValues(col, row) = (signed short) v4data[++iPix];\n }\n }\n return rawData.getFrameSize<signed char>(32, 64);\n }\n}\n" }, { "alpha_fraction": 0.6362748742103577, "alphanum_fraction": 0.6401796340942383, "avg_line_length": 35.84892272949219, "blob_id": "9cf0b573ae0ed15df76f31e7fb4105a65c465c18", "content_id": "ff22b0d758f24036cf02263f92953188aebf8d6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5122, "license_type": "no_license", "max_line_length": 104, "num_lines": 139, "path": "/include/CommonMode.h", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "#ifndef DEPFET_COMMONMODE_H\n#define DEPFET_COMMONMODE_H\n\n#include <vector>\n#include <algorithm>\n\n#include <DEPFETReader/ADCValues.h>\n\nnamespace DEPFET {\n\n /** Class to calculate and apply common mode corrections to raw depfet data.\n *\n * To accomodate for readout electronics, a common offset can occur for\n * each channel. 
To accomodate for different types of readout, this class\n * will apply first row wise and than column wise corrections.\n *\n * For row wise common mode correction, the class allows to specify the\n * number of rows which should used together as well es the number of\n * divisions per row. Same for column wise corrections\n */\n class CommonMode {\n public:\n /** Constructor\n * @param nRows number of rows for one row wise correction\n * @param nCols number of columns for one column wise correction\n * @param divRows number of divisions per row wise correction\n * @param divCols number of divisions per column wise correction\n */\n CommonMode(int nRows = 1, int nCols = 1, int divRows = 1, int divCols = 1):\n m_nRows(nRows), m_nCols(nCols), m_divRows(divRows), m_divCols(divCols),\n m_mask(0), m_noise(0), m_cutvalue(0) {};\n /** Apply common mode corrections to raw data */\n void apply(ADCValues& data);\n /** Return the calculated column wise corrections */\n const std::vector<double>& getCommonModesRow() const { return m_commonModeRow; }\n /** Return the calculated row wise corrections */\n const std::vector<double>& getCommonModesCol() const { return m_commonModeCol; }\n /** Set the mask to be used. All pixels which have a nonzero value in mask will be ignored */\n void setMask(const PixelMask* mask) {\n m_mask = mask;\n }\n /** Set noise map and the cut value. All pixels which are more than\n * cutvalue*noise away from 0 are ignored for common mode correction */\n void setNoise(double cutvalue, const PixelNoise* noise) {\n m_cutvalue = cutvalue;\n m_noise = noise;\n }\n protected:\n /** Calculate and apply common mode correction for a part of the matrix */\n double calculate(ADCValues& data, int startCol, int startRow, int nCols, int nRows);\n\n /** Number of rows per row wise correction */\n int m_nRows;\n /** Number of columns per column wise correction */\n int m_nCols;\n /** Number of divisions per row wise correction */\n int m_divRows;\n /** Number of divisions per column wise correction */\n int m_divCols;\n /** Caluclated row wise corrections */\n std::vector<double> m_commonModeRow;\n /** Caluclated column wise corrections */\n std::vector<double> m_commonModeCol;\n /** Matrix containing masked pixels */\n const PixelMask* m_mask;\n /** Matrix containing the pixel noise */\n const PixelNoise* m_noise;\n /** Cut value for ignoring high signal values during common mode correction */\n double m_cutvalue;\n };\n\n inline double CommonMode::calculate(ADCValues& data, int startCol, int startRow, int nCols, int nRows)\n {\n static std::vector<double> pixelValues;\n pixelValues.clear();\n\n //Collect pixel data\n for (int x = startCol; x < startCol + nCols; ++x) {\n for (int y = startRow; y < startRow + nRows; ++y) {\n if (m_mask && (*m_mask)(x, y) != 0) {\n data(x, y) = 0;\n continue;\n }\n if (m_noise && data(x, y) > m_cutvalue * (*m_noise)(x, y)) continue;\n pixelValues.push_back(data(x, y));\n }\n }\n\n //Calculate median of selected pixel data\n if (pixelValues.empty()) return 0;\n std::vector<double>::iterator middle = pixelValues.begin() + (pixelValues.size()) / 2;\n std::nth_element(pixelValues.begin(), middle, pixelValues.end());\n\n //Apply correction to data\n for (int x = startCol; x < startCol + nCols; ++x) {\n for (int y = startRow; y < startRow + nRows; ++y) {\n if (m_mask && (*m_mask)(x, y) != 0) data(x, y) = 0;\n else data(x, y) -= *middle;\n }\n }\n\n //Return applied common mode correction\n return *middle;\n }\n\n inline void CommonMode::apply(ADCValues& data)\n {\n 
int nCols(0), nRows(0);\n if (m_nRows > 0) {\n m_commonModeRow.resize(data.getSizeY() / m_nRows * m_divRows);\n } else {\n m_commonModeRow.clear();\n }\n if (m_nCols > 0) {\n m_commonModeCol.resize(data.getSizeX() / m_nCols * m_divCols);\n } else {\n m_commonModeCol.clear();\n }\n\n //Calculate and apply row wise common mode correction\n nCols = data.getSizeX() / m_divRows;\n nRows = m_nRows;\n for (size_t i = 0; i < m_commonModeRow.size(); ++i) {\n int startCol = (i % m_divRows) * nCols;\n int startRow = (i / m_divRows) * nRows;\n m_commonModeRow[i] = calculate(data, startCol, startRow, nCols, nRows);\n }\n\n //Calculate and apply column wise common mode correction\n nCols = m_nCols;\n nRows = data.getSizeY() / m_divCols;\n for (size_t i = 0; i < m_commonModeCol.size(); ++i) {\n int startCol = (i / m_divCols) * nCols;\n int startRow = (i % m_divCols) * nRows;\n m_commonModeCol[i] = calculate(data, startCol, startRow, nCols, nRows);\n }\n }\n}\n#endif\n" }, { "alpha_fraction": 0.6894198060035706, "alphanum_fraction": 0.6894198060035706, "avg_line_length": 40.85714340209961, "blob_id": "3abaac2497cc9aaef05ee393e705480069dabd76", "content_id": "b8d16663fc9746ab4b40e711644f9ee674c0a034", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 293, "license_type": "no_license", "max_line_length": 95, "num_lines": 7, "path": "/tools/SConscript", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "Import('env')\n\nenv['TOOLS_LIBS']['depfetDump'] = ['DEPFETReader','$ROOT_LIBS', 'boost_program_options']\nenv['TOOLS_LIBS']['depfetCalibration'] = ['DEPFETReader','$ROOT_LIBS', 'boost_program_options']\nenv['TOOLS_LIBS']['depfetHitmap'] = ['DEPFETReader', 'boost_program_options']\n\nReturn('env')\n" }, { "alpha_fraction": 0.6707021594047546, "alphanum_fraction": 0.6731234788894653, "avg_line_length": 19.649999618530273, "blob_id": "cca05011814c24811334a622b9dc7897f4d419f5", "content_id": "db496cb0988716e8f48d6f7162d35834732e96a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 413, "license_type": "no_license", "max_line_length": 104, "num_lines": 20, "path": "/Makefile", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "SOURCES = $(wildcard src/*.cc)\nHEADERS = $(wildcard include/*.h)\nCXXFLAGS = -O2\n\nALL = depfetCalibration depfetHitmap depfetDump\n\nall: $(ALL)\n\n$(ALL): %: tools/%.cc $(SOURCES)\n\t$(CXX) $(CXXFLAGS) -o $@ $^ -I. 
-lboost_program_options $(shell root-config --cflags --ldflags --libs)\n\n$(SOURCES): $(HEADERS) DEPFETReader\n\nDEPFETReader:\n\tln -sfT include DEPFETReader\n\nclean:\n\trm -f DEPFETReader $(ALL)\n\n.PHONY: clean\n" }, { "alpha_fraction": 0.6077347993850708, "alphanum_fraction": 0.6178637146949768, "avg_line_length": 30.02857208251953, "blob_id": "089a65314bdf75ea13367f32dccd86fecaa91dc9", "content_id": "4df63eb444f2ec54a49c56f86922c8fa8d976650", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1086, "license_type": "no_license", "max_line_length": 94, "num_lines": 35, "path": "/include/IncrementalMean.h", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "#ifndef DEPFET_INCREMENTALMEAN_H\n#define DEPFET_INCREMENTALMEAN_H\n\n#include <cmath>\n\nnamespace DEPFET {\n /** Class to calculate mean and sigma on running data without having to keep all the data */\n class IncrementalMean {\n public:\n IncrementalMean(): m_entries(0), m_mean(0), m_variance(0) {}\n void clear() {\n m_entries = 0;\n m_mean = 0;\n m_variance = 0;\n }\n void add(double x, double weight = 1.0, double sigmaCut = 0.0) {\n if (sigmaCut > 0 && std::fabs(x - getMean()) > getSigma()*sigmaCut) return;\n m_entries += weight;\n const double oldMean = m_mean;\n m_mean += weight * (x - oldMean) / m_entries;\n m_variance += weight * (x - oldMean) * (x - m_mean);\n }\n double getEntries() const { return m_entries; }\n double getMean() const { return m_mean; }\n double getSigma() const { return std::sqrt(m_variance / m_entries); }\n operator double() const { return getMean(); }\n operator int() const { return round(getMean()); }\n protected:\n double m_entries;\n double m_mean;\n double m_variance;\n };\n\n}\n#endif\n" }, { "alpha_fraction": 0.6143386960029602, "alphanum_fraction": 0.6276885271072388, "avg_line_length": 32.70833206176758, "blob_id": "e19dbf174958f6d595163283840c9a9cd20d90ad", "content_id": "207c5cde0f6a2359daec88b30ab4aec73b51dd72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4045, "license_type": "no_license", "max_line_length": 87, "num_lines": 120, "path": "/include/RawData.h", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "#ifndef DEPFET_RAWDATA_H\n#define DEPFET_RAWDATA_H\n\n#include <vector>\n#include <istream>\n#include <DEPFETReader/DataView.h>\n\nnamespace DEPFET {\n\n enum DeviceTypes {\n DEVICETYPE_GROUP = 0x0,\n DEVICETYPE_DEPFET = 0x2,\n DEVICETYPE_DEPFET_128 = 0x3,\n DEVICETYPE_DEPFET_DCD = 0x4,\n DEVICETYPE_BAT = 0x5,\n DEVICETYPE_TPLL = 0xA,\n DEVICETYPE_UNKNOWN = 0xB,\n DEVICETYPE_TLU = 0xD, //--- new TLU event\n DEVICETYPE_INFO = 0xE, //--- new info header for run number + etc\n DEVICETYPE_OTHER = 0xF\n };\n\n enum EventTypes {\n EVENTTYPE_RUN_BEGIN = 0x0,\n EVENTTYPE_RUN_END = 0x1,\n EVENTTYPE_DATA = 0x2,\n };\n\n class RawData {\n public:\n /** Struct containing the header information for each record */\n struct Header {\n unsigned int eventSize: 20;\n unsigned short flag0: 1;\n unsigned short flag1: 1;\n unsigned short eventType: 2;\n unsigned short moduleNo: 4;\n unsigned short deviceType: 4;\n unsigned int triggerNumber;\n };\n\n /** Struct containing the frame information for each data blob */\n struct InfoWord {\n unsigned int framecnt: 10; // number of Bits\n unsigned int startGate: 7; //jf new for 128x128\n unsigned int padding: 3;\n unsigned int zerosupp: 1;\n unsigned int startgate_ver: 1;\n unsigned int temperature: 10;\n };\n\n 
typedef unsigned int value_type;\n\n /** Constructor taking a reference to the stream from which to read the data */\n RawData(std::istream& stream): m_stream(stream) {}\n\n /** Return a view of the data */\n template<class T> DataView<T> getView(size_t nX = 0, size_t nY = 0) const {\n return DataView<T>(&m_data.front() + m_offset, m_data.size() - m_offset, nX, nY);\n };\n\n /** Return the actual framesize in units of value_type used considering nx\n * cols and nx rows of type T were needed to read the frame */\n template<class T> size_t getFrameSize(size_t nX, size_t nY) const {\n return nX * nY * sizeof(T) / sizeof(value_type);\n }\n\n /** Read the next Header record */\n void readHeader() {\n m_data.clear();\n m_offset = 0;\n m_stream.read((char*)&m_header, sizeof(m_header));\n }\n\n /** Read the next data blob */\n void readData() {\n int dataSize = m_header.eventSize - 3;\n m_data.resize(dataSize);\n m_stream.read((char*)&m_infoWord, sizeof(m_infoWord));\n m_stream.read((char*)&m_data.front(), sizeof(value_type)*dataSize);\n }\n\n /** Skip the next data blob */\n void skipData() {\n m_stream.seekg((m_header.eventSize - 2)*sizeof(value_type), std::ios::cur);\n }\n\n /** Return the Event Type */\n int getEventType() const { return m_header.eventType; }\n /** Return the Trigger Number */\n int getTriggerNr() const { return m_header.triggerNumber; }\n /** Return the Module Number */\n int getModuleNr() const { return m_header.moduleNo; }\n /** Return the Device Type */\n int getDeviceType() const { return m_header.deviceType; }\n /** Return the Event size (including header) */\n int getEventSize() const { return m_header.eventSize; }\n /** Return the data size after reading the data blob */\n int getDataSize() const { return m_data.size(); }\n /** Return the start gate of the readout frame */\n int getStartGate() const { return m_infoWord.startGate; }\n /** Return the temperature value */\n float getTemperature() const { return m_infoWord.temperature / 4.0; }\n /** Set the offset for creating views */\n void setOffset(size_t offset) { m_offset = offset; }\n protected:\n /** Reference to the stream of data */\n std::istream& m_stream;\n /** Struct containing the header information */\n Header m_header;\n /** Struct containing the info word at the begin of each data blob */\n InfoWord m_infoWord;\n /** Offset from the start of the data when creating views */\n size_t m_offset;\n /** Array containing the raw data */\n std::vector<value_type> m_data;\n };\n\n}\n#endif\n" }, { "alpha_fraction": 0.6235632300376892, "alphanum_fraction": 0.6340996026992798, "avg_line_length": 30.445783615112305, "blob_id": "8c977e47be201ebacae2b400c3f720363d7e8ff3", "content_id": "4577fed04afaa367dfe6cb1d32bbfe324d7a6327", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5220, "license_type": "no_license", "max_line_length": 138, "num_lines": 166, "path": "/tools/depfetHitmap.cc", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "#include <DEPFETReader/DataReader.h>\n#include <DEPFETReader/CommonMode.h>\n#include <DEPFETReader/IncrementalMean.h>\n\n#include <cmath>\n#include <iostream>\n#include <iomanip>\n#include <boost/program_options.hpp>\n#include <boost/foreach.hpp>\n#include <boost/format.hpp>\n\nusing namespace std;\nnamespace po = boost::program_options;\n\nbool showProgress(int event, int minOrder = 0, int maxOrder = 3)\n{\n int order = (event == 0) ? 
1 : max(min((int)log10(event), maxOrder), minOrder);\n  int interval = static_cast<int>(pow(10., order));\n  return (event % interval == 0);\n}\n\ntypedef DEPFET::ValueMatrix<double> PixelValues;\n\nint main(int argc, char* argv[])\n{\n  int skipEvents(0);\n  int maxEvents(-1);\n  vector<string> inputFiles;\n  string outputFile;\n  string calibrationFile;\n  double sigmaCut(5.0);\n  bool do_normalize(false);\n  int frameNr(-1);\n\n  //Parse program arguments\n  po::options_description desc(\"Allowed options\");\n  desc.add_options()\n  (\"help,h\", \"Show help message\")\n  (\"skip,s\", po::value<int>(&skipEvents)->default_value(0), \"Number of events to skip before reading\")\n  (\"sigma\", po::value<double>(&sigmaCut)->default_value(5.0), \"Sigma cut to apply to data\")\n  (\"nevents,n\", po::value<int>(&maxEvents)->default_value(-1), \"Max. number of output events\")\n  (\"calibration,c\", po::value<string>(&calibrationFile), \"Calibration File\")\n  (\"input,i\", po::value< vector<string> >(&inputFiles)->composing(), \"Input files\")\n  (\"output,o\", po::value<string>(&outputFile)->default_value(\"hitmap.dat\"), \"Output file\")\n  (\"4fold\", \"If set, data is read out in 4fold mode, otherwise 2fold\")\n  (\"dcd\", \"If set, common mode correction is set to DCD mode (4 full rows), otherwise curo topology is used (two half rows)\")\n  (\"normalize\", po::bool_switch(&do_normalize), \"Divide ADC count by number of frames processed\")\n  (\"frame,f\", po::value<int>(&frameNr)->default_value(frameNr), \"Set the frame number to be used: -1=all, 0=original, 1=1st tailing, ...\")\n  ;\n\n  po::variables_map vm;\n  po::positional_options_description p;\n  p.add(\"input\", -1);\n\n  po::store(po::command_line_parser(argc, argv).options(desc).positional(p).run(), vm);\n  po::notify(vm);\n  if (vm.count(\"help\")) {\n    cout << desc << \"\\n\";\n    return 1;\n  }\n\n  //check program arguments\n  if (inputFiles.empty()) {\n    cerr << \"No input files given\" << endl;\n    return 2;\n  }\n\n  DEPFET::DataReader reader;\n  reader.setReadoutFold(2);\n  reader.setUseDCDBMapping(true);\n  //Common mode correction: row wise correction using two half rows and one column\n  DEPFET::CommonMode commonMode(2, 1, 2, 1);\n  if (vm.count(\"4fold\")) {\n    reader.setReadoutFold(4);\n  }\n  if (vm.count(\"dcd\")) {\n    commonMode = DEPFET::CommonMode(4, 0, 1, 1);\n  }\n\n  reader.open(inputFiles, maxEvents);\n  if (!reader.next()) {\n    cerr << \"Could not read a single event from the file\" << endl;\n    return 5;\n  }\n\n  if (calibrationFile.empty()) {\n    cerr << \"No calibration file given\" << endl;\n    return 4;\n  }\n  ifstream calStream(calibrationFile.c_str());\n  if (!calStream) {\n    cerr << \"Could not open calibration file \" << calibrationFile << endl;\n    return 5;\n  }\n\n  DEPFET::PixelMask mask;\n  PixelValues pedestals;\n  PixelValues noise;\n  PixelValues hitmap;\n\n  DEPFET::Event& event = reader.getEvent();\n  mask.setSize(event[0]);\n  pedestals.setSize(mask);\n  hitmap.setSize(mask);\n  noise.setSize(mask);\n\n  while (calStream) {\n    int col, row, px_masked;\n    double px_pedestal, px_noise;\n    calStream >> col >> row >> px_masked >> px_pedestal >> px_noise;\n    if (!calStream) break;\n    mask.at(col, row) = px_masked;\n    pedestals.at(col, row) = px_pedestal;\n    noise.at(col, row) = px_noise;\n  }\n\n  hitmap.substract(mask, 1e4);\n  commonMode.setMask(&mask);\n  commonMode.setNoise(sigmaCut, &noise);\n\n  int eventNr(1);\n  reader.open(inputFiles, maxEvents);\n  reader.skip(skipEvents);\n  while (reader.next()) {\n    DEPFET::Event& event = reader.getEvent();\n    BOOST_FOREACH(DEPFET::ADCValues & data, 
event) {\n      if (frameNr >= 0 && data.getFrameNr() != frameNr) continue;\n      // DEPFET::ADCValues &data = event[0];\n      //Pedestal subtraction\n      data.substract(pedestals);\n      //Common Mode correction\n      commonMode.apply(data);\n      //At this point, data(x,y) is the pixel value of column x, row y\n      for (size_t y = 0; y < data.getSizeY(); ++y) {\n        //Mask startgate\n        //if(y%2 == data.getStartGate()) {\n        //continue;\n        //}\n        for (size_t x = 0; x < data.getSizeX(); ++x) {\n          if (data(x, y) > sigmaCut * noise(x, y)) {\n            hitmap(x, y) += data(x, y);\n          }\n        }\n      }\n    }\n    if (showProgress(eventNr)) {\n      cout << \"Output: \" << eventNr << \" events written\" << endl;\n    }\n    ++eventNr;\n  }\n\n  ofstream hitmapFile(outputFile.c_str());\n  if (!hitmapFile) {\n    cerr << \"Could not open hitmap output file \" << outputFile << endl;\n    return 6;\n  }\n  hitmapFile << hitmap.getSizeX() << \" \" << hitmap.getSizeY() << endl;\n  for (unsigned int col = 0; col < hitmap.getSizeX(); ++col) {\n    for (unsigned int row = 0; row < hitmap.getSizeY(); ++row) {\n      //Normalize\n      if (do_normalize) hitmap(col, row) /= (eventNr - 1);\n      hitmapFile << hitmap(col, row) << \" \";\n    }\n    hitmapFile << endl;\n  }\n  hitmapFile.close();\n}\n" }, { "alpha_fraction": 0.7228260636329651, "alphanum_fraction": 0.739130437374115, "avg_line_length": 20.647058486938477, "blob_id": "fdf5506da2987cd7677e34987150bb0a3d6aa73a", "content_id": "3446a4320b3540924c6ee3028aead1303998bb58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 368, "license_type": "no_license", "max_line_length": 68, "num_lines": 17, "path": "/include/S3BConverter.h", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "#ifndef DEPFET_S3BCONVERTER_H\n#define DEPFET_S3BCONVERTER_H\n\n#include <DEPFETReader/RawData.h>\n#include <DEPFETReader/ADCValues.h>\n\nnamespace DEPFET {\n\n  struct S3BConverter2Fold {\n    size_t operator()(const RawData& rawData, ADCValues& adcValues);\n  };\n\n  struct S3BConverter4Fold {\n    size_t operator()(const RawData& rawData, ADCValues& adcValues);\n  };\n}\n#endif\n" }, { "alpha_fraction": 0.7346153855323792, "alphanum_fraction": 0.7461538314819336, "avg_line_length": 18.571428298950195, "blob_id": "ab0c11a6f5a0e12989b3d8ad7952225772567ea9", "content_id": "9bd994b60ad37d46b8a71d4edbab48b8652f0826", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 260, "license_type": "no_license", "max_line_length": 68, "num_lines": 14, "path": "/include/S3AConverter.h", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "#ifndef DEPFET_S3ACONVERTER_H\n#define DEPFET_S3ACONVERTER_H\n\n#include <DEPFETReader/RawData.h>\n#include <DEPFETReader/ADCValues.h>\n\nnamespace DEPFET {\n\n  struct S3AConverter {\n    size_t operator()(const RawData& rawData, ADCValues& adcValues);\n  };\n\n}\n#endif\n" }, { "alpha_fraction": 0.6593506932258606, "alphanum_fraction": 0.6625514626502991, "avg_line_length": 34.85245895385742, "blob_id": "c85fa3c4889d1757624cd51df374deabd1939bcc", "content_id": "82400e88f5542481174e8650dc92295beb20e4d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2187, "license_type": "no_license", "max_line_length": 118, "num_lines": 61, "path": "/include/DataReader.h", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "#ifndef DEPFET_DATAREADER_H\n#define DEPFET_DATAREADER_H\n\n#include <DEPFETReader/RawData.h>\n#include <DEPFETReader/Event.h>\n\n#include <fstream>\n#include 
<map>\n\nnamespace DEPFET {\n /** Class to read binary DEPFET file and return the raw adc values for each readout event */\n class DataReader {\n public:\n /** constructor to create a new instance */\n DataReader(): m_eventNumber(0), m_nEvents(-1), m_fold(2), m_useDCDBMapping(true), m_rawData(m_file), m_event(1) {}\n\n /** open a list of files and limit the readout to nEvents */\n void open(const std::vector<std::string>& filenames, int nEvents = -1);\n /** skip a given number of events from the data file */\n bool skip(int nEvents);\n /** read next event, if skip=true the event data is not updated.\n * Returns false if at end of file or maximum number of events is reached */\n bool next(bool skip = false);\n /** return reference to the event data */\n Event& getEvent() { return m_event; }\n\n /** set the readout fold, has to be known to configure the binary format\n * interpreter, normally 2 fold (curo readout) or 4fold (dcd readout) */\n void setReadoutFold(int fold) { m_fold = fold; }\n /** configure if DCDB mapping should be used, only relevant for dcd readout */\n void setUseDCDBMapping(bool useDCDBmapping) { m_useDCDBMapping = useDCDBmapping; }\n protected:\n /** actually open the next file */\n bool openFile();\n /** read the next event header */\n bool readHeader();\n /** read the next event */\n void readEvent(int dataSize);\n /** convert the raw binary data to ADCValues */\n size_t convertData(RawData& rawdata, ADCValues& adcvalues);\n\n /** current event number */\n int m_eventNumber;\n /** maximal number of events to read */\n int m_nEvents;\n /** configured readout fold */\n int m_fold;\n /** use dcdb mapping? */\n bool m_useDCDBMapping;\n /** list of filenames */\n std::vector<std::string> m_filenames;\n /** currently open file */\n std::ifstream m_file;\n /** rawdata structure used for reading the binary blobs */\n RawData m_rawData;\n /** event structure to fill the data in */\n Event m_event;\n };\n}\n\n#endif\n" }, { "alpha_fraction": 0.6035159230232239, "alphanum_fraction": 0.6163867712020874, "avg_line_length": 31.176767349243164, "blob_id": "f0c6ad891dd572065ff19b00dedc87edac7b39de", "content_id": "465695173bed527f7d4aadd37646591c3f518f34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6371, "license_type": "no_license", "max_line_length": 142, "num_lines": 198, "path": "/modules/src/DEPFETReaderModule.cc", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "/**************************************************************************\n * BASF2 (Belle Analysis Framework 2) *\n * Copyright(C) 2010-2011 Belle II Collaboration *\n * *\n * Author: The Belle II Collaboration *\n * Contributors: Martin Ritter *\n * *\n * This software is provided \"as is\" without any warranty. 
*\n **************************************************************************/\n\n#include <DEPFETReader/modules/DEPFETReaderModule.h>\n\n#include <framework/gearbox/Unit.h>\n#include <framework/logging/Logger.h>\n#include <framework/datastore/DataStore.h>\n#include <framework/datastore/StoreArray.h>\n#include <framework/datastore/StoreObjPtr.h>\n#include <framework/dataobjects/EventMetaData.h>\n#include <pxd/dataobjects/PXDDigit.h>\n#include <vxd/geometry/GeoCache.h>\n\n#include <algorithm>\n#include <boost/format.hpp>\n#include <boost/foreach.hpp>\n\nusing namespace std;\nusing namespace Belle2;\nusing namespace DEPFET;\n\nREG_MODULE(DEPFETReader)\n\nDEPFETReaderModule::DEPFETReaderModule() : Module(), m_commonMode(2, 1, 2, 1), m_currentFrame(0)\n{\n //Set module properties\n setDescription(\"Read raw DEPFET data\");\n setPropertyFlags(c_Input);\n\n //Parameter definition\n addParam(\"inputFiles\", m_inputFiles, \"Name of the data files\");\n addParam(\"sigmaCut\", m_sigmaCut, \"Zero suppression cut to apply\", 5.0);\n addParam(\"readoutFold\", m_readoutFold, \"Readout fold (2 or 4 usually)\", 2);\n addParam(\"isDCD\", m_dcd, \"0 for 2 half row common mode, 1 for 4 row common mode substraction\", 0);\n addParam(\"skipEvents\", m_skipEvents, \"Skip this number of events before starting.\", 0);\n addParam(\"trailingFrames\", m_trailingFrames, \"Number of trailing frames\", 0);\n //addParam(\"calibrationEvents\", m_calibrationEvents, \"Calibrate using this number of events before starting.\", 1000);\n addParam(\"calibrationFile\", m_calibrationFile, \"File to read calibration from\");\n}\n\nvoid DEPFETReaderModule::progress(int event, int maxOrder)\n{\n int order = (event == 0) ? 1 : static_cast<int>(std::min(std::log10(event), (double)maxOrder));\n int interval = static_cast<int>(std::pow(10., order));\n if (event % interval == 0) B2INFO(\"Events read: \" << event);\n}\n\nvoid DEPFETReaderModule::calculatePedestals()\n{\n /*B2INFO(\"Calculating pedestals\");\n int eventNr(1);\n double cutValue(0.0);\n while(m_reader.next()){\n const Event& event = m_reader.getEvent();\n const ADCValues& data = event[0];\n if(eventNr==1) m_pedestals.setSize(data);\n if(eventNr==20) cutValue = m_sigmaCut;\n for(size_t x=0; x<data.getSizeX(); ++x){\n for(size_t y=0; y<data.getSizeY(); ++y){\n m_pedestals(x,y).add(data(x,y),1.0,cutValue);\n }\n }\n progress(eventNr++,4);\n }*/\n}\n\nvoid DEPFETReaderModule::calculateNoise()\n{\n /*B2INFO(\"Calculating noise\");\n ValueMatrix<IncrementalMean> noise;\n noise.setSize(m_pedestals);\n m_noise.setSize(m_pedestals);\n int eventNr(1);\n while(m_reader.next()){\n Event& event = m_reader.getEvent();\n ADCValues& data = event[0];\n data.substract(m_pedestals);\n m_commonMode.apply(data);\n for(size_t x=0; x<data.getSizeX(); ++x){\n for(size_t y=0; y<data.getSizeY(); ++y){\n double signal = data(x,y);\n if(std::fabs(signal)>m_sigmaCut*m_pedestals(x,y).getSigma()) continue;\n noise(x,y).add(signal);\n }\n }\n progress(eventNr++,4);\n }\n\n for(size_t x=0; x<noise.getSizeX(); ++x){\n for(size_t y=0; y<noise.getSizeY(); ++y){\n m_noise(x,y) = noise(x,y).getSigma();\n }\n }*/\n}\n\nvoid DEPFETReaderModule::initialize()\n{\n //Initialize PXDDigits collection\n StoreArray<PXDDigit> PXDDigits;\n\n if (m_inputFiles.empty()) {\n B2ERROR(\"No input files specified\");\n return;\n }\n\n\n /*//calculate pedestals\n m_reader.open(m_inputFiles, m_calibrationEvents);\n m_reader.skip(m_skipEvents);\n calculatePedestals();\n\n //calculate noise\n m_reader.open(m_inputFiles, 
m_calibrationEvents);\n m_reader.skip(m_skipEvents);\n calculateNoise();*/\n\n //Read calibration files\n m_reader.setReadoutFold(m_readoutFold);\n if (m_dcd > 0) {\n m_commonMode = DEPFET::CommonMode(4, 1, 1, 1);\n m_reader.setTrailingFrames(m_trailingFrames);\n }\n m_reader.open(m_inputFiles);\n if (!m_reader.next()) {\n B2FATAL(\"Could not read a single event from the file\");\n }\n DEPFET::Event& event = m_reader.getEvent();\n ADCValues& data = event[0];\n m_mask.setSize(data);\n m_pedestals.setSize(data);\n m_noise.setSize(data);\n\n //Read calibration data\n if (!m_calibrationFile.empty()) {\n ifstream maskStream(m_calibrationFile.c_str());\n if (!maskStream) {\n B2FATAL(\"Could not open calibration file \" << m_calibrationFile);\n }\n while (maskStream) {\n int col, row, mask;\n double pedestal, noise;\n maskStream >> col >> row >> mask >> pedestal >> noise;\n if (!maskStream) break;\n m_mask(col, row) = mask;\n m_pedestals(col, row) = pedestal;\n m_noise(col, row) = noise;\n }\n }\n\n //Open file again\n m_reader.open(m_inputFiles);\n m_reader.skip(m_skipEvents);\n\n m_commonMode.setMask(&m_mask);\n m_commonMode.setNoise(m_sigmaCut, &m_noise);\n m_currentFrame = event.size();\n}\n\n\nvoid DEPFETReaderModule::event()\n{\n StoreArray<PXDDigit> storeDigits;\n const VXD::SensorInfoBase& info = VXD::GeoCache::get(VxdID(1, 1, 1));\n\n Event& event = m_reader.getEvent();\n\n //Get next event if we read all frames\n if (m_currentFrame >= event.size()) {\n if (!m_reader.next()) {\n StoreObjPtr <EventMetaData> eventMetaDataPtr;\n eventMetaDataPtr->setEndOfData();\n return;\n }\n m_currentFrame = 0;\n }\n\n ADCValues& data = event[m_currentFrame++];\n data.substract(m_pedestals);\n m_commonMode.apply(data);\n for (size_t y = 0; y < data.getSizeY(); ++y) {\n for (size_t x = 0; x < data.getSizeX(); ++x) {\n if (m_mask(x, y)) continue;\n double signal = data(x, y);\n if (signal < m_sigmaCut * m_noise(x, y)) continue;\n //Create new digit\n int digIndex = storeDigits->GetLast() + 1;\n new(storeDigits->AddrAt(digIndex)) PXDDigit(VxdID(1, 1, 1), x, y, info.getUCellPosition(x), info.getVCellPosition(y), max(0.0, signal));\n }\n }\n}\n" }, { "alpha_fraction": 0.5882962346076965, "alphanum_fraction": 0.6110823154449463, "avg_line_length": 29.171875, "blob_id": "1082c55ab9106841bae83209f83c087a4a8254c7", "content_id": "36f88077c89be0f1164fe23f4ee2c37f70debb33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1931, "license_type": "no_license", "max_line_length": 68, "num_lines": 64, "path": "/examples/makemovie.py", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "import sys\nimport os\nimport numpy as np\nimport subprocess\nimport matplotlib\nmatplotlib.use(\"Agg\")\nfrom matplotlib import pyplot as pl\n\ndatafile = open(sys.argv[1])\nbasename = os.path.splitext(sys.argv[1])[0]\n\nevents = []\nmaxADC = 0\nmaxSUM = 0\nwhile True:\n line = datafile.readline()\n if len(line)==0:\n break\n dummy, run, event, nmodules = line.split()\n run, event = map(int, (run, event))\n for i in range(int(nmodules)):\n tmp, modulenr, cols, rows = datafile.readline().split()\n cols = int(cols)\n rows = int(rows)\n data = np.fromfile(datafile, count=cols*rows, sep=\" \")\n data.shape = (rows, cols)\n data = np.ma.masked_less(data, 0)\n events.append((run, event, data))\n maxADC = max(data.max(), maxADC)\n maxSUM = max(data.sum(), maxSUM)\n\nprint len(events), \"Events read, max ADC value is\", maxADC,\nprint \"with at most\", maxSUM, \"total 
in one frame\"\n\nsubprocess.call([\"mkdir\", \"-p\", basename])\ncmap = matplotlib.cm.get_cmap(\"binary\")\ntotalframe = None\nif len(events)>0:\n totalframe = np.zeros(data.shape)\n\n\ndef save_frame(i, data, title):\n fig = pl.figure(figsize=(15, 6))\n ax = fig.add_axes((0.05, 0.1, 1.0, 0.85))\n img = ax.imshow(data.T, interpolation=\"nearest\", origin=\"lower\",\n aspect=\"auto\", vmin=0, vmax=maxADC, cmap=cmap)\n ax.set_xlabel(\"column\")\n ax.set_ylabel(\"row\")\n ax.set_xlim(0, data.shape[0])\n ax.set_ylim(0, data.shape[1])\n ax.set_title(title)\n fig.colorbar(img, fraction=0.13, pad=0.01)\n fig.savefig(basename+\"/%04d.png\" % i, dpi=90)\n pl.close(fig)\n\nfor i, (run, event, data) in enumerate(events):\n print \"saving frame \", i\n totalframe += data\n save_frame(i+1, data, \"Run %d, event %d\" % (run, event))\n\nsave_frame(0, totalframe, \"All Frames\")\n\nsubprocess.call([\"ffmpeg\", \"-y\", \"-r\", \"10\", \"-qscale\", \"3\",\n \"-i\", basename+\"/%04d.png\", basename+\".mp4\"])\n" }, { "alpha_fraction": 0.4893246293067932, "alphanum_fraction": 0.5503268241882324, "avg_line_length": 46.8125, "blob_id": "d2018d3cb542d6215551810274cb1d4454f63305", "content_id": "f389de22d6147483206dd6c8f3488967e298fb65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2295, "license_type": "no_license", "max_line_length": 86, "num_lines": 48, "path": "/src/S3BConverter.cc", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "#include <DEPFETReader/S3BConverter.h>\n\nnamespace DEPFET {\n\n size_t S3BConverter2Fold::operator()(const RawData& rawData, ADCValues& adcValues)\n {\n adcValues.setSize(64, 256);\n DataView<short> data = rawData.getView<short>(128, 128);\n\n for (int gate = 0; gate < 128; ++gate) {\n int readout_gate = (rawData.getStartGate() + gate) % 128;\n int odderon = readout_gate % 2;\n int rgate = readout_gate * 2;\n for (int col = 0; col < 32; col += 2) {\n adcValues(63 - col, rgate + 1 - odderon) = data(gate, (col * 4) + 0) & 0xffff;\n adcValues(col, rgate + odderon) = data(gate, (col * 4) + 1) & 0xffff;\n adcValues(62 - col, rgate + 1 - odderon) = data(gate, (col * 4) + 2) & 0xffff;\n adcValues(col + 1, rgate + odderon) = data(gate, (col * 4) + 3) & 0xffff;\n adcValues(63 - col, rgate + odderon) = data(gate, (col * 4) + 4) & 0xffff;\n adcValues(col, rgate + 1 - odderon) = data(gate, (col * 4) + 5) & 0xffff;\n adcValues(62 - col, rgate + odderon) = data(gate, (col * 4) + 6) & 0xffff;\n adcValues(col + 1, rgate + 1 - odderon) = data(gate, (col * 4) + 7) & 0xffff;\n }\n }\n return rawData.getFrameSize<short>(64, 256);\n }\n\n size_t S3BConverter4Fold::operator()(const RawData& rawData, ADCValues& adcValues)\n {\n adcValues.setSize(32, 512);\n DataView<short> data = rawData.getView<short>(128, 128);\n for (int gate = 0; gate < 128; ++gate) {\n int readout_gate = (rawData.getStartGate() + gate) % 128;\n int rgate = readout_gate * 4;\n for (int col = 0; col < 16; col += 1) {\n adcValues(31 - col, rgate + 3) = data(gate, (col * 8) + 0) & 0xffff;\n adcValues(col, rgate + 0) = data(gate, (col * 8) + 1) & 0xffff;\n adcValues(31 - col, rgate + 2) = data(gate, (col * 8) + 2) & 0xffff;\n adcValues(col, rgate + 1) = data(gate, (col * 8) + 3) & 0xffff;\n adcValues(31 - col, rgate + 1) = data(gate, (col * 8) + 4) & 0xffff;\n adcValues(col, rgate + 2) = data(gate, (col * 8) + 5) & 0xffff;\n adcValues(31 - col, rgate + 0) = data(gate, (col * 8) + 6) & 0xffff;\n adcValues(col, rgate + 3) = data(gate, (col * 8) + 7) & 
0xffff;\n }\n }\n return rawData.getFrameSize<short>(32, 512);\n }\n}\n" }, { "alpha_fraction": 0.5895809531211853, "alphanum_fraction": 0.5954700112342834, "avg_line_length": 27.12101936340332, "blob_id": "e1756194675881722c0808fe771b29445c8acf58", "content_id": "89ce8eac8ac4990e4d42d16a162cdf05ddd7d84a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4415, "license_type": "no_license", "max_line_length": 84, "num_lines": 157, "path": "/src/DataReader.cc", "repo_name": "daritter/DEPFETReader", "src_encoding": "UTF-8", "text": "#include <DEPFETReader/DataReader.h>\n#include <DEPFETReader/S3AConverter.h>\n#include <DEPFETReader/S3BConverter.h>\n#include <DEPFETReader/DCDConverter.h>\n#include <algorithm>\n#include <iostream>\n\nnamespace DEPFET {\n\n void DataReader::open(const std::vector<std::string>& filenames, int nEvents)\n {\n //Close open files\n m_file.close();\n m_file.clear();\n\n //Set number of events\n m_nEvents = nEvents;\n m_eventNumber = 0;\n\n //Set list of filenames to read in succession\n m_filenames = filenames;\n std::reverse(m_filenames.begin(), m_filenames.end());\n openFile();\n }\n\n bool DataReader::openFile()\n {\n if (m_filenames.empty()) return false;\n //Open the next file from the stack of files\n std::string filename = m_filenames.back();\n //std::cout << \"Opening \" << filename << std::endl;\n m_file.close();\n m_file.clear();\n m_file.open(filename.c_str(), std::ios::in | std::ios::binary);\n if (!m_file) {\n throw std::runtime_error(\"Error opening file \" + filename);\n }\n return true;\n }\n\n bool DataReader::readHeader()\n {\n //Read one header from file. If an error occured, try the next file\n while (true) {\n m_rawData.readHeader();\n //No error, so return true\n if (!m_file.fail()) return true;\n\n //We have an error, check if there is an additional file to open\n m_filenames.pop_back();\n if (!openFile()) return false;\n }\n }\n\n bool DataReader::skip(int nEvents)\n {\n m_event.clear();\n for (int i = 0; i < nEvents; ++i) {\n if (!next(true)) return false;\n }\n return true;\n }\n\n bool DataReader::next(bool skip)\n {\n if (!skip) {\n m_event.clear();\n ++m_eventNumber;\n //check if max number of events is reached\n if (m_nEvents > 0 && m_eventNumber > m_nEvents) return false;\n }\n\n while (readHeader()) {\n if (m_rawData.getDeviceType() == DEVICETYPE_INFO) {\n m_event.setRunNumber(m_rawData.getTriggerNr());\n continue;\n }\n if (m_rawData.getDeviceType() == DEVICETYPE_GROUP) {\n if (m_rawData.getEventType() == EVENTTYPE_DATA) {\n //If we are in skipping mode we don't read the data\n if (skip) {\n m_rawData.skipData();\n return true;\n }\n\n //Read event data\n m_event.setEventNumber(m_rawData.getTriggerNr());\n readEvent(m_rawData.getEventSize() - 2);\n return true;\n }\n }\n //Skip all other headers\n m_rawData.skipData();\n }\n return false;\n }\n\n void DataReader::readEvent(int dataSize)\n {\n size_t index(0);\n while (dataSize > 0) {\n if (!readHeader()) {\n throw std::runtime_error(\"Problem reading event from file\");\n }\n if (m_rawData.getEventType() != EVENTTYPE_DATA) {\n throw std::runtime_error(\"Expected data event, got something else\");\n }\n dataSize -= m_rawData.getEventSize();\n if (dataSize < 0) {\n throw std::runtime_error(\"Eventsize does not fit into remaining data size\");\n }\n\n //Read data\n m_rawData.readData();\n size_t alreadyUsed = 0;\n int frameNr = 0;\n while (alreadyUsed < m_rawData.getDataSize()) {\n m_event.resize(index + 1);\n ADCValues& adcvalues = 
m_event.at(index++);\n adcvalues.setModuleNr(m_rawData.getModuleNr());\n adcvalues.setTriggerNr(m_rawData.getTriggerNr());\n adcvalues.setStartGate(m_rawData.getStartGate());\n adcvalues.setFrameNr(frameNr++);\n alreadyUsed += convertData(m_rawData, adcvalues);\n m_rawData.setOffset(alreadyUsed);\n }\n }\n }\n\n\n size_t DataReader::convertData(RawData& rawdata, ADCValues& adcvalues)\n {\n switch (rawdata.getDeviceType()) {\n case DEVICETYPE_DEPFET_128: //S3B\n if (m_fold == 4) {\n S3BConverter4Fold convert;\n return convert(rawdata, adcvalues);\n } else {\n S3BConverter2Fold convert;\n return convert(rawdata, adcvalues);\n }\n break;\n case DEVICETYPE_DEPFET_DCD: //DCD\n if (m_fold == 4) {\n DCDConverter4Fold convert(m_useDCDBMapping);\n return convert(rawdata, adcvalues);\n } else {\n DCDConverter2Fold convert(m_useDCDBMapping);\n return convert(rawdata, adcvalues);\n }\n break;\n default: //S3A/\n S3AConverter convert;\n return convert(rawdata, adcvalues);\n }\n }\n}\n" } ]
24
alexarirok/Flask-Project
https://github.com/alexarirok/Flask-Project
15bc5695d572c9191f501f69a7abca62b8154698
45b4b3d7ac8177d838745be196dfc5b0869f9724
bc1db4e1fc15603d7238c080c5d4b43a1ea25b6a
refs/heads/master
2022-10-05T11:41:42.028058
2019-07-30T15:56:02
2019-07-30T15:56:02
198,634,442
0
0
null
2019-07-24T12:48:06
2019-07-30T16:01:23
2022-09-16T18:06:51
HTML
[ { "alpha_fraction": 0.7702702879905701, "alphanum_fraction": 0.7702702879905701, "avg_line_length": 30.85714340209961, "blob_id": "73ebdacea26ab72a6fb4ad3d18b784010b65b150", "content_id": "13a343c0c22eac5f8f25c967149d10031e971b64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 222, "license_type": "no_license", "max_line_length": 70, "num_lines": 7, "path": "/config.py", "repo_name": "alexarirok/Flask-Project", "src_encoding": "UTF-8", "text": "import os\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\nSQLALCHEMY_ECHO = False\nSQLALCHEMY_TRACK_MODIFICATIONS = True\nSECRET_KEY = 'secret_key'\nSQLALCHEMY_DATABASE_URI = \"postgresql://alex:postgres@localhost/store\"" }, { "alpha_fraction": 0.6523261666297913, "alphanum_fraction": 0.6718358993530273, "avg_line_length": 33.465518951416016, "blob_id": "17a9e84c377cb62009ba05b65be76df7f4eb4555", "content_id": "f224a9b385b8250bc40417f37b202920a4fbc12a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1999, "license_type": "no_license", "max_line_length": 91, "num_lines": 58, "path": "/migrations/versions/7a336e26d0b1_.py", "repo_name": "alexarirok/Flask-Project", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 7a336e26d0b1\nRevises: b47c79195e47\nCreate Date: 2019-07-30 11:15:04.320630\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '7a336e26d0b1'\ndown_revision = 'b47c79195e47'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('items',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('parcel_name', sa.String(length=100), nullable=True),\n sa.Column('parcel_number', sa.String(length=100), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('parcel_number')\n )\n op.create_table('users',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('email', sa.String(length=40), nullable=True),\n sa.Column('password', sa.String(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email')\n )\n op.drop_table('item')\n op.drop_table('user')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('user',\n sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),\n sa.Column('email', sa.VARCHAR(length=40), autoincrement=False, nullable=True),\n sa.Column('password', sa.VARCHAR(length=60), autoincrement=False, nullable=True),\n sa.PrimaryKeyConstraint('id', name='user_pkey'),\n sa.UniqueConstraint('email', name='user_email_key')\n )\n op.create_table('item',\n sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),\n sa.Column('parcel_name', sa.VARCHAR(length=100), autoincrement=False, nullable=True),\n sa.Column('parcel_number', sa.VARCHAR(length=100), autoincrement=False, nullable=True),\n sa.PrimaryKeyConstraint('id', name='item_pkey'),\n sa.UniqueConstraint('parcel_number', name='item_parcel_number_key')\n )\n op.drop_table('users')\n op.drop_table('items')\n # ### end Alembic commands ###\n" }, { "alpha_fraction": 0.7237936854362488, "alphanum_fraction": 0.7237936854362488, "avg_line_length": 25.086956024169922, "blob_id": "090b1dd000c3aac5a851f2f8d24245778a9f39f2", "content_id": "1b7b89854bb08abb89093737195573687dc4fa34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 601, "license_type": "no_license", "max_line_length": 84, "num_lines": 23, "path": "/app.py", "repo_name": "alexarirok/Flask-Project", "src_encoding": "UTF-8", "text": "\nfrom flask import Flask, render_template, Blueprint\nfrom flask_restful import Api\nfrom resources.Hello import Hello \nfrom flask_sqlalchemy import SQLAlchemy \nfrom flask_login import LoginManager \nfrom flask_bcrypt import Bcrypt\n\napi_bp = Blueprint('api', __name__)\napi = Api(api_bp)\n\napi.add_resource(Hello, '/Hello')\n\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = 'helloworld'\napp.config['SQLALCHEMY_DATABASE_URI'] = \"postgresql://alex:postgres@localhost/store\"\nbcrypt = Bcrypt(app)\n\ndb = SQLAlchemy(app)\nlogin_manager = LoginManager(app)\n\n# if __name__ == \"__main__\":\n# app.run(debug=True)\n" }, { "alpha_fraction": 0.609160304069519, "alphanum_fraction": 0.6778625845909119, "avg_line_length": 22.39285659790039, "blob_id": "6794820d949675f782d96e59f7c0449a32fb3ed7", "content_id": "e56e363fca7cf84bbffa2cf28c0c8ffd68cf5220", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 655, "license_type": "no_license", "max_line_length": 79, "num_lines": 28, "path": "/migrations/versions/90f79c1e9d89_.py", "repo_name": "alexarirok/Flask-Project", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 90f79c1e9d89\nRevises: 7a336e26d0b1\nCreate Date: 2019-07-30 14:26:00.074882\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '90f79c1e9d89'\ndown_revision = '7a336e26d0b1'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('items', sa.Column('parcel_id', sa.Integer(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_column('items', 'parcel_id')\n    # ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6953863501548767, "alphanum_fraction": 0.7042801380157471, "avg_line_length": 44.79487228393555, "blob_id": "209f6b7db487a30056386d98d673c66131e1932f", "content_id": "080ac3c0cc1de1566bc548c592e929e31876ef9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1799, "license_type": "no_license", "max_line_length": 108, "num_lines": 39, "path": "/resources/form.py", "repo_name": "alexarirok/Flask-Project", "src_encoding": "UTF-8", "text": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField, BooleanField, SubmitField, TextField\nfrom wtforms.validators import DataRequired, ValidationError, Length, Email, EqualTo\nfrom models import User, Order\n\nclass LoginForm(FlaskForm):\n    email = StringField('Email', validators=[DataRequired(), Email(), Length(min=4, max=40)])\n    password = PasswordField('Password', validators=[DataRequired(), Length(min=4)])\n    remember = BooleanField('Remember_Me')\n    submit = SubmitField('Login')\n\nclass SignupForm(FlaskForm):\n    email = StringField('Email', validators=[DataRequired(), Email(), Length(min=4, max=40)])\n    password = PasswordField('Password', validators=[DataRequired()])\n    confirm_password = PasswordField('Confirm Password', validators=[DataRequired(), EqualTo('password')])\n\n    submit = SubmitField('Sign Up')\n\n    def validate_email(self, email):\n        user = User.query.filter_by(email=email.data).first()\n        if user:\n            raise ValidationError('This email is already taken. Please try another one.')\n\nclass OrderForm(FlaskForm):\n    parcel_name = StringField('Parcel_Name', validators=[DataRequired(), Length(min=2, max=255)])\n    parcel_number = StringField('Parcel_Number', validators=[DataRequired(), Length(min=4)])\n    \n\n    submit = SubmitField('Place Order')\n\n    # def validate_parcel(self, order):\n    #     order = Order.query.filter_by(parcel=parcel.data).first()\n\nclass ContactForm(FlaskForm):\n    name = StringField('Name', validators=[DataRequired()])\n    email = StringField('Email', validators=[DataRequired(), Email(), Length(min=2, max=100)])\n    subject = TextField('Subject', validators=[DataRequired()])\n    message = TextField('Message', validators=[DataRequired()])\n    submit = SubmitField('Send')\n\n    \n    " }, { "alpha_fraction": 0.6732558012008667, "alphanum_fraction": 0.6732558012008667, "avg_line_length": 39, "blob_id": "939d4c97c4a3fd0f5a5b2296d7107e0534db506", "content_id": "d64ca55525692af7a97ceca60c69857577ffeeac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1720, "license_type": "no_license", "max_line_length": 89, "num_lines": 43, "path": "/resources/auth.py", "repo_name": "alexarirok/Flask-Project", "src_encoding": "UTF-8", "text": "from flask import Blueprint, render_template, redirect, request, flash, url_for\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom .form import LoginForm, SignupForm, OrderForm\nfrom models import User, Order\nfrom app import db, bcrypt \nfrom flask_login import current_user, logout_user, login_user\n\nauth = Blueprint('auth', __name__)\n\[email protected]('/login', methods=['POST', 'GET'])\ndef login():\n    if current_user.is_authenticated:\n        return redirect(url_for('main.index'))\n    form = LoginForm()\n    if form.validate_on_submit():\n        user = User.query.filter_by(email=form.email.data).first() \n        if user and bcrypt.check_password_hash(
user.password, form.password.data):\n            login_user(user, remember=form.remember.data)\n            return redirect(url_for('main.index'))\n        else:\n            flash('Login unsuccessful. Please check your email and password', 'danger')\n    return render_template(\"login.html\", title=\"login\", form=form)\n    \[email protected]('/signup', methods=['POST', 'GET'])\ndef signup():\n    if current_user.is_authenticated:\n        return redirect(url_for('main.index'))\n\n    form = SignupForm(request.form)\n    if form.validate_on_submit():\n        hashed_password = bcrypt.generate_password_hash(form.password.data)\n        user = User(email=form.email.data, password=hashed_password)\n        db.session.add(user)\n        db.session.commit()\n        flash(f\"Thanks for signing up {form.email.data}\", \"success\")\n        print(\"Successfully\")\n        return redirect(url_for('auth.login'))\n    return render_template('signup.html', form=form)\n\[email protected]('/logout')\ndef logout():\n    logout_user()\n    return redirect(url_for(\"auth.login\"))\n" }, { "alpha_fraction": 0.7292817831039429, "alphanum_fraction": 0.7292817831039429, "avg_line_length": 25, "blob_id": "ef3fccff236fefb72ee4971151825563602e7cc0", "content_id": "bc8bc1b12206ec77ba470bc319db2ea42d0ccde5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 181, "license_type": "no_license", "max_line_length": 43, "num_lines": 7, "path": "/db.py", "repo_name": "alexarirok/Flask-Project", "src_encoding": "UTF-8", "text": "# from flask_marshmallow import Marshmallow\n# from flask_sqlalchemy import SQLAlchemy\n# from flask import Flask \n\n# app = Flask(__name__)\n# ma = Marshmallow()\n# db = SQLAlchemy(app)" }, { "alpha_fraction": 0.5077186822891235, "alphanum_fraction": 0.7049742937088013, "avg_line_length": 17.66666603088379, "blob_id": "1a874a79129d76c644ac0adab76dc1c9275a1f48", "content_id": "87ab162af61a1e4fa089ce89b260504ab3405f9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 583, "license_type": "no_license", "max_line_length": 30, "num_lines": 33, "path": "/requirements.txt", "repo_name": "alexarirok/Flask-Project", "src_encoding": "UTF-8", "text": "alembic==1.0.11\naniso8601==7.0.0\nbcrypt==3.1.7\nblinker==1.4\ncffi==1.12.3\nClick==7.0\nFlask==0.12.2\nFlask-Bcrypt==0.7.1\nFlask-Login==0.4.1\nFlask-Mail==0.9.1\nflask-marshmallow==0.8.0\nFlask-Migrate==2.1.1\nFlask-RESTful==0.3.6\nFlask-Script==2.0.6\nFlask-SQLAlchemy==2.1\nFlask-WTF==0.14.2\ngreenlet==0.4.15\nitsdangerous==1.1.0\nJinja2==2.10.1\nMako==1.0.14\nMarkupSafe==1.1.1\nmarshmallow==2.14.0\nmarshmallow-sqlalchemy==0.13.2\nmeinheld==1.0.1\npsycopg2==2.7.5\npycparser==2.19\npython-dateutil==2.8.0\npython-editor==1.0.4\npytz==2019.1\nsix==1.12.0\nSQLAlchemy==1.3.6\nWerkzeug==0.15.5\nWTForms==2.2.1\n" }, { "alpha_fraction": 0.6351039409637451, "alphanum_fraction": 0.672825276851654, "avg_line_length": 28.522727966308594, "blob_id": "6d21da5bb5f04a683eada7b9d5ff6d44be0a1e3f", "content_id": "2dbf05d55e5df4945ff8530affed7880c0ee9227", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1299, "license_type": "no_license", "max_line_length": 76, "num_lines": 44, "path": "/migrations/versions/8c2ec4d38e0a_.py", "repo_name": "alexarirok/Flask-Project", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 8c2ec4d38e0a\nRevises: \nCreate Date: 2019-07-29 12:12:03.568584\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 
'8c2ec4d38e0a'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('parcel',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('parcel_name', sa.String(length=100), nullable=True),\n    sa.Column('parcel_number', sa.String(length=100), nullable=True),\n    sa.Column('pickup_destination', sa.String(length=100), nullable=True),\n    sa.Column('delivery_destination', sa.String(length=100), nullable=True),\n    sa.PrimaryKeyConstraint('id'),\n    sa.UniqueConstraint('parcel_number')\n    )\n    op.create_table('user',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('email', sa.String(length=40), nullable=True),\n    sa.Column('password', sa.String(length=100), nullable=True),\n    sa.PrimaryKeyConstraint('id'),\n    sa.UniqueConstraint('email')\n    )\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_table('user')\n    op.drop_table('parcel')\n    # ### end Alembic commands ###\n" }, { "alpha_fraction": 0.3709981143474579, "alphanum_fraction": 0.37664783000946045, "avg_line_length": 22.130434036254883, "blob_id": "e96c72c644660ffc3bc6cc545f1062a399724964", "content_id": "882d9d85b7eb920c227846c246fb4fee6640fef9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 531, "license_type": "no_license", "max_line_length": 43, "num_lines": 23, "path": "/templates/order_items.html", "repo_name": "alexarirok/Flask-Project", "src_encoding": "UTF-8", "text": "{% extends 'base.html' %}\n{% block content %}\n<div class=\"row\">\n    <div class=\"mx-3 col-md-6\">\n        <div class=\"content-section\">\n            <table border=\"1px\">\n            <tr>\n                <th>Parcel Name</th>\n                <th>Parcel Number</th>\n            </tr>\n            {% for o in order %}\n            <tr>\n                <td>{{ o.parcel_name }}</td>\n                <td>{{ o.parcel_number }}</td>\n            </tr>\n            {% endfor %}\n            </table> \n            \n            \n        </div>\n    </div>\n</div> \n{% endblock %}" }, { "alpha_fraction": 0.42686566710472107, "alphanum_fraction": 0.42686566710472107, "avg_line_length": 21.299999237060547, "blob_id": "df3c62190e816cfd00635c7229e175f885933762", "content_id": "0754561b7f334a68245af4187ded8a69b78eb5a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 670, "license_type": "no_license", "max_line_length": 79, "num_lines": 30, "path": "/notes/notes.py", "repo_name": "alexarirok/Flask-Project", "src_encoding": "UTF-8", "text": " {{ form.hidden_tag() }}\n    <fieldset>\n        <legend>Sign Up</legend>\n        <div>\n            {{ form.email.label(class=\"form-control-label\") }}\n\n            {% if form.email.errors %}\n            {{ form.email(class=\"form-control form-control-lg is-invalid\") }}\n\n            <div class=\"is-invalid-feedback\">\n\n                {% for error in form.email.errors %}\n                <span> {{ error }}</span>\n                {% endfor %}\n\n            </div>\n            {% else %}\n            {{ form.email(class=\"form-control form-control-lg\") }}\n\n            {% endif %}\n\n        </div>\n        \n\n\n    </fieldset>\n    </form>\n    </div>\n\n{% endblock %}" }, { "alpha_fraction": 0.6495395302772522, "alphanum_fraction": 0.6495395302772522, "avg_line_length": 31.746030807495117, "blob_id": "2c7e0cbc340bc8da1a2b380378a707971e1e41eb", "content_id": "fe627c16484ec937bac9aa5c8b976a8f06a629ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2063, "license_type": "no_license", "max_line_length": 100, "num_lines": 63, "path": "/resources/main.py", "repo_name": "alexarirok/Flask-Project", "src_encoding": "UTF-8", "text": "from flask import Blueprint, render_template, redirect, request, flash, url_for\nfrom .form import OrderForm, ContactForm\nfrom models import Order\nfrom flask_mail import Message, Mail\nfrom app import db\n\nmain = Blueprint('main', __name__)\n\[email protected]('/')\ndef index():\n    return render_template(\"index.html\")\n\n\[email protected]('/user_change')\ndef user_change():\n    return render_template(\"user_change.html\")\n\[email protected]('/cancelorder')\ndef cancelorder():\n    return render_template(\"cancelorder.html\")\n\[email protected]('/contactform', methods=['POST', 'GET'])\ndef contactform():\n    form = ContactForm(request.form)\n\n    if request.method == 'POST':\n        if form.validate() == False:\n            flash(f\"All fields are required.\")\n            return render_template('contactform.html', form=form)\n        else:\n            msg = Message(subject=form.subject.data, sender=form.email.data, recipients=[\"[email protected]\"])\n            msg.body = \"Thanks, your message has been received. We will get back to you shortly\"\n            # (form.name.data, form.email.data, form.message.data)\n            # mail.send(msg)\n            \n\n            return redirect(url_for(\"main.index\"))\n    elif request.method == 'GET':\n        return render_template(\"contactform.html\", form=form)\n\[email protected]('/order', methods=['POST', 'GET'])\ndef order():\n    form = OrderForm(request.form)\n    if request.method == 'POST':\n        parcel_name = request.form.get('parcel_name')\n        parcel_number = request.form.get('parcel_number')\n        order = Order(parcel_name=form.parcel_name.data, parcel_number=form.parcel_number.data)\n        db.session.add(order)\n        db.session.commit()\n        flash(f\"Parcel ordered successfully\")\n        return redirect(url_for('main.order'))\n    return render_template('order.html', form=form)\n\n\[email protected]('/orders', methods=['GET'])\ndef orders():\n    if request.method == 'GET':\n        order = Order.query.all()\n        return render_template('order_items.html', order=order)\n\[email protected]('/items')\ndef status():\n    return render_template(\"status.html\")\n" }, { "alpha_fraction": 0.6027554273605347, "alphanum_fraction": 0.6142365336418152, "avg_line_length": 23.885713577270508, "blob_id": "2c7244d9ac98992f49372daadef34e5a50b6e6b4", "content_id": "d11b69216300a22c27b739e72c3354a4a9130674", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 871, "license_type": "no_license", "max_line_length": 60, "num_lines": 35, "path": "/models.py", "repo_name": "alexarirok/Flask-Project", "src_encoding": "UTF-8", "text": "from app import db, login_manager \nfrom flask_login import UserMixin \n\n@login_manager.user_loader\ndef load_user(user_id):\n    return User.query.get((user_id))\n\nclass User(db.Model, UserMixin):\n    __tablename__ = \"users\"\n    id = db.Column(db.Integer, primary_key = True)\n    email = db.Column(db.String(40), unique = True)\n    password = db.Column(db.String(60))\n\n\n    def __repr__(self):\n        return f\"{self.email}\"\n\n    def self_to_db(self):\n        db.session.add(self)\n        db.session.commit()\n\nclass Order(db.Model):\n    __tablename__ = \"items\"\n    id = db.Column(db.Integer, primary_key = True)\n    parcel_name = db.Column(db.String(100))\n    parcel_number = db.Column(db.String(100), unique = True)\n    \n\n    \n    def __repr__(self):\n        return f\"{self.parcel_name}\"\n\n    def self_to_db(self):\n        db.session.add(self)\n        db.session.commit()\n" }, { "alpha_fraction": 0.6127907037734985, "alphanum_fraction": 0.6779069900512695, "avg_line_length": 24.294116973876953, "blob_id": "a104afb300d6bc6f0e30b0f11440613ee27df42b", "content_id": "0e4669a1bf17d44a129d677ed45225e38f02c018", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 860, "license_type": "no_license", "max_line_length": 69, "num_lines": 34, "path": "/migrations/versions/b47c79195e47_.py", "repo_name": "alexarirok/Flask-Project", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: b47c79195e47\nRevises: 8c2ec4d38e0a\nCreate Date: 2019-07-29 17:46:14.435052\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'b47c79195e47'\ndown_revision = '8c2ec4d38e0a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('item',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('parcel_name', sa.String(length=100), nullable=True),\n    sa.Column('parcel_number', sa.String(length=100), nullable=True),\n    sa.PrimaryKeyConstraint('id'),\n    sa.UniqueConstraint('parcel_number')\n    )\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_table('item')\n    # ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6643247604370117, "alphanum_fraction": 0.6697892546653748, "avg_line_length": 25.163265228271484, "blob_id": "0ac46758279214c81ea144d465b52f239e515c63", "content_id": "238ab775fb68b1998788bb30118ca6ba274ef32b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1281, "license_type": "no_license", "max_line_length": 53, "num_lines": 49, "path": "/run.py", "repo_name": "alexarirok/Flask-Project", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom app import db, bcrypt\nfrom flask_login import LoginManager \nfrom models import User\nfrom meinheld import server\nfrom flask_mail import Message, Mail\n\nmail = Mail()\n\ndef create_app(config_filename):\n    app = Flask(__name__)\n    #db = SQLAlchemy(app)\n    db.init_app(app)\n    app.config.from_object(config_filename)\n    bcrypt.init_app(app)\n\n    app.config[\"MAIL_SERVER\"] = \"smtp.gmail.com\"\n    app.config[\"MAIL_PORT\"] = 465\n    app.config[\"MAIL_USE_SSL\"] = True\n    app.config[\"MAIL_USERNAME\"] = \"[email protected]\"\n    app.config[\"MAIL_PASSWORD\"] = \"Alex1920$$\"\n\n    mail.init_app(app)\n\n    from app import api_bp\n    app.register_blueprint(api_bp, url_prefix='/api')\n\n    from resources.main import main as main_blueprint\n    app.register_blueprint(main_blueprint)\n\n    from resources.auth import auth as auth_blueprint\n    app.register_blueprint(auth_blueprint)\n    \n    login_manager = LoginManager(app)\n    login_manager.login_view = 'auth.login'\n    login_manager.init_app(app)\n\n    @login_manager.user_loader\n    def load_user(user_id):\n        return User.query.get((user_id))\n    #from models import db\n    #db.init_(app)\n    \n\n    return app\n\nif __name__ == \"__main__\":\n    app = create_app(\"config\")\n    app.run(debug=True, port=5001, threaded=True)" } ]
15
damian-villarreal/CoinsApi
https://github.com/damian-villarreal/CoinsApi
66b8c2e5a7e10dacb13d3e3a4ee340b6a3dcf1f8
11c134c8a1f976958a2068791dfb5ef147b37544
1ab41f519900bba270f9dd6d293c620422b6ae10
refs/heads/main
2023-08-14T23:56:40.114839
2021-10-07T19:56:42
2021-10-07T19:56:42
414,715,653
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7899408340454102, "alphanum_fraction": 0.7899408340454102, "avg_line_length": 20.1875, "blob_id": "3e83942fadf19dacc6aeab49efee4c38f0015d7e", "content_id": "42cd0fa42f578408cc8de8f3cbf116e488a61983", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 338, "license_type": "no_license", "max_line_length": 41, "num_lines": 16, "path": "/app/__init__.py", "repo_name": "damian-villarreal/CoinsApi", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom config import Config\nfrom flask_mongoengine import MongoEngine\nfrom flask_login import LoginManager\nfrom flask_restful import Api\nfrom flask_cors import CORS\n\n\napp = Flask(__name__)\napp.config.from_object(Config)\napi = Api(app)\nlogin = LoginManager(app)\ndb = MongoEngine(app)\nCORS(app)\n\nfrom app import routes" }, { "alpha_fraction": 0.7330623269081116, "alphanum_fraction": 0.7330623269081116, "avg_line_length": 28.399999618530273, "blob_id": "57ffca3c4c920ebca5f9159d96d0dbdb4af01cc4", "content_id": "5cab3b9fd10267ed9a0c5b4e2b53962b3731fc49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 738, "license_type": "no_license", "max_line_length": 58, "num_lines": 25, "path": "/app/routes.py", "repo_name": "damian-villarreal/CoinsApi", "src_encoding": "UTF-8", "text": "from werkzeug.utils import redirect\nfrom app import api\nfrom .resources import *\n\n\napi.add_resource(Home, '/')\napi.add_resource(Redirect, '/redirect')\n\napi.add_resource(CoinsApi, '/api/coins')\napi.add_resource(CoinApi, '/api/coin/<id>','/api/coin')\n\napi.add_resource(UsersApi, '/api/users')\napi.add_resource(UserApi, '/api/user/<id>')\n\napi.add_resource(TransactionsApi, '/api/transactions')\napi.add_resource(TransactionApi, '/api/transactions/<id>')\n\napi.add_resource(AccountApi, '/api/account/<id>')\napi.add_resource(AccountsApi, '/api/accounts')\n\napi.add_resource(SignupApi, '/api/signup')\napi.add_resource(LoginApi, '/api/login')\napi.add_resource(LogoutApi, '/api/logout')\n\n# api.add_resource(MyTransactions, '/api/mytransactions')\n\n\n\n" }, { "alpha_fraction": 0.7195213437080383, "alphanum_fraction": 0.7225130796432495, "avg_line_length": 36.16666793823242, "blob_id": "e753751ad993e96ac65539db3e5c352ddc94d250", "content_id": "94c996a41a7fd981b574a804ad7a0cf38ed4a79b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1337, "license_type": "no_license", "max_line_length": 82, "num_lines": 36, "path": "/app/models.py", "repo_name": "damian-villarreal/CoinsApi", "src_encoding": "UTF-8", "text": "from app import db\nimport datetime\nfrom flask_bcrypt import generate_password_hash, check_password_hash\nfrom flask_login import UserMixin\nfrom app import login\n\nclass User(UserMixin, db.Document):\n email = db.EmailField(required=True, unique=True)\n password = db.StringField(required=True, min_length=6) \n role = db.StringField(required=True, default='user')\n accounts = db.ListField(db.ReferenceField('Account'))\n\n def hash_password(self):\n self.password = generate_password_hash(self.password).decode('utf8')\n\n def check_password(self, password):\n return check_password_hash(self.password, password)\n\[email protected]_loader\ndef load_user(id):\n return User.objects.get(id = id)\n\nclass Account(db.Document):\n user = db.ReferenceField('User', required = True)\n coin = db.ReferenceField('Coin', required = True)\n balance = db.FloatField(required = True, 
min=0)\n\nclass Transaction(db.Document):\n    fromAccount = db.ReferenceField('Account', required = True)\n    toAccount = db.ReferenceField('Account', required = True)\n    amount = db.FloatField(required = True, min=0)\n    creation = db.DateTimeField(default = datetime.datetime.utcnow, required=True)\n\nclass Coin(db.Document):\n    name = db.StringField(required=True, unique=True ) \n    currency = db.StringField(required=True, unique=True)" }, { "alpha_fraction": 0.566065788269043, "alphanum_fraction": 0.5686675310134888, "avg_line_length": 31.61212158203125, "blob_id": "1414005d0bfe7799c3c58a599476c1acedc732a3", "content_id": "32db9482a3bad0455dade0590c8a478fa68d96a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5383, "license_type": "no_license", "max_line_length": 111, "num_lines": 165, "path": "/app/resources.py", "repo_name": "damian-villarreal/CoinsApi", "src_encoding": "UTF-8", "text": "\nfrom flask_login.utils import login_required\nfrom flask_restful import Resource\nfrom flask import Response, request, jsonify, redirect, url_for\nfrom flask_login import current_user, login_user, logout_user\nfrom werkzeug.utils import redirect\nfrom .models import Coin, Account, User, Transaction\n\nclass Home(Resource):\n    def get(self):\n        if current_user.is_authenticated:\n            return 'bienvenido ' + current_user.email\n        else: \n            return 'no logueado'\n\nclass Redirect(Resource):\n    def get(self):\n        return redirect('/')\n\n\n#--Auth resources--\n\nclass SignupApi(Resource):\n    def post(self): \n        body = request.get_json()\n        user = User(**body)\n        if User.objects(email=user.email):\n            return 'el email ya se encuentra en uso'\n        else:\n            user.hash_password()\n            #for test purposes, creating the user also creates a peso argentino account with an initial balance\n            user.save() \n            account = Account()\n            coin = Coin.objects.get(name=\"peso argentino\")\n            account.user = user\n            account.coin = coin \n            account.balance = 1000\n            account.save()\n            user.accounts.append(account)\n            user.save() \n            userId = user.id\n            return {'userId': str(userId)}, 200\n\nclass LoginApi(Resource):\n    def post(self):\n        body = request.get_json() \n        user = User(**body)\n        if not user == None: \n            try: \n                result = User.objects.get(email = user.email)\n            except:\n                return 'usuario y/o contraseña incorrectos'\n            if not result.check_password(user.password):\n                return 'usuario y/o contraseña incorrectos' \n            else: \n                login_user(result) \n                return {'userId': str(result.id)}, 200\n\nclass LogoutApi(Resource):\n    def get(self): \n        logout_user()\n        return 'logged out' \n\n#---coin resources---\nclass CoinApi(Resource):\n    #get a coin\n    def get(self, id):\n        coin = Coin.objects(id=id)\n        return jsonify(coin)\n\n    #create a coin\n    @login_required\n    def post(self):\n        body = request.get_json()\n        coin = Coin(**body) \n        if Coin.objects(name = coin.name):\n            return 'el nombre de la moneda ya se encuentra en uso'\n        elif Coin.objects(currency = coin.currency):\n            return 'la sigla ya se encuentra en uso'\n        else:\n            coin.save()\n            id = coin.id\n            return {'id': str(id)}, 200\n    \nclass CoinsApi(Resource):\n    #get all coins\n    def get(self): \n        coins = Coin.objects()\n        return jsonify(coins)\n\n#--User Resources--\n\nclass UsersApi(Resource):\n    def get(self):\n        users = User.objects()\n        return jsonify(users)\n\nclass UserApi(Resource):\n    def get(self, id):\n        user = User.objects(id=id) \n        return jsonify(user)\n\n#--Account Resources--\n\nclass AccountApi(Resource):\n    def get(self, id):\n        account = Account.objects(id=id)\n        return 
jsonify(account)\n\nclass AccountsApi(Resource):\n    def get(self):\n        accounts = Account.objects()\n        return jsonify(accounts)\n    \n\n#--Transaction Resources--\n\nclass TransactionApi(Resource):\n    def get(self, id):\n        transaction = Transaction.objects(id=id)\n        return jsonify(transaction)\n\nclass TransactionsApi(Resource):\n    #get all transactions\n    def get(self):\n        transactions = Transaction.objects()\n        return jsonify(transactions)\n    \n    #create transaction \n    def post(self):\n        body = request.get_json()\n        transaction = Transaction() \n        fromAccount = Account.objects.get(id = body['fromAccount'])\n\n        #the user must be logged in and cannot make transfers from an account other than their own\n        if not fromAccount.user == current_user:\n            return 'operacion no permitida' \n\n        #the user cannot transfer to a nonexistent account or to one in another currency\n        try:\n            toAccount = Account.objects.get(id = body['toAccount'], coin = fromAccount.coin)\n        except:\n            return \"la cuenta de destino es incorrecta o inexistente\"\n        #the user cannot transfer to themselves\n        if fromAccount == toAccount:\n            return 'la cuenta de destino es incorrecta o inexistente'\n        \n        amount = body['amount']\n        \n        if amount <= 0:\n            return 'El monto a transferir no puede ser cero'\n        \n        #the user cannot transfer more than the balance in their account.\n        if amount > fromAccount.balance:\n            return 'el importe ingresado supera el saldo de la cuenta' \n        else:\n            transaction.fromAccount = fromAccount\n            transaction.toAccount = toAccount\n            transaction.amount = amount\n            toAccount.balance += amount\n            toAccount.save() \n            fromAccount.balance -= amount\n            fromAccount.save() \n            transaction.save()\n            transactionId = transaction.id\n            return {'transactionId': str(transactionId)}, 200" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 29.33333396911621, "blob_id": "9f57f5dfc96c17872d543fc4c5e8cbf667b6ab08", "content_id": "42589d46dad8cff2b08ae7b6c485d993a26e3bd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 183, "license_type": "no_license", "max_line_length": 96, "num_lines": 6, "path": "/config.py", "repo_name": "damian-villarreal/CoinsApi", "src_encoding": "UTF-8", "text": "import os\n\nclass Config(object):\n    SECRET_KEY = 'MbXCJXAW9PHIMJnYc87E9yT_T-Bxbd6zDpuKWg'\n    MONGODB_SETTINGS = {'host': 'mongodb+srv://admin:[email protected]/Coins?retryWrites=true&w=majority'}\n    JSON_SORT_KEYS = False\n\n" } ]
5
WangKuangYu/midterm
https://github.com/WangKuangYu/midterm
7137c0d208487cd965b956b186616fb7238ed74c
efe8fd44dc0e9ebb93b9df47483d1618c93a9296
8fb6192514291a69e63b2b99e33eeab9c5af0560
refs/heads/master
2023-03-27T00:25:26.706519
2020-05-31T03:53:37
2020-05-31T03:53:37
268,198,562
0
0
null
2020-05-31T02:57:09
2020-05-31T03:54:29
2021-03-20T04:08:32
Python
[ { "alpha_fraction": 0.6499999761581421, "alphanum_fraction": 0.6545454263687134, "avg_line_length": 34.18000030517578, "blob_id": "629ff45a04d60858d5bcd32daedb62c6d2b644a0", "content_id": "339f0a4f588ee14e9b5447d9b0fb7a99e7997fab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1760, "license_type": "no_license", "max_line_length": 210, "num_lines": 50, "path": "/main.py", "repo_name": "WangKuangYu/midterm", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request, redirect, url_for\nfrom flask_pymongo import PyMongo\nfrom datetime import datetime\nimport random\nimport string\n\napp=Flask(__name__)\napp.config['MONGO_URI']='mongodb://localhost:27017/flasktest'\nmongo = PyMongo(app)\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\[email protected]('/img-detection')\ndef img_detection():\n return render_template('img-detection.html')\n\[email protected]('/testimg', methods=['POST'])\ndef testimg():\n if 'profile' in request.files:\n print('yes')\n profile = request.files['profile']\n profile.filename=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(12))+'.jpg'\n mongo.save_file(profile.filename, profile)\n mongo.db.users.insert({'username' : request.form.get('username') , 'image_name': profile.filename , 'result' : request.form.get('result'), 'upload_time' : datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")})\n else:\n print('no')\n return redirect(url_for('img_detection'))\n \[email protected]('/mongodata')\ndef mongodata():\n data_list = mongo.db.users.find().sort('upload_time', -1)\n return render_template('mongodata.html', data_list = data_list)\n\[email protected]('/file/<filename>')\ndef file(filename):\n return mongo.send_file(filename)\n\[email protected]('/deletdata', methods=['POST'])\ndef deletdata():\n if 'delete' in request.form:\n mongo.db.users.delete_one({'image_name' : request.form['delete']})\n result = mongo.db.fs.files.find_one({'filename' : request.form['delete']})\n mongo.db.fs.files.remove({'_id': result['_id']})\n mongo.db.fs.chunks.remove({'files_id': result['_id']})\n return redirect(url_for('mongodata'))\n else:\n return \"no\"\n\n" } ]
1
SpiritHunt3r/API_Proyecto1
https://github.com/SpiritHunt3r/API_Proyecto1
3216aa92d0040a3db2d61d9e447919bcbb2ebc41
d01e2d43d03cfc73fa9428c4a17541f04fab4c78
026791fd9e87ad042f59be52e4efe145dd6bed64
refs/heads/master
2020-03-13T14:44:08.268767
2018-04-26T14:04:40
2018-04-26T14:04:40
131,164,414
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6622951030731201, "alphanum_fraction": 0.6819671988487244, "avg_line_length": 29.5, "blob_id": "38234a1aaec23f8038ad11ddcefc67d502f68a77", "content_id": "518c3fa2fc8af147ca4536c2e69f439a360f8cf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 305, "license_type": "no_license", "max_line_length": 64, "num_lines": 10, "path": "/busempresa/models.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass BusEmpresa(models.Model):\n nombre = models.CharField(default = \"\", max_length=255)\n descripcion = models.CharField(default = \"\", max_length=255)\n choferes = models.ManyToManyField ('busdriver.BusDriver')\n \n def __str__(self):\n return self.nombre\n" }, { "alpha_fraction": 0.5226585865020752, "alphanum_fraction": 0.5800604224205017, "avg_line_length": 18.47058868408203, "blob_id": "9a057ce8a6114023ec3045945c8e72d51aa53516", "content_id": "707a96668e79f2d8cb3d1be0199708b76a3ca0e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 331, "license_type": "no_license", "max_line_length": 47, "num_lines": 17, "path": "/bususer/migrations/0003_remove_bususer_is_driver.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.4 on 2018-04-18 19:54\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bususer', '0002_bususer_is_driver'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='bususer',\n name='is_driver',\n ),\n ]\n" }, { "alpha_fraction": 0.5144230723381042, "alphanum_fraction": 0.5961538553237915, "avg_line_length": 22.11111068725586, "blob_id": "d59676df72e8b29b4faa63bb2f282db62e9b7c56", "content_id": "a272ae51bb04f8efb3fa1dd0f9f85f82fa5dadb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 416, "license_type": "no_license", "max_line_length": 76, "num_lines": 18, "path": "/busdriver/migrations/0004_auto_20180419_1602.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.3 on 2018-04-19 22:02\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('busdriver', '0003_auto_20180419_1521'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='busdriver',\n name='placa',\n field=models.CharField(default='', max_length=255, unique=True),\n ),\n ]\n" }, { "alpha_fraction": 0.8315789699554443, "alphanum_fraction": 0.8315789699554443, "avg_line_length": 22.75, "blob_id": "c9ed5faa9831282f640badc9f4df5164a53bdac8", "content_id": "236a726d110334c12546d34a9090f12018a80f2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/busparada/admin.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import BusParada\n\nadmin.site.register(BusParada)\n" }, { "alpha_fraction": 0.546875, "alphanum_fraction": 0.5982142686843872, "avg_line_length": 22.578947067260742, "blob_id": "bb9f8c817b93d998dd508f3fc41f2886e120b07d", "content_id": "e1de69f620da9cece92e1f5785bbb6a1ec77e890", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 448, "license_type": 
"no_license", "max_line_length": 67, "num_lines": 19, "path": "/busempresa/migrations/0002_busempresa_choferes.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.3 on 2018-04-19 21:14\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('busdriver', '0002_busdriver_empresa'),\n ('busempresa', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='busempresa',\n name='choferes',\n field=models.ManyToManyField(to='busdriver.BusDriver'),\n ),\n ]\n" }, { "alpha_fraction": 0.5098039507865906, "alphanum_fraction": 0.5721924901008606, "avg_line_length": 23.39130401611328, "blob_id": "a33f5e34fa1d42da0e0537f1bc549b61d913941e", "content_id": "5929bd956dae44b4e68361bf9500d4eb626c4958", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 561, "license_type": "no_license", "max_line_length": 67, "num_lines": 23, "path": "/busruta/migrations/0003_auto_20180419_1521.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.3 on 2018-04-19 21:21\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('busparada', '0005_remove_busparada_ruta'),\n ('busruta', '0002_auto_20180419_0228'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='busruta',\n name='empresa',\n ),\n migrations.AddField(\n model_name='busruta',\n name='paradas',\n field=models.ManyToManyField(to='busparada.BusParada'),\n ),\n ]\n" }, { "alpha_fraction": 0.6881533265113831, "alphanum_fraction": 0.703832745552063, "avg_line_length": 34.875, "blob_id": "4fb8aac956559228c22664f11fcbb8bded049a8d", "content_id": "9d8174e1080a77f8cb5fc6dac093536e4f743c0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 574, "license_type": "no_license", "max_line_length": 69, "num_lines": 16, "path": "/busruta/models.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass BusRuta(models.Model):\n nombre = models.CharField(default = \"\", max_length=255)\n latitud_final = models.CharField(default = \"\", max_length=255)\n longitud_final = models.CharField(default = \"\", max_length=255)\n costo = models.FloatField(null=True, blank=True, default=None)\n latitud = models.FloatField(null=True, blank=True, default=None)\n longitud = models.FloatField(null=True, blank=True, default=None)\n paradas = models.ManyToManyField('busparada.BusParada')\n\n\n\n def __str__(self):\n return self.nombre\n" }, { "alpha_fraction": 0.7559523582458496, "alphanum_fraction": 0.7559523582458496, "avg_line_length": 29.545454025268555, "blob_id": "3039649c8f7b1b05f396f53527736d980de15875", "content_id": "3bab4973f353eefbebd92eeda813d2aadef87f24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 336, "license_type": "no_license", "max_line_length": 58, "num_lines": 11, "path": "/busdriver/views.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from busdriver.models import BusDriver\nfrom rest_framework import viewsets\nfrom busdriver.serializers import UserSerializer\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = BusDriver.objects.all().order_by('-id')\n serializer_class = 
UserSerializer\n" }, { "alpha_fraction": 0.7528089880943298, "alphanum_fraction": 0.7528089880943298, "avg_line_length": 16.799999237060547, "blob_id": "0c124201acdc15e462786cf67c9d375350257612", "content_id": "5b93a31ac3a6eb4110cf9b2c739be60c01fceb96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 89, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/busruta/apps.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass BusrutaConfig(AppConfig):\n name = 'busruta'\n" }, { "alpha_fraction": 0.8350515365600586, "alphanum_fraction": 0.8350515365600586, "avg_line_length": 23.25, "blob_id": "b00c2924eeafe675b00338b61941ff8b72635c30", "content_id": "0d2d68d42690c8cf6f8dda245f4a689c5ff48ed2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 97, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/busempresa/admin.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import BusEmpresa\n\nadmin.site.register(BusEmpresa)\n" }, { "alpha_fraction": 0.4854932427406311, "alphanum_fraction": 0.5454545617103577, "avg_line_length": 21.478260040283203, "blob_id": "5d56347a55d50bee613466ef09db700702a34f1b", "content_id": "7e7bf3da8eccb250f3490e7f1bc49ad03de591b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 517, "license_type": "no_license", "max_line_length": 47, "num_lines": 23, "path": "/busruta/migrations/0004_auto_20180419_1526.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.3 on 2018-04-19 21:26\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('busruta', '0003_auto_20180419_1521'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='busruta',\n old_name='final',\n new_name='latitud_final',\n ),\n migrations.RenameField(\n model_name='busruta',\n old_name='inicio',\n new_name='longitud_final',\n ),\n ]\n" }, { "alpha_fraction": 0.6969696879386902, "alphanum_fraction": 0.7062937021255493, "avg_line_length": 32, "blob_id": "d0b22da0d644e44043394af47e435e52f6604b28", "content_id": "5086b4374f717f3a75ba03ac9d10dd8fc6f5bbf5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 429, "license_type": "no_license", "max_line_length": 73, "num_lines": 13, "path": "/busdriver/models.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from django.db import models\n\nclass BusDriver(models.Model):\n placa = models.CharField(default = \"\", max_length=255, unique = True)\n rating = models.SmallIntegerField(default=0)\n latitud = models.FloatField(null=True, blank=True, default=None)\n longitud = models.FloatField(null=True, blank=True, default=None)\n rutas = models.ManyToManyField('busruta.BusRuta')\n\n\n\n def __str__(self):\n return self.placa\n" }, { "alpha_fraction": 0.7494646906852722, "alphanum_fraction": 0.7494646906852722, "avg_line_length": 37.91666793823242, "blob_id": "93677ee2b4e3713ae2df0d0dffdfbf980b5f8c4e", "content_id": "9e4ac12fd8f066d3095439d377b9a292f0cf6ff1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 467, "license_type": "no_license", "max_line_length": 
81, "num_lines": 12, "path": "/busdriver/resources.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from tastypie.resources import ModelResource\nfrom busdriver.models import BusDriver\nfrom tastypie.authorization import Authorization\nfrom tastypie import fields\n\nclass BusDriverResource(ModelResource):\n rutas = fields.ManyToManyField(\"busruta.resources.BusRutaResource\", 'rutas', \n null=True, full=True, related_name='ruta')\n class Meta:\n queryset = BusDriver.objects.all()\n resource_name = 'driver'\n authorization = Authorization()\n" }, { "alpha_fraction": 0.7533783912658691, "alphanum_fraction": 0.7533783912658691, "avg_line_length": 31.88888931274414, "blob_id": "a879255bcbf7b16205292c208ed2170586c98673", "content_id": "acba69f5a8c5db0ea2e868c62955e788dcef55d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 296, "license_type": "no_license", "max_line_length": 48, "num_lines": 9, "path": "/bususer/resources.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from tastypie.resources import ModelResource\nfrom bususer.models import BusUser\nfrom tastypie.authorization import Authorization\n\nclass BusUserResource(ModelResource):\n class Meta:\n queryset = BusUser.objects.all()\n resource_name = 'user'\n authorization = Authorization()\n" }, { "alpha_fraction": 0.7582644820213318, "alphanum_fraction": 0.7582644820213318, "avg_line_length": 39.33333206176758, "blob_id": "8b0267cff702d81350352c89bc3ae2ad9dca34a9", "content_id": "35a694018c4b8dd2c7a7032d96d64b6f60cd2bdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 484, "license_type": "no_license", "max_line_length": 91, "num_lines": 12, "path": "/busempresa/resources.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from tastypie.resources import ModelResource\nfrom busempresa.models import BusEmpresa\nfrom tastypie.authorization import Authorization\nfrom tastypie import fields\n\nclass BusEmpresaResource(ModelResource):\n choferes = fields.ManyToManyField(\"busdriver.resources.BusDriverResource\", 'choferes', \n null=True, full=True, related_name='chofer')\n class Meta:\n queryset = BusEmpresa.objects.all()\n resource_name = 'empresa'\n authorization = Authorization()\n" }, { "alpha_fraction": 0.5063062906265259, "alphanum_fraction": 0.569369375705719, "avg_line_length": 23.130434036254883, "blob_id": "b64bc3c5958d730919d51998b77063638b3bd57c", "content_id": "4a8e8fb6e1335b222563fbbc36d128c6068e5909", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 555, "license_type": "no_license", "max_line_length": 63, "num_lines": 23, "path": "/busdriver/migrations/0003_auto_20180419_1521.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.3 on 2018-04-19 21:21\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('busruta', '0003_auto_20180419_1521'),\n ('busdriver', '0002_busdriver_empresa'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='busdriver',\n name='empresa',\n ),\n migrations.AddField(\n model_name='busdriver',\n name='rutas',\n field=models.ManyToManyField(to='busruta.BusRuta'),\n ),\n ]\n" }, { "alpha_fraction": 0.8315789699554443, "alphanum_fraction": 0.8315789699554443, "avg_line_length": 22.75, "blob_id": 
"4e94bcf76ea51202b0fcca6b90407beaf26016da", "content_id": "6a6cb91a9d1c894f77696fd998d54f89d89f2a92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/busdriver/admin.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import BusDriver\n\nadmin.site.register(BusDriver)\n" }, { "alpha_fraction": 0.5342105031013489, "alphanum_fraction": 0.5842105150222778, "avg_line_length": 20.11111068725586, "blob_id": "bf68cff6fe67e1eae6ccc70b90a183a8d1c1e51e", "content_id": "0da753d130d8482f7b5f4ae4623484f44011b75a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 380, "license_type": "no_license", "max_line_length": 53, "num_lines": 18, "path": "/bususer/migrations/0002_bususer_is_driver.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.4 on 2018-04-18 19:12\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bususer', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='bususer',\n name='is_driver',\n field=models.BooleanField(default=False),\n ),\n ]\n" }, { "alpha_fraction": 0.7227533459663391, "alphanum_fraction": 0.7227533459663391, "avg_line_length": 42.58333206176758, "blob_id": "43f0cc85e7327ad7ecc9adc432612efe4e61c012", "content_id": "c4edce6faf4ac93aa66575e609106bbf804c4909", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 523, "license_type": "no_license", "max_line_length": 95, "num_lines": 12, "path": "/busparada/urls.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from django.conf.urls import re_path\nfrom djoser import views as djoser_views\nfrom rest_framework_jwt import views as jwt_views\nfrom busparada import views\n\nurlpatterns = [\n # Views are defined in Djoser, but we're assigning custom paths.\n #re_path(r'^parada/view/$', djoser_views.UserView.as_view(), name='parada-view'),\n #re_path(r'^parada/delete/$', djoser_views.UserDeleteView.as_view(), name='parada-delete'),\n #re_path(r'^parada/create/$', djoser_views.UserCreateView.as_view(), name='parada-create'),\n\n]\n" }, { "alpha_fraction": 0.7100753784179688, "alphanum_fraction": 0.7155585885047913, "avg_line_length": 32.930233001708984, "blob_id": "3d05e3a4571a84e5792b88837ca7b95b53175da4", "content_id": "1e30d9434bfd413f7d2674fa7df197b2bb8f6a4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1459, "license_type": "no_license", "max_line_length": 77, "num_lines": 43, "path": "/BUS/urls.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "\"\"\"BUS URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.contrib import admin\nfrom django.urls import include, re_path\nfrom busparada.resources import BusParadaResource\nfrom busruta.resources import BusRutaResource\nfrom busempresa.resources import BusEmpresaResource\nfrom busdriver.resources import BusDriverResource\n#from bususer.resources import BusUserResource\n\n\n\n\nparada = BusParadaResource()\nruta = BusRutaResource()\nempresa = BusEmpresaResource()\ndriver = BusDriverResource()\n#user = BusUserResource()\n\nurlpatterns = [\n re_path(r'^', admin.site.urls),\n re_path(r'^admin/', admin.site.urls),\n re_path(r'^api/', include('bususer.urls')),\n re_path(r'^api/', include(parada.urls)),\n re_path(r'^api/', include(ruta.urls)),\n re_path(r'^api/', include(empresa.urls)),\n re_path(r'^api/', include(driver.urls)),\n #re_path(r'^api/', include(user.urls)),\n]\n" }, { "alpha_fraction": 0.747863233089447, "alphanum_fraction": 0.747863233089447, "avg_line_length": 35, "blob_id": "7ff2bf17c492b418da0aea4ec063e9b637adbcf0", "content_id": "f42ed8ae34369dece279cc7cfd5a3a068cb0d109", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 468, "license_type": "no_license", "max_line_length": 89, "num_lines": 13, "path": "/busruta/resources.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from tastypie.resources import ModelResource\nfrom busruta.models import BusRuta\nfrom tastypie.authorization import Authorization\nfrom tastypie import fields\n\n\nclass BusRutaResource(ModelResource):\n paradas = fields.ManyToManyField(\"busparada.resources.BusParadaResource\", 'paradas', \n null=True, full=True, related_name='parada')\n class Meta:\n queryset = BusRuta.objects.all()\n resource_name = 'ruta'\n authorization = Authorization()\n" }, { "alpha_fraction": 0.7684210538864136, "alphanum_fraction": 0.7684210538864136, "avg_line_length": 18, "blob_id": "9f40ba353fc037c75f73e19959a3d15c34d47608", "content_id": "9c19868b2b1a699ca0bb508d894b2b9dce7375c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95, "license_type": "no_license", "max_line_length": 34, "num_lines": 5, "path": "/busempresa/apps.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass BusempresaConfig(AppConfig):\n name = 'busempresa'\n" }, { "alpha_fraction": 0.8323699235916138, "alphanum_fraction": 0.8323699235916138, "avg_line_length": 33.599998474121094, "blob_id": "f130bc6443f3a86aee73a951a44a80ccc3e112dc", "content_id": "002e9be683c3012a522ebd15be992fba48fc6f6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 173, "license_type": "no_license", "max_line_length": 53, "num_lines": 5, "path": "/busparada/views.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nimport uuid\nfrom rest_framework import views, permissions, status\nfrom rest_framework.response import Response\n# Create your views here.\n" }, { "alpha_fraction": 0.8241758346557617, "alphanum_fraction": 0.8241758346557617, "avg_line_length": 21.75, "blob_id": "fe803e72b70d0285cc083d8a1e7c49c7e7934426", "content_id": "c7f7411141832b2e959ee61766a814bd5b76d106", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 91, "license_type": "no_license", 
"max_line_length": 32, "num_lines": 4, "path": "/busruta/admin.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import BusRuta\n\nadmin.site.register(BusRuta)\n" }, { "alpha_fraction": 0.7589576840400696, "alphanum_fraction": 0.7589576840400696, "avg_line_length": 29.700000762939453, "blob_id": "81cf208923b4c011577133542d4cd289311bb8f9", "content_id": "c731979fbe6abe4b8672a80cf9631bb6f2a04576", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 307, "license_type": "no_license", "max_line_length": 48, "num_lines": 10, "path": "/busparada/resources.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from tastypie.resources import ModelResource\nfrom busparada.models import BusParada\nfrom tastypie.authorization import Authorization\n\n\nclass BusParadaResource(ModelResource):\n class Meta:\n queryset = BusParada.objects.all()\n resource_name = 'parada'\n authorization = Authorization()\n" }, { "alpha_fraction": 0.7537091970443726, "alphanum_fraction": 0.7537091970443726, "avg_line_length": 29.636363983154297, "blob_id": "946b4c905b8438a5972683dfe427793e59ac2fbc", "content_id": "17982f2778b694d4d0af389d65db60136ce104f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 337, "license_type": "no_license", "max_line_length": 61, "num_lines": 11, "path": "/bususer/views.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from bususer.models import BusUser\nfrom rest_framework import viewsets\nfrom bususer.serializers import UserSerializer\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = BusUser.objects.all().order_by('-date_joined')\n serializer_class = UserSerializer\n" }, { "alpha_fraction": 0.5625, "alphanum_fraction": 0.73097825050354, "avg_line_length": 19.44444465637207, "blob_id": "fc24831306f50f652d34c05c4ea97fa2fd909b6d", "content_id": "815e55a3e4e12a65465c7f91947100af9b3ef0f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 368, "license_type": "no_license", "max_line_length": 31, "num_lines": 18, "path": "/requirements.txt", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "dj-database-url==0.5.0\nDjango==2.0.4\ndjango-tastypie==0.14.1\ndjango-templated-mail==1.1.1\ndjangorestframework==3.8.2\ndjangorestframework-jwt==1.11.0\ndjoser==1.1.5\ngunicorn==19.7.1\npsycopg2==2.7.4\nPyJWT==1.6.1\npython-dateutil==2.7.2\npython-dotenv==0.8.2\npython-mimeparse==1.6.0\npytz==2018.4\nsix==1.11.0\nvirtualenv==15.2.0\nvirtualenvwrapper-win==1.2.5\nwhitenoise==3.3.1\n" }, { "alpha_fraction": 0.5912408828735352, "alphanum_fraction": 0.6332116723060608, "avg_line_length": 26.399999618530273, "blob_id": "2b35816562bbbe3ef485f2c665f6a476b48ab169", "content_id": "73dd3264c117749e7fa2bda6ada4defda19a3077", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 548, "license_type": "no_license", "max_line_length": 146, "num_lines": 20, "path": "/busdriver/migrations/0002_busdriver_empresa.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.3 on 2018-04-19 18:36\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n 
dependencies = [\n ('busempresa', '0001_initial'),\n ('busdriver', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='busdriver',\n name='empresa',\n field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='busempresa.BusEmpresa'),\n ),\n ]\n" }, { "alpha_fraction": 0.7634408473968506, "alphanum_fraction": 0.7634408473968506, "avg_line_length": 17.600000381469727, "blob_id": "e4ddbab0e22286fe6e6a78d4de003f686dadca0b", "content_id": "7e55d47eb983c98d383246d47dd1197f622d2dc2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 93, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/busparada/apps.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass BusparadaConfig(AppConfig):\n name = 'busparada'\n" }, { "alpha_fraction": 0.682170569896698, "alphanum_fraction": 0.682170569896698, "avg_line_length": 27.33333396911621, "blob_id": "3f6cfb310c24b3a3841698ee149e619665424252", "content_id": "f6bb5efd0fef9505fd9099ad34c3ea213fabc4ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 258, "license_type": "no_license", "max_line_length": 82, "num_lines": 9, "path": "/bususer/serializers.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from bususer.models import BusUser\nfrom rest_framework import serializers\n\n\nclass UserSerializer(serializers.ModelSerializer):\n \n class Meta:\n model = BusUser\n fields = ('id','url','is_admin','email','first_name','last_name','chofer')\n\n\n\n" }, { "alpha_fraction": 0.8152173757553101, "alphanum_fraction": 0.8152173757553101, "avg_line_length": 17.399999618530273, "blob_id": "24ac504118dd0da89064977a2bc1244a9fb87319", "content_id": "3880dd3d0adf685f16692dc62e3a058f64fba00a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 92, "license_type": "no_license", "max_line_length": 32, "num_lines": 5, "path": "/bususer/admin.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import BusUser\n\n\nadmin.site.register(BusUser)\n" }, { "alpha_fraction": 0.671875, "alphanum_fraction": 0.6812499761581421, "avg_line_length": 31, "blob_id": "c28c9accf05bd8581d21db055c0e07527d3ee762", "content_id": "b4a22b51d1fe8968866545594a64a9d25fb2e6a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 320, "license_type": "no_license", "max_line_length": 69, "num_lines": 10, "path": "/busparada/models.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass BusParada(models.Model):\n nombre = models.CharField(default = \"\", max_length=255) \n latitud = models.FloatField(null=True, blank=True, default=None)\n longitud = models.FloatField(null=True, blank=True, default=None)\n \n def __str__(self):\n return self.nombre\n" }, { "alpha_fraction": 0.7528089880943298, "alphanum_fraction": 0.7528089880943298, "avg_line_length": 16.799999237060547, "blob_id": "6113766940e7bc51853e800e81459d74a986ab8e", "content_id": "5921b52cfc990b40d9623e8224ad842acc105fe6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 89, "license_type": "no_license", "max_line_length": 33, 
"num_lines": 5, "path": "/bususer/apps.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass BususerConfig(AppConfig):\n name = 'bususer'\n" }, { "alpha_fraction": 0.7146974205970764, "alphanum_fraction": 0.7146974205970764, "avg_line_length": 44.260868072509766, "blob_id": "b8d17a82a812b30660ff0ab1df5beb3e59e284c0", "content_id": "bdaf5dbe6d0aee701c3c7be13c3ad076f0dbe121", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1041, "license_type": "no_license", "max_line_length": 106, "num_lines": 23, "path": "/bususer/urls.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "from django.conf.urls import re_path, include\nfrom djoser import views as djoser_views\nfrom rest_framework_jwt import views as jwt_views\nfrom bususer import views\nfrom rest_framework import routers\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'users', views.UserViewSet)\n\n \nurlpatterns = [\n # Views are defined in Djoser, but we're assigning custom paths.\n re_path(r'^user/view/$', djoser_views.UserView.as_view(), name='user-view'),\n re_path(r'^user/delete/$', djoser_views.UserDeleteView.as_view(), name='user-delete'),\n re_path(r'^user/create/$', djoser_views.UserCreateView.as_view(), name='user-create'),\n # Views are defined in Rest Framework JWT, but we're assigning custom paths.\n re_path(r'^user/login/$', jwt_views.ObtainJSONWebToken.as_view(), name='user-login'),\n re_path(r'^user/login/refresh/$', jwt_views.RefreshJSONWebToken.as_view(), name='user-login-refresh'),\n re_path(r'^', include(router.urls)),\n re_path(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))\n\n]\n" }, { "alpha_fraction": 0.48942598700523376, "alphanum_fraction": 0.5830815434455872, "avg_line_length": 18.47058868408203, "blob_id": "283f5fc3cbd301b1435fa739d8030202b2aa0a2f", "content_id": "732a9b91bb0e0b04d4681d700261cda5744a2f56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 331, "license_type": "no_license", "max_line_length": 49, "num_lines": 17, "path": "/busparada/migrations/0005_remove_busparada_ruta.py", "repo_name": "SpiritHunt3r/API_Proyecto1", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.3 on 2018-04-19 21:21\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('busparada', '0004_auto_20180419_0228'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='busparada',\n name='ruta',\n ),\n ]\n" } ]
35
RALF34/McGyver_Game
https://github.com/RALF34/McGyver_Game
7f8557d9ab7374869cb09c819170fe4da31f254f
24ba256802f989290078af431cd952ce759bf58a
c0970377e21900158710c7397b4383023725dc45
refs/heads/master
2021-09-06T23:46:16.523315
2018-02-13T15:16:15
2018-02-13T15:16:15
116,263,272
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6291816234588623, "alphanum_fraction": 0.6378756165504456, "avg_line_length": 37.58394241333008, "blob_id": "13e81c48529e788d698cf75bcf56228b3126cc83", "content_id": "feb1119d3a764875cd5388365500734fb752e790", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5292, "license_type": "no_license", "max_line_length": 113, "num_lines": 137, "path": "/classes_and_methods.py", "repo_name": "RALF34/McGyver_Game", "src_encoding": "UTF-8", "text": "\"\"\"classes for McGyver game\"\"\"\n\nimport pygame\nimport random \nfrom pygame.locals import * \nfrom constants import *\n\nclass Labyrinth :\n\tdef __init__(self, code_file):\n\t\tself.code_file = code_file\n\t\tself.position_tools = {'ether':(0,0), 'tube':(0,0), 'needle':(0,0)}\n\n\tdef display_game(self,position_mcgyver,screen):\n\t\t\"\"\"Method to display the labyrinth\"\"\"\t\n\t\tpygame.init()\n\t\tbackground = pygame.image.load(\"green_background.jpg\").convert()\n\t\tmcgyver = pygame.image.load(\"mcgyver.png\").convert()\n\t\tguard = pygame.image.load(\"guard.png\").convert()\n\t\tneedle = pygame.image.load(\"needle.png\").convert()\n\t\ttube = pygame.image.load(\"plastic_tube.png\").convert()\n\t\tether = pygame.image.load(\"ether.png\").convert()\n\t\tscreen.blit(background,(0,0))\n\t\tscreen.blit(mcgyver,position_mcgyver)\n\t\tscreen.blit(guard,((nbr_cells_on_board-1)*lenght_cell,(nbr_cells_on_board-1)*lenght_cell))\n\t\tif 'ether' in self.position_tools.keys():\n\t\t\tscreen.blit(ether,self.position_tools['ether'])\n\t\tif 'tube' in self.position_tools.keys():\n\t\t\tscreen.blit(tube,self.position_tools['tube'])\n\t\tif 'needle' in self.position_tools.keys():\n\t\t\tscreen.blit(needle,self.position_tools['needle'])\n\t\tpygame.display.flip()\n\t\twall = pygame.image.load(\"WALL.png\").convert()\n\t\twith open(self.code_file, \"r\") as f:\n\t\t\ti = 0\n\t\t\tfor line in f:\n\t\t\t\tfor j in range(len(line)-1):\n\t\t\t\t\tif line[j] == 'M':\n\t\t\t\t\t\tscreen.blit(wall, (j*lenght_cell,i*lenght_cell))\n\t\t\t\t\t\tpygame.display.flip()\n\t\t\t\ti += 1\n\n\n\t\t\t\n\t\n\t\n\tdef placing_tools(self):\n\t\t\"\"\"Méthod for randomly placing tools that McGyver has to collect in order to make the syringue\"\"\"\n\t\tneedle_correctly_placed, tube_correctly_placed, ether_correctly_placed = False, False, False\n\t\twith open(self.code_file, \"r\") as f:\n\t\t\tlines = f.readlines()\n\t\t\twhile not ether_correctly_placed:\n\t\t\t\tx_cell_ether = random.randint(2,nbr_cells_on_board-2)\n\t\t\t\ty_cell_ether = random.randint(2,nbr_cells_on_board-2)\n\t\t\t\tif lines[y_cell_ether][x_cell_ether] == \"C\":\n\t\t\t\t\tself.position_tools['ether'] = (x_cell_ether * lenght_cell , y_cell_ether * lenght_cell) \n\t\t\t\t\tether_correctly_placed = True\n\t\t\twhile not tube_correctly_placed:\n\t\t\t\tx_cell_tube = random.randint(2,nbr_cells_on_board-2)\n\t\t\t\ty_cell_tube = random.randint(2,nbr_cells_on_board-2)\n\t\t\t\tif lines[y_cell_tube][x_cell_tube] == \"C\" and (x_cell_tube , y_cell_tube) != (x_cell_ether , y_cell_ether):\n\t\t\t\t\tself.position_tools['tube'] = (x_cell_tube * lenght_cell , y_cell_tube * lenght_cell)\n\t\t\t\t\ttube_correctly_placed = True\n\t\t\twhile not needle_correctly_placed:\n\t\t\t\tx_cell_needle = random.randint(2,nbr_cells_on_board-2)\n\t\t\t\ty_cell_needle = random.randint(2,nbr_cells_on_board-2)\n\t\t\t\tif lines[y_cell_needle][x_cell_needle] == \"C\":\n\t\t\t\t\tif (x_cell_needle , y_cell_needle) != (x_cell_tube , y_cell_tube):\n\t\t\t\t\t\tif 
(x_cell_needle , y_cell_needle) != (x_cell_ether , y_cell_ether):\n\t\t\t\t\t\t\tself.position_tools['needle'] = (x_cell_needle * lenght_cell , y_cell_needle * lenght_cell)\n\t\t\t\t\t\t\tneedle_correctly_placed = True\n\n\n\t\t\t\t\t\n\n\n\nclass McGyver:\n\n\tdef __init__(self):\n\t\tself.x_cell = 0\n\t\tself.y_cell = 0\n\t\tself.x_pixel_pos = 0\n\t\tself.y_pixel_pos = 0\n\t\tself.objects_found = 0\n\n\t\n\tdef turning(self, laby, towards):\n\t\tmcgyver = pygame.image.load(\"mcgyver.png\").convert()\n\t\twith open(laby.code_file, \"r\") as f:\n\t\t\tlines = f.readlines()\n\t\t\tif towards == 'right':\n\t\t\t\tif self.x_cell != (nbr_cells_on_board - 1):\n\t\t\t\t\tif lines[self.y_cell][self.x_cell+1] != 'M':\n\t\t\t\t\t\tif (self.x_pixel_pos+lenght_cell , self.y_pixel_pos) in laby.position_tools.values():\n\t\t\t\t\t\t\tself.objects_found += 1\n\t\t\t\t\t\t\tfor key in laby.position_tools.keys():\n\t\t\t\t\t\t\t\tif laby.position_tools[key] == (self.x_pixel_pos+lenght_cell , self.y_pixel_pos):\n\t\t\t\t\t\t\t\t\tlaby.position_tools.pop(key)\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tself.x_cell += 1\n\t\t\t\t\t\tself.x_pixel_pos = self.x_cell * lenght_cell\n\t\t\n\t\t\tif towards == 'left':\n\t\t\t\tif self.x_cell != 0:\n\t\t\t\t\tif lines[self.y_cell][self.x_cell-1] != 'M':\n\t\t\t\t\t\tif (self.x_pixel_pos-lenght_cell , self.y_pixel_pos) in laby.position_tools.values():\n\t\t\t\t\t\t\tself.objects_found += 1\n\t\t\t\t\t\t\tfor key in laby.position_tools.keys():\n\t\t\t\t\t\t\t\tif laby.position_tools[key] == (self.x_pixel_pos-lenght_cell , self.y_pixel_pos):\n\t\t\t\t\t\t\t\t\tlaby.position_tools.pop(key)\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tself.x_cell = self.x_cell-1\n\t\t\t\t\t\tself.x_pixel_pos = self.x_cell * lenght_cell\n\t\t\t\t\n\t\t\tif towards == 'up':\n\t\t\t\tif self.y_cell != 0:\n\t\t\t\t\tif lines[self.y_cell-1][self.x_cell] != 'M':\n\t\t\t\t\t\tif (self.x_pixel_pos , self.y_pixel_pos-lenght_cell) in laby.position_tools.values():\n\t\t\t\t\t\t\tself.objects_found += 1\n\t\t\t\t\t\t\tfor key in laby.position_tools.keys():\n\t\t\t\t\t\t\t\tif laby.position_tools[key] == (self.x_pixel_pos , self.y_pixel_pos-lenght_cell):\n\t\t\t\t\t\t\t\t\tlaby.position_tools.pop(key)\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tself.y_cell = self.y_cell-1\n\t\t\t\t\t\tself.y_pixel_pos = self.y_cell * lenght_cell\n\t\t\n\t\t\tif towards == 'down':\n\t\t\t\tif self.y_cell != (nbr_cells_on_board - 1):\n\t\t\t\t\tif lines[self.y_cell+1][self.x_cell] != 'M':\n\t\t\t\t\t\tif (self.x_pixel_pos , self.y_pixel_pos+lenght_cell) in laby.position_tools.values():\n\t\t\t\t\t\t\tself.objects_found += 1\n\t\t\t\t\t\t\tfor key in laby.position_tools.keys():\n\t\t\t\t\t\t\t\tif laby.position_tools[key] == (self.x_pixel_pos , self.y_pixel_pos+lenght_cell):\n\t\t\t\t\t\t\t\t\tlaby.position_tools.pop(key)\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tself.y_cell += 1\n\t\t\t\t\t\tself.y_pixel_pos = self.y_cell * lenght_cell\n\t\t\t\t\n" }, { "alpha_fraction": 0.5908722877502441, "alphanum_fraction": 0.6113961935043335, "avg_line_length": 30.347457885742188, "blob_id": "3229f27d1c83d24e2527484a2f94b539f7adb1fe", "content_id": "d5593eaea6bbdda8c4e6d721fa261028f5647b0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3704, "license_type": "no_license", "max_line_length": 132, "num_lines": 118, "path": "/Labyrinth _game.py", "repo_name": "RALF34/McGyver_Game", "src_encoding": "ISO-8859-2", "text": "\"\"\"classes for McGyver game\"\"\"\n\nimport pygame, random \nfrom 
pygame.locals import * \nfrom constants import *\n\nclass Labyrinth :\n\tdef __init__(self, code_file):\n\t\tself.code_file = code_file\n\t\t# list (not tuple) so the three slots can be filled in later\n\t\tself.position_tools = [(0,0), (0,0), (0,0)]\n\n\tdef display_laby(self):\n\t\t\"\"\"Method to display the labyrinth\"\"\"\n\t\tpygame.init()\n\t\tscreen = pygame.display.set_mode((15*30, 15*30))\n\t\tbackground = pygame.image.load(\"floor.jpg\").convert()\n\t\tscreen.blit(background, (0,0))\n\t\tpygame.display.flip()\n\t\twall = pygame.image.load(\"wall.png\").convert()\n\t\twith open(self.code_file, \"r\") as f:\n\t\t\ti = 0\n\t\t\tfor line in f:\n\t\t\t\tfor j in range(len(line)-1):\n\t\t\t\t\tif line[j] == 'M':\n\t\t\t\t\t\tscreen.blit(wall, (j*30,i*30))\n\t\t\t\t\t\tpygame.display.flip()\n\t\t\t\ti += 1\n\t\trunning = True\n\t\twhile running:\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tif event.type == pygame.QUIT:\n\t\t\t\t\trunning = False\n\n\tdef display_tools(self, screen):\n\t\t\"\"\"Method for randomly placing tools that McGyver has to collect in order to make the syringe\"\"\"\n\t\tneedle = pygame.image.load(\"needle.png\").convert()\n\t\ttube = pygame.image.load(\"plastic_tube.png\").convert()\n\t\tether = pygame.image.load(\"ether.png\").convert()\n\t\tneedle_correctly_placed, tube_correctly_placed, ether_correctly_placed = False, False, False\n\t\twith open(self.code_file, \"r\") as f:\n\t\t\tlines = f.readlines()\n\t\t\twhile not needle_correctly_placed:\n\t\t\t\twhile not tube_correctly_placed:\n\t\t\t\t\twhile not ether_correctly_placed:\n\t\t\t\t\t\tx_ether, y_ether = random.randint(2,14), random.randint(2,14)\n\t\t\t\t\t\tif lines[y_ether][x_ether] == \"C\":\n\t\t\t\t\t\t\tself.position_tools[0] = (x_ether,y_ether)\n\t\t\t\t\t\t\tether_correctly_placed = True\n\t\t\t\t\t\t\tscreen.blit(ether, (x_ether*30,y_ether*30))\n\t\t\t\t\t\t\tpygame.display.flip()\n\t\t\t\t\tx_tube, y_tube = random.randint(2,14), random.randint(2,14)\n\t\t\t\t\tif lines[y_tube][x_tube] == \"C\" and (x_tube,y_tube) != (x_ether,y_ether):\n\t\t\t\t\t\tself.position_tools[1] = (x_tube,y_tube)\n\t\t\t\t\t\ttube_correctly_placed = True\n\t\t\t\t\t\tscreen.blit(tube, (x_tube*30,y_tube*30))\n\t\t\t\t\t\tpygame.display.flip()\n\t\t\t\tx_needle, y_needle = random.randint(2,14), random.randint(2,14)\n\t\t\t\tif lines[y_needle][x_needle] == \"C\" and (x_needle,y_needle) != (x_tube,y_tube) and (x_needle,y_needle) != (x_ether,y_ether):\n\t\t\t\t\tself.position_tools[2] = (x_needle,y_needle)\n\t\t\t\t\tneedle_correctly_placed = True\n\t\t\t\t\tscreen.blit(needle,(x_needle * 30, y_needle * 30))\n\t\t\t\t\tpygame.display.flip()\n\n\nclass McGyver:\n\n\tdef __init__(self):\n\t\tself.x_cell = 0\n\t\tself.y_cell = 0\n\t\tself.x_pixel_pos = 0\n\t\tself.y_pixel_pos = 0\n\n\tdef moving(self, laby, towards):\n\t\t# early draft: the tool pick-up itself is not implemented yet (see classes.py)\n\t\tmcgyver = pygame.image.load(\"mcgyver.png\").convert()\n\t\twith open(laby.code_file, \"r\") as f:\n\t\t\tlines = f.readlines()\n\t\t\tif towards == 'right':\n\t\t\t\tif self.x_cell != (nbr_cells_on_board - 1):\n\t\t\t\t\tif lines[self.y_cell][self.x_cell+1] != 'M':\n\t\t\t\t\t\tif (self.x_cell+1,self.y_cell) in laby.position_tools:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\tself.x_cell += 1\n\t\t\t\t\t\tself.x_pixel_pos = self.x_cell * lenght_cell\n\n\t\t\tif towards == 'left':\n\t\t\t\tif self.x_cell != 0:\n\t\t\t\t\tif lines[self.y_cell][self.x_cell-1] != 'M':\n\t\t\t\t\t\tif (self.x_cell-1,self.y_cell) in laby.position_tools:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\tself.x_cell -= 1\n\t\t\t\t\t\tself.x_pixel_pos = self.x_cell * 
lenght_cell\n\n\t\t\tif towards == 'up':\n\t\t\t\tif self.y_cell != 0:\n\t\t\t\t\tif lines[self.y_cell-1][self.x_cell] != 'M':\n\t\t\t\t\t\tif (self.x_cell,self.y_cell-1) in laby.position_tools:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\tself.y_cell -= 1\n\t\t\t\t\t\tself.y_pixel_pos = self.y_cell * lenght_cell\n\n\t\t\tif towards == 'down':\n\t\t\t\tif self.y_cell != (nbr_cells_on_board - 1):\n\t\t\t\t\tif lines[self.y_cell+1][self.x_cell] != 'M':\n\t\t\t\t\t\tif (self.x_cell,self.y_cell+1) in laby.position_tools:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\tself.y_cell += 1\n\t\t\t\t\t\tself.y_pixel_pos = self.y_cell * lenght_cell\n" }, { "alpha_fraction": 0.6372841000556946, "alphanum_fraction": 0.6587254405021667, "avg_line_length": 23.632352828979492, "blob_id": "af8f59a22d0d5be419ec2ff7a6f522b7b6372387", "content_id": "80f6127c83b037cbd4379b367b1de236998a06b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1679, "license_type": "no_license", "max_line_length": 98, "num_lines": 68, "path": "/McGyver_game.py", "repo_name": "RALF34/McGyver_Game", "src_encoding": "UTF-8", "text": "\"\"\"\nGame \"Trapped in the labyrinth\"\nMcGyver has to collect three objects and reach the exit of the labyrinth\n\nPython script\nFiles: McGyver_game.py, classes.py, constants.py, struct_laby.txt\n\"\"\"\n\nimport pygame\nfrom pygame.locals import *\n\nfrom classes import *\nfrom constants import *\n\npygame.init()\n\nscreen = pygame.display.set_mode((nbr_cells_on_board*lenght_cell,\\\n\t nbr_cells_on_board*lenght_cell))\n\nlaby = Labyrinth(\"struct_laby.txt\")\n\nmacGyver = Character()\n\nlaby.placing_tools()\n\nlaby.display_game((0, 0), screen)\n\npygame.display.flip()\n\nrunning = 1\n\nwhile running:\n\n\tpygame.time.Clock().tick(30)\n\n\tfor event in pygame.event.get():\n\n\t\tif event.type == QUIT:\n\t\t\trunning = 0\n\n\t\telif event.type == KEYDOWN:\n\n\t\t\tif event.key == K_RIGHT:\n\t\t\t\tmacGyver.turning(laby, 'right')\n\t\t\telif event.key == K_LEFT:\n\t\t\t\tmacGyver.turning(laby, 'left')\n\t\t\telif event.key == K_UP:\n\t\t\t\tmacGyver.turning(laby, 'up')\n\t\t\telif event.key == K_DOWN:\n\t\t\t\tmacGyver.turning(laby, 'down')\n\n\t\t\tlaby.display_game((macGyver.x_pixel_pos, macGyver.y_pixel_pos),\\\n\t\t\t    screen)\n\n\t\t\tpygame.display.flip()\n\n\tif (macGyver.x_cell, macGyver.y_cell) == \\\n\t    (nbr_cells_on_board-1, nbr_cells_on_board-1):\n\t\tif macGyver.tools_found == 3:\n\t\t\tscreen.fill((234, 234, 234))\n\t\t\tsuccess = pygame.image.load(\"you've_won.jpg\").convert()\n\t\t\tscreen.blit(success, (0.15*nbr_cells_on_board*lenght_cell, 0.4*nbr_cells_on_board*lenght_cell))\n\t\telse:\n\t\t\tscreen.fill((107, 133, 237))\n\t\t\tdefeat = pygame.image.load(\"you've_lost.png\").convert()\n\t\t\tscreen.blit(defeat, (0.1*nbr_cells_on_board*lenght_cell, 0.3*nbr_cells_on_board*lenght_cell))\n\t\tpygame.display.flip()\n\n" }, { "alpha_fraction": 0.44185304641723633, "alphanum_fraction": 0.4480298161506653, "avg_line_length": 47.601036071777344, "blob_id": "1f3041f20e0e7e00802a75e2411ecc21c3338e63", "content_id": "f615ecd7dc3291a58639d8ec6803ca8315be95a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9391, "license_type": "no_license", "max_line_length": 78, "num_lines": 193, "path": "/classes.py", "repo_name": "RALF34/McGyver_Game", "src_encoding": "UTF-8", "text": "\"\"\"classes for McGyver game\"\"\"\n\nimport pygame\nimport random\nfrom pygame.locals import *\nfrom 
constants import *\n\nclass Labyrinth:\n\n    def __init__(self, code_file):\n        \"\"\"File coding the structure of the labyrinth\"\"\"\n        self.code_file = code_file\n        \"\"\"Dictionary containing the three objects as keys\n        and their positions as values\"\"\"\n        self.position_tools = {'ether': (0,0), 'tube': (0,0), 'needle': (0,0)}\n\n    def display_game(self, position_mcgyver, screen):\n        \"\"\"Method to display the labyrinth\"\"\"\n        screen.fill((34,177,76))\n        mcgyver = pygame.image.load(\"mcgyver.png\").convert()\n        guard = pygame.image.load(\"guard.png\").convert()\n        needle = pygame.image.load(\"needle.png\").convert()\n        tube = pygame.image.load(\"plastic_tube.png\").convert()\n        ether = pygame.image.load(\"ether.png\").convert()\n        screen.blit(mcgyver, position_mcgyver)\n        screen.blit(guard, ((nbr_cells_on_board-1)*lenght_cell, \\\n                            (nbr_cells_on_board-1)*lenght_cell))\n        \"\"\"The following tests are telling which tool(s) has been\n        found by McGyver, so they don't appear on the screen anymore\"\"\"\n        if 'ether' in self.position_tools.keys():\n            screen.blit(ether, self.position_tools['ether'])\n        if 'tube' in self.position_tools.keys():\n            screen.blit(tube, self.position_tools['tube'])\n        if 'needle' in self.position_tools.keys():\n            screen.blit(needle, self.position_tools['needle'])\n        #pygame.display.flip()\n        wall = pygame.image.load(\"WALL.png\").convert()\n        \"\"\"Reading of \"code_file\" and loop\n        to display the walls of the labyrinth\"\"\"\n        with open(self.code_file, \"r\") as f:\n            text = f.read()\n            i, j, k = 0, 0, 0\n            while text[k] != 't':\n                if text[k] == '\\n':\n                    i += 1\n                    j = 0\n                    k += 1\n                elif text[k] == 'M':\n                    screen.blit(wall, (j*lenght_cell, i*lenght_cell))\n                    #pygame.display.flip()\n                    j += 1\n                    k += 1\n                else:\n                    j += 1\n                    k += 1\n\n    def placing_tools(self):\n        \"\"\"Method for randomly placing tools that McGyver has to collect\n        in order to make the syringe\"\"\"\n        needle_correctly_placed = False\n        tube_correctly_placed = False\n        ether_correctly_placed = False\n        with open(self.code_file, \"r\") as f:\n            lines = f.readlines()\n            while not ether_correctly_placed:\n                x_cell_ether = random.randint(2, nbr_cells_on_board-2)\n                y_cell_ether = random.randint(2, nbr_cells_on_board-2)\n                if lines[y_cell_ether][x_cell_ether] == \"C\":\n                    self.position_tools['ether'] = \\\n                        (x_cell_ether*lenght_cell, y_cell_ether*lenght_cell)\n                    ether_correctly_placed = True\n            while not tube_correctly_placed:\n                x_cell_tube = random.randint(2, nbr_cells_on_board-2)\n                y_cell_tube = random.randint(2, nbr_cells_on_board-2)\n                if lines[y_cell_tube][x_cell_tube] == \"C\":\n                    \"\"\"test to prevent the tube and the ether bottle\n                    from being at the same place in the labyrinth\"\"\"\n                    if (x_cell_tube, \\\n                        y_cell_tube) != (x_cell_ether, y_cell_ether):\n                        self.position_tools['tube'] = \\\n                            (x_cell_tube*lenght_cell, y_cell_tube*lenght_cell)\n                        tube_correctly_placed = True\n            while not needle_correctly_placed:\n                x_cell_needle = random.randint(2, nbr_cells_on_board-2)\n                y_cell_needle = random.randint(2, nbr_cells_on_board-2)\n                if lines[y_cell_needle][x_cell_needle] == \"C\":\n                    \"\"\"New tests to avoid having several tools\n                    at the same place in the labyrinth\"\"\"\n                    if (x_cell_needle, \\\n                        y_cell_needle) != (x_cell_tube, y_cell_tube):\n                        if (x_cell_needle, \\\n                            y_cell_needle) != (x_cell_ether, \\\n                                               y_cell_ether):\n                            self.position_tools['needle'] = \\\n                                (x_cell_needle*lenght_cell, \\\n                                 y_cell_needle*lenght_cell)\n                            needle_correctly_placed = True\n\n\nclass Character:\n\n    def __init__(self):\n        self.x_cell = 0\n        self.y_cell = 0\n        
self.x_pixel_pos = 0\n        self.y_pixel_pos = 0\n        self.tools_found = 0\n\n\n    def turning(self, laby, towards):\n        mcgyver = pygame.image.load(\"mcgyver.png\").convert()\n        with open(laby.code_file, \"r\") as f:\n            lines = f.readlines()\n            if towards == 'right':\n                \"\"\"Making sure that Mcgyver is not on the\n                right border of the labyrinth\"\"\"\n                if self.x_cell != nbr_cells_on_board-1:\n                    \"\"\"Making sure that the cell on the\n                    right is not a wall\"\"\"\n                    if lines[self.y_cell][self.x_cell+1] != 'M':\n                        \"\"\"Making sure whether or not McGyver is\n                        moving to a position of a tool\"\"\"\n                        if (self.x_pixel_pos+lenght_cell, \\\n                            self.y_pixel_pos) in \\\n                            laby.position_tools.values():\n                            self.tools_found += 1\n                            \"\"\"Loop to remove the tool (that McGyver\n                            has just found) from the dictionary\"\"\"\n                            for key in laby.position_tools.keys():\n                                if laby.position_tools[key] == \\\n                                    (self.x_pixel_pos+lenght_cell, \\\n                                     self.y_pixel_pos):\n                                    laby.position_tools.pop(key)\n                                    break\n                        self.x_cell += 1\n                        self.x_pixel_pos = self.x_cell*lenght_cell\n            if towards == 'left':\n                \"\"\"Making sure that McGyver is not on the\n                left border of the labyrinth\"\"\"\n                if self.x_cell != 0:\n                    \"\"\"Making sure that there is no wall\"\"\"\n                    if lines[self.y_cell][self.x_cell-1] != 'M':\n                        \"\"\"Checking the presence of a tool on the way\"\"\"\n                        if (self.x_pixel_pos-lenght_cell, \\\n                            self.y_pixel_pos) in \\\n                            laby.position_tools.values():\n                            self.tools_found += 1\n                            \"\"\"Loop to remove the tools\n                            from the dictionary\"\"\"\n                            for key in laby.position_tools.keys():\n                                if laby.position_tools[key] == \\\n                                    (self.x_pixel_pos-lenght_cell, \\\n                                     self.y_pixel_pos):\n                                    laby.position_tools.pop(key)\n                                    break\n                        self.x_cell = self.x_cell-1\n                        self.x_pixel_pos = self.x_cell*lenght_cell\n            if towards == 'up':\n                \"\"\"Making sure that McGyver is\n                not on the top of the screen\"\"\"\n                if self.y_cell != 0:\n                    \"\"\"Making sure that there's no wall\"\"\"\n                    if lines[self.y_cell-1][self.x_cell] != 'M':\n                        \"\"\"Checking the presence of a tool\"\"\"\n                        if (self.x_pixel_pos, \\\n                            self.y_pixel_pos-lenght_cell) in \\\n                            laby.position_tools.values():\n                            self.tools_found += 1\n                            \"\"\"Removing the element from the dictionary\"\"\"\n                            for key in laby.position_tools.keys():\n                                if laby.position_tools[key] == \\\n                                    (self.x_pixel_pos, \\\n                                     self.y_pixel_pos-lenght_cell):\n                                    laby.position_tools.pop(key)\n                                    break\n                        self.y_cell = self.y_cell-1\n                        self.y_pixel_pos = self.y_cell*lenght_cell\n            if towards == 'down':\n                if self.y_cell != nbr_cells_on_board-1:\n                    if lines[self.y_cell+1][self.x_cell] != 'M':\n                        if (self.x_pixel_pos, \\\n                            self.y_pixel_pos+lenght_cell) in \\\n                            laby.position_tools.values():\n                            self.tools_found += 1\n                            for key in laby.position_tools.keys():\n                                if laby.position_tools[key] == \\\n                                    (self.x_pixel_pos, \\\n                                     self.y_pixel_pos+lenght_cell):\n                                    laby.position_tools.pop(key)\n                                    break\n                        self.y_cell += 1\n                        self.y_pixel_pos = self.y_cell*lenght_cell\n" }, { "alpha_fraction": 0.6097561120986938, "alphanum_fraction": 0.707317054271698, "avg_line_length": 19.5, "blob_id": "7c238fbffc97ae39212c6b5697a7a43b76c7b8cd", "content_id": "f48462a9262be055a5db8590c7ef688eb1a65ed3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41, "license_type": "no_license", "max_line_length": 23, "num_lines": 2, "path": "/constants.py", "repo_name": "RALF34/McGyver_Game", "src_encoding": "UTF-8", "text": "nbr_cells_on_board = 15\nlenght_cell = 45\n" } ]
5
kaku717813694/NewsRecommend
https://github.com/kaku717813694/NewsRecommend
5c721ab253fd2ed264eea7d69edf9acb35fbc7bb
f84e487a9e8151f00e7086f7d3b52b81227afa63
9e9c7ff4b1051e7ca45ab7ce4ea918913294dcd6
refs/heads/master
2022-04-10T23:29:43.837826
2020-03-01T02:13:51
2020-03-01T02:13:51
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.732600748538971, "alphanum_fraction": 0.7374847531318665, "avg_line_length": 24.625, "blob_id": "515af72f936766b2da17e10dfc95eccf0d953f91", "content_id": "f4d232c5d40e67150972f54900a8bcb91a109389", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 823, "license_type": "permissive", "max_line_length": 73, "num_lines": 32, "path": "/back/src/test/java/com/smacul/demo/service/serviceImpl/IndexServiceImplTest.java", "repo_name": "kaku717813694/NewsRecommend", "src_encoding": "UTF-8", "text": "package com.smacul.demo.service.serviceImpl;\n\nimport com.smacul.demo.dao.ArticlesMapper;\nimport org.junit.jupiter.api.Test;\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.boot.test.context.SpringBootTest;\n\nimport javax.servlet.http.HttpSession;\n\nimport java.util.List;\n\n@SpringBootTest\nclass IndexServiceImplTest {\n\n @Autowired\n ArticlesMapper articlesMapper;\n\n @Autowired\n HttpSession session;\n\n @Test\n void getLeftNavTags() {\n List<String> result = articlesMapper.getLeftNavTags();\n }\n\n @Test\n void getTinyArticles() {\n// List<TinyArticleModel> result = indexMapper.getTinyArticles();\n// TinyArticleModel result = indexMapper.getTinyArticles();\n System.out.println(articlesMapper.getTinyArticles(\"综合\", 10, 10));\n }\n}" }, { "alpha_fraction": 0.5438144207000732, "alphanum_fraction": 0.5463917255401611, "avg_line_length": 24.799999237060547, "blob_id": "6e00c8201b95dc208575e51546d6b12ea5d205d1", "content_id": "35905c91c3d093c171e52c6b974e0dffe1baaa52", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 388, "license_type": "permissive", "max_line_length": 35, "num_lines": 15, "path": "/spider/model/ArticleModel.py", "repo_name": "kaku717813694/NewsRecommend", "src_encoding": "UTF-8", "text": "class ArticleModel:\n\n def __init__(self):\n self.art_id = None\n self.art_title = None\n self.art_content = None\n self.art_spider = None\n self.art_class = None\n self.art_image_url = None\n self.art_comment_num = None\n self.art_customer_id = None\n self.art_tags = None\n self.art_time = None\n\n self.art_legal = 1\n\n" }, { "alpha_fraction": 0.5461538434028625, "alphanum_fraction": 0.5487179756164551, "avg_line_length": 26.85714340209961, "blob_id": "d51bf7f2428ced6a99883944b4096a9d45f050c5", "content_id": "eeb04cdf5a6bdbe6ea1c474cc8ecca6067b8b78f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 390, "license_type": "permissive", "max_line_length": 35, "num_lines": 14, "path": "/spider/model/ReplyModel.py", "repo_name": "kaku717813694/NewsRecommend", "src_encoding": "UTF-8", "text": "class ReplyModel:\n\n def __init__(self):\n self.rep_id = None\n self.rep_content = None\n self.rep_like_num = None\n self.rep_type = None\n self.rep_time = None\n self.rep_customer_id = None\n self.rep_article_id = None\n self.rep_comment_id = None\n self.rep_reply_id = None\n self.rep_spider = None\n self.rep_legal = 1\n" }, { "alpha_fraction": 0.6900928616523743, "alphanum_fraction": 0.705572783946991, "avg_line_length": 30.50731658935547, "blob_id": "2982abdc8a388647ad5f960ec439025c6887a947", "content_id": "8bdc19937a3425ccdb9e9905985517ac95e78010", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 7124, "license_type": "permissive", "max_line_length": 80, "num_lines": 205, "path": 
"/NewsRecommend.sql", "repo_name": "kaku717813694/NewsRecommend", "src_encoding": "UTF-8", "text": "DROP DATABASE IF EXISTS NewsRecommend;\nCREATE DATABASE IF NOT EXISTS NewsRecommend CHARACTER SET utf8mb4;\n\n\n-- cus 用户\nDROP TABLE IF EXISTS NewsRecommend.Customers;\nCREATE TABLE NewsRecommend.Customers (\n cus_id INT UNSIGNED NOT NULL auto_increment,\n cus_name VARCHAR(64),\n cus_pass VARCHAR(255),\n -- 爬虫中用于识别用户\n cus_spider VARCHAR(64) default '',\n -- 用户头像的 url\n cus_avatar_url VARCHAR(255) default '',\n -- 用户背景墙的图片 url\n cus_background_url VARCHAR(255) default '',\n -- 用户的个人描述\n cus_style VARCHAR(255) default '这个人很懒, 什么都没写',\n -- cus_gender 为 0 时性别未知, 为 1 时为男, 为 -1 时为女\n cus_gender TINYINT DEFAULT 0,\n -- 用户的创建时间\n cus_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,\n -- cus_type 为 0 时是普通用户, 为 1 时是可编辑用户\n cus_type TINYINT default 0,\n -- 此用户的关注的用户数量\n cus_follow_num int UNSIGNED default 0,\n -- 此用户的粉丝\n cus_fan_num int UNSIGNED default 0, \n -- 此用户的文章数量\n cus_article_num int UNSIGNED default 0,\n -- 用户评分\n cus_scope int UNSIGNED default 0,\n -- cus_legal 为 0 时待审核, 为 1 时合法, 为 -1 不合法\n cus_legal TINYINT default 0,\n\t\n primary key(cus_id)\n);\n\n\n-- art 新闻\nDROP TABLE IF EXISTS NewsRecommend.Articles;\nCREATE TABLE NewsRecommend.Articles (\n art_id INT UNSIGNED NOT NULL auto_increment,\n art_title VARCHAR(255) default '',\n art_content TEXT,\n -- 在爬虫中分辨文章\n art_spider VARCHAR(64) default '',\n -- 文章的分类\n art_class VARCHAR(32),\n -- 文章的标签 应该以 & 分隔\n art_tags VARCHAR(128) default '',\n -- 文章缩略图的信息\n art_image_url VARCHAR(255) default '',\n -- 文章的点赞数量\n art_like_num INT UNSIGNED default 0,\n -- 文章的点踩数量\n art_dislike_num INT UNSIGNED default 0,\n art_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,\n -- 文章的评论数量\n art_comment_num INT UNSIGNED default 0,\n -- 文章的分数\n\tart_scope int UNSIGNED default 0,\n art_legal tinyint default 0,\n \n art_customer_id INT UNSIGNED,\n primary key(art_id),\n\tforeign key(art_customer_id) references Customers(cus_id)\n);\n\n\n-- com 评论 \nDROP TABLE IF EXISTS NewsRecommend.Comments;\nCREATE TABLE NewsRecommend.Comments (\n com_id INT UNSIGNED NOT NULL auto_increment,\n com_content TEXT,\n com_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,\n com_like_num INT UNSIGNED default 0,\n com_dislike_num INT UNSIGNED default 0,\n\t-- 评论的回复数量 Abandon\n com_reply_num INT UNSIGNED default 0,\n -- 评论的分数\n\tcom_scope int UNSIGNED default 0,\n com_legal tinyint default 0,\n -- 爬虫过程中的评论标识\n com_spider varchar(64) default '',\n \n com_customer_id INT UNSIGNED,\n com_article_id INT UNSIGNED, \n\tprimary key(com_id),\n foreign key(com_customer_id) references Customers(cus_id),\n\tforeign key(com_article_id) references Articles(art_id)\n);\n\n\n-- rep 回复\nDROP TABLE IF EXISTS NewsRecommend.Replys;\nCREATE TABLE NewsRecommend.Replys (\n rep_id INT UNSIGNED NOT NULL auto_increment,\n rep_content TEXT,\n -- 回复的类型, 0 是对评论的回复, 1 是对回复的回复\n rep_type tinyint default 0, \n rep_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,\n rep_like_num INT UNSIGNED default 0,\n rep_dislike_num INT UNSIGNED default 0,\n\t-- 回复的回复数量 Abandon\n rep_reply_num INT UNSIGNED default 0,\n -- 回复的分数\n\trep_scope int UNSIGNED default 0,\n\trep_legal tinyint default 0,\n\n -- 爬虫过程中的评论标识\n rep_spider varchar(64) default '',\n\n rep_customer_id INT UNSIGNED,\n rep_article_id INT UNSIGNED,\n rep_comment_id INT UNSIGNED, \n rep_reply_id INT UNSIGNED,\n primary key(rep_id),\n foreign key(rep_customer_id) references 
Customers(cus_id),\n\tforeign key(rep_article_id) references Articles(art_id),\n foreign key(rep_comment_id) references Comments(com_id),\n foreign key(rep_reply_id) references Replys(rep_id)\n);\n\n\n-- adm 管理员\nDROP TABLE IF EXISTS NewsRecommend.Administrators;\nCREATE TABLE NewsRecommend.Administrators (\n adm_id INT UNSIGNED NOT NULL auto_increment,\n adm_name VARCHAR(64),\n adm_pass VARCHAR(255),\n adm_email VARCHAR(64),\n adm_phone VARCHAR(64),\n adm_address VARCHAR(255),\n adm_avatar_url VARCHAR(255),\n adm_gender TINYINT DEFAULT 0,\n adm_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,\n \n primary key(adm_id)\n);\n\n\n\n-- ccf 用户的关注情况记录\n-- follower -> followee\nDROP TABLE IF EXISTS NewsRecommend.CustomerCustomerFollow;\nCREATE TABLE NewsRecommend.CustomerCustomerFollow (\n ccf_id INT UNSIGNED NOT NULL auto_increment,\n ccf_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,\n\n ccf_follower_id INT UNSIGNED,\n ccf_followee_id INT UNSIGNED,\n\tprimary key(ccf_id),\n foreign key(ccf_follower_id) references Customers(cus_id),\n foreign key(ccf_followee_id) references Customers(cus_id)\n);\n\n\n\n-- acb 用户的新闻偏好程度记录\nDROP TABLE IF EXISTS NewsRecommend.ArticleCustomerBehaviors;\nCREATE TABLE NewsRecommend.ArticleCustomerBehaviors (\n acb_id INT UNSIGNED NOT NULL auto_increment,\n -- acb_behavior 用户行为: 无 0, 写作 1, 点赞 2, 点踩 3, 评论 4, 回复 5, 只浏览 6\n acb_behavior INT UNSIGNED default 0,\n acb_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,\n \n acb_article_id INT UNSIGNED,\n acb_customer_id INT UNSIGNED,\n\tprimary key(acb_id),\n foreign key(acb_article_id) references Articles(art_id),\n foreign key(acb_customer_id) references Customers(cus_id)\n);\n\n\n-- ccp 用户的评论点赞点踩记录\n-- DROP TABLE IF EXISTS NewsRecommend.CommentCustomerPreference;\n-- CREATE TABLE NewsRecommend.CommentCustomerPreference (\n-- ccp_id INT UNSIGNED NOT NULL,\n-- -- ccp_prefer 0 表示中立; -1 表示讨厌; 1 表示喜欢\n-- ccp_prefer TINYINT default 0,\n-- ccp_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,\n\n-- ccp_comment_id INT UNSIGNED,\n-- ccp_customer_id INT UNSIGNED,\n-- \tprimary key(ccp_id),\n-- foreign key(ccp_comment_id) references Comments(com_id),\n-- foreign key(ccp_customer_id) references Customers(cus_id)\n-- );\n\n\n-- rcp 用户的回复点赞点踩记录\n-- DROP TABLE IF EXISTS NewsRecommend.ReplyCustomerPreference;\n-- CREATE TABLE NewsRecommend.ReplyCustomerPreference (\n-- rcp_id INT UNSIGNED NOT NULL,\n-- -- rcp_prefer 0 表示中立; -1 表示讨厌; 1 表示喜欢\n-- rcp_prefer TINYINT default 0,\n-- rcp_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,\n\n-- \trcp_reply_id INT UNSIGNED,\n-- rcp_customer_id INT UNSIGNED,\n-- \tprimary key(rcp_id),\n-- foreign key(rcp_reply_id) references Replys(rep_id),\n-- foreign key(rcp_customer_id) references Customers(cus_id)\n-- );\n\n" }, { "alpha_fraction": 0.7889447212219238, "alphanum_fraction": 0.7889447212219238, "avg_line_length": 27.428571701049805, "blob_id": "bb209ad5e68e15e7e34ba4f28cd2ac845e833c65", "content_id": "254fb4a04d4a5fb139f04bb1c4574c58096109b2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 398, "license_type": "permissive", "max_line_length": 99, "num_lines": 14, "path": "/back/src/main/java/com/smacul/demo/service/SearchService.java", "repo_name": "kaku717813694/NewsRecommend", "src_encoding": "UTF-8", "text": "package com.smacul.demo.service;\n\nimport com.smacul.demo.model.HotArticleModel;\nimport 
com.smacul.demo.model.TinyArticleModel;\n\nimport java.util.List;\n\npublic interface SearchService {\n List<HotArticleModel> getHotArticles(Integer page, Integer pageSize);\n\n List<String> getLeftNavTags();\n\n List<TinyArticleModel> getTinyArticles(String key, String tag, Integer page, Integer pageSize);\n}\n" }, { "alpha_fraction": 0.44383037090301514, "alphanum_fraction": 0.44529664516448975, "avg_line_length": 43.94416427612305, "blob_id": "a2a83e064aead5f46ce0a2261d8abbc158b63142", "content_id": "7723e4eb02877fcad869302b9493bb61b20aa603", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9174, "license_type": "permissive", "max_line_length": 121, "num_lines": 197, "path": "/spider/Main.py", "repo_name": "kaku717813694/NewsRecommend", "src_encoding": "UTF-8", "text": "import model.ArticleModel as ArtMod\nimport model.ReplyModel as RepMod\nimport model.CommentModel as ComMod\nimport model.CustomerModel as CusMod\n\nimport dao.ArticleDao as ArtDao\nimport dao.ReplyDao as RepDao\nimport dao.CommentDao as ComDao\nimport dao.CustomerDao as CusDao\n\nimport process.ArticleProcess as ArtPro\nimport process.ReplyProcess as RepPro\nimport process.CommentProcess as ComPro\nimport process.CustomerProcess as CusPro\n\nimport util.MySql as MySql\nimport util.Json as Json\nimport util.Time as Time\nimport os.path\nimport logging\n\nlog_file_name = os.path.join('log', '%s.txt' % Time.Time.get_local_time())\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nformatter = logging.Formatter('%(levelname)s - %(module)s - %(funcName)s :\\n \\t%(message)s')\nhandler = logging.FileHandler(filename=log_file_name, mode='a', encoding='utf-8')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\nclass Major:\n\n def __init__(self, path):\n db = Json.Json.read_json_file(path)\n self.__base = MySql.MySql(db_name=db['name'], user=db['user'], password=db['pass'],\n host=db['host'], charset=db['charset'])\n\n self.__art_pro = ArtPro.ArticleProcess()\n self.__rep_pro = RepPro.ReplyProcess()\n self.__com_pro = ComPro.CommentProcess()\n self.__cus_pro = CusPro.CustomerProcess()\n\n self.__cus_dao = CusDao.CustomerDao(self.__base)\n self.__art_dao = ArtDao.ArticleDao(self.__base)\n self.__com_dao = ComDao.CommentDao(self.__base)\n self.__rep_dao = RepDao.ReplyDao(self.__base)\n\n def major(self):\n categories = ['news_society', 'news_entertainment', 'news_tech', 'news_military', 'news_sports', 'news_car',\n 'news_finance', 'news_world', 'news_fashion', 'news_travel', 'news_discovery', 'news_baby',\n 'news_regimen', 'news_story', 'news_essay', 'news_game', 'news_history', 'news_food']\n for category in categories:\n print(\"\\n当前类别: %s\" % category)\n \"\"\" 处理 art \"\"\"\n try:\n arts_brief_json = self.__art_pro.get_arts_brief_json_by_category(category)\n if len(arts_brief_json) != 0:\n print(\"新闻总长度: %d\" % len(arts_brief_json))\n # print('arts_brief_json 获取 成功')\n except:\n print('arts_brief_json 获取 失败')\n continue\n\n for art_brief_json in arts_brief_json:\n # art_cus\n art_cus_mod = CusMod.CustomerModel()\n try:\n self.__cus_pro.set_art_cus(art_brief_json, art_cus_mod)\n self.__cus_dao.group_check_insert_cus_then_search_id(art_cus_mod)\n # print(\"art_cus 处理 成功\")\n except:\n print(\"art_cus 处理 失败\")\n continue\n # art\n art_mod = ArtMod.ArticleModel()\n try:\n self.__art_pro.set_art(art_brief_json, category, art_cus_mod.cus_id, art_mod)\n if not self.__art_dao.is_art_exist(art_mod.art_spider):\n # 新闻不存在的情况\n 
self.__art_dao.insert_art(art_mod)\n else:\n print(\"art 已存在\")\n continue\n art_mod.art_id = self.__art_dao.search_art_id_by_spider(art_mod.art_spider)\n # print(\"art 操作 成功\")\n except:\n print(\"art 操作 失败\")\n continue\n # art cus behavior\n try:\n if self.__art_dao.check_art_cus_relationship(art_mod.art_id, art_cus_mod.cus_id):\n self.__cus_dao.insert_cus_behavior(1, art_mod.art_id, art_cus_mod.cus_id, art_mod.art_time)\n else:\n pass\n # print(\"art-cus 行为 1 数据库操作 成功\")\n except:\n print(\"art-cus 行为 1 数据库操作 失败\")\n continue\n\n \"\"\" handel the coms \"\"\"\n try:\n coms_json = self.__com_pro.get_coms_json(art_brief_json)\n if len(coms_json) != 0:\n print(\"回复总长 %d\" % len(coms_json))\n except:\n print(\"coms_json 获取 失败\")\n continue\n\n for com_json in coms_json:\n # com_cus\n com_cus_mod = CusMod.CustomerModel()\n try:\n self.__cus_pro.set_com_cus(com_json, com_cus_mod)\n self.__cus_dao.group_check_insert_cus_then_search_id(com_cus_mod)\n # print(\"com_cus 处理 成功\")\n except:\n print(\"com_cus 处理 错误\")\n continue\n # com\n com_mod = ComMod.CommentModel()\n try:\n self.__com_pro.set_com(com_json, art_mod.art_id, com_cus_mod.cus_id, com_mod)\n if not self.__com_dao.is_com_exist(com_mod.com_spider):\n # if the com is not exist\n self.__com_dao.insert_com(com_mod)\n else:\n print(\"com 已存在\")\n continue\n com_mod.com_id = self.__com_dao.search_com_id_by_spider(com_mod.com_spider)\n self.__art_dao.update_art_com_number(art_mod.art_id)\n # print(\"com 处理 成功\")\n except:\n print(\"com 处理 失败\")\n continue\n # com cus behavior\n try:\n if self.__com_dao.check_com_cus_relationship(art_mod.art_id, com_mod.com_id, com_cus_mod.cus_id):\n self.__cus_dao.insert_cus_behavior(4, art_mod.art_id, com_cus_mod.cus_id, com_mod.com_time)\n else:\n pass\n # print(\"art-cus 行为 4 数据库操作 成功\")\n except:\n print(\"art-cus 行为 4 数据库操作 失败\")\n continue\n\n \"\"\" handel the reps \"\"\"\n try:\n reps_json = self.__rep_pro.get_reps_json(com_json)\n if len(reps_json) != 0:\n print(\"回复总长 %d\" % len(reps_json))\n except:\n print(\"reps_json 获取 失败\")\n continue\n\n for rep_json in reps_json:\n # rep_cus\n rep_cus_mod = CusMod.CustomerModel()\n try:\n self.__cus_pro.set_rep_cus(rep_json, rep_cus_mod)\n self.__cus_dao.group_check_insert_cus_then_search_id(rep_cus_mod)\n # print(\"rep_cus 处理 成功\")\n except:\n print(\"rep_cus 处理 失败\")\n continue\n # rep\n rep_mod = RepMod.ReplyModel()\n try:\n self.__rep_pro.set_rep(rep_json, art_mod.art_id,\n com_mod.com_id, rep_cus_mod.cus_id, rep_mod)\n if not self.__rep_dao.is_rep_exist(rep_mod.rep_spider):\n self.__rep_dao.search_rep_rep_by_spyder(rep_json, rep_mod)\n self.__rep_dao.insert_rep(rep_mod)\n else:\n print(\"rep 已存在\")\n continue\n rep_mod.rep_id = self.__rep_dao.search_rep_id_by_spider(rep_mod.rep_spider)\n # print(\"rep 处理 成功\")\n except:\n print(\"rep 处理 失败\")\n continue\n\n # rep cus behavior\n try:\n if self.__rep_dao.check_rep_cus_relationship(art_mod.art_id, rep_mod.rep_id,\n rep_cus_mod.cus_id):\n self.__cus_dao.insert_cus_behavior(5, art_mod.art_id, rep_cus_mod.cus_id,\n rep_mod.rep_time)\n else:\n pass\n # print(\"art-cus 行为 5 数据库操作 成功\")\n except:\n print(\"art-cus 行为 5 数据库操作 失败\")\n continue\n\n\nif __name__ == '__main__':\n Major(os.path.join('properties', 'database.json')).major()\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7029787302017212, "alphanum_fraction": 0.7029787302017212, "avg_line_length": 42.51852035522461, "blob_id": "980fe66cc7cffec7236544d0edbd1bec8666fc50", "content_id": "a714028fd793bbd2105db97bca416e073da3c1b0", "detected_licenses": 
[ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1175, "license_type": "permissive", "max_line_length": 93, "num_lines": 27, "path": "/back/src/main/java/com/smacul/demo/dao/CustomersMapper.java", "repo_name": "kaku717813694/NewsRecommend", "src_encoding": "UTF-8", "text": "package com.smacul.demo.dao;\n\nimport com.smacul.demo.model.ArticleAuthorModel;\nimport com.smacul.demo.model.CustomerModel;\nimport org.apache.ibatis.annotations.Param;\nimport org.apache.ibatis.annotations.Select;\nimport org.springframework.stereotype.Repository;\n\n@Repository\npublic interface CustomersMapper {\n\n @Select(\"select cus_id, cus_name, cus_avatar_url from Customers \" +\n \"where cus_id = (select art_customer_id from Articles where art_id = #{artId});\")\n ArticleAuthorModel getArticleAuthorByArtId(@Param(\"artId\") Integer artId);\n\n\n @Select(\"select cus_id, cus_name, cus_email, cus_phone, cus_address, cus_avatar_url, \" +\n \"cus_style, cus_gender, cus_time, cus_type from Customers \" +\n \"where cus_id = (select art_customer_id from Articles where art_id = #{artId});\")\n CustomerModel getCustomerInfoByArtId(@Param(\"artId\") Integer artId);\n\n\n @Select(\"select cus_id, cus_name, cus_email, cus_phone, cus_address, cus_avatar_url, \" +\n \"cus_style, cus_gender, cus_time, cus_type from Customers \" +\n \"where cus_id = #{cusId};\")\n CustomerModel getCustomerInfoByCusId(@Param(\"cusId\")Integer cusId);\n}\n" }, { "alpha_fraction": 0.5376712083816528, "alphanum_fraction": 0.5410959124565125, "avg_line_length": 28, "blob_id": "eda1f269c4187025b979cb3fe0321f574598719a", "content_id": "1efe5ca8ec170ef6bd72a9497e2954d1068bd5f6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 292, "license_type": "permissive", "max_line_length": 36, "num_lines": 10, "path": "/spider/model/CustomerModel.py", "repo_name": "kaku717813694/NewsRecommend", "src_encoding": "UTF-8", "text": "class CustomerModel:\n def __init__(self):\n self.cus_id = None\n self.cus_name = None\n self.cus_pass = None\n self.cus_spider = None\n self.cus_avatar_url = None\n self.cus_style = None\n self.cus_background_url = ''\n self.cus_legal = 1\n\n\n" }, { "alpha_fraction": 0.5381882786750793, "alphanum_fraction": 0.5381882786750793, "avg_line_length": 50.09090805053711, "blob_id": "e6c277b31a04ab562df59d9bd719b0c7377246ff", "content_id": "c991587d22660978526d2e9b374c50a81001c752", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 635, "license_type": "permissive", "max_line_length": 96, "num_lines": 11, "path": "/front/src/util/ClassTransfer.js", "repo_name": "kaku717813694/NewsRecommend", "src_encoding": "UTF-8", "text": "export function translator(list) {\n let en = ['news_society', 'news_entertainment', 'news_tech', 'news_military', 'news_sports',\n 'news_car', 'news_finance', 'news_world', 'news_fashion', 'news_travel',\n 'news_discovery', 'news_baby', 'news_regimen', 'news_story', 'news_essay',\n 'news_game', 'news_history', 'news_food'];\n let zn = ['社会', '娱乐', '科技', '军事', '体育', '汽车', '财经', '国际', '时尚', '旅游', '探索',\n '育儿', '养生', '故事', '美文', '游戏', '历史', '美食'];\n return list.map(function (item) {\n return zn[en.indexOf(item)];\n })\n}\n\n" }, { "alpha_fraction": 0.8098434209823608, "alphanum_fraction": 0.8098434209823608, "avg_line_length": 28.799999237060547, "blob_id": "4d415e4148b7b69d035c9d55a7af78763df8fe8e", "content_id": 
"e21acc9910ff639bab6a054c6a0040a77fc25e9a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 447, "license_type": "permissive", "max_line_length": 85, "num_lines": 15, "path": "/back/src/main/java/com/smacul/demo/service/ArticleService.java", "repo_name": "kaku717813694/NewsRecommend", "src_encoding": "UTF-8", "text": "package com.smacul.demo.service;\n\nimport com.smacul.demo.model.ArticleAuthorModel;\nimport com.smacul.demo.model.ArticleModel;\nimport com.smacul.demo.model.HotArticleModel;\n\nimport java.util.List;\n\npublic interface ArticleService {\n ArticleModel getArticleMain(Integer id);\n\n List<HotArticleModel> getHotArticles(String tag, Integer page, Integer pageSize);\n\n ArticleAuthorModel getArticleAuthorByArtId(Integer artId, Integer pageSize);\n}\n" }, { "alpha_fraction": 0.6286672353744507, "alphanum_fraction": 0.6286672353744507, "avg_line_length": 17.936508178710938, "blob_id": "0ce3b9f87086e5a2a0d87004d0891aff43833b38", "content_id": "2f929f103c5ef8ca5d14272fabb59388805b95ec", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1193, "license_type": "permissive", "max_line_length": 52, "num_lines": 63, "path": "/back/src/main/java/com/smacul/demo/model/TinyArticleModel.java", "repo_name": "kaku717813694/NewsRecommend", "src_encoding": "UTF-8", "text": "package com.smacul.demo.model;\n\nimport java.sql.Timestamp;\n\npublic class TinyArticleModel {\n\n private Integer artId;\n private String artTitle;\n private String artAbstract;\n private Timestamp artTime;\n private String cusName;\n private String artImage;\n\n public Integer getArtId() {\n return artId;\n }\n\n public void setArtId(Integer artId) {\n this.artId = artId;\n }\n\n public String getArtTitle() {\n return artTitle;\n }\n\n public void setArtTitle(String artTitle) {\n this.artTitle = artTitle;\n }\n\n public String getArtAbstract() {\n return artAbstract;\n }\n\n public void setArtAbstract(String artAbstract) {\n this.artAbstract = artAbstract;\n }\n\n public Timestamp getArtTime() {\n return artTime;\n }\n\n public void setArtTime(Timestamp artTime) {\n this.artTime = artTime;\n }\n\n public String getCusName() {\n return cusName;\n }\n\n public void setCusName(String cusName) {\n this.cusName = cusName;\n }\n\n public String getArtImage() {\n return artImage;\n }\n\n public void setArtImage(String artImage) {\n this.artImage = artImage;\n }\n\n\n}\n" }, { "alpha_fraction": 0.6163522005081177, "alphanum_fraction": 0.6415094137191772, "avg_line_length": 12.947368621826172, "blob_id": "6fec1eb834687340e06e0293a40d53b01ece9b64", "content_id": "c1a22016fcb13e37de90ed3c14701254db4cd47f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1113, "license_type": "permissive", "max_line_length": 52, "num_lines": 57, "path": "/README.md", "repo_name": "kaku717813694/NewsRecommend", "src_encoding": "UTF-8", "text": "# NewsRecommend\n\n基于协同过滤算法的新闻推荐系统, 推荐部分未开始. 项目分前后端与爬虫. \n\n## 数据库 NewsRecommend.sql\nMySQL 导入自动建库\n\n## 爬虫 spider\n\n### 运行\n爬虫独立运行获取数据后写入数据库, 数据来源为 今日头条, 需要 python 3 环境.\n\n在 `spider` 目录下创建 `properties/database.json`. 
\n*database.json* 模板:\n``` json\n{\n \"name\": \"NewsRecommend\",\n \"user\": \"your name\",\n \"pass\": \"your pass\",\n \"host\": \"your host\",\n \"charset\": \"utf8mb4\"\n}\n``` \n\n并在 `spider` 目录下创建 `log` 目录,用于存放日志文件 (我懒得写)\n\n正确安装 webdriver 后执行:\n\n``` sh\ncd spider\npython Main.py\n```\n\n## 前端 front\n\n前端利用 Vue Cli 3 脚手架. 需要 node.js yarn.\n\n应该需要先装 wangeditor (富文本编辑工具) `npm install wangeditor`\n\n### 运行\n``` sh\ncd front\nyarn install\nyarn serve\n```\n\n### 页面浏览\n``` sh\nlocalhost:8071/ # 首页\nlocalhost:8071/article # 文章阅读页\nlocalhost:8071/self # 个人中心\nlocalhost:8071/search # 搜索页面\n# ...\n```\n\n## 后端 back\n....\n" }, { "alpha_fraction": 0.5411553978919983, "alphanum_fraction": 0.5429447889328003, "avg_line_length": 40.585105895996094, "blob_id": "e82e9cee7be470b8ed6f1b47a473dc4f0d7a368e", "content_id": "3bdb37633a0e0639d9b762620f3bb32fe44c214b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4188, "license_type": "permissive", "max_line_length": 116, "num_lines": 94, "path": "/spider/dao/ArticleDao.py", "repo_name": "kaku717813694/NewsRecommend", "src_encoding": "UTF-8", "text": "import util.MySql as MySql\nimport model.ArticleModel as ArtMod\n\n# import os.path\nimport logging\n# logging.basicConfig(filename=os.path.join('log', 'major-log.txt'), filemode='a')\n\n\nclass ArticleDao:\n\n def __init__(self, base: MySql.MySql):\n self.__base = base\n\n def is_art_exist(self, art_spider):\n \"\"\" 检查新闻是否存在\n\n :param art_spider:\n :return:\n \"\"\"\n try:\n search_sql = \"select count(*) from Articles where art_spider = '%s'\" % art_spider\n\n self.__base.execute_sql(search_sql)\n result = self.__base.get_result_one()\n if result[0] == 0:\n logging.info(\"is_art_exist 新闻 art_spider=%s 数据库查询 不存在\" % art_spider)\n return False\n else:\n logging.info(\"is_art_exist 新闻 art_spider=%s 数据库查询 已存在\" % art_spider)\n return True\n except:\n logging.exception(\"is_art_exist 新闻 art_spider=%s 数据库查询 失败\" % art_spider)\n raise\n\n def insert_art(self, art_mod: ArtMod.ArticleModel):\n \"\"\" 插入新闻数据\n\n :param art_mod:\n :return:\n \"\"\"\n try:\n insert_sql = \"insert into Articles(art_title, art_spider, art_class, art_image_url, \" \\\n \"art_content, art_tags, \" \\\n \"art_customer_id, art_time, art_comment_num, art_legal)\" \\\n \" values ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', %d, %d)\" \\\n % (art_mod.art_title, art_mod.art_spider, art_mod.art_class, art_mod.art_image_url,\n art_mod.art_content, art_mod.art_tags,\n art_mod.art_customer_id, art_mod.art_time, art_mod.art_comment_num, art_mod.art_legal)\n\n self.__base.execute_sql(insert_sql)\n self.__base.commit_transactions()\n logging.info(\"insert_art 新闻 art_spider=%s 数据库插入 成功\" % art_mod.art_spider)\n except:\n self.__base.commit_rollback()\n logging.exception(\"insert_art 新闻 art_spider=%s 数据库插入 失败\" % art_mod.art_spider)\n raise\n\n def search_art_id_by_spider(self, art_spider):\n try:\n search_sql = \"select art_id from Articles where art_spider = '%s'\" % art_spider\n self.__base.execute_sql(search_sql)\n result = self.__base.get_result_one()\n logging.info(\"search_art_id_by_spider 新闻 art_spider=%s 数据库查询: art_id 值: %s\" % (art_spider, result[0]))\n return result[0]\n except:\n logging.info(\"search_art_id_by_spider 新闻 art_spider=%s 数据库查询 art_id 失败\" % art_spider)\n raise\n\n def check_art_cus_relationship(self, art_id, cus_id):\n try:\n search_sql = \"select count(*) from Articles where art_id=%d and art_customer_id=%d\" % (art_id, cus_id)\n 
self.__base.execute_sql(search_sql)\n result = self.__base.get_result_one()\n if result[0] == 0:\n logging.info(\"check_art_cus_relationship 关系 新闻 art_id=%s 用户 cus_id=%s 数据库查询 不存在\" % (art_id, cus_id))\n return False\n else:\n logging.info(\"check_art_cus_relationship 关系 新闻 art_id=%s 用户 cus_id=%s 数据库查询 存在\" % (art_id, cus_id))\n return True\n except:\n logging.exception(\"check_art_cus_relationship 关系 新闻 art_id=%s 用户 cus_id=%s 数据库查询 错误\" % (art_id, cus_id))\n raise\n\n def update_art_com_number(self, art_id):\n try:\n update_sql = \"update Articles set art_comment_num = art_comment_num + 1 where art_id = %d\" % art_id\n\n self.__base.execute_sql(update_sql)\n self.__base.commit_transactions()\n logging.info(\"update_art_com_number art=%s 评论数 数据库更新 成功\" % art_id)\n except:\n self.__base.commit_rollback()\n logging.info(\"update_art_com_number art=%s 评论数 数据库更新 失败\" % art_id)\n raise\n\n\n\n" }, { "alpha_fraction": 0.5472972989082336, "alphanum_fraction": 0.5506756901741028, "avg_line_length": 25.81818199157715, "blob_id": "bf8fc7970e81517338d1df59e88ab8e3e45ad67a", "content_id": "377d135b71e6274b98110a237a9e491102678255", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 296, "license_type": "permissive", "max_line_length": 35, "num_lines": 11, "path": "/spider/model/CommentModel.py", "repo_name": "kaku717813694/NewsRecommend", "src_encoding": "UTF-8", "text": "class CommentModel:\n\n def __init__(self):\n self.com_id = None\n self.com_content = None\n self.com_like_num = None\n self.com_customer_id = None\n self.com_article_id = None\n self.com_time = None\n self.com_spider = None\n self.com_legal = 1\n\n" }, { "alpha_fraction": 0.5429333448410034, "alphanum_fraction": 0.5440000295639038, "avg_line_length": 38.0625, "blob_id": "0938cc3c688281d673ef296af798645d9acc1ed2", "content_id": "e792c9bf66e6a288a7ec9f963c601fa6069f88b3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4016, "license_type": "permissive", "max_line_length": 123, "num_lines": 96, "path": "/spider/dao/CustomerDao.py", "repo_name": "kaku717813694/NewsRecommend", "src_encoding": "UTF-8", "text": "import util.MySql as MySql\nimport model.CustomerModel as CusMod\n\nimport logging\n\n\nclass CustomerDao:\n\n def __init__(self, base: MySql.MySql):\n self.__base = base\n\n def is_cus_exist(self, cus_spider):\n \"\"\" 检查用户是否存在与数据库\n\n :param cus_spider:\n :return:\n \"\"\"\n try:\n search_sql = \"select count(*) from Customers where cus_spider = '%s'\" % cus_spider\n self.__base.execute_sql(search_sql)\n result = self.__base.get_result_one()\n if result[0] == 0:\n logging.info(\"is_cus_exist 用户 cus_spider=%s 数据库查询 不存在\" % cus_spider)\n return False\n else:\n logging.info(\"is_cus_exist 用户 cus_spider=%s 数据库查询 已存在\" % cus_spider)\n return True\n except:\n logging.exception(\"is_cus_exist 用户 cus_spider=%s 数据库查询 失败\" % cus_spider)\n raise\n\n def insert_cus(self, cus_mod: CusMod.CustomerModel):\n \"\"\" 向数据库中插入用户数据\n\n :param cus_mod:\n :return:\n \"\"\"\n try:\n insert_sql = \"insert into Customers(cus_name, cus_pass, cus_spider, cus_avatar_url, \" \\\n \"cus_style, cus_background_url, cus_legal)\" \\\n \" values ('%s', '%s', '%s', '%s', '%s', '%s', %d)\" \\\n % (cus_mod.cus_name, cus_mod.cus_pass, cus_mod.cus_spider, cus_mod.cus_avatar_url,\n cus_mod.cus_style, cus_mod.cus_background_url, cus_mod.cus_legal)\n self.__base.execute_sql(insert_sql)\n self.__base.commit_transactions()\n 
logging.info(\"insert_cus 用户 cus_spider=%s 数据库插入 成功\" % cus_mod.cus_spider)\n except:\n self.__base.commit_rollback()\n logging.exception(\"insert_cus 用户 cus_spider=%s 数据库插入 失败\" % cus_mod.cus_spider)\n raise\n\n def search_cus_id_by_spider(self, cus_spider):\n \"\"\" 利用 spider 查询用户 id\n\n :param cus_spider:\n :return:\n \"\"\"\n try:\n search_sql = \"select cus_id from Customers where cus_spider = '%s'\" % cus_spider\n self.__base.execute_sql(search_sql)\n result = self.__base.get_result_one()\n logging.info(\"search_cus_id_by_spider 用户 cus_spider=%s 数据库查询: cus_id 值: %s\" % (cus_spider, result[0]))\n return result[0]\n except:\n logging.info(\"search_cus_id_by_spider 用户 cus_spider=%s 数据库查询 cus_id 失败\" % cus_spider)\n raise\n\n def group_check_insert_cus_then_search_id(self, cus_mod: CusMod.CustomerModel):\n try:\n if not self.is_cus_exist(cus_mod.cus_spider):\n self.insert_cus(cus_mod)\n cus_mod.cus_id = self.search_cus_id_by_spider(cus_mod.cus_spider)\n logging.info(\"check_insert_cus_then_search_id 数据库处理成功\")\n except:\n logging.exception(\"check_insert_cus_then_search_id 数据库处理失败\")\n raise\n\n def insert_cus_behavior(self, behavior, art_id, cus_id, time):\n \"\"\"\n\n :param behavior:\n :param art_id:\n :param cus_id:\n :param time:\n :return:\n \"\"\"\n try:\n insert_sql = \"insert into ArticleCustomerBehaviors(acb_behavior, acb_time, acb_article_id, acb_customer_id) \" \\\n \"values (%d, '%s', %d, %d)\" % (behavior, time, art_id, cus_id)\n self.__base.execute_sql(insert_sql)\n self.__base.commit_transactions()\n logging.info(\"insert_cus_behavior 新闻 art_id=%s 用户 cus_id=%s 行为 %s 数据库插入 成功\" % (art_id, cus_id, behavior))\n except:\n self.__base.commit_rollback()\n logging.exception(\"insert_cus_behavior 新闻 art_id=%s 用户 cus_id=%s 行为 %s 数据库插入 失败\" % (art_id, cus_id, behavior))\n raise\n" }, { "alpha_fraction": 0.5277718901634216, "alphanum_fraction": 0.5294614434242249, "avg_line_length": 46.82828140258789, "blob_id": "22b907ef384646f00555884073c99f223dfb6d81", "content_id": "8e24afabb9a5dca5ca022464b2165f42430759e2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5001, "license_type": "permissive", "max_line_length": 130, "num_lines": 99, "path": "/spider/dao/ReplyDao.py", "repo_name": "kaku717813694/NewsRecommend", "src_encoding": "UTF-8", "text": "import util.MySql as MySql\nimport model.ReplyModel as RepMod\n\n# import os.path\nimport logging\n# logging.basicConfig(filename=os.path.join('log', 'major-log.txt'), filemode='a')\n\n\nclass ReplyDao:\n\n def __init__(self, base: MySql.MySql):\n self.__base = base\n\n def is_rep_exist(self, rep_spider):\n try:\n search_sql = \"select count(*) from Replys where rep_spider = '%s'\" % rep_spider\n\n self.__base.execute_sql(search_sql)\n result = self.__base.get_result_one()\n if result[0] == 0:\n logging.info(\"is_rep_exist 回复 rep_spider=%s 数据库查询 不存在\" % rep_spider)\n return False\n else:\n logging.info(\"is_rep_exist 回复 rep_spider=%s 数据库查询 已存在\" % rep_spider)\n return True\n except:\n logging.exception(\"is_rep_exist 回复 rep_spider=%s 数据库查询 失败\" % rep_spider)\n raise\n\n def insert_rep(self, rep_mod: RepMod.ReplyModel):\n try:\n if rep_mod.rep_reply_id is None:\n insert_sql = \"insert into Replys(rep_content, rep_like_num, rep_type, rep_time,\" \\\n \" rep_customer_id, rep_article_id, rep_comment_id, \" \\\n \" rep_spider, rep_legal)\" \\\n \" values ('%s', %d, %d, '%s', %d, %d, %d, '%s', %d)\" \\\n % (str(rep_mod.rep_content), int(rep_mod.rep_like_num), int(rep_mod.rep_type), 
str(rep_mod.rep_time),\n int(rep_mod.rep_customer_id), int(rep_mod.rep_article_id), int(rep_mod.rep_comment_id),\n str(rep_mod.rep_spider), int(rep_mod.rep_legal))\n else:\n insert_sql = \"insert into Replys(rep_content, rep_like_num, rep_type, rep_time,\" \\\n \" rep_customer_id, rep_article_id, rep_comment_id, \" \\\n \"rep_reply_id, rep_spider, rep_legal)\" \\\n \" values ('%s', %d, %d, '%s', %d, %d, %d, %d, '%s', %d)\" \\\n % (str(rep_mod.rep_content), int(rep_mod.rep_like_num), int(rep_mod.rep_type), str(rep_mod.rep_time),\n int(rep_mod.rep_customer_id), int(rep_mod.rep_article_id), int(rep_mod.rep_comment_id),\n int(rep_mod.rep_reply_id), str(rep_mod.rep_spider), int(rep_mod.rep_legal))\n\n self.__base.execute_sql(insert_sql)\n self.__base.commit_transactions()\n logging.info(\"insert_rep 回复 rep_spider=%s 数据库插入 成功\" % rep_mod.rep_spider)\n except:\n self.__base.commit_rollback()\n logging.exception(\"insert_rep 回复 rep_spider=%s 数据库插入 失败\" % rep_mod.rep_spider)\n raise\n\n def search_rep_id_by_spider(self, rep_spider):\n try:\n search_sql = \"select rep_id from Replys where rep_spider = '%s'\" % rep_spider\n self.__base.execute_sql(search_sql)\n result = self.__base.get_result_one()\n logging.info(\"search_rep_id_by_spider 新闻 rep_spider=%s 数据库查询: rep_id 值: %s\" % (rep_spider, result[0]))\n return result[0]\n except:\n logging.info(\"search_rep_id_by_spider 新闻 rep_spider=%s 数据库查询 rep_id 失败\" % rep_spider)\n raise\n\n def check_rep_cus_relationship(self, art_id, rep_id, cus_id):\n try:\n search_sql = \"select count(*) from Replys \" \\\n \"where rep_article_id=%d and rep_id=%d and rep_customer_id=%d\" % (art_id, rep_id, cus_id)\n self.__base.execute_sql(search_sql)\n result = self.__base.get_result_one()\n if result[0] == 0:\n logging.info(\"check_rep_cus_relationship 回复关系 新闻 art_id=%s 用户 cus_id=%s 数据库查询 不存在\" % (art_id, cus_id))\n return False\n else:\n logging.info(\"check_rep_cus_relationship 回复关系 新闻 art_id=%s 用户 cus_id=%s 数据库查询 存在\" % (art_id, cus_id))\n return True\n except:\n logging.exception(\"check_rep_cus_relationship 回复关系 新闻 art_id=%s 用户 cus_id=%s 数据库查询 错误\" % (art_id, cus_id))\n raise\n\n def search_rep_rep_by_spyder(self, rep_json, rep_mod):\n \"\"\" 处理回复的回复\n\n :param rep_json:\n :param rep_mod:\n :return:\n \"\"\"\n try:\n rep_rep_spider = str(rep_json['reply_to_comment']['id'])\n rep_mod.rep_reply_id = self.search_rep_id_by_spider(rep_rep_spider)\n rep_mod.rep_type = 1\n logging.info(\"search_rep_rep_by_spyder rep_reply_id 与 rep_type 数据库查询 成功\")\n except:\n rep_mod.rep_reply_id = None\n rep_mod.rep_type = 0\n logging.warning(\"search_rep_rep_by_spyder rep_reply_id 与 rep_type 数据库查询 失败\")\n" } ]
16
luxiaotong/crawler
https://github.com/luxiaotong/crawler
b9b9e831edef6e9976f4b3919963836382bdf3dc
82e5a5f59182719bcd96962744cdc7379a18d1db
c090d4b44f5fe25f2138b3aee2d5a5621e2f4ff4
refs/heads/master
2023-05-24T16:07:42.212325
2020-03-31T06:30:57
2020-03-31T06:30:57
197,111,381
2
1
null
2019-07-16T03:10:34
2020-07-19T15:54:32
2023-05-22T21:38:11
TSQL
[ { "alpha_fraction": 0.586776852607727, "alphanum_fraction": 0.6005509495735168, "avg_line_length": 37.21052551269531, "blob_id": "293ec3823cc2cf0ef8fbd1e866a6542946435ff4", "content_id": "87610334e4cdd0373b8f4ac962981f686e8a66e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 726, "license_type": "no_license", "max_line_length": 105, "num_lines": 19, "path": "/pydouyin/utils.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import time\n\ndef have_a_rest(sec):\n while sec > 0:\n msg = \"Have a rest, and we will fight again in: %d seconds\" %(sec)\n print(msg, '\\r', end=\"\")\n time.sleep(1)\n\n # below two lines is to replace the printed line with white spaces\n # this is required for the case when the number of digits in timer reduces by 1 i.e. from\n # 10 secs to 9 secs, if we dont do this there will be extra prints at the end of the printed line\n # as the number of chars in the newly printed line is less than the previous\n remove_msg = ' ' * len(msg)\n print( remove_msg, '\\r', end=\"\")\n\n # decrement timer by 1 second\n sec -= 1\n print(\"Let's Fight\", '!'*10)\n return\n" }, { "alpha_fraction": 0.6283783912658691, "alphanum_fraction": 0.6340090036392212, "avg_line_length": 25.909090042114258, "blob_id": "7a96fd81063d3abca2c9c5a141335bf8527218ce", "content_id": "4e67cc66fd5bd47682f611ed369410e265c17757", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 888, "license_type": "no_license", "max_line_length": 76, "num_lines": 33, "path": "/pyniustar/pypost.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "from niustar_model import NiuStarModel\nfrom niustar_crawler import NiuStarCrawler\nfrom utils import get_params\n\n\ncrawler = NiuStarCrawler()\nmodel = NiuStarModel()\n\nuids, last_record_time = get_params()\nif uids:\n uid_arr = uids.split(\",\")\n user_list = model.get_user_list(uid_arr)\nelse:\n user_list = model.get_user_list()\n\n\nfor user in user_list:\n print(\"Crawler Start to Run..............\")\n print(\"username:\", user['username'])\n print(\"userid:\", user['user_id'])\n print(\"short_url:\", user['short_url'])\n min_aweme_id = model.find_lr_aweme_id(user['user_id'], last_record_time)\n if min_aweme_id == 0:\n min_aweme_id = user['min_aweme_id']\n print(\"min_aweme_id:\", min_aweme_id)\n model.set_id(user['user_id'], min_aweme_id)\n crawler.run(model)\n\n print(\"-\"*20 + user['username'] + \" Finished\" + \"-\"*20)\n print()\n\n\nprint(\"Crawler End to Run!\")\n" }, { "alpha_fraction": 0.5623780488967896, "alphanum_fraction": 0.6270830035209656, "avg_line_length": 34.05789566040039, "blob_id": "be8f90048c02c6fb8bf88e3afeda04f7d266d33c", "content_id": "34f301102b165d3126e3871e8bd8a114339313ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6755, "license_type": "no_license", "max_line_length": 119, "num_lines": 190, "path": "/pyluckin/test.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import requests\nimport json\nfrom utils.common import random_ua\nfrom utils.common import get_proxies\nimport time\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\ndef do_request(url, method='GET', params={}):\n result = {}\n ua = random_ua()\n headers = {'User-Agent': ua, 'Content-type': 'application/json'}\n #cookies = {\n # '_lxsdk_cuid':'16bc0c98a9fc8-0be68b564974c3-37647e05-1fa400-16bc0c98a9fc8',\n # 
'_lxsdk':'16bc0c98a9fc8-0be68b564974c3-37647e05-1fa400-16bc0c98a9fc8',\n # '_hc.v':'78a71f2d-b242-c86d-97d1-01527c97daa0.1562307562',\n # 's_ViewType':'10',\n # 'aburl':'1',\n # 'cy':'2',\n # 'cye':'beijing',\n # '_lxsdk_s':'16c6091a4d2-413-184-8b9%7C%7C8'\n #}\n cookies = {\n '_lxsdk_cuid':'16c6ee15815c8-0cdd3e898fd4cc-37647e05-fa000-16c6ee15815c8',\n '_lxsdk':'16c6ee15815c8-0cdd3e898fd4cc-37647e05-fa000-16c6ee15815c8',\n '_hc.v':'92a6974b-4405-d0a2-cc8f-45006650c8ba.1565228358',\n 's_ViewType':'10',\n 'aburl':'1',\n 'cy':'2',\n 'cye':'beijing',\n '_lxsdk_s':'16c6ee15819-bde-1d3-d0%7C%7C68'\n }\n\n print('当前请求的URL:%s'%url)\n print('当前使用的Method:%s'%method)\n print(params)\n print('当前使用的User-Agent:%s'%ua)\n\n for j in range(5):\n try:\n proxies = get_proxies()\n print('\\t第%d次尝试请求, 使用的Proxy:%s'%(j, proxies['http']))\n if method == 'GET':\n result = requests.get(url, headers=headers, proxies=proxies, timeout=5, params=params, cookies=cookies)\n elif method == 'POST':\n result = requests.post(url,headers=headers, proxies=proxies, timeout=5, data=params)\n except Exception as e:\n print(\"Error:\", e)\n print('休息,休息一下...(5s)')\n time.sleep(5)\n continue\n\n print('\\t返回结果状态码:%d\\n' %result.status_code)\n print('休息,休息一下...(5s)')\n time.sleep(5)\n\n if result.status_code != 200:\n continue\n else:\n print('\\t返回结果:%s\\n' %result.text)\n\n break\n\n return result\n\n\n#shopId = 98274270,\n#shopId = 113511472,\n#shopId = 98286173,\nshopId = 121237887\ncityId = 2\nparams = {\n \"shopId\":shopId,\n \"cityId\":cityId,\n \"mainCategoryId\":132,\n #\"_token\":\"eJxVjltvgkAQhf/LvHYDs7sIK4kPxrZWUzQiorXhAS9FQrmUJcVL+t87RvvQZJIz8805yblAPdqByxHR4gy+9zW4wA00bGDQaPp07I4QitvCQWSw/cdkVxDb1OEjuO9cKGRSyuhKfAI3ohAjdlsdR0VMWDRXz4gscGiayjXNtm2NXRoXVVokxrbMTX0oK7OrhLK5I6kKUCIPKEGa3TW+a/N3e9SdvDpNCtr242Mw15b++vA9HYTzE0rv/DKZvj59Ts4nNRj4STy0s3D9MFv1x/nyOXtbrstNfszK/mqxmA5nvR78/ALTpE9/\",\n #\"uuid\":\"78a71f2d-b242-c86d-97d1-01527c97daa0.1562307562\",\n \"_token\":\"eJxVj0tvgzAQhP/LXmuB1xjbIOVQpRVNVBKVINSHciCPAqIhBGjzqPrfuzTJoZKlnf08M7K/oRmtwEfOuUQGX+sGfECLWwoYdC3duMoVwkM0RhkGy3/Mk8pjsGiSO/DfUBjO0OXznkQEzsRwImeptZkzIen0nhFZIO+62rft/X5vrYq0qosqs5bbjd3m29pGdFxEqQW9BSiyifuIUYoaCBnlXoW8CETmcBJaa+ZoEkoI9pd3PckM9i1l30Izvczuuof0dXK2RVaRWo8P8ayV7e49Cts4mR25E54eJtPH+4/J6WiGwyhLA1UmrzdPz9m4CA67NMiLxSYqt7fp58t0kg0G8PMLb8hbbw==\",\n \"uuid\":\"92a6974b-4405-d0a2-cc8f-45006650c8ba.1565228358\",\n \"platform\":1,\n \"partner\":150,\n \"optimusCode\":10,\n \"originUrl\":\"http://www.dianping.com/shop/%d\" %shopId,\n}\n\nurl = \"http://www.dianping.com/ajax/json/shopDynamic/reviewAndStar\"\nresult = do_request(url, method='GET', params=params)\n\nd_map = {\n '\\ue32b':'0',\n '1':'1',\n '\\uef77':'2',\n '\\uf200':'3',\n '\\uef11':'4',\n '\\uefab':'5',\n '\\ue887':'6',\n '\\ue594':'7',\n '\\uf537':'8',\n '\\uecb6':'9',\n}\n\ndef get_svg_int(html):\n int_arr = []\n html = html.replace('1<d', '<d class=\"num\">1</d><d')\n html = html.replace('d>1<d', 'd><d class=\"num\">1</d><d')\n html = html.replace('d>1', 'd><d class=\"num\">1</d>')\n soup = BeautifulSoup(html, 'html.parser')\n d_html = soup.findAll('d')\n for i in range(len(d_html)):\n int_arr.append(d_map[d_html[i].get_text()])\n int_str = ''.join(int_arr)\n print('svg int:', int_str)\n\ndef get_svg_float(html):\n float_arr = []\n html = html.replace('.1', '.<d class=\"num\">1</d>')\n html = html.replace('1.', '<d class=\"num\">1</d>.')\n soup = BeautifulSoup(html, 'html.parser')\n d_html = soup.findAll('d')\n for i in range(len(d_html)):\n 
float_arr.append(d_map[d_html[i].get_text()])\n float_str = '.'.join(float_arr)\n print('svg float:', float_str)\n\nget_svg_int(json.loads(result.text)['avgPrice'])\nget_svg_int(json.loads(result.text)['defaultReviewCount'])\nget_svg_float(json.loads(result.text)['shopRefinedScoreValueList'][0])\nget_svg_float(json.loads(result.text)['shopRefinedScoreValueList'][1])\nget_svg_float(json.loads(result.text)['shopRefinedScoreValueList'][2])\n\n\n\navg_price_arr = []\nreview_count_arr = []\nflavor_score_arr = []\nsurrounding_score_arr = []\nservice_score_arr = []\n\n\navg_price_html = json.loads(result.text)['avgPrice']\navg_price_html = avg_price_html.replace('1<d', '<d class=\"num\">1</d><d')\navg_price_html = avg_price_html.replace('d>1<d', 'd><d class=\"num\">1</d><d')\navg_price_html = avg_price_html.replace('d>1', 'd><d class=\"num\">1</d>')\nsoup = BeautifulSoup(avg_price_html, 'html.parser')\nd_html = soup.findAll('d')\nfor i in range(len(d_html)):\n avg_price_arr.append(d_map[d_html[i].get_text()])\navg_price = ''.join(avg_price_arr)\nprint('avg price:', avg_price)\n\nreview_count_html = json.loads(result.text)['defaultReviewCount']\nreview_count_html = review_count_html.replace('1<d', '<d class=\"num\">1</d><d')\nreview_count_html = review_count_html.replace('d>1<d', 'd><d class=\"num\">1</d><d')\nreview_count_html = review_count_html.replace('d>1', 'd><d class=\"num\">1</d>')\nsoup = BeautifulSoup(review_count_html, 'html.parser')\nd_html = soup.findAll('d')\nfor i in range(len(d_html)):\n review_count_arr.append(d_map[d_html[i].get_text()])\nreview_count = ''.join(review_count_arr)\nprint('review count:', review_count)\n\nflavor_score_html = json.loads(result.text)['shopRefinedScoreValueList'][0]\nflavor_score_html = flavor_score_html.replace('.1', '.<d class=\"num\">1</d>')\nflavor_score_html = flavor_score_html.replace('1.', '<d class=\"num\">1</d>.')\nsoup = BeautifulSoup(flavor_score_html, 'html.parser')\nd_html = soup.findAll('d')\nfor i in range(len(d_html)):\n flavor_score_arr.append(d_map[d_html[i].get_text()])\nflavor_score = '.'.join(flavor_score_arr)\nprint('flavor score:', flavor_score)\n\nsurrounding_score_html = json.loads(result.text)['shopRefinedScoreValueList'][1]\nsurrounding_score_html = surrounding_score_html.replace('.1', '.<d class=\"num\">1</d>')\nsurrounding_score_html = surrounding_score_html.replace('1.', '<d class=\"num\">1</d>.')\nsoup = BeautifulSoup(surrounding_score_html, 'html.parser')\nd_html = soup.findAll('d')\nfor i in range(len(d_html)):\n surrounding_score_arr.append(d_map[d_html[i].get_text()])\nsurrounding_score = '.'.join(surrounding_score_arr)\nprint('surrounding score:', surrounding_score)\n\nservice_score_html = json.loads(result.text)['shopRefinedScoreValueList'][2]\nservice_score_html = service_score_html.replace('.1', '.<d class=\"num\">1</d>')\nservice_score_html = service_score_html.replace('1.', '<d class=\"num\">1</d>.')\nsoup = BeautifulSoup(service_score_html, 'html.parser')\nd_html = soup.findAll('d')\nfor i in range(len(d_html)):\n service_score_arr.append(d_map[d_html[i].get_text()])\nservice_score = '.'.join(service_score_arr)\nprint('service score:', service_score)\n" }, { "alpha_fraction": 0.7892791032791138, "alphanum_fraction": 0.8096118569374084, "avg_line_length": 24.761905670166016, "blob_id": "e3092941b06078ed759629a713fcdc1552eee120", "content_id": "39e5a56a62aa90d36388859c68bf6de42127ea61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 541, "license_type": 
"no_license", "max_line_length": 78, "num_lines": 21, "path": "/pydouyin/test_selenium_firefox.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.chrome.options import Options\nimport json\nimport time\nfrom browsermobproxy import Server\nfrom urllib.parse import urlparse\n\n\nshare_id = \"83834087816\"\nshare_url = \"https://www.iesdouyin.com/share/user/\"+share_id\n\n\n# Firefox\noptions = webdriver.FirefoxOptions()\n#options.add_argument('-headless')\ndouyin_driver = webdriver.Firefox(firefox_options=options)\n\ndouyin_driver.get(share_url)\n\n#douyin_driver.quit()\n" }, { "alpha_fraction": 0.7238805890083313, "alphanum_fraction": 0.7370837926864624, "avg_line_length": 32.5, "blob_id": "6463bc61119066a7e4138d221da89ff3e4b314b2", "content_id": "8e07a5336b1a4ecd6d6ba03adf82ea82cd4df58e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1742, "license_type": "no_license", "max_line_length": 88, "num_lines": 52, "path": "/pyniustar/test_tab_switch.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "from browsermobproxy import Server\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n#from selenium.webdriver.common.action_chains import ActionChains\nfrom urllib.parse import urlparse\nfrom bs4 import BeautifulSoup\nfrom utils import have_a_rest\nimport random\nimport json\n\nserver = Server(\"./browsermob-proxy-2.1.4/bin/browsermob-proxy\", {'port': 9999})\nserver.start()\nproxy = server.create_proxy()\nurl = urlparse(proxy.proxy).path\n\nchrome_options = Options()\nchrome_options.set_headless(headless=True)\nchrome_options.add_argument(\"--proxy-server={0}\".format(url))\ndouyin_driver = webdriver.Chrome(options=chrome_options)\n\n\nshort_url = 'http://v.douyin.com/H3nPbo/'\ndouyin_driver.get(short_url)\n\nhave_a_rest(random.randrange(3, 5, 1))\nsoup = BeautifulSoup(douyin_driver.page_source, 'html.parser')\nuser_id = soup.find('span', class_='focus-btn')['data-id']\nprint(user_id)\n\n\nproxy.new_har(\"load_post_page\", options={'captureHeaders': True, 'captureContent':True})\n\npost_tab = douyin_driver.find_element_by_css_selector(\"div.user-tab.tab.get-list\")\n#post_tab = douyin_driver.find_element_by_class_name('user-tab')\n#ActionChains(douyin_driver).click(post_tab).perform()\ndouyin_driver.execute_script('arguments[0].click();', post_tab)\nhave_a_rest(random.randrange(3, 5, 1))\n\n# Process Response Content\nhar_return = json.loads(json.dumps(proxy.har, ensure_ascii=False))\nentries = har_return['log']['entries']\nfor i in range(len(entries)):\n if '/web/api/v2/aweme/post/' in entries[i]['request']['url']:\n print(entries[i]['request']['url'])\n result = json.loads(entries[i]['response']['content']['text'])\n\nprint(result)\n\n\nhave_a_rest(random.randrange(10, 20, 1))\nserver.stop()\ndouyin_driver.quit()\n" }, { "alpha_fraction": 0.6317327618598938, "alphanum_fraction": 0.6359081268310547, "avg_line_length": 30.933332443237305, "blob_id": "36a14cf0125e634fee84c7abf2535209800f14a3", "content_id": "1376318a7739372ab172fa678b0bce9c9a5a8c26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2467, "license_type": "no_license", "max_line_length": 100, "num_lines": 75, "path": "/pylvyuan/pylvyuan.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", 
"text": "import requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\n\n# 获取全部省份\nresult = requests.get('http://www.luyuan.cn/index.php/service.html')\nservice_html = result.text\nsoup = BeautifulSoup(service_html, 'html.parser')\nsel_region_html = soup.find(id=\"sel_region\").findAll('option')\nprovince_code_arr = []\nprovince_raws = []\nfor i in range(1, len(sel_region_html)):\n province_name = sel_region_html[i].string\n province_code = sel_region_html[i]['value']\n province_code_arr.append(province_code)\n\n province_one = [\n province_code,\n province_name,\n ]\n province_raws.append(province_one)\n\nprovince_df = pd.DataFrame(province_raws, columns=['province code', 'province name'])\nprovince_df.to_csv('lvyuan_province.csv')\nprint(province_df)\n\n# 遍历省份-获取每个省份的全部城市ID\ncity_code_arr = []\ncity_raws = []\nfor i in range(len(province_code_arr)):\n province_code = province_code_arr[i]\n result = requests.get('http://www.luyuan.cn/index.php/mapdealer/change_resion/%s'%province_code)\n region_html = result.text\n soup = BeautifulSoup(region_html, 'html.parser')\n option_html = soup.findAll('option')\n for i in range(len(option_html)):\n city_name = option_html[i].string\n city_code = option_html[i]['value']\n city_code_arr.append(city_code)\n\n city_one = [\n province_code,\n city_code,\n city_name,\n ]\n city_raws.append(city_one)\n\ncity_df = pd.DataFrame(city_raws, columns=['province code', 'city code', 'city name'])\ncity_df.to_csv('lvyuan_city.csv')\nprint(city_df)\n\n# 遍历城市-获取每个城市ID对应的门店\ndata_raws = []\n#city_code_arr = [110100]\nfor i in range(len(city_code_arr)):\n city_code = city_code_arr[i]\n result = requests.get('http://www.luyuan.cn/index.php/mapdealer/jxs_resion/%s/0'%city_code)\n jxs_html = result.text\n soup = BeautifulSoup(jxs_html, 'html.parser')\n li_html = soup.findAll('li')\n for i in range(len(li_html)-1):\n data_one = [\n city_code,\n li_html[i]['data-title'],\n li_html[i]['data-address'],\n li_html[i]['data-telphone'],\n li_html[i]['data-jin'],\n li_html[i]['data-wei'],\n ]\n data_raws.append(data_one)\nlabel = ['cityid', 'title', 'address', 'mobile', 'longitude', 'latitude']\nstore_df = pd.DataFrame(data_raws, columns=label)\nstore_df.to_csv('lvyuan.csv')\nprint(store_df)\n" }, { "alpha_fraction": 0.5320497751235962, "alphanum_fraction": 0.586757481098175, "avg_line_length": 30.08759117126465, "blob_id": "6db4be7b817b43a06838597a4ad5120c86ace9ae", "content_id": "83cb09a2627350b0bd11bd02a0d3c5e6853b89ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4283, "license_type": "no_license", "max_line_length": 156, "num_lines": 137, "path": "/pydouyin/test_svg_num.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport json\nimport time\nimport pandas as pd\n\nnum_map = {\n #0\n ' \\ue603 ':'0',\n ' \\ue60d ':'0',\n ' \\ue616 ':'0',\n #1\n ' \\ue602 ':'1',\n ' \\ue60e ':'1',\n ' \\ue618 ':'1',\n #2\n ' \\ue605 ':'2',\n ' \\ue610 ':'2',\n ' \\ue617 ':'2',\n #3\n ' \\ue604 ':'3',\n ' \\ue611 ':'3',\n ' \\ue61a ':'3',\n #4\n ' \\ue606 ':'4',\n ' \\ue60c ':'4',\n ' \\ue619 ':'4',\n #5\n ' \\ue607 ':'5',\n ' \\ue60f ':'5',\n ' \\ue61b ':'5',\n #6\n ' \\ue608 ':'6',\n ' \\ue612 ':'6',\n ' \\ue61f ':'6',\n #7\n ' \\ue60a ':'7',\n ' \\ue613 ':'7',\n ' \\ue61c ':'7',\n #8\n ' \\ue60b ':'8',\n ' \\ue614 ':'8',\n ' \\ue61d ':'8',\n #9\n ' 
\\ue609 ':'9',\n ' \\ue615 ':'9',\n ' \\ue61e ':'9',\n}\n\ndef get_post_num(post_num_html):\n post_num_arr = []\n for i in range(len(post_num_html.contents)):\n post_num_content = post_num_html.contents[i]\n if post_num_content == ' ': continue\n if post_num_content != '.' and post_num_content != 'w ':\n post_num_arr.append(num_map[post_num_content.get_text()])\n else:\n post_num_arr.append(post_num_content)\n return ''.join(post_num_arr)\n\ndef get_like_num(like_num_html):\n like_num_arr = []\n for i in range(len(like_num_html.contents)):\n like_num_content = like_num_html.contents[i]\n if like_num_content == ' ': continue\n if like_num_content != '.' and like_num_content != 'w ':\n like_num_arr.append(num_map[like_num_content.get_text()])\n else:\n like_num_arr.append(like_num_content)\n return ''.join(like_num_arr)\n\ndef get_focus_num(focus_num_html):\n focus_num_arr = []\n for i in range(len(focus_num_html.contents)):\n focus_num_content = focus_num_html.contents[i]\n if focus_num_content == ' ': continue\n if focus_num_content != '.' and focus_num_content != 'w ':\n focus_num_arr.append(num_map[focus_num_content.get_text()])\n else:\n focus_num_arr.append(focus_num_content)\n return ''.join(focus_num_arr)\n\ndef get_follow_num(follow_num_html):\n follow_num_arr = []\n for i in range(len(follow_num_html.contents)):\n follow_num_content = follow_num_html.contents[i]\n if follow_num_content == ' ': continue\n if follow_num_content != '.' and follow_num_content != 'w ':\n follow_num_arr.append(num_map[follow_num_content.get_text()])\n else:\n follow_num_arr.append(follow_num_content)\n return ''.join(follow_num_arr)\n\ndef get_digg_num(digg_num_html):\n digg_num_arr = []\n for i in range(len(digg_num_html.contents)):\n digg_num_content = digg_num_html.contents[i]\n if digg_num_content == ' ': continue\n if digg_num_content != '.' 
and digg_num_content != 'w ':\n digg_num_arr.append(num_map[digg_num_content.get_text()])\n else:\n digg_num_arr.append(digg_num_content)\n return ''.join(digg_num_arr)\n\n\n#分享ID\nshare_id = \"83834087816\" #6722315998835330318\n#share_id = \"68544493839\" #6720499432518929672\n#share_id = \"94417447197\" #6722733418410560772\nshare_url = \"https://www.iesdouyin.com/share/user/\"+share_id\nheader = {\n \"User-Agent\":\"Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1\"\n}\nresponse = requests.get(url=share_url,headers=header)\naweme_post_html = response.text\n\nsoup = BeautifulSoup(aweme_post_html, 'html.parser')\npost_num_html = soup.find('div', class_='user-tab').find('span')\nlike_num_html = soup.find('div', class_='like-tab').find('span')\nfocus_num_html = soup.find('span', class_='focus').find('span')\nfollow_num_html = soup.find('span', class_='follower').find('span')\ndigg_num_html = soup.find('span', class_='liked-num').find('span')\n\n\npost_num = get_post_num(post_num_html)\nprint('作品:', post_num)\nlike_num = get_like_num(like_num_html)\nprint('喜欢:', like_num)\nfocus_num = get_focus_num(focus_num_html)\nprint('关注:', focus_num)\nfollow_num = get_follow_num(follow_num_html)\nprint('粉丝:', follow_num)\ndigg_num = get_digg_num(digg_num_html)\nprint('获赞:', digg_num)\n" }, { "alpha_fraction": 0.5524475574493408, "alphanum_fraction": 0.7342657446861267, "avg_line_length": 19.428571701049805, "blob_id": "5aa1d5dcadac9f0216047ecb9b99f0b5961acce5", "content_id": "f816fc846c9389b642c44c80eda50856c968d396", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 143, "license_type": "no_license", "max_line_length": 33, "num_lines": 7, "path": "/pydouyin/requirements.txt", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "selenium==3.141.0\npandas==0.23.1\nemoji==0.5.3\nrequests==2.20.0\nbrowsermob_proxy==0.8.0\nbeautifulsoup4==4.8.0\nmysql_connector_repackaged==0.3.1\n" }, { "alpha_fraction": 0.6505681872367859, "alphanum_fraction": 0.6534090638160706, "avg_line_length": 31, "blob_id": "8c5b078f3bbfc6f80148ad36272a663fe38511d9", "content_id": "48152562ae04a33a14374dae7bcb27c7c9c31d5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 352, "license_type": "no_license", "max_line_length": 79, "num_lines": 11, "path": "/pyxinri/split_coordinate.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import pandas as pd\n\nxinri_df = pd.read_csv('xinri.csv')\nxinri_df.columns=['id', 'title', 'address', 'coordinate', 'tel', 'p', 'c', 'd']\n\ncoord_df = xinri_df['coordinate'].str.split(',', expand=True)\ncoord_df.columns = ['longitude', 'latitude']\n\nxinri_df = pd.concat([xinri_df, coord_df], axis=1)\nxinri_df.to_csv('xinri_split_coord.csv')\nprint('Done')\n" }, { "alpha_fraction": 0.6836363673210144, "alphanum_fraction": 0.6872727274894714, "avg_line_length": 26.5, "blob_id": "4f6fb0838d7cb84947cf3b04be33c27b77012a42", "content_id": "c8029f0d61dc52c9b329860c28780751893eef31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 275, "license_type": "no_license", "max_line_length": 61, "num_lines": 10, "path": "/pyaima/split_location.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import pandas as pd\n\naima_df = pd.read_csv('aima.csv')\n\nlocation_df = aima_df['location'].str.split(',', 
expand=True)\nlocation_df.columns = ['longitude', 'latitude']\n\naima_df = pd.concat([aima_df, location_df], axis=1)\naima_df.to_csv('aima_split_location.csv')\nprint('Done')\n" }, { "alpha_fraction": 0.5759368538856506, "alphanum_fraction": 0.6035503149032593, "avg_line_length": 21.04347801208496, "blob_id": "84b52b55dbe14baf3ab06404da8d2dc36614e36d", "content_id": "1e8b53a50be9a3bac9af97ac2924f512641d9d0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 507, "license_type": "no_license", "max_line_length": 64, "num_lines": 23, "path": "/pyluckin/utils/common.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import random\nimport json\nimport requests\n\ndef random_ua():\n    with open('./utils/fake_useragent_0.1.11.json', 'r') as f:\n        fake_ua = json.load(f)\n    \n    browser = random.choice(list(fake_ua['randomize'].values()))\n    ua = random.choice(fake_ua['browsers'][browser])\n\n    return ua\n\ndef get_proxy():\n    result = requests.get('http://127.0.0.1:5010/get')\n    return result.text\n\ndef get_proxies():\n    proxies = {\n        \"http\": get_proxy(),\n        \"https\": get_proxy()\n    }\n    return proxies\n" }, { "alpha_fraction": 0.5689381957054138, "alphanum_fraction": 0.576862096786499, "avg_line_length": 27.68181800842285, "blob_id": "a18bcc89592eafaa9acc5a51dab6d4a1d7f977f0", "content_id": "c3705814384ff3bcf4cb414c24573bc19e4ab60b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1348, "license_type": "no_license", "max_line_length": 102, "num_lines": 44, "path": "/pyluckin/test2.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import requests\nfrom utils.common import random_ua\nfrom utils.common import get_proxies\nimport time\n\ndef do_request(url, method='GET', params={}):\n    result = {}\n    ua = random_ua()\n    headers = {'User-Agent': ua, 'Content-type': 'application/json'}\n\n    print('Request URL: %s'%url)\n    print('Request method: %s'%method)\n    print(params)\n    print('Using User-Agent: %s'%ua)\n\n    for j in range(5):\n        try:\n            proxies = get_proxies()\n            print('\\tAttempt #%d, using proxy: %s'%(j, proxies['http']))\n            if method == 'GET':\n                result = requests.get(url, headers=headers, proxies=proxies, timeout=5, params=params)\n            elif method == 'POST':\n                result = requests.post(url,headers=headers, proxies=proxies, timeout=5, data=params)\n        except Exception as e:\n            print(\"Error:\", e)\n            print('Taking a short break... (5s)')\n            time.sleep(5)\n            continue\n\n        print('\\tResponse status code: %d\\n' %result.status_code)\n        print('Taking a short break... (5s)')\n        time.sleep(5)\n\n        if result.status_code != 200:\n            continue\n\n        break\n\n    return result\n\nurl = \"http://www.dianping.com/\"\nresult = do_request(url, method='GET')\nprint(result.cookies.keys())\nprint(result.cookies.get_dict())\n" }, { "alpha_fraction": 0.6960784196853638, "alphanum_fraction": 0.6960784196853638, "avg_line_length": 17.545454025268555, "blob_id": "0707e5b0e604e5e81b064ecfb91129ed5fce2f89", "content_id": "b2b863974087f6872dcb17674201515318bab61f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 204, "license_type": "no_license", "max_line_length": 43, "num_lines": 11, "path": "/pydouyin/pytopic.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "from dy_model import DYModel\nfrom dy_crawler import DYCrawler\n\ncrawler = DYCrawler()\nmodel = DYModel()\n\nprint(\"Crawler Start to Run..............\")\ncrawler.run_topic(model)\n\n\nprint(\"Crawler End to Run!\")\n" }, { 
"alpha_fraction": 0.5311594009399414, "alphanum_fraction": 0.5355072617530823, "avg_line_length": 31.85714340209961, "blob_id": "823650de193a0701cdb15cc1f7467e6e13ce1fdd", "content_id": "79caa9e8d36f7184a6c07333915a76dae3860b1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1390, "license_type": "no_license", "max_line_length": 151, "num_lines": 42, "path": "/pyaima/pyamap.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import requests\nimport json\nimport pandas as pd\n\nkey = ''\nkeywords = '爱玛电动车'\nresult = requests.get('https://restapi.amap.com/v3/place/text?key=%s&keywords=%s'%(key, keywords))\nresult = json.loads(result.text)\ncity_list = result['suggestion']['cities']\ndata_rows = []\nfor i in range(len(city_list)):\n adcode = city_list[i]['adcode']\n page = 0\n while True:\n page += 1\n result = requests.get('https://restapi.amap.com/v3/place/text?key=%s&keywords=%s&city=%s&page=%s&citylimit=true'%(key, keywords, adcode, page))\n result = json.loads(result.text)\n poi_list = result['pois']\n\n for i in range(len(poi_list)):\n data_one = [\n poi_list[i]['id'],\n poi_list[i]['name'],\n poi_list[i]['address'],\n poi_list[i]['location'],\n poi_list[i]['pname'],\n poi_list[i]['cityname'],\n poi_list[i]['adname'],\n poi_list[i]['tel'],\n poi_list[i]['type'],\n poi_list[i]['typecode'],\n ]\n data_rows.append(data_one)\n print(data_one)\n\n if len(poi_list) < 20: break\n\n\nlabel = [ 'id', 'name', 'address', 'location', 'pname', 'cityname', 'adname', 'tel', 'type', 'typecode', ]\nstore_df = pd.DataFrame(data_rows, columns=label)\nstore_df.to_csv('aima.csv')\nprint('Done!')\n" }, { "alpha_fraction": 0.6256434321403503, "alphanum_fraction": 0.6307908296585083, "avg_line_length": 29.52857208251953, "blob_id": "d03d79fd4bec13980e9b95987bd61862770317e0", "content_id": "aaf918a0f54719d6d1934a073b1d2855d295092a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2241, "license_type": "no_license", "max_line_length": 102, "num_lines": 70, "path": "/pyluckin/city.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import requests\nimport json\nfrom utils.common import random_ua\nfrom utils.common import get_proxies\nimport time\nimport pandas as pd\n\ndef do_request(url, method='GET', params={}):\n result = {}\n ua = random_ua()\n headers = {'User-Agent': ua, 'Content-type': 'application/json'}\n\n print('当前请求的URL:%s' %url)\n print('当前使用的Method:%s' %method)\n print('当前传递的参数', params)\n print('当前使用的User-Agent:%s' %ua)\n\n for j in range(5):\n try:\n proxies = get_proxies()\n print('\\t第%d次尝试请求, 使用的Proxy:%s'%(j, proxies['http']))\n if method == 'GET':\n result = requests.get(url, headers=headers, proxies=proxies, timeout=5, params=params)\n elif method == 'POST':\n result = requests.post(url,headers=headers, proxies=proxies, timeout=5, data=params)\n except Exception as e:\n print(\"Error:\", e)\n print('休息,休息一下...(5s)')\n time.sleep(5)\n continue\n\n print('\\t返回结果状态码:%d\\n' %result.status_code)\n print('休息,休息一下...(5s)')\n time.sleep(5)\n\n if result.status_code != 200:\n continue\n\n break\n\n return result\n\nurl = \"https://www.dianping.com/ajax/citylist/getAllDomesticProvince\"\nresult = do_request(url, method='POST')\nprovinceList = json.loads(result.text)['provinceList']\ncity_df = pd.DataFrame()\nfor i in range(len(provinceList)):\n print('省份:%s\\t%s' %(provinceList[i]['provinceId'], provinceList[i]['provinceName']))\n\n 
provinceId = provinceList[i]['provinceId']\n params = json.dumps({\"provinceId\":provinceId})\n url = \"https://www.dianping.com/ajax/citylist/getDomesticCityByProvince\"\n result = do_request(url, method='POST', params=params)\n\n print(result.text)\n print(json.loads(result.text))\n print(json.loads(result.text)['cityList'])\n cityList = json.loads(result.text)['cityList']\n if len(cityList) == 0: continue\n\n city_df = city_df.append(pd.DataFrame(cityList))\n\n# Save City\ncity_df.to_csv('dianping_city.csv')\n\n# Save Province\nprovince_df = pd.DataFrame(provinceList)\nprovince_df.to_csv('dianping_province.csv')\n\nprint('Done!')\n" }, { "alpha_fraction": 0.6634730696678162, "alphanum_fraction": 0.6706587076187134, "avg_line_length": 32.400001525878906, "blob_id": "0aba189863c65eada706861c6731dcce23b66b03", "content_id": "2c44cdd2b0dfba2bc80ed96aac2ef69bc4d94a88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 835, "license_type": "no_license", "max_line_length": 86, "num_lines": 25, "path": "/pysciencenet/read_org.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport mysql.connector as sql\n\norg_db = sql.connect(host='127.0.0.1', database='crawler', user='root')\norg_cursor = org_db.cursor(dictionary=True)\n\norg_a_df = pd.read_csv('org_a.csv')\norg_a_arr = org_a_df.values.tolist()\nprint(org_a_arr)\nsql = \"INSERT INTO science_net_org (name, city, dept, level) VALUES (%s, %s, %s, 'A')\"\norg_cursor.executemany(sql, org_a_arr)\n\norg_b_df = pd.read_csv('org_b.csv')\norg_b_arr = org_b_df.values.tolist()\nprint(org_b_arr)\nsql = \"INSERT INTO science_net_org (name, city, dept, level) VALUES (%s, %s, %s, 'B')\"\norg_cursor.executemany(sql, org_b_arr)\n\norg_c_df = pd.read_csv('org_c.csv')\norg_c_arr = org_c_df.values.tolist()\nprint(org_c_arr)\nsql = \"INSERT INTO science_net_org (name, city, dept, level) VALUES (%s, %s, %s, 'C')\"\norg_cursor.executemany(sql, org_c_arr)\n\norg_db.commit()\n" }, { "alpha_fraction": 0.332865834236145, "alphanum_fraction": 0.5507246255874634, "avg_line_length": 21.28125, "blob_id": "111165fc5d8639dd9915b32ec683b50ab76d6eb1", "content_id": "a7cd5d008d055389858ae1d6323d73d23a86204f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2303, "license_type": "no_license", "max_line_length": 72, "num_lines": 96, "path": "/pyyadi/combine_dist.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\n\n# Read From CSV\nstore_df = pd.read_csv('yadi.csv')\ndist_df = pd.read_csv('dist.csv')\n\n# Drop Duplicated\nstore_df = store_df.drop_duplicates('code')\n\n# Replace C/D\nto_replace = {\n 'ccode': {\n 150122: 150100,\n 150802: 150800,\n 340181: 340100,\n 341881: 341800,\n 653101: 653100,\n 653201: 653200,\n 652901: 652900,\n 652801: 652800,\n 652201: 652200,\n 422801: 422800,\n 222403: 222400,\n 360403: 360400,\n 360681: 360600,\n 441702: 441700,\n 500116: 500100,\n 410482: 410400,\n 542600: 540400,\n },\n 'dcode': {\n 110228: 110118,\n 110229: 110119,\n 120221: 120117,\n 120223: 120118,\n 130323: 130306,\n 130621: 130607,\n 130622: 130608,\n 130625: 130609,\n 210282: 210214,\n 360400: 360403,\n 360600: 360681,\n 542621: 540400,\n }\n}\nstore_df = store_df.replace(to_replace)\n\n# P/C/D DataFrame\np_df = dist_df.drop_duplicates('p').set_index('pcode')['p']\nc_df = dist_df.drop_duplicates('c').set_index('ccode')['c']\nd_df = 
dist_df.drop_duplicates('d').set_index('dcode')['d']\n\n# Append C/D DataFrame\nextra = [\n [419001, '济源市'],\n [429004, '仙桃市'],\n [429005, '潜江市'],\n [429006, '天门市'],\n [469001, '五指山市'],\n [469002, '琼海市'],\n [469003, '儋州市'],\n [469005, '文昌市'],\n [469006, '万宁市'],\n [469007, '东方市'],\n [469022, '屯昌县'],\n [469023, '澄迈县'],\n [469024, '临高县'],\n [469026, '昌江黎族自治县'],\n [469027, '乐东黎族自治县'],\n [469028, '陵水黎族自治县'],\n [469029, '保亭黎族苗族自治县'],\n [469030, '琼中黎族苗族自治县'],\n [540400, '林芝市'],\n]\nfor i in range(len(extra)):\n l = extra[i][0]\n v = extra[i][1]\n c_df.loc[l] = v\n d_df.loc[l] = v\n\n\n# Combine\nstore_df = store_df.join(p_df, 'pcode')\nstore_df = store_df.join(c_df, 'ccode')\nstore_df = store_df.join(d_df, 'dcode')\n\n# Split\ncoord_df = store_df['gps'].str.split(',', expand=True).drop(columns=[2])\ncoord_df.columns = ['longitude', 'latitude']\n\n# Concat\nstore_df = pd.concat([store_df, coord_df], axis=1)\n\nstore_df.to_csv('yadi_with_dist.csv')\nprint('Done')\n" }, { "alpha_fraction": 0.5528846383094788, "alphanum_fraction": 0.557692289352417, "avg_line_length": 38.35135269165039, "blob_id": "5f0cd89dec2ad7e02a8110d35d34f956d3a58768", "content_id": "b20f2239baade60a8056d4faa0c886f13c4b6ff8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1456, "license_type": "no_license", "max_line_length": 141, "num_lines": 37, "path": "/pysciencenet/read_code.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import json\nimport pandas as pd\nimport mysql.connector as sql\n\ncode_db = sql.connect(host='127.0.0.1', database='crawler', user='root')\ncode_cursor = code_db.cursor(dictionary=True)\n\nf = open(\"applycode.json\",'r',encoding = 'utf-8')\ncode_json = f.read()\ncode_dict = json.loads(code_json)\nf.close()\n\ndata = []\nfor i in range(len(code_dict)):\n subject_a = code_dict[i]['label']\n subject_a_k = code_dict[i]['key']\n for j in range(len(code_dict[i]['children'])):\n subject_b = code_dict[i]['children'][j]['label']\n subject_b_k = code_dict[i]['children'][j]['key']\n if 'children' in code_dict[i]['children'][j]:\n for k in range(len(code_dict[i]['children'][j]['children'])):\n subject_c = code_dict[i]['children'][j]['children'][k]['label']\n subject_c_k = code_dict[i]['children'][j]['children'][k]['key']\n data_one = [\n subject_a, subject_b, subject_c,\n subject_a_k, subject_b_k, subject_c_k,\n ]\n print(data_one)\n data.append(data_one)\n else:\n data_one = [subject_a, subject_b, '', subject_a_k, subject_b_k, '']\n print(data_one)\n data.append(data_one)\n\nsql = \"INSERT INTO science_net_code (subject_a, subject_b, subject_c, subject_a_k, subject_b_k, subject_c_k) VALUES (%s, %s, %s, %s, %s, %s)\"\ncode_cursor.executemany(sql, data)\ncode_db.commit()\n" }, { "alpha_fraction": 0.523809552192688, "alphanum_fraction": 0.557823121547699, "avg_line_length": 23.5, "blob_id": "8bc393ca68b209a1a3ec9cd0da1be765b0663e5c", "content_id": "09843d6764d923876b113ff111369e80a805e2f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 588, "license_type": "no_license", "max_line_length": 50, "num_lines": 24, "path": "/pyxinri/data_js_to_csv.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import execjs\nimport demjson\nimport pandas as pd\n\nf = open(\"data.js\",'r',encoding = 'utf-8')\ndist_js = f.read()\ndist = demjson.decode(dist_js)\ndist_arr = []\n\nfor k1 in dist[86]:\n if k1 not in dist: continue\n p = dist[86][k1]\n for k2 in dist[k1]:\n if 
k2 not in dist: continue\n c = dist[k1][k2]\n for k3 in dist[k2]:\n d = dist[k2][k3]\n data_one = [k1, p, k2, c, k3, d]\n dist_arr.append(data_one)\n\nlabel = ['pcode', 'p', 'ccode', 'c', 'dcode', 'd']\ndf = pd.DataFrame(dist_arr, columns=label)\ndf.to_csv('dist.csv')\nprint('Done!')\n" }, { "alpha_fraction": 0.5488873720169067, "alphanum_fraction": 0.5569791197776794, "avg_line_length": 31.2391300201416, "blob_id": "c1ebda65f97435d4222f728ca2ea78ae2919c5fc", "content_id": "501d65837390b93667d4117408e5fc8761e501f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1483, "license_type": "no_license", "max_line_length": 105, "num_lines": 46, "path": "/pyniustar/utils.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import time\nimport sys, getopt\n\ndef have_a_rest(sec):\n while sec > 0:\n msg = \"Have a rest, and we will fight again in: %d seconds\" %(sec)\n print(msg, '\\r', end=\"\")\n time.sleep(1)\n\n # below two lines is to replace the printed line with white spaces\n # this is required for the case when the number of digits in timer reduces by 1 i.e. from\n # 10 secs to 9 secs, if we dont do this there will be extra prints at the end of the printed line\n # as the number of chars in the newly printed line is less than the previous\n remove_msg = ' ' * len(msg)\n print( remove_msg, '\\r', end=\"\")\n\n # decrement timer by 1 second\n sec -= 1\n print(\"Let's Fight\", '!'*10)\n return\n\ndef get_params():\n argv = sys.argv[1:]\n uids = ''\n last_record_time = ''\n try:\n opts, args = getopt.getopt(argv,\"hu:l:\",[\"uids=\", \"last_record_time=\"])\n except getopt.GetoptError:\n print ('pypost.py -u <user_id_list> -l <last_record_time>')\n sys.exit(2)\n\n for opt, arg in opts:\n if opt == '-h':\n print ('pypost.py -u <user_id_list> -l <last_record_time>')\n sys.exit()\n elif opt in (\"-u\", \"--uids\"):\n uids = arg\n elif opt in (\"-l\", \"--last_record_time\"):\n last_record_time = arg\n\n\n if last_record_time == '':\n print ('pypost.py -u <user_id_list> -l <last_record_time>')\n sys.exit()\n\n return uids, last_record_time\n" }, { "alpha_fraction": 0.5666502118110657, "alphanum_fraction": 0.5717580914497375, "avg_line_length": 46.78740310668945, "blob_id": "d240dc48f785cb6c2ff709f037db390a442f21f8", "content_id": "1e52ddc87db24462a1e6546e304eb0fd069ab798", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6081, "license_type": "no_license", "max_line_length": 615, "num_lines": 127, "path": "/pyniustar/niustar_model.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import mysql.connector as sql\nimport time\nimport emoji\nimport sys\n\nclass NiuStarModel:\n\n def __init__(self):\n self.dy_db = sql.connect(host='127.0.0.1', database='crawler', user='root')\n self.dy_cursor = self.dy_db.cursor(dictionary=True)\n self.record_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n self.user_id = ''\n self.min_aweme_id = ''\n\n def set_id(self, user_id, min_aweme_id):\n self.user_id = user_id\n self.min_aweme_id = min_aweme_id\n\n def add_user_statistics(self, data):\n one_row = (\n self.user_id,\n self.min_aweme_id,\n self.record_time,\n data['post_count'],\n data['like_count'],\n data['focus_count'],\n data['follow_count'],\n data['digg_count'],\n )\n \n sql = \"INSERT INTO niustar_user_statistics (user_id, min_aweme_id, record_time, post_count, like_count, focus_count, follow_count, digg_count) VALUES (%s, %s, %s, %s, 
%s, %s, %s, %s)\"\n self.dy_cursor.execute(sql, one_row)\n self.dy_db.commit()\n return True\n\n def add_post(self, aweme_list):\n post_arr = []\n for i in range(len(aweme_list)):\n post_row = (\n self.user_id,\n aweme_list[i]['aweme_id'],\n aweme_list[i]['aweme_type'],\n emoji.demojize(aweme_list[i]['desc']),\n aweme_list[i]['video']['vid'],\n aweme_list[i]['video']['cover']['url_list'][0],\n aweme_list[i]['video']['download_addr']['url_list'][0],\n aweme_list[i]['video']['download_addr']['url_list'][1],\n aweme_list[i]['video']['play_addr']['url_list'][0],\n aweme_list[i]['video']['play_addr']['url_list'][1],\n aweme_list[i]['video']['duration'],\n aweme_list[i]['video']['ratio'],\n aweme_list[i]['video']['width'],\n aweme_list[i]['video']['height'],\n )\n post_arr.append(post_row)\n\n sql = \"INSERT INTO niustar_post (user_id, aweme_id, aweme_type, `desc`, vid, cover_url, download_addr_0, download_addr_1, play_addr_0, play_addr_1, duration, ratio, width, height) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE aweme_type=VALUES(aweme_type), `desc`=VALUES(`desc`), vid=VALUES(vid), cover_url=VALUES(cover_url), download_addr_0=VALUES(download_addr_0), download_addr_1=VALUES(download_addr_1), play_addr_0=VALUES(play_addr_0), play_addr_1=VALUES(play_addr_1), duration=VALUES(duration), ratio=VALUES(ratio), width=VALUES(width), height=VALUES(height)\"\n self.dy_cursor.executemany(sql, post_arr)\n self.dy_db.commit()\n return True\n\n def add_post_statistics(self, aweme_list):\n post_statistics_arr = []\n for i in range(len(aweme_list)):\n post_statistics_row = (\n self.user_id,\n aweme_list[i]['aweme_id'],\n self.record_time,\n aweme_list[i]['statistics']['play_count'],\n aweme_list[i]['statistics']['comment_count'],\n aweme_list[i]['statistics']['digg_count'],\n aweme_list[i]['statistics']['forward_count'],\n aweme_list[i]['statistics']['share_count'],\n )\n post_statistics_arr.append(post_statistics_row)\n\n sql = \"INSERT INTO niustar_post_statistics (user_id, aweme_id, record_time, play_count, comment_count, digg_count, forward_count, share_count) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\"\n self.dy_cursor.executemany(sql, post_statistics_arr)\n self.dy_db.commit()\n return True\n\n def get_user_list(self, uid_arr = []):\n sql = \"SELECT * FROM niustar_user WHERE deleted_at IS NULL\"\n\n if uid_arr:\n sql += \" AND user_id IN (%s)\" % \",\".join(['%s'] * len(uid_arr))\n\n self.dy_cursor.execute(sql, uid_arr)\n user_list = self.dy_cursor.fetchall() # fetchall() 获取所有记录\n return user_list\n\n def add_user(self, user):\n\n sql = \"INSERT INTO niustar_user (user_id, username, short_url, min_aweme_id, avatar_url, gid, douyin_account, wechat_account, realname) VALUES (%(user_id)s, %(username)s, %(short_url)s, %(min_aweme_id)s, %(avatar_url)s, %(gid)s, %(douyin_account)s, %(wechat_account)s, %(realname)s) ON DUPLICATE KEY UPDATE username=VALUES(username), short_url=VALUES(short_url), avatar_url=VALUES(avatar_url)\"\n self.dy_cursor.execute(sql, user)\n self.dy_db.commit()\n return True\n\n # Find Least Recently aweme_id\n def find_lr_aweme_id(self, user_id, last_record_time):\n sql = \"SELECT aweme_id FROM niustar_post_statistics WHERE user_id = '%s' AND record_time = '%s' ORDER BY aweme_id DESC limit 0,1\" % (user_id, last_record_time)\n self.dy_cursor.execute(sql)\n post = self.dy_cursor.fetchone()\n if post:\n return post['aweme_id']\n else:\n user = {'user_id':user_id, 'min_aweme_id':0, 'record_time':last_record_time}\n self.add_user_retry_log(user)\n 
return 0\n\n    def check_post(self):\n        sql = \"SELECT aweme_id FROM niustar_post_statistics WHERE user_id = '%s' AND record_time = '%s' ORDER BY aweme_id ASC limit 0,1\" % (self.user_id, self.record_time)\n        self.dy_cursor.execute(sql)\n        post = self.dy_cursor.fetchone()\n\n        if post is None and self.min_aweme_id != \"0\" or post is not None and post['aweme_id'] > self.min_aweme_id:\n            user = {'user_id':self.user_id, 'min_aweme_id':self.min_aweme_id, 'record_time':self.record_time}\n            self.add_user_retry_log(user)\n\n        return\n\n    def add_user_retry_log(self, log):\n        log['traceback'] = sys._getframe(1).f_code.co_name\n        sql = \"INSERT INTO niustar_user_retry (user_id, min_aweme_id, record_time, traceback) VALUES (%(user_id)s, %(min_aweme_id)s, %(record_time)s, %(traceback)s)\"\n        self.dy_cursor.execute(sql, log)\n        self.dy_db.commit()\n        return True\n" }, { "alpha_fraction": 0.6123085021972656, "alphanum_fraction": 0.6857959032058716, "avg_line_length": 35.32075500488281, "blob_id": "66112aed2b0c75a6c4a222b4239fbd23633a0f8b", "content_id": "4ad3e771ea69147b91af6c7a38dea3ac2ac99380", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3943, "license_type": "no_license", "max_line_length": 864, "num_lines": 106, "path": "/pydouyin/test_selenium_signature.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import requests\nimport re\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport json\nimport time\nimport pandas as pd\n\n\n\n# Share ID\nshare_id = \"83834087816\"\nshare_url = \"https://www.iesdouyin.com/share/user/\"+share_id\nheader = {\n    \"User-Agent\":\"Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1\"\n}\nresponse = requests.get(url=share_url,headers=header)\n\n# Regexes to extract dytk and tac\ndytk_search = re.compile(r\"dytk: '(.*?)'\")\ntac_search = re.compile(r\"<script>tac=(.*?)</script>\")\n\n# Extract dytk and tac from the response\ndytk = re.search(dytk_search,response.text).group(1)\ntac = re.search(tac_search,response.text).group(1)\n\n# Wrap tac as a JS variable\ntac = \"var tac=\"+tac+\";\"\n\n\n# Compose the signing HTML page: header + tac + foot\nwith open(\"head.txt\") as f1:\n    f1_read = f1.read()\n\nwith open(\"foot.txt\") as f2:\n    f2_read = f2.read().replace(\"&&&&\", share_id)\n\nwith open(\"sign.html\",\"w\") as f_w:\n    f_w.write(f1_read+\"\\n\"+tac+\"\\n\"+f2_read)\n\nchrome_options = Options()\nchrome_options.set_headless(headless=True)\ndouyin_driver = webdriver.Chrome(options=chrome_options)\ndouyin_driver.get(\"file:///Users/shannon/code/crawler/pydouyin/sign.html\")\n\nsignature = douyin_driver.title\ndouyin_driver.quit()\n\nprint(dytk)\nprint(tac)\nprint(signature)\n\n#curl 'https://www.iesdouyin.com/web/api/v2/aweme/post/?user_id=83834087816&sec_uid=&count=21&max_cursor=0&aid=1128&_signature=QvkTehAfH423nmOdOxyvrUL5E2&dytk=2f1f312f7325deb8244f711b1e230ae4' -H 'pragma: no-cache' -H 'cookie: _ga=GA1.2.868563206.1567557111; _gid=GA1.2.1730599626.1567661031; tt_webid=6733126270223779336; _ba=BA0.2-20190905-5199e-HPRYAtdrbOOHFZzjKX61' -H 'accept-encoding: gzip, deflate, br' -H 'accept-language: en,zh;q=0.9' -H 'user-agent: Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1' -H 'sec-fetch-mode: cors' -H 'accept: application/json' -H 'cache-control: no-cache' -H 'authority: www.iesdouyin.com' -H 'x-requested-with: XMLHttpRequest' -H 'sec-fetch-site: same-origin' -H 'referer: https://www.iesdouyin.com/share/user/83834087816' --compressed\n\nmovie_url = \"https://www.iesdouyin.com/web/api/v2/aweme/post/?user_id=\" + share_id + \"&sec_uid=&count=21&max_cursor=0&aid=1128&_signature=\" + signature + \"&dytk=\" + dytk\nprint(movie_url)\n\nheader = {\n    \"User-Agent\":\"Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1\",\n    \"pragma\": \"no-cache\",\n    \"sec-fetch-mode\": \"cors\",\n    \"sec-fetch-site\": \"same-origin\",\n    \"accept\": \"application/json\",\n    \"cache-control\": \"no-cache\",\n    \"authority\": \"www.iesdouyin.com\",\n    \"referer\": \"https://www.iesdouyin.com/share/user/83834087816\",\n    \"x-requested-with\": \"XMLHttpRequest\"\n}\n\nretry_count = 0\nwhile True:\n    retry_count += 1\n    print(retry_count)\n\n    movie_reponse = requests.get(url=movie_url,headers=header)\n    aweme_list = json.loads(movie_reponse.text)[\"aweme_list\"]\n    if aweme_list == []:\n        time.sleep(1)\n        continue\n    else:\n        print(aweme_list)\n        break\n    #for item in aweme_list:\n    #    video_url = item[\"video\"][\"play_addr\"][\"url_list\"][0]\n    #    video_response = requests.get(url=video_url,headers=header)\n    #    with open(\"douyin.mp4\",\"wb\") as v:\n    #        # Must use video_response.content, not .text, to write the binary video\n    #        v.write(video_response.content)\n    #        break\n\n\n\n\n\n#f = open(\"third.js\",'r',encoding = 'utf-8')\n#js = f.read()\n#f.close()\n#\n#driver=webdriver.Chrome()\n#driver.execute_script(js)\n#result1 = driver.execute_script(\"return _bytedAcrawler.sign(arguments[0])\", \"110677980134\")\n#result2 = driver.execute_script(\"return _bytedAcrawler.sign(110677980134)\")\n#driver.close()\n#\n#print(result1)\n#print(result2)\n\n" }, { "alpha_fraction": 0.737726092338562, "alphanum_fraction": 0.7403100728988647, "avg_line_length": 27.66666603088379, "blob_id": "23554f90f3d2dda84a5513beca18968f25cd0b88", "content_id": "42aa9516e8d598e2b0490ce5073e4f918736e84e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 774, "license_type": "no_license", "max_line_length": 78, "num_lines": 27, "path": "/pydouyin/pyshorturl.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import sys\nfrom selenium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.chrome.options import Options\nfrom bs4 import BeautifulSoup\nfrom dy_model import DYModel\n\nshort_url = sys.argv[1]\n\nchrome_options = Options()\nchrome_options.set_headless(headless=True)\ndouyin_driver = webdriver.Chrome(options=chrome_options)\ndouyin_driver.get(short_url)\n\nsoup = BeautifulSoup(douyin_driver.page_source, 'html.parser')\nuser = {\n    \"user_id\": soup.find('span', class_='focus-btn')['data-id'],\n    \"username\": soup.find('p', class_='nickname').text,\n    \"avatar_url\": soup.find('img', class_='avatar')['src'],\n    \"short_url\": short_url,\n}\n\nmodel = DYModel()\nmodel.add_user(user)\nprint(user)\n\ndouyin_driver.quit()\n" }, { "alpha_fraction": 0.5304238200187683, "alphanum_fraction": 0.6101552844047546, "avg_line_length": 32.09722137451172, "blob_id": "32b996c551ce24c984cb8e5793d700dc233bfdc4", "content_id": "c5afdaf46477b1bbd92b3005476528364db06207", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2383, "license_type": "no_license", "max_line_length": 333, "num_lines": 72, "path": "/pyxinri/pyxinri.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", 
"text": "import execjs\nimport demjson\nimport requests\nfrom urllib.parse import unquote\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36\",\n \"Accept\": \"*/*\",\n \"Accept-Language\": \"en,zh;q=0.9\",\n \"Connection\": \"keep-alive\",\n \"Cookie\": \"UM_distinctid=16bfe8909f9a4b-0afcf6b805bab6-37647e05-1fa400-16bfe8909fa4ab; Hm_lvt_bab9524cb72f32892de60fd48af5f4aa=1563343522; CNZZDATA1271242693=749532626-1563338602-https%253A%252F%252Fwww.google.com%252F%7C1563591102; ASP.NET_SessionId=g1nzaaabsbnmk3bkomkmclbq; Hm_lpvt_bab9524cb72f32892de60fd48af5f4aa=1563591954\"\n}\n\nf = open(\"TDES.js\",'r',encoding = 'utf-8')\njs = f.read()\nf.close()\nctx = execjs.compile(js)\n\ndef encrpyt_data(p, c, d):\n p = ctx.call(\"TDES.encrypt\", p)\n c = ctx.call(\"TDES.encrypt\", c)\n d = ctx.call(\"TDES.encrypt\", d)\n return p, c, d\n\ndef name_decode(s):\n return s.replace('%', '\\\\') \\\n .encode('utf-8') \\\n .decode('unicode_escape')\n\ndef get_data(p_name, c_name, d_name):\n p, c, d = encrpyt_data(p_name, c_name, d_name)\n data = {'cmd':'getDotList', 'p':p, 'c':c, 'd':d}\n result = requests.post('http://www.xinri.com/Ajax/AjaxHandler_XRDDC.ashx', data = data, headers=headers)\n info = unquote(demjson.decode(result.text)['info'], encoding='utf-8')\n soup = BeautifulSoup(info, 'html.parser')\n dl_arr = soup.findAll('dl')\n data_rows = []\n for i in range(len(dl_arr)):\n if dl_arr[i]['data-title'] == '': continue\n data_one = [\n name_decode(dl_arr[i]['data-title']),\n name_decode(dl_arr[i]['data-address']),\n dl_arr[i]['data-point'],\n dl_arr[i]['data-tel'],\n p_name,\n c_name,\n d_name,\n ]\n data_rows.append(data_one)\n print(data_one)\n return data_rows\n\nf = open(\"data.js\",'r',encoding = 'utf-8')\ndist_js = f.read()\ndist = demjson.decode(dist_js)\ndist_arr = []\n\nfor k1 in dist[86]:\n if k1 not in dist: continue\n p = dist[86][k1]\n for k2 in dist[k1]:\n if k2 not in dist: continue\n c = dist[k1][k2]\n for k3 in dist[k2]:\n d = dist[k2][k3]\n dist_arr.extend(get_data(p, c, d))\n\ndf = pd.DataFrame.from_dict(dist_arr)\ndf.to_csv('xinri.csv')\nprint('Done!')\n" }, { "alpha_fraction": 0.6048387289047241, "alphanum_fraction": 0.6248247027397156, "avg_line_length": 34.209877014160156, "blob_id": "01684809ffefd670fc273af0dfeedb832823fb0a", "content_id": "5eafee9e6d1b94bb2b150636a91f2bf3b28a79f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2852, "license_type": "no_license", "max_line_length": 96, "num_lines": 81, "path": "/pyniustar/pyuser.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom browsermobproxy import Server\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom urllib.parse import urlparse\nfrom bs4 import BeautifulSoup\nfrom utils import have_a_rest\nimport random\nimport json\nfrom niustar_model import NiuStarModel\n\nmodel = NiuStarModel()\n\n# Load User\n#user_df = pd.read_excel('./niustar_user_20191101.xlsx')\n#user_df = pd.read_excel('./niustar_user_20191104.xlsx')\nuser_df = pd.read_excel('./niustar_user_20191105.xlsx')\n\n\nserver = Server(\"./browsermob-proxy-2.1.4/bin/browsermob-proxy\", {'port': 9999})\nserver.start()\nproxy = server.create_proxy()\nurl = urlparse(proxy.proxy).path\n\nchrome_options = 
Options()\nchrome_options.set_headless(headless=True)\nchrome_options.add_argument(\"--proxy-server={0}\".format(url))\ndouyin_driver = webdriver.Chrome(options=chrome_options)\n\nfor index, row in user_df.iterrows():\n\n short_url = row[2].strip()\n douyin_driver.get(short_url)\n\n # Process Response Content\n have_a_rest(random.randrange(3, 5, 1))\n soup = BeautifulSoup(douyin_driver.page_source, 'html.parser')\n\n music_active = soup.find('div', class_='music-tab tab active get-list')\n if music_active:\n proxy.new_har(\"load_post_page\", options={'captureHeaders': True, 'captureContent':True})\n post_tab = douyin_driver.find_element_by_css_selector(\"div.user-tab.tab.get-list\")\n douyin_driver.execute_script('arguments[0].click();', post_tab)\n have_a_rest(random.randrange(3, 5, 1))\n\n # Process Response Content\n har_return = json.loads(json.dumps(proxy.har, ensure_ascii=False))\n entries = har_return['log']['entries']\n for i in range(len(entries)):\n if '/web/api/v2/aweme/post/' in entries[i]['request']['url']:\n print(entries[i]['request']['url'])\n result = json.loads(entries[i]['response']['content']['text'])\n if len(result['aweme_list']) > 0:\n min_aweme_id = result['aweme_list'][0]['aweme_id']\n else:\n min_aweme_id = 0\n else:\n aweme_list = soup.findAll('li', class_='item goWork')\n if len(aweme_list) > 0:\n min_aweme_id = aweme_list[0]['data-id']\n else:\n min_aweme_id = 0\n\n user = {\n \"user_id\" : soup.find('span', class_='focus-btn')['data-id'],\n \"username\" : soup.find('p', class_='nickname').text.encode('utf-8'),\n \"min_aweme_id\" : min_aweme_id,\n \"avatar_url\" : soup.find('img', class_='avatar')['src'],\n \"short_url\" : short_url,\n 'gid' : row[0],\n 'douyin_account': row[1],\n 'wechat_account': row[4],\n 'realname' : row[3],\n }\n\n model.add_user(user)\n print(user)\n have_a_rest(random.randrange(5, 10, 1))\n\ndouyin_driver.quit()\nprint(\"Crawler End to Run!\")\n" }, { "alpha_fraction": 0.5620900988578796, "alphanum_fraction": 0.5671185255050659, "avg_line_length": 46.64583206176758, "blob_id": "96adc21ac58dc7b5667cba22e82f20609d3be2cc", "content_id": "88b50e2c09a4543d3140339ac2377f9dfe8c1b56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4586, "license_type": "no_license", "max_line_length": 614, "num_lines": 96, "path": "/pydouyin/dy_model.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import mysql.connector as sql\nimport time\nimport emoji\n\nclass DYModel:\n\n def __init__(self):\n self.dy_db = sql.connect(host='127.0.0.1', database='crawler', user='root')\n self.dy_cursor = self.dy_db.cursor(dictionary=True)\n self.record_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n self.user_id = ''\n self.min_aweme_id = ''\n\n def set_id(self, user_id, min_aweme_id):\n self.user_id = user_id\n self.min_aweme_id = min_aweme_id\n\n def add_user_statistics(self, data):\n one_row = (\n self.user_id,\n self.record_time,\n data['post_count'],\n data['like_count'],\n data['focus_count'],\n data['follow_count'],\n data['digg_count'],\n )\n \n sql = \"INSERT INTO douyin_user_statistics (user_id, record_time, post_count, like_count, focus_count, follow_count, digg_count) VALUES (%s, %s, %s, %s, %s, %s, %s)\"\n self.dy_cursor.execute(sql, one_row)\n self.dy_db.commit()\n return True\n\n def add_post(self, aweme_list):\n post_arr = []\n for i in range(len(aweme_list)):\n post_row = (\n self.user_id,\n aweme_list[i]['aweme_id'],\n aweme_list[i]['aweme_type'],\n 
emoji.demojize(aweme_list[i]['desc']),\n aweme_list[i]['video']['vid'],\n aweme_list[i]['video']['cover']['url_list'][0],\n aweme_list[i]['video']['download_addr']['url_list'][0],\n aweme_list[i]['video']['download_addr']['url_list'][1],\n aweme_list[i]['video']['play_addr']['url_list'][0],\n aweme_list[i]['video']['play_addr']['url_list'][1],\n aweme_list[i]['video']['duration'],\n aweme_list[i]['video']['ratio'],\n aweme_list[i]['video']['width'],\n aweme_list[i]['video']['height'],\n )\n post_arr.append(post_row)\n\n sql = \"INSERT INTO douyin_post (user_id, aweme_id, aweme_type, `desc`, vid, cover_url, download_addr_0, download_addr_1, play_addr_0, play_addr_1, duration, ratio, width, height) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE aweme_type=VALUES(aweme_type), `desc`=VALUES(`desc`), vid=VALUES(vid), cover_url=VALUES(cover_url), download_addr_0=VALUES(download_addr_0), download_addr_1=VALUES(download_addr_1), play_addr_0=VALUES(play_addr_0), play_addr_1=VALUES(play_addr_1), duration=VALUES(duration), ratio=VALUES(ratio), width=VALUES(width), height=VALUES(height)\"\n self.dy_cursor.executemany(sql, post_arr)\n self.dy_db.commit()\n return True\n\n def add_post_statistics(self, aweme_list):\n post_statistics_arr = []\n for i in range(len(aweme_list)):\n post_statistics_row = (\n self.user_id,\n aweme_list[i]['aweme_id'],\n self.record_time,\n aweme_list[i]['statistics']['play_count'],\n aweme_list[i]['statistics']['comment_count'],\n aweme_list[i]['statistics']['digg_count'],\n aweme_list[i]['statistics']['forward_count'],\n aweme_list[i]['statistics']['share_count'],\n )\n post_statistics_arr.append(post_statistics_row)\n\n sql = \"INSERT INTO douyin_post_statistics (user_id, aweme_id, record_time, play_count, comment_count, digg_count, forward_count, share_count) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\"\n self.dy_cursor.executemany(sql, post_statistics_arr)\n self.dy_db.commit()\n return True\n\n def get_user_list(self):\n self.dy_cursor.execute(\"SELECT * FROM douyin_user where deleted_at IS NULL\")\n user_list = self.dy_cursor.fetchall() # fetchall() 获取所有记录\n return user_list\n\n def add_user(self, user):\n\n sql = \"INSERT INTO douyin_user (user_id, username, short_url, avatar_url) VALUES (%(user_id)s, %(username)s, %(short_url)s, %(avatar_url)s) ON DUPLICATE KEY UPDATE username=VALUES(username), short_url=VALUES(short_url), avatar_url=VALUES(avatar_url)\"\n self.dy_cursor.execute(sql, user)\n self.dy_db.commit()\n return True\n\n def add_topic_post(self, post_list):\n sql = \"INSERT INTO douyin_topic_post (topic_name, aweme_id) VALUES (%(topic_name)s, %(aweme_id)s) ON DUPLICATE KEY UPDATE topic_name=VALUES(topic_name), aweme_id=VALUES(aweme_id)\"\n self.dy_cursor.executemany(sql, post_list)\n self.dy_db.commit()\n return True\n" }, { "alpha_fraction": 0.6188747882843018, "alphanum_fraction": 0.6188747882843018, "avg_line_length": 26.549999237060547, "blob_id": "6f1de7bc34c043557b1fdfa81cbd75c9b02149ad", "content_id": "7439993cb89fe9aad8155ce461de6503e709d1e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 551, "license_type": "no_license", "max_line_length": 59, "num_lines": 20, "path": "/pydouyin/pydouyin.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "from dy_model import DYModel\nfrom dy_crawler import DYCrawler\n\ncrawler = DYCrawler()\nmodel = DYModel()\nuser_list = model.get_user_list()\n\nfor user in user_list:\n print(\"Crawler Start to 
Run..............\")\n print(\"username:\", user['username'])\n print(\"userid:\", user['user_id'])\n print(\"min_aweme_id:\", user['min_aweme_id'])\n print(\"short_url:\", user['short_url'])\n model.set_id(user['user_id'], user['min_aweme_id'])\n crawler.run(model)\n\n print(\"User \" + user['username'] + \" Finished........\")\n\n\nprint(\"Crawler End to Run!\")\n" }, { "alpha_fraction": 0.5140009522438049, "alphanum_fraction": 0.5244423151016235, "avg_line_length": 34.71186447143555, "blob_id": "6dc9643f35efa22018a504daab50317861ee58cb", "content_id": "9d2acb52e692907b8f41912c4a7143d80c70a312", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2107, "license_type": "no_license", "max_line_length": 116, "num_lines": 59, "path": "/pysoco/pysoco.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import requests\nimport json\nimport numpy as np\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\ndist_df = pd.read_csv('dist.csv')\npcode_arr = np.array(dist_df.drop_duplicates(['pcode'])['pcode'])\n\ndata_rows = []\nfor i in range(len(pcode_arr)):\n post_data = {\n 'province_id': pcode_arr[i],\n 'pageindex': 0,\n 'latitude': 39.911013,\n 'longitude': 116.413554,\n }\n\n while True:\n post_data['pageindex'] += 1\n result = requests.post('http://wx.supersoco.com/index.php?c=MapCtrl&m=getsupplierlist_page', data=post_data)\n \n list_result = json.loads(result.text)\n \n for i in range(len(list_result['result'])):\n data_one = [\n list_result['result'][i]['id'],\n list_result['result'][i]['title'],\n list_result['result'][i]['address'],\n list_result['result'][i]['lbs_type'],\n list_result['result'][i]['longitude'],\n list_result['result'][i]['latitude'],\n list_result['result'][i]['province_id'],\n list_result['result'][i]['province'],\n list_result['result'][i]['city_id'],\n list_result['result'][i]['city'],\n list_result['result'][i]['county_id'],\n list_result['result'][i]['county'],\n list_result['result'][i]['contact_manager'],\n list_result['result'][i]['tel'],\n list_result['result'][i]['bus_hour_start'],\n list_result['result'][i]['bus_hour_end'],\n list_result['result'][i]['updatetime'],\n ]\n data_rows.append(data_one)\n print(data_one)\n \n if len(list_result['result']) < 10: break\n\n\nlabel = [\n 'id', 'title', 'address', 'lbs_type', 'longitude', 'latitude',\n 'province_id', 'province', 'city_id', 'city',\n 'county_id', 'county', 'contact_manager', 'tel',\n 'bus_hour_start', 'bus_hour_end', 'updatetime',\n ]\nstore_df = pd.DataFrame(data_rows, columns=label)\nstore_df.to_csv('soco.csv')\nprint('Done!')\n" }, { "alpha_fraction": 0.5958378911018372, "alphanum_fraction": 0.6021358370780945, "avg_line_length": 38.69565200805664, "blob_id": "70bdfc1359e0a6b379168514cd4a7e43caf5be32", "content_id": "0359c6ff78225d3a70a70fb220e4f7933c7e11e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3670, "license_type": "no_license", "max_line_length": 106, "num_lines": 92, "path": "/pyniustar/niustar_crawler.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "from browsermobproxy import Server\nfrom selenium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.chrome.options import Options\nimport json\nimport time\nfrom urllib.parse import urlparse\nimport random\n\nfrom niustar_parse import NiuStarParse\nfrom utils import have_a_rest\n\nclass NiuStarCrawler:\n\n def 
__init__(self):\n # Browsermob Proxy\n self.server = Server(\"./browsermob-proxy-2.1.4/bin/browsermob-proxy\", {'port': 9999})\n self.server.start()\n self.proxy = self.server.create_proxy()\n url = urlparse(self.proxy.proxy).path\n\n # Selenium Chrome\n chrome_options = Options()\n chrome_options.set_headless(headless=True)\n #mobile_emulation = { \"deviceName\": \"Nexus 5\" }\n #chrome_options.add_experimental_option(\"mobileEmulation\", mobile_emulation)\n chrome_options.add_argument(\"--proxy-server={0}\".format(url))\n self.douyin_driver = webdriver.Chrome(options=chrome_options)\n\n self.parse = NiuStarParse()\n\n def __del__(self):\n # Quit\n self.server.stop()\n self.douyin_driver.quit()\n\n def run(self, model):\n share_url = \"https://www.iesdouyin.com/share/user/\" + model.user_id\n page_num = 1\n\n # Load First Page\n self.proxy.new_har(\"load_first_page\", options={'captureHeaders': True, 'captureContent':True})\n self.douyin_driver.get(share_url)\n print(\"...........Ready to Load Page \", page_num)\n\n # Swtich to post tab\n music_active = self.parse.get_music_tab(self.douyin_driver.page_source)\n if music_active:\n self.proxy.new_har(\"load_first_page\", options={'captureHeaders': True, 'captureContent':True})\n post_tab = self.douyin_driver.find_element_by_css_selector(\"div.user-tab.tab.get-list\")\n self.douyin_driver.execute_script('arguments[0].click();', post_tab)\n\n # Process User Data\n user_data = self.parse.process_user_data(self.douyin_driver.page_source)\n model.add_user_statistics(user_data)\n\n # Process Response Content & Load More\n while True:\n # Take a break\n have_a_rest(random.randrange(3, 5, 1))\n\n # Process Response Content\n har_return = json.loads(json.dumps(self.proxy.har, ensure_ascii=False))\n entries = har_return['log']['entries']\n for i in range(len(entries)):\n if '/web/api/v2/aweme/post/' in entries[i]['request']['url']:\n print(entries[i]['request']['url'])\n result = json.loads(entries[i]['response']['content']['text'])\n\n if len(result['aweme_list']) == 0:\n model.check_post()\n break\n\n # Process Post Data\n model.add_post(result['aweme_list'])\n model.add_post_statistics(result['aweme_list'])\n\n # Whether to Load Next Page\n if result['aweme_list'][-1]['aweme_id'] < model.min_aweme_id:\n break\n if result['has_more'] == False:\n break\n\n # Load Next Page\n page_num += 1\n have_a_rest(random.randrange(5, 10, 1))\n self.proxy.new_har(\"load_next_page\", options={'captureHeaders': True, 'captureContent':True})\n print(\"...........Ready to Load Page \", page_num)\n # 拖动到可见的元素去\n pageload_element = self.douyin_driver.find_element_by_id(\"pagelet-loading\")\n self.douyin_driver.execute_script(\"arguments[0].scrollIntoView();\", pageload_element)\n print(\"...........Scrolled\")\n" }, { "alpha_fraction": 0.41430947184562683, "alphanum_fraction": 0.45391014218330383, "avg_line_length": 29.353534698486328, "blob_id": "1ef5cb0a65650e404698e08702c20fc4defe62d2", "content_id": "1e668034ec989efdbc44ba763134644d3e232aed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3025, "license_type": "no_license", "max_line_length": 75, "num_lines": 99, "path": "/pyniustar/niustar_parse.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\n\nclass NiuStarParse:\n\n _num_map = {\n #0\n ' \\ue603 ':'0',\n ' \\ue60d ':'0',\n ' \\ue616 ':'0',\n #1\n ' \\ue602 ':'1',\n ' \\ue60e ':'1',\n ' \\ue618 ':'1',\n #2\n ' \\ue605 ':'2',\n ' \\ue610 ':'2',\n ' 
\\ue617 ':'2',\n #3\n ' \\ue604 ':'3',\n ' \\ue611 ':'3',\n ' \\ue61a ':'3',\n #4\n ' \\ue606 ':'4',\n ' \\ue60c ':'4',\n ' \\ue619 ':'4',\n #5\n ' \\ue607 ':'5',\n ' \\ue60f ':'5',\n ' \\ue61b ':'5',\n #6\n ' \\ue608 ':'6',\n ' \\ue612 ':'6',\n ' \\ue61f ':'6',\n #7\n ' \\ue60a ':'7',\n ' \\ue613 ':'7',\n ' \\ue61c ':'7',\n #8\n ' \\ue60b ':'8',\n ' \\ue614 ':'8',\n ' \\ue61d ':'8',\n #9\n ' \\ue609 ':'9',\n ' \\ue615 ':'9',\n ' \\ue61e ':'9',\n }\n\n def get_num(self, num_html):\n num_arr = []\n for i in range(len(num_html.contents)):\n num_content = num_html.contents[i]\n if num_content == ' ': continue\n if num_content != '.' and num_content != 'w ':\n num_arr.append(self._num_map[num_content.get_text()])\n else:\n num_arr.append(num_content)\n return ''.join(num_arr)\n\n def process_user_data(self, page_html):\n soup = BeautifulSoup(page_html, 'html.parser')\n post_num_html = soup.find('div', class_='user-tab').find('span')\n like_num_html = soup.find('div', class_='like-tab').find('span')\n focus_num_html = soup.find('span', class_='focus').find('span')\n follow_num_html = soup.find('span', class_='follower').find('span')\n digg_num_html = soup.find('span', class_='liked-num').find('span')\n post_num = self.get_num(post_num_html)\n like_num = self.get_num(like_num_html)\n focus_num = self.get_num(focus_num_html)\n follow_num = self.get_num(follow_num_html)\n digg_num = self.get_num(digg_num_html)\n #print('作品:', post_num)\n #print('喜欢:', like_num)\n #print('关注:', focus_num)\n #print('粉丝:', follow_num)\n #print('获赞:', digg_num)\n\n return {\n 'post_count': post_num,\n 'like_count': like_num,\n 'focus_count': focus_num,\n 'follow_count': follow_num,\n 'digg_count': digg_num,\n }\n\n def get_music_tab(self, page_html):\n soup = BeautifulSoup(page_html, 'html.parser')\n return soup.find('div', class_='music-tab tab active get-list')\n\n def process_topic_post(self, page_html):\n soup = BeautifulSoup(page_html, 'html.parser')\n topic_name = soup.find('div', class_='author-info').p.text\n post_list = soup.findAll('li', class_='item goWork')\n data = []\n for post in post_list:\n data.append({\n 'topic_name': topic_name,\n 'aweme_id' : post[\"data-id\"],\n })\n return data\n" }, { "alpha_fraction": 0.6746666431427002, "alphanum_fraction": 0.690666675567627, "avg_line_length": 25.785715103149414, "blob_id": "39cd13b16d098779aeb77291566a9e98939acd9e", "content_id": "23a55b2ed93e26daeac94a3db24fb5147f3fb993", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 375, "license_type": "no_license", "max_line_length": 85, "num_lines": 14, "path": "/pyaima/pyaima.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "import requests\nimport json\nimport pandas as pd\n\n\nstore_list = []\nfor i in range(2, 33):\n result = requests.get('https://www.aimatech.com/outlets/list?ccode=0&pcode=%s'%i)\n result_json = result.text.split(\"(\", 1)[1].strip(\")\")\n store_list.extend(json.loads(result_json))\n\nstore_df = pd.DataFrame.from_dict(store_list)\nstore_df.to_csv('aimatech.csv')\nprint(store_df)\n" }, { "alpha_fraction": 0.766262412071228, "alphanum_fraction": 0.7783902883529663, "avg_line_length": 24.91428565979004, "blob_id": "c60b7165d124137769396c05de3a081d31739c2b", "content_id": "a1b845f29c4dec4719e25c983028aabf99b4aa73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 907, "license_type": "no_license", "max_line_length": 83, "num_lines": 35, "path": 
"/pydouyin/test_desired_capabilities.py", "repo_name": "luxiaotong/crawler", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.chrome.options import Options\nimport json\nimport time\nfrom browsermobproxy import Server\nfrom urllib.parse import urlparse\n\n\nshare_id = \"83834087816\"\nshare_url = \"https://www.iesdouyin.com/share/user/\"+share_id\n\n\n# Chrome\ncaps = DesiredCapabilities.CHROME\ncaps['goog:loggingPrefs'] = {'performance': 'ALL'}\nchrome_options = Options()\nchrome_options.set_headless(headless=True)\ndouyin_driver = webdriver.Chrome(options=chrome_options, desired_capabilities=caps)\n\n\ndouyin_driver.get(share_url)\n\n\ndef process_browser_log_entry(entry):\n response = json.loads(entry['message'])['message']\n print(response)\n return response\n\n\nbrowser_log = douyin_driver.get_log('performance') \nevents = [process_browser_log_entry(entry) for entry in browser_log]\n\n\ndouyin_driver.quit()\n" } ]
32
catieo/regex206
https://github.com/catieo/regex206
749ed6417a6eb5eeca824260f52223dedf65c0c8
5cb07d1e888e8d9c93ab7f6019e325e44594aab7
8950a4a89040df1a93b95db4c1717fc27cc82a22
refs/heads/master
2021-07-11T09:21:08.624493
2017-09-30T18:46:10
2017-09-30T18:46:10
105,392,343
0
0
null
2017-09-30T18:41:53
2017-09-25T18:58:47
2017-09-25T16:46:51
null
[ { "alpha_fraction": 0.653333306312561, "alphanum_fraction": 0.6622222065925598, "avg_line_length": 27.1875, "blob_id": "d214a25dfe5e6178f6bf1cd7300522e445b1f683", "content_id": "9af3b8dc8415315efb642e04ba4471189fb03fe9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 450, "license_type": "no_license", "max_line_length": 66, "num_lines": 16, "path": "/lab4.py", "repo_name": "catieo/regex206", "src_encoding": "UTF-8", "text": "import re\n\nfilevar = open(\"mbox-short.txt\")\n\nlist_of_nums = []\nfor line in filevar.readlines():\n\tline = line.rstrip()\n\ttemp_numbers = re.findall('^X-DSPAM-Confidence: ([0-9.]+)', line)\n\tif len(temp_numbers) != 1: continue \n\tnum = float(temp_numbers[0])\n\tlist_of_nums.append(num)\n\nprint(\"Number of Values =\", len(list_of_nums))\nprint(\"Max =\", max(list_of_nums))\nprint(\"Min =\", min(list_of_nums))\nprint(\"Average =\", sum(list_of_nums)/len(list_of_nums))" } ]
1
gr33ndata/irlib
https://github.com/gr33ndata/irlib
fb26677639a4146c86a528356e6ee5179b52bf8b
4a518fec994b1a89cdc7d09a8170efec3d7e6615
caeafe32a5036289d68c192c7dff1a3eb5e96008
refs/heads/master
2021-12-22T15:08:59.243665
2021-12-18T13:31:53
2021-12-18T13:31:53
7,213,891
83
25
null
null
null
null
null
[ { "alpha_fraction": 0.5844845771789551, "alphanum_fraction": 0.6216790676116943, "avg_line_length": 29.354839324951172, "blob_id": "ec8e47b7fbf697939a5beb6079b36e835e892dba", "content_id": "14a5364f6b8456382beeb2bc3c3a9d26a50db3f3", "detected_licenses": [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 941, "license_type": "permissive", "max_line_length": 44, "num_lines": 31, "path": "/tests/TestSuperList.py", "repo_name": "gr33ndata/irlib", "src_encoding": "UTF-8", "text": "from unittest import TestCase\n\nfrom irlib.superlist import SuperList\n\nclass TestSuperList(TestCase):\n\n def setUp(self):\n self.x = SuperList([0,1,2,3])\n self.y = SuperList([0,1,2,3])\n\n def test_unique_append(self):\n new_item = 1\n i = self.x.unique_append(new_item)\n self.assertEqual(i, new_item)\n self.assertEqual(self.x, self.y)\n new_item = 4\n i = self.x.unique_append(new_item)\n self.assertEqual(i, new_item)\n self.assertNotEqual(self.x, self.y)\n\n def test_insert_after_padding(self):\n self.x.insert_after_padding(7,99)\n self.assertEqual(self.x[7],99)\n self.x.insert_after_padding(1,99)\n self.assertEqual(self.x[1],99)\n\n def test_increment_after_padding(self):\n self.x.increment_after_padding(7,99)\n self.assertEqual(self.x[7],99)\n self.x.increment_after_padding(1,99)\n self.assertEqual(self.x[1],100)\n" }, { "alpha_fraction": 0.6731216311454773, "alphanum_fraction": 0.6764782071113586, "avg_line_length": 32.956138610839844, "blob_id": "b5e75176f8c9c6e34f4666873f0da05222abf323", "content_id": "5411ca0acab85029f09b8f5fb04210302c5e7b02", "detected_licenses": [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3873, "license_type": "permissive", "max_line_length": 101, "num_lines": 114, "path": "/irlib/configuration.py", "repo_name": "gr33ndata/irlib", "src_encoding": "UTF-8", "text": "# Reads configuration parameters from file 'classifier.conf'. 
\n# Used for sharing the configuration between all modules \n\n# Author: Tarek Amr <@gr33ndata> \n\nclass Configuration:\n\t\n\t# Initialize some variables and default values\n\tdef __init__(self, config_file='classifier.conf'):\n\t\tself.config_file=config_file\n\t\tself.configuration = {}\n\t\tself.configuration['classes'] = {}\n\t\tself.configuration['k'] = 0\n\t\tself.configuration['pos'] = False\n\t\tself.configuration['stem'] = False\n\t\tself.configuration['ngram'] = 1\n\n\t# Remove white spaces and convert to lowercase\n\tdef parse_config_attribute(self, attr=''):\n\t\ta = attr.strip().lower()\n\t\treturn a\n\n\t# Remove white spaces and convert to lowercase\n\t# Compile true and false strings\t\n\tdef parse_config_value(self, value=''):\n\t\tv = value.strip().lower()\n\t\tif v == 'true':\n\t\t\tv = True\n\t\telif v == 'false':\n\t\t\tv = False\n\t\treturn v\n\n\t# Split lines into attributes and values\n\tdef extract_attr_value(self, conf_line=''):\n\t\t(attr, value) = conf_line.split(':')\n\t\ta = self.parse_config_attribute(attr)\n\t\tv = self.parse_config_value(value)\n\t\treturn (a,v)\n\n\tdef update_value(self, attr, value):\n\t\tself.configuration[attr] = value\t\n\n\tdef populate_class_names(self, conf_line=''):\n\t\tconf_line = conf_line[1:].strip('(').strip(')')\n\t\t(c_id, c_name) = conf_line.split(':')\n\t\tself.configuration['classes'][c_id] = c_name\t\t\n\n\tdef load_configuration(self):\n\t\tfd = open(self.config_file, 'r')\n\t\tfor line in fd.readlines():\n\t\t\tline = line.strip()\n\t\t\tif line.startswith('#'):\n\t\t\t\tpass # comment line\n\t\t\telif line.startswith('$'):\n\t\t\t\tself.populate_class_names(line)\n\t\t\telif line:\n\t\t\t\t(attr, value) = self.extract_attr_value(line)\n\t\t\t\tself.configuration[attr] = value\n\t\tself.configuration['k'] = int(self.configuration['k'])\n\t\tfd.close()\n\n\tdef get_configuration(self):\n\t\treturn self.configuration\n\n\tdef get_fold_path(self, fold=0):\n\t\tpath = \"%s/%s%s\" % (self.configuration['data_path'], self.configuration['folds_prefix'], str(fold))\n\t\treturn path\n\n\t# Returns the configuration hash-table (a la C/C++ Structures)\n\tdef display_configuration(self):\n\t\tfor item in self.configuration:\n\t\t\tprint item, self.configuration[item]\n\n\t# Returns a summary of configuration as a string\n\tdef __str__(self):\n\t\tenabled_options = []\n\t\tif not self.configuration:\n\t\t\treturn \"No configuration loaded yet!\"\n\t\telse:\n\t\t\tif self.configuration['classifier'] == 'knn':\n\t\t\t\ttry:\n\t\t\t\t\tclassifier_str = \"%s-NN\" % self.configuration['k']\n\t\t\t\texcept:\n\t\t\t\t\tclassifier_str = \"k-NN\"\n\t\t\t\tenabled_options.append(\"Metric: %s, \" % self.configuration['distance_metric'])\n\t\t\telif self.configuration['classifier'] == 'rocchio':\n\t\t\t\tclassifier_str = self.configuration['classifier'].title()\n\t\t\t\tenabled_options.append(\"Metric: %s, \" % self.configuration['distance_metric'])\n\t\t\telif self.configuration['classifier'] == 'bayes':\n\t\t\t\tclassifier_str = self.configuration['classifier'].title()\n\t\t\t\tenabled_options.append(\"Mode: %s, \" % self.configuration['mode'])\n\t\t\telse:\n\t\t\t\tclassifier_str = \"No proper classifier set!\"\n\t\t\tif self.configuration['pos'] == True:\n\t\t\t\tenabled_options.append(\"PoS, \")\n\t\t\tif self.configuration['stem'] == True:\n\t\t\t\tenabled_options.append(\"Stemmer(%s), \" % self.configuration['stemmer_name'])\n\t\t\tif int(self.configuration['ngram']) > 1:\n\t\t\t\tenabled_options.append(\"%d-gram, \" % 
int(self.configuration['ngram']))\n\t\t\tenabled_options.append(\"%d Folds\" % int(self.configuration['folds_count']))\n\t\t\tconf_str = \"%s [%s ]\" % (classifier_str, \"\".join(enabled_options))\n\t\t\treturn conf_str\n\n\t# Returns a list (array) of our fold numbers\n\tdef get_folds(self):\n\t\tfolds = [i for i in range(1,int(self.configuration['folds_count'])+1)]\n\t\treturn folds\n\n\t# Return a list of our fold numbers, except f\n\tdef get_all_folds_but(self,fold=1):\n\t\tif fold > int(self.configuration['folds_count']):\n\t\t\traise Exception\n\t\tfolds = self.get_folds()\n\t\treturn folds[0:fold-1]+folds[fold:len(folds)+1]\t\n\n" }, { "alpha_fraction": 0.46036988496780396, "alphanum_fraction": 0.49603697657585144, "avg_line_length": 25.627119064331055, "blob_id": "265958c0f126beb09452be1ddc5bd463cb53ee11", "content_id": "b2b9128938a7476610fbf31df937ff94978048ae", "detected_licenses": [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1514, "license_type": "permissive", "max_line_length": 47, "num_lines": 59, "path": "/tests/TestMetrics.py", "repo_name": "gr33ndata/irlib", "src_encoding": "UTF-8", "text": "from unittest import TestCase\n\nfrom irlib.metrics import Metrics\n\nclass TestMetrics(TestCase):\n\n    def setUp(self):\n        self.m = Metrics()\n\n    def test_jaccard_same_len(self):\n        with self.assertRaises(ValueError):\n            self.m.jaccard_vectors(\n                [0, 1],\n                [0, 1, 2, 3]\n            )\n\n    def test_jaccard_empty(self):\n        e = self.m.jaccard_vectors([],[])\n        self.assertEqual(e,1)\n\n    def test_jaccard_int(self):\n        e = self.m.jaccard_vectors(\n            [0, 2, 1, 3],\n            [0, 1, 2, 3]\n        )\n        self.assertEqual(e,0.75)\n    \n    def test_jaccard_bool(self):\n        e = self.m.jaccard_vectors(\n            [False, False, True, True, True ],\n            [False, True , True, True, False]\n        )\n        self.assertEqual(e,0.4)\n\n    def test_euclid_same_len(self):\n        with self.assertRaises(ValueError):\n            self.m.euclid_vectors(\n                [0, 1, 2, 3],\n                [0, 1]\n            )\n\n    def test_euclid(self):\n        e = self.m.euclid_vectors([1,1],[4,5])\n        self.assertEqual(e,5)\n\n    def test_cos_same_len(self):\n        with self.assertRaises(ValueError):\n            self.m.cos_vectors(\n                [0, 1, 2],\n                [1, 1]\n            )\n\n    def test_cos_0(self):\n        c = self.m.cos_vectors([1,0,1],[0,1,0])\n        self.assertEqual(round(c,5),float(0))\n\n    def test_cos_1(self):\n        c = self.m.cos_vectors([1,1,1],[1,1,1])\n        self.assertEqual(round(c,5),float(1)) \n\n" }, { "alpha_fraction": 0.6888449788093567, "alphanum_fraction": 0.6938954591751099, "avg_line_length": 28.309677124023438, "blob_id": "6d9b293cfc609b61077703843f20dfd3bec64b1b", "content_id": "e774db17324b3c09f5aeaecc29817fe5e5b02782", "detected_licenses": [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4554, "license_type": "permissive", "max_line_length": 105, "num_lines": 155, "path": "/examples/deceptive spam/classify.py", "repo_name": "gr33ndata/irlib", "src_encoding": "UTF-8", "text": "''' \nReview Spam Classifier \n======================\nWhat it really does:\n* Load configuration \n* Load preprocessor (preprocessor.py)\n* Load classifier (irlib.py) specified by configuration \n* Train on all folds but one\n* Test on remaining fold\n* Permutate and do previous two steps on all folds\n* Get results\n'''\n\n# Author: Tarek Amr <@gr33ndata> \n\nimport os\nimport re\nimport sys\n\n# Adding this to path to be able to import irlib\nsys.path.append('../../')\n\n# Importing the irlib stuff\nfrom irlib.classifier import 
NaiveBayes\nfrom irlib.classifier import Rocchio \nfrom irlib.classifier import KNN \nfrom irlib.classifier import Evaluation \nfrom irlib.preprocessor import Preprocessor \nfrom irlib.configuration import Configuration \n\n\nVERBOSE = True\n\ndef get_doc_id(fold=1, filename=\"\"):\n\tdoc_id = 'fold' + str(fold) + ':' + filename.split(\".\")[0]\n\treturn doc_id\n\n\n# Parse not any more than the first_n_files in folder\n# @ml: Object for our classifier class (Rocchio, kNN, etc)\n# @config: Our configuration class (class as in OOP not ML)\n# @prep: Preprocessor class; tokenizers, stemmers, etc.\ndef parse_files(fold=1, mode = \"training\", first_n_files = 10000, ml=object, config=object, prep=object):\n\tconfig_data = config.get_configuration()\n\t#DOCSDIR = config_data['docs_dir']\n\t#fold_path = DOCSDIR + str(fold)\t+ \"/original-text-files\"\n\tfold_path = config.get_fold_path(fold)\n\tfiles = os.listdir(fold_path)\n\t#print files\t\n\tfor filename in files[0:first_n_files]:\n\t\tclass_name = \"\"\n\t\tfor c in config_data['classes']:\n\t\t\tif filename.startswith(c):\n\t\t\t\tclass_name = config_data['classes'][c]\n\t\t# Skip if failed to identify file's class\n\t\tif not class_name:\n\t\t\tcontinue\t\n\t\tdoc_id = get_doc_id(fold, filename)\n\t\tfd = open('%s/%s' % (fold_path, filename), 'r')\n\t\tfile_data = fd.read()\n\t\tterms = prep.ngram_tokenizer(text=file_data)\n\t\tif mode == 'training':\n\t\t\tml.add_doc(doc_id = doc_id, doc_class=class_name, doc_terms=terms)\n\t\telse:\n\t\t\t# Class known from filename\n\t\t\tml.add_query(query_id = doc_id, query_class=class_name, query_terms=terms)\t\n\t\tfd.close()\n\n# Let's do some workout now on all folders but one\ndef training(config, test_fold, ml, prep):\n\tfolds = config.get_all_folds_but(test_fold)\n\tfor fold in folds:\n\t\tparse_files(fold = fold, mode = 'training', first_n_files = 1000, ml=ml, config=config, prep=prep)\n\n# Let's test on the remaining folder\ndef testing(config, test_fold, ml, ev, prep):\n\tparse_files(fold = test_fold, mode = 'testing', first_n_files = 10000, ml=ml, config=config, prep=prep)\n\tml.compare_queries()\n\n# Call this if anything goes wrong, for clean exit\ndef classifier_exit():\n\tif VERBOSE: \n\t\tprint sys.exc_info()\n\tprint \"\\n[!] 
Houston, we have a problem [!]\" \n\traise\n\tsys.exit()\n\n# Our main function\ndef main():\n\n\t# Load configuration from file\n\tconfig = Configuration(config_file='classify.conf')\n\ttry:\n\t\tconfig.load_configuration()\n\t\tconfig_data = config.get_configuration()\n\texcept:\n\t\tprint \"Error loading configuration file.\"\n\t\tprint \"Classifier aborting.\"\n\t\traise \t\n\t\n\t#config.display_configuration()\n\tprint config\n\n\t#sys.exit()\n\t\n\tmyfolds = config.get_folds()\n\tcorrectness = 0\n\n\t#Preprocessor: tokenizer, stemmer, etc.\n\tprep_lower = config_data['lower']\n\tprep_stem = config_data['stem']\n\tprep_pos = config_data['pos']\n\tprep_ngram = config_data['ngram'] \n\tprep = Preprocessor(pattern='\\W+', lower=prep_lower, stem=prep_stem, pos=prep_pos, ngram=prep_ngram)\n\n\tfor myfold in myfolds:\n\t\tev = Evaluation(config=config, fold=myfold)\n\t\tif config_data['classifier'] == 'rocchio':\n\t\t\tml = Rocchio(verbose=VERBOSE, fold=myfold, config=config, ev=ev)\n\t\telif config_data['classifier'] == 'knn':\n\t\t\tml = KNN(verbose=VERBOSE, fold=myfold, config=config, ev=ev)\n\t\telse:\n\t\t\tml = NaiveBayes(verbose=VERBOSE, fold=myfold, config=config, ev=ev)\n\t\ttraining(config, myfold, ml, prep )\n\t\tml.do_padding()\n\t\tml.calculate_training_data()\n\t\t#r.display_idx()\n\t\tml.diagnose()\n\t\ttesting(config, myfold, ml, ev, prep)\n\t\t\n\t\tk = config_data['k']\n\t\tresults = ev.calculate(review_spam=True, k=k)\n\t\tprint 'Accuracy for fold %d: %s' % (myfold, results)\n\n\t\tcorrectness += results\t\n\n\tprint \"\\nAverage accuracy for all folds:\", correctness / len(myfolds) \n\n\nif __name__ == '__main__':\n\n\t# Profiling mode is not to be used in production,\n\t# only used for profiling the code's performance.\n\tprofiling_mode = False\n\tif profiling_mode: \n\t\timport cProfile\n\t\timport pstats\n\t\tcProfile.run('main()','classifier_prof')\n\t\tp_stats = pstats.Stats('classifier_prof')\n\t\tp_stats.sort_stats('time').print_stats(10)\n\telse:\n\t\ttry:\n\t\t\tmain()\n\t\texcept:\n\t\t\tclassifier_exit()\n\t\t\t\n\n\n\t\n\n\n\n" }, { "alpha_fraction": 0.4366863965988159, "alphanum_fraction": 0.45133134722709656, "avg_line_length": 35.344085693359375, "blob_id": "ef989f3cfc9ebdd939ccda78139bd1287ab50194", "content_id": "a13ccad2af86b8dffc192dfba3f11bfe395eccb8", "detected_licenses": [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6760, "license_type": "permissive", "max_line_length": 78, "num_lines": 186, "path": "/tests/TestMatrix.py", "repo_name": "gr33ndata/irlib", "src_encoding": "UTF-8", "text": "from unittest import TestCase\n\nfrom irlib.matrix import Matrix, MatrixDocs\n\nclass TestMatrixDocs(TestCase):\n\n    def setUp(self):\n        self.mxds = MatrixDocs()\n    \n    def test_doc_fields(self):\n        self.assertEqual(\n            self.mxds.doc_fields(),\n            set(['id', 'class', 'terms'])\n        )\n\n    def test_is_valid_doc(self):\n        self.assertEqual(\n            self.mxds.is_valid_doc({\n                'id': '1',\n                'class': 'Spam',\n                'terms': ['a', 'b', 'c'],\n                'for_future_use': 'na'\n            }),\n            True\n        )\n        self.assertEqual(\n            self.mxds.is_valid_doc({\n                'docid': '1',\n                'class_name': 'Spam',\n                'terms': ['a', 'b', 'c'],\n                'for_future_use': 'na'\n            }),\n            False\n        )\n\n    def test_shuffle(self):\n        mxdocs = MatrixDocs([0,1,2,3,4,5,6,7])\n        mxdocs.shuffle()\n        self.assertNotEqual(\n            mxdocs, MatrixDocs([0,1,2,3,4,5,6,7])\n        )\n\n    def test_split(self):\n        mxdocs = MatrixDocs([0,1,2,3])\n        left = MatrixDocs([0,1])\n        right = MatrixDocs([2,3])\n        
self.assertEqual(\n mxdocs.split(),\n (left, right)\n )\n\n\nclass TestMatrix(TestCase):\n\n def setUp(self):\n self.m = Matrix()\n \n def test_add_doc(self):\n # Try without frequency\n self.assertEqual(len(self.m),0)\n doc1_terms = ['buy', 'now', 'or', 'buy', 'later']\n self.m.add_doc( doc_id = 'file_spam.txt', \n doc_class='Spam', \n doc_terms= doc1_terms,\n frequency=False)\n self.assertEqual(self.m.terms, ['buy', 'now', 'or', 'later'])\n self.assertEqual(self.m.docs[0]['terms'], [1,1,1,1])\n \n # Now try with frequency\n doc2_terms = ['buy', 'today', 'or', 'buy', 'later']\n self.m.add_doc( doc_id = 'file_spam.txt', \n doc_class='Spam', \n doc_terms= doc2_terms,\n frequency=True)\n self.assertEqual(self.m.terms, ['buy', 'now', 'or', 'later', 'today'])\n self.assertEqual(self.m.docs[1]['terms'], [2,0,1,1,1])\n\n # Now let's see if padding is working\n doc2_terms = ['buy', 'now']\n self.m.add_doc( doc_id = 'file_spam.txt', \n doc_class='Ham', \n doc_terms= doc2_terms,\n frequency=True,\n do_padding=True)\n #print self.m.terms, self.m.docs[0]['terms']\n self.assertEqual(len(self.m.terms), len(self.m.docs[0]['terms'])) \n self.assertEqual(len(self.m),3)\n self.assertEqual('buy' in self.m, True)\n self.assertEqual('shopping' in self.m, False)\n\n def test_add_doc_empty(self):\n doc1_terms = []\n with self.assertRaises(ValueError):\n self.m.add_doc( doc_id = 'doc1', \n doc_class='Spam', \n doc_terms= doc1_terms)\n\n def test_meta_data(self):\n mx = Matrix()\n for i,s in enumerate(['hello', 'world']):\n mx.add_doc( doc_id = str(i), \n doc_class='Email', \n doc_terms= s.split(),\n do_padding=True,\n frequency=True,\n meta_data={\n 'original_text': s,\n 'original_text_len': len(s)\n })\n self.assertEqual(mx.docs[1]['original_text'], 'world')\n self.assertEqual(mx.docs[1]['original_text_len'], 5)\n\n def test_docs_unique_ids(self):\n mx = Matrix()\n for i,s in enumerate(['hello', 'how are you', 'fine thank you']):\n mx.add_doc( doc_id = str(i), \n doc_class='Email', \n doc_terms= s.split(),\n do_padding=True,\n frequency=True)\n mx.add_doc(doc_id = '1', \n doc_class='Email', \n doc_terms= 'goodbye'.split(),\n do_padding=True,\n frequency=True,\n unique_ids=True)\n self.assertEqual(len(mx), 3)\n\n def test_get_doc_by_id(self):\n mx = Matrix()\n for i,s in enumerate(['hello', 'how are you', 'fine thank you']):\n mx.add_doc( doc_id = str(i), \n doc_class='Email', \n doc_terms= s.split(),\n do_padding=True,\n frequency=True)\n doc1_id = mx.docs.index('1')\n self.assertEqual(mx.docs[doc1_id]['id'], '1')\n\n def test_query_alignment(self):\n doc1_terms = ['buy', 'now', 'or', 'buy', 'later']\n self.m.add_doc( doc_id = 'file_spam.txt', \n doc_class='Spam', \n doc_terms= doc1_terms,\n frequency=False)\n q_vector = self.m.query_to_vector(['best', 'buy'], frequency=False)\n self.assertEqual(q_vector, [1,0,0,0]) \n\n def test_tf_idf(self):\n doc1_terms = ['new', 'york', 'times']\n self.m.add_doc( doc_id = 'doc1', \n doc_class='Spam', \n doc_terms= doc1_terms,\n do_padding=True,\n frequency=True)\n doc2_terms = ['new', 'york', 'post']\n self.m.add_doc( doc_id = 'doc2', \n doc_class='Spam', \n doc_terms= doc2_terms,\n do_padding=True,\n frequency=True)\n doc3_terms = ['los', 'angeles', 'times']\n self.m.add_doc( doc_id = 'doc3', \n doc_class='Spam', \n doc_terms= doc3_terms,\n do_padding=True,\n frequency=True)\n self.m.tf_idf(log_base=2)\n doc1_tfidf_retval = self.m.docs[0]['terms']\n doc1_tfidf_retval = [round(item, 3) for item in doc1_tfidf_retval]\n doc1_tfidf_expval = [0.585, 0.585, 0.585, 0, 0, 0]\n 
self.assertEqual(doc1_tfidf_retval, doc1_tfidf_expval)\n\n def test_white_and_black_lists(self):\n doc_terms = ['this', 'is', 'a', 'new', 'test']\n white_list = ['test']\n black_list = ['this', 'is', 'a']\n mx = Matrix(whitelist=white_list, blacklist=black_list)\n mx.add_doc( doc_id = 'doc1', \n doc_class='TestClass', \n doc_terms= doc_terms,\n do_padding=True,\n frequency=True)\n returned_terms = mx.vocabulary()\n expected_terms = ['test']\n self.assertItemsEqual(returned_terms, expected_terms)\n" }, { "alpha_fraction": 0.48128849267959595, "alphanum_fraction": 0.488867849111557, "avg_line_length": 28.73239517211914, "blob_id": "9ae56d09f358919c2561acd343d3b9648d814f8d", "content_id": "9273b4dd4da927556b77fcea578ea399040f8548", "detected_licenses": [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2111, "license_type": "permissive", "max_line_length": 61, "num_lines": 71, "path": "/tests/TestAnalysis.py", "repo_name": "gr33ndata/irlib", "src_encoding": "UTF-8", "text": "from unittest import TestCase\n\nfrom irlib.analysis import Freq\n\nclass TestAnalysis(TestCase):\n\n def setUp(self):\n pass\n\n def test_freq_existing_term(self):\n f = Freq()\n f.add(['green', 'apple', 'juice'])\n f.add(['orange', 'orange', 'juice'])\n f.add(['sweet', 'apple', 'pie'])\n apple_freq = f['apple']\n self.assertEqual(apple_freq, 2)\n\n def test_freq_non_existing_term(self):\n f = Freq()\n f.add(['apple', 'juice'])\n f.add(['apple', 'pie'])\n orange_freq = f['orange']\n self.assertEqual(orange_freq, -1) \n\n def test_freq_compare(self):\n f = Freq()\n cmp_val_gt = f.freq_cmp(\n {'token': 'orange', 'freq': 3},\n {'token': 'apple', 'freq': 2}\n )\n self.assertEqual(cmp_val_gt, 1) \n cmp_val_lt = f.freq_cmp(\n {'token': 'orange', 'freq': 2},\n {'token': 'apple', 'freq': 3}\n )\n self.assertEqual(cmp_val_lt, -1) \n\n def test_to_array(self):\n f = Freq()\n f.add(['apple', 'juice'])\n f.add(['apple', 'pie'])\n returned_array = f.to_array()\n expected_array = [\n {'token': 'apple', 'freq': 2},\n {'token': 'juice', 'freq': 1}, \n {'token': 'pie', 'freq': 1}\n ]\n self.assertItemsEqual(returned_array, expected_array)\n\n def test_topn_value(self):\n f = Freq()\n f.add(['apple', 'not', 'orange', 'juice'])\n f.add(['apple', 'and', 'cinnamon', 'pie'])\n returned_array = f.topn(1)\n expected_array = [\n {'token': 'apple', 'freq': 2}\n ]\n self.assertEqual(returned_array, expected_array)\n\n def test_topn_len(self):\n f = Freq()\n f.add(['apple', 'not', 'orange', 'juice'])\n f.add(['apple', 'and', 'cinnamon', 'pie'])\n returned_array = f.topn(5)\n self.assertEqual(len(returned_array), 5)\n\n def test_len(self):\n f = Freq()\n f.add(['apple', 'not', 'orange', 'juice'])\n f.add(['apple', 'and', 'cinnamon', 'pie'])\n self.assertEqual(len(f), 7)\n" }, { "alpha_fraction": 0.48223039507865906, "alphanum_fraction": 0.48774510622024536, "avg_line_length": 27.068965911865234, "blob_id": "d17566f8a390d3249af7030d97835b96b98f8578", "content_id": "249314929a451e19423542360926f1ca08f0ce9e", "detected_licenses": [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1632, "license_type": "permissive", "max_line_length": 67, "num_lines": 58, "path": "/tests/TestLM.py", "repo_name": "gr33ndata/irlib", "src_encoding": "UTF-8", "text": "from unittest import TestCase\n\nfrom irlib.lm import LM\n\nclass TestLM(TestCase):\n\n def setUp(self):\n pass\n\n def 
test_n_neq_zero(self):\n        with self.assertRaises(ValueError):\n            LM(n=0)\n\n    def test_vocabulary(self):\n        lm = LM()\n        lm.add_doc(doc_id='doc1', doc_terms='apple tree'.split())\n        lm.add_doc(doc_id='doc2', doc_terms='orange juice'.split())\n        vocab_returned = lm.get_vocabulary()\n        vocab_expected = set(['apple','tree','orange','juice'])\n        self.assertEqual(vocab_returned, vocab_expected)\n\n    def helper_test_ngrams(self, n, sent, expected_ngrams):\n        lm = LM(n=n)\n        ngrams_returned = lm.to_ngrams(sent.split())\n        self.assertEqual(ngrams_returned, expected_ngrams)\n\n\n    def test_ngram1(self):\n        self.helper_test_ngrams(\n            n=1, \n            sent='i like apples and oranges',\n            expected_ngrams=[\n                ['i'], ['like'], ['apples'], ['and'], ['oranges']\n            ]\n        )\n\n    def test_ngram2(self):\n        self.helper_test_ngrams(\n            n=2, \n            sent='i like apples and oranges',\n            expected_ngrams=[\n                ['i', 'like'], \n                ['like', 'apples'], \n                ['apples', 'and'], \n                ['and', 'oranges']\n            ]\n        )\n\n    def test_ngram3(self):\n        self.helper_test_ngrams(\n            n=3, \n            sent='i like apples and oranges',\n            expected_ngrams=[\n                ['i', 'like', 'apples'],\n                ['like', 'apples', 'and'],\n                ['apples', 'and', 'oranges']\n            ]\n        )\n    " }, { "alpha_fraction": 0.6076294183731079, "alphanum_fraction": 0.6457765698432922, "avg_line_length": 25, "blob_id": "76dc3a577638ebfff01019ebd9f27687c331d8e1", "content_id": "e61dd0a23f127e2b0f3bc203a7f9cfaf0df1376d", "detected_licenses": [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 367, "license_type": "permissive", "max_line_length": 74, "num_lines": 14, "path": "/tests/TestProgress.py", "repo_name": "gr33ndata/irlib", "src_encoding": "UTF-8", "text": "from unittest import TestCase\n\nfrom irlib.progress import Progress\n\nclass TestProgress(TestCase):\n\n    def setUp(self):\n        self.p = Progress(n=1002, percent=10)\n\n    def test_progress_counter(self):\n        total = 0\n        for i in range(0,1002):\n            total += self.p.show(message='Testing progress:', silent=True)\n        self.assertEqual(total,10) \n" }, { "alpha_fraction": 0.49939441680908203, "alphanum_fraction": 0.5066612958908081, "avg_line_length": 27.4252872467041, "blob_id": "61cd739a5f94856100c9a853c915d1debc6cf251", "content_id": "f74c384ded7045a51081ec6f916471a0fbd24794", "detected_licenses": [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2477, "license_type": "permissive", "max_line_length": 62, "num_lines": 87, "path": "/irlib/matrixcooccurrence.py", "repo_name": "gr33ndata/irlib", "src_encoding": "UTF-8", "text": "''' \nInformation Retrieval Library\n==============================\nMatrixCooccurrence: You give it a Matrix,\nand it creates a new co-occurrence matrix of its features\n'''\n\n# Author: Tarek Amr <@gr33ndata> \n\nimport sys, math\nfrom matrix import Matrix\nfrom superlist import SuperList\n\nfrom itertools import permutations\n\nclass MatrixCooccurrence(Matrix):\n\n    def __init__(self, mx=None):\n        \n        self.orig_mx = mx\n        self.terms = self.orig_mx.vocabulary()\n        \n        N = len(self.terms)\n        square = [[0 for _ in range(0,N)] for _ in range(0,N)]\n        for doc in self.orig_mx.docs:\n            terms_indeces = self._nonzeros(doc['terms'])    \n            for c in permutations(terms_indeces,2):\n                square[c[0]][c[1]] += 1\n            for cc in terms_indeces:\n                square[cc][cc] += 1\n        \n        self.docs = []\n        for i in range(len(self.terms)):\n            self.docs.append({ 'id': self.terms[i],  \n                               'class': '',  \n                               'terms': square[i]})\n        \n    def _nonzeros(self, x):\n        nz = []\n        for i in 
range(0,len(x)):\n if x[i] != 0:\n nz.append(i)\n return nz \n \n def normalize(self):\n for i in range(len(self.docs)):\n terms = self.docs[i]['terms']\n idf = terms[i]\n for j in range(len(terms)):\n terms[j] = float(terms[j]) / idf\n self.docs[i]['terms'] = terms\n \nif __name__ == '__main__':\n\n mx = Matrix()\n mx.add_doc(doc_id=1,\n doc_terms=['apple', 'juice', 'fruit'],\n doc_class= '0',\n frequency=True, do_padding=True) \n mx.add_doc(doc_id=2,\n doc_terms=['orange', 'juice', 'fruit'],\n doc_class= '0',\n frequency=True, do_padding=True)\n mx.add_doc(doc_id=3,\n doc_terms=['tomato', 'juice', 'food'],\n doc_class= '0',\n frequency=True, do_padding=True) \n \n print 'Matrix' \n print mx.vocabulary()\n for doc in mx.docs:\n print doc['terms'] \n #print mx \n \n mxcc = MatrixCooccurrence(mx)\n print 'MatrixCooccurrence' \n print mxcc.vocabulary()\n for doc in mxcc.docs:\n print doc['id'], doc['terms']\n #print mxcc \n \n print 'MatrixCooccurrence (Normalized)' \n #mxcc.normalize()\n mxcc.tf_idf(do_idf=True) \n print mxcc.vocabulary()\n for doc in mxcc.docs:\n print doc['id'], doc['terms'] \n" }, { "alpha_fraction": 0.5607655644416809, "alphanum_fraction": 0.5655502676963806, "avg_line_length": 20.75, "blob_id": "42f0c1006a3d3491737bfb60c1fec3d838b9d6c6", "content_id": "1aadeb81e22eb5b883c417babb0dbc966658fd33", "detected_licenses": [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1045, "license_type": "permissive", "max_line_length": 55, "num_lines": 48, "path": "/examples/twitter/search.py", "repo_name": "gr33ndata/irlib", "src_encoding": "UTF-8", "text": "# Search in tweets\n\nimport os, sys\n\n# Adding this to path to be able to import irlib\nsys.path.append('../../')\n\nfrom irlib.preprocessor import Preprocessor\nfrom irlib.matrix import Matrix\n\ndef readfiles(fold_path='all-folds/fold1/'):\n\n prep = Preprocessor()\n mx = Matrix()\n\n files = os.listdir(fold_path)\n for filename in files:\n fd = open('%s/%s' % (fold_path, filename), 'r')\n file_data = fd.read()\n terms = prep.ngram_tokenizer(text=file_data)\n mx.add_doc(doc_id=filename, doc_terms=terms, \n frequency=True, do_padding=True)\n\n\n print 'Number of read documents:', len(mx.docs)\n print 'Number of read terms', len(mx.terms)\n #print mx.terms[0:5], mx.terms[-5:-1]\n print mx.terms\n print mx.docs\n\ndef search():\n\n while True:\n q = raw_input(\"Search: \")\n q = q.strip()\n if not q:\n return\n else:\n #search here\n pass\n \ndef main():\n readfiles()\n #search()\n \n \nif __name__ == \"__main__\":\n main()\n\n" }, { "alpha_fraction": 0.7586206793785095, "alphanum_fraction": 0.7586206793785095, "avg_line_length": 13.5, "blob_id": "b0afce2df485fef3efe3f1a77f7b7cc3218ecc57", "content_id": "20ffa1712243973fa7a1ba8d0edb36877bc0b443", "detected_licenses": [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 29, "license_type": "permissive", "max_line_length": 22, "num_lines": 2, "path": "/makefile", "repo_name": "gr33ndata/irlib", "src_encoding": "UTF-8", "text": "test:\n\tpython tests/tests.py\n" }, { "alpha_fraction": 0.877284586429596, "alphanum_fraction": 0.877284586429596, "avg_line_length": 28.538461685180664, "blob_id": "6f40ba14e612fb44dbb3f41c6a49dadcde4a2734", "content_id": "f099dcd1b5c126761a3ce9a8729148a28a94b95f", "detected_licenses": [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 383, "license_type": "permissive", "max_line_length": 51, "num_lines": 13, "path": "/tests/__init__.py", "repo_name": "gr33ndata/irlib", "src_encoding": "UTF-8", "text": "import os\nimport sys\n\nimport irlib\n\nfrom tests.TestLM import TestLM\nfrom tests.TestSuperList import TestSuperList\nfrom tests.TestPreprocessor import TestPreprocessor\nfrom tests.TestMatrix import TestMatrix\nfrom tests.TestMetrics import TestMetrics\nfrom tests.TestProgress import TestProgress\nfrom tests.TestAnalysis import TestAnalysis\nfrom tests.TestEvaluation import TestEvaluation" }, { "alpha_fraction": 0.6758832335472107, "alphanum_fraction": 0.6758832335472107, "avg_line_length": 43.89655303955078, "blob_id": "2c51ada06eaae6659c9b303d16f5683c55d9fe38", "content_id": "a460abac77190b66c88f85549ddf66b17b91bb54", "detected_licenses": [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1302, "license_type": "permissive", "max_line_length": 79, "num_lines": 29, "path": "/TODO.rst", "repo_name": "gr33ndata/irlib", "src_encoding": "UTF-8", "text": "Things to be done\n-----------------\n\n- [ ] We need to de-couple the dataset from the classifiers\n We can follow scikit's dataset.target and dataset.data approach\n This will make it easier for MI to deal with data before classification\n\n- [x] Best way, is to implement the dataset as a Vector Space, \n since this is an IR Library.\n\n * Vector Space will look like scikit's dataset. [see above]\n * Function to convert ot TF and/or IDF each document, or all.\n * Let's offer a way to serialize new queries, \n however, no need to put queries in a Vector Space as we do now.\n\n- [ ] We need add pruning and MI (Mutual Information) again to our code\n Use it to skip columns from the VSM as well.\n\n- [x] We need to add basic TF-IDF search capabilities to our Vecotr Space.\n Both Euclidean and Cosine distances should be added here.\n\n- [ ] We need a way to dump VSM into file (pickle) and read it back\n Do padding automatically if not done before comparisons or tf.idf\n\n- [ ] Add statistics to VSM, ie. 
most frequent terms, histograms, etc.\n We probably add special class for that, MI can go here too.\n\n- [ ] We need to implement Ye's shapelet classifier.\n Probably implement it as standalone, not here in irlib.\n" }, { "alpha_fraction": 0.6004149317741394, "alphanum_fraction": 0.6029045581817627, "avg_line_length": 34.44117736816406, "blob_id": "92bad0efe0382a901c17736f4ba530b8a70841d2", "content_id": "69d5f814abb46449496c16eb349104309083466d", "detected_licenses": [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2410, "license_type": "permissive", "max_line_length": 61, "num_lines": 68, "path": "/tests/TestPreprocessor.py", "repo_name": "gr33ndata/irlib", "src_encoding": "UTF-8", "text": "from unittest import TestCase\n\nfrom irlib.preprocessor import Preprocessor, my_nltk\n\nclass TestPreprocessor(TestCase):\n\n def setUp(self):\n pass \n\n def test_term2ch(self):\n p = Preprocessor()\n charlist = p.term2ch('help')\n self.assertEqual(charlist, ['h', 'e', 'l', 'p']) \n\n def test_stemmer(self):\n p = Preprocessor(stem=True)\n stemmed = p.stemmer('running')\n if my_nltk:\n self.assertEqual(stemmed,'run') \n else: \n self.assertTrue(False,'NLTK is not installed') \n\n def test_stemmer_lower(self):\n p = Preprocessor(lower=True, stem=True)\n stemmed = p.stemmer('Running')\n if my_nltk:\n self.assertEqual(stemmed,'run') \n else: \n self.assertTrue(False,'NLTK is not installed') \n \n def test_tokenizer_lower(self):\n p = Preprocessor(lower=True, stem=False)\n tokens = p.tokenizer('This is IRLib')\n self.assertEqual(tokens,['this','is','irlib'])\n\n def test_2gram_tokenizer(self):\n p = Preprocessor(lower=False, stem=False, ngram=2)\n returned_tokens = p.ngram_tokenizer('how do you do?')\n expected_tokens = ['how do', 'do you', 'you do']\n self.assertEqual(returned_tokens, expected_tokens)\n\n def test_3gram_tokenizer(self):\n p = Preprocessor(lower=False, stem=False, ngram=3)\n returned_tokens = p.ngram_tokenizer('how do you do?')\n expected_tokens = ['how do you', 'do you do']\n self.assertEqual(returned_tokens, expected_tokens)\n\n def test_is_mention(self):\n is_it = Preprocessor.is_mention('@twitter')\n self.assertEqual(is_it, True)\n is_it = Preprocessor.is_mention('#twitter')\n self.assertEqual(is_it, False)\n\n def test_is_hashtag(self):\n is_it = Preprocessor.is_hashtag('@twitter')\n self.assertEqual(is_it, False)\n is_it = Preprocessor.is_hashtag('#twitter')\n self.assertEqual(is_it, True)\n\n def test_is_link(self):\n is_it = Preprocessor.is_link('hello world')\n self.assertEqual(is_it, False)\n is_it = Preprocessor.is_link('http://www.yahoo.com')\n self.assertEqual(is_it, True)\n is_it = Preprocessor.is_link('https://www.yahoo.com')\n self.assertEqual(is_it, True)\n is_it = Preprocessor.is_link('www.yahoo.com')\n self.assertEqual(is_it, True)\n" }, { "alpha_fraction": 0.6800000071525574, "alphanum_fraction": 0.6966666579246521, "avg_line_length": 22.076923370361328, "blob_id": "4758423f6b0ce69efd7157d87f5a7b3d95fff169", "content_id": "2db95f37c6a08ffa610ef38e339510749b8afaaa", "detected_licenses": [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 300, "license_type": "permissive", "max_line_length": 45, "num_lines": 13, "path": "/setup.py", "repo_name": "gr33ndata/irlib", "src_encoding": "UTF-8", "text": "from distutils.core import setup\n\nsetup(\n\tname='irlib',\n\tversion='0.1.1',\n\tauthor='Tarek 
Amr',\n\tauthor_email='[email protected]',\n    url='https://github.com/gr33ndata/irlib',\n\tpackages=['irlib'],\n\tlicense='LICENSE.txt',\n\tdescription='Information Retrieval Library',\n\tlong_description=open('README.rst').read()\n)\n" }, { "alpha_fraction": 0.5694080591201782, "alphanum_fraction": 0.5725510716438293, "avg_line_length": 30.26229476928711, "blob_id": "365d3c1975f95c7a28d985fd769f5902e7ec4c6c", "content_id": "b5ff6db40e754951bd9867fb46a6dea1a8daddb0", "detected_licenses": [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1909, "license_type": "permissive", "max_line_length": 68, "num_lines": 61, "path": "/tests/TestEvaluation.py", "repo_name": "gr33ndata/irlib", "src_encoding": "UTF-8", "text": "from unittest import TestCase\n\nfrom irlib.evaluation import Evaluation\n\nclass TestEvaluation(TestCase):\n\n    def setUp(self):\n        pass\n\n    def test_correct_label_list(self):\n        e = Evaluation()\n        e.ev('Apples', 'Oranges')\n        e.ev('Melons', 'Bananas')\n        expected_labels = ['Apples', 'Oranges', 'Melons', 'Bananas']\n        returned_labels = e.get_classes_labels()\n        self.assertItemsEqual(returned_labels, expected_labels)\n\n    def test_correct_overall_accuracy(self):\n        e = Evaluation()\n        e.ev('Apples' , 'Oranges')\n        e.ev('Oranges', 'Oranges')\n        e.ev('Apples' , 'Apples')\n        e.ev('Oranges', 'Apples')\n        expected_accuracy = 0.5\n        returned_accuracy = e.overall_accuracy(percent=False)\n        self.assertEqual(returned_accuracy, expected_accuracy)\n\n    def test_correct_overall_fp(self):\n        e = Evaluation()\n        e.ev('Apples' , 'Oranges')\n        e.ev('Apples' , 'Bananas')\n        e.ev('Apples' , 'Apples')\n        expected_fp = 2\n        returned_fp = e.fp('Apples')\n        self.assertEqual(returned_fp, expected_fp)\n\n    def test_correct_overall_tp(self):\n        e = Evaluation()\n        e.ev('Apples' , 'Oranges')\n        e.ev('Apples' , 'Apples')\n        e.ev('Apples' , 'Apples')\n        expected_tp = 2\n        returned_tp = e.tp('Apples')\n        self.assertEqual(returned_tp, expected_tp)\n\n    def test_correct_overall_fn(self):\n        e = Evaluation()\n        e.ev('Apples' , 'Oranges')\n        e.ev('Bananas', 'Apples')\n        e.ev('Apples' , 'Apples')\n        expected_fn = 1\n        returned_fn = e.fn('Apples')\n        self.assertEqual(returned_fn, expected_fn)\n\n    def test_correct_overall_tn(self):\n        e = Evaluation()\n        e.ev('Apples' , 'Oranges')\n        e.ev('Apples' , 'Apples')\n        expected_tn = 0\n        returned_tn = e.tn('Apples')\n        self.assertEqual(returned_tn, expected_tn)\n\n\n" }, { "alpha_fraction": 0.76998370885849, "alphanum_fraction": 0.76998370885849, "avg_line_length": 39.86666488647461, "blob_id": "c226104da446861e6a4030df0405f161a2b04b83", "content_id": "d8a4061ef71256f32deb20256477ca3411e390b7", "detected_licenses": [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 613, "license_type": "permissive", "max_line_length": 86, "num_lines": 15, "path": "/examples/turing chat/README.rst", "repo_name": "gr33ndata/irlib", "src_encoding": "UTF-8", "text": "Question and Answer\n===================\n\nqa.py is a *very* simple implementation of a computer-based question & answer program,\na la Alan Turing tests.\n\nIt basically reads a list of questions and answers from qa.txt,\nthen it uses IR/Euclidean distance to search for the best matching question,\nand returns the answer for the retrieved question.\n\nI've noticed the Euclidean distance performs much better than cosine distance,\nsince we are searching in very short documents (questions).\n\nI do not 
think we need to convert our vector space into TF.IDF, \nonce more, because we are searching in very short documents.\n" }, { "alpha_fraction": 0.49246013164520264, "alphanum_fraction": 0.4993537366390228, "avg_line_length": 26.976743698120117, "blob_id": "37c0a25d758f9a9199114c50a359319a1c4fdb8c", "content_id": "bcecd3604dfdb9f5adf2abf3cf762a324abfad32", "detected_licenses": [ "MIT", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2321, "license_type": "permissive", "max_line_length": 73, "num_lines": 86, "path": "/examples/turing chat/qa.py", "repo_name": "gr33ndata/irlib", "src_encoding": "UTF-8", "text": "# Using IR to answer your question\n# Not so smart question and answer system\n\nimport os, sys\nimport random\n\n# Adding this to path to be able to import irlib\nsys.path.append('../../')\n\nfrom irlib.preprocessor import Preprocessor\nfrom irlib.matrix import Matrix\nfrom irlib.metrics import Metrics\n\n#qa_list = {'id':{'q': 'question', 'a': 'answer'}}\n\nclass QA:\n    \n    def __init__(self):\n        self.file_name = 'qa.txt'\n        self.qa_list = {}\n        self.qa_id = 0\n        self.prep = Preprocessor()\n        self.mx = Matrix()\n        self.metric = Metrics()\n    \n    def randomize(self, a):\n        for i in range(len(a)):\n            a[i] = random.randint(0,1)\n\n    def readfile(self):\n\n        fd = open(self.file_name,'r')\n        for line in fd.readlines():\n            line = line.strip().lower().split(':')\n            if len(line) != 2: \n                continue\n            elif line[0] == 'q':\n                q_line = ' '.join(line[1:])\n                self.qa_id += 1\n                self.qa_list[self.qa_id] = {'q': q_line, 'a': ''}\n                terms = self.prep.ngram_tokenizer(text=q_line)\n                self.mx.add_doc(doc_id=self.qa_id, doc_terms=terms, \n                                frequency=True, do_padding=True)\n            elif line[0] == 'a': \n                a_line = ' '.join(line[1:])\n                self.qa_list[self.qa_id]['a'] = a_line\n        \n        #print 'Number of read questions and answers:', len(self.mx.docs)\n        #print 'Number of read terms', len(self.mx.terms)\n    \n    def ask(self, q=''):\n\n        q_id = 0\n        q_distance = 99999\n\n        terms = self.prep.ngram_tokenizer(text=q)\n        q_vector = self.mx.query_to_vector(terms, frequency=False)\n\n        if sum(q_vector) == 0:\n            self.randomize(q_vector)\n\n        for doc in self.mx.docs:\n            distance = self.metric.euclid_vectors(doc['terms'], q_vector)\n            if distance < q_distance:\n                q_distance = distance\n                q_id = doc['id']\n        \n        print 'Tarek:', self.qa_list[q_id]['a']\n    \ndef main():\n\n    qa = QA()\n    qa.readfile()\n\n    while True:\n        q = raw_input(\"\\nAsk me something: \")\n        q = q.strip()\n        if not q:\n            return\n        else:\n            qa.ask(q=q)\n\n    \n    \nif __name__ == \"__main__\":\n    main()\n\n" } ]
18
arjun-rao/Hand_Motion_Recognizer
https://github.com/arjun-rao/Hand_Motion_Recognizer
c98a6351c1b1b32e2a708f1d3996368e22653fc9
0485e7123e9f6bcaffa64fba10181c7977de6fc3
9ae7fd91152381d8d4a6bd1e099607dee5aadf44
refs/heads/master
2022-04-27T01:26:42.297764
2020-04-27T19:06:00
2020-04-27T19:06:00
259,213,090
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8050941228866577, "alphanum_fraction": 0.8062015771865845, "avg_line_length": 99.44444274902344, "blob_id": "3e20fff6661f4ea92360ae4f17c49786fd27e2b0", "content_id": "182c99bfc2bf72b123b1618b2709d07a1253f0e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 907, "license_type": "no_license", "max_line_length": 480, "num_lines": 9, "path": "/Readme.md", "repo_name": "arjun-rao/Hand_Motion_Recognizer", "src_encoding": "UTF-8", "text": "# Hand Movement Recognizer\n\nIn this project, we'll make a glove that can recognize some basic hand movements, using a ​MicroBit, and a few sensors. We'll be using the Bluetooth capabilities on the MicroBit, in conjunction with an Android App and an Web Server to train a machine learning model to identify hand movements.\n\n## Getting Started\n\nA majority of the effort involved in this project is on the software side, and all the code needed to run this project is available in this repository. The code base involves 3 components, the code to generate a HEX file for the MicroBit, the Android App codebase​ which is heavily based on the MicroBit Foundation's MicroBit Blue app, with modifications made for this specific use case, and a web server with code for training a Tensorflow based model to identify hand movements.\n\nInstructions for building the hardware component is available on instructable." }, { "alpha_fraction": 0.6336032152175903, "alphanum_fraction": 0.7044534683227539, "avg_line_length": 19.375, "blob_id": "a357564ef07a9be4bd760558cf139292bf19859c", "content_id": "3138201b0d1bfcb0261fcc3a8ae7fb2eea0163f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 494, "license_type": "no_license", "max_line_length": 149, "num_lines": 24, "path": "/gesture_server/README.md", "repo_name": "arjun-rao/Hand_Motion_Recognizer", "src_encoding": "UTF-8", "text": "# Hand Movement Recognition\n Classify hand movements from a BBC Micro:bit by analysing the x, y and z plane acceleration readings and sensor values from a Force and Flex Sensor.\n\nTo be used in conjunction with the Android App.\n\n## Installation\n**Software Requirements**\n- Python 3.6.7\n- Keras 2.2.4\n- Tensorflow 1.13.1\n- Microfs 1.3.1\n- Mu-Editor 1.0.2\n- Pyserial 3.4\n- Numpy 1.16.2\n- Pandas 0.24.1\n- Matplotlib 2.2.2\n- Scikit-learn 0.20.\n- Flask 1.1.2\n\n## Usage\n\n```\npython server.py\n```\n\n\n\n\n\n" }, { "alpha_fraction": 0.5214120149612427, "alphanum_fraction": 0.5277777910232544, "avg_line_length": 42.20000076293945, "blob_id": "eee83bd154b17dfed4bc1b7715b660905abd7698", "content_id": "61a987a51a951436e6a389dc70d25603462f4ab0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1730, "license_type": "no_license", "max_line_length": 138, "num_lines": 40, "path": "/gesture_server/src/Read Microbit.py", "repo_name": "arjun-rao/Hand_Motion_Recognizer", "src_encoding": "UTF-8", "text": "from microfs import ls, get, rm\nfrom datetime import datetime\nimport sys\nimport os\nimport time\n\noutput_extension = \".csv\"\nTITLE = (\"\\n\\t\\t\\t\\t\\t#################\"\n \"\\n\\t\\t\\t\\t\\t# Read Microbit #\"\n \"\\n\\t\\t\\t\\t\\t#################\\n\")\n\ndef main():\n print(TITLE)\n csv_files = [file for file in ls() if file.endswith(output_extension)] # Load all csv file names from the Microbit\n if not csv_files:\n print(\" No '{}' files to copy!\".format(output_extension))\n return 0\n name = 
input(\" Enter target name: \")\n name = name.capitalize() if name else \"Unnamed\"\n # Create sub-directory in RAW_Data directory\n dir_name = os.path.join(\"RAW_Data\", \"{} {}\".format(name, datetime.now().strftime(\"%d %m %Y %H-%M\")))\n os.makedirs(dir_name, exist_ok=True) # Make the sub-directory if it doesn't exist\n print()\n for i, file in enumerate(csv_files):\n print(\" Progress: {:<50} ({}/{})\".format(\"█\"*int(50*(i+1)/len(csv_files)), i+1, len(csv_files)), end=\"\\r\")\n f_name = \"{}{}{}\".format(name, i, output_extension) # Prepare file name at destination directory\n get(file, os.path.join(dir_name, f_name)) # Copy file from Microbit to given directory as f_name\n time.sleep(1)\n rm(file)\n time.sleep(1) # Remove file from Microbit\n print(\"\\n\\n {} files moved to '{}'\".format(i+1, dir_name))\n return 0\n\nif __name__ == \"__main__\":\n try:\n main()\n except Exception as ex:\n print(\"\\n ERROR: {}\".format(ex.args))\n finally:\n input(\" Press Enter to exit\")\n" }, { "alpha_fraction": 0.7497348785400391, "alphanum_fraction": 0.7497348785400391, "avg_line_length": 35.230770111083984, "blob_id": "df928c408c40043bd0532b3bd11a1636def7306a", "content_id": "3f9d87c2459b5a05112e9b01169951a9750c2eaa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 943, "license_type": "permissive", "max_line_length": 209, "num_lines": 26, "path": "/gesture-recognizer-microbit/README.md", "repo_name": "arjun-rao/Hand_Motion_Recognizer", "src_encoding": "UTF-8", "text": "# Gesture-Recognizer\n\nAuthor: Arjun Rao\n\nYou must install yotta compiler tool chain to compile this. Follow the instructions for Yotta installation from the build enviroment section below.\n\nThis project contains the source code for generating the HEX file for the microbit.\n\n```\nyt clean\nyt build\n```\n\nThe HEX file for you micro:bit with then be generated and stored in build\\bbc-microbit-classic-gcc\\source\\gesture-recognizer-combined.hex\n\n\n## Links\n\n[micro:bit runtime docs](http://lancaster-university.github.io/microbit-docs/) | [microbit-dal](https://github.com/lancaster-university/microbit-dal) | [uBit](https://github.com/lancaster-university/microbit)\n\n## Build Environments\n\n| Build Environment | Documentation |\n| ------------- |-------------|\n| ARM mbed online | http://lancaster-university.github.io/microbit-docs/online-toolchains/#mbed |\n| yotta | http://lancaster-university.github.io/microbit-docs/offline-toolchains/#yotta |\n\n" }, { "alpha_fraction": 0.4393787682056427, "alphanum_fraction": 0.47545090317726135, "avg_line_length": 36.31775665283203, "blob_id": "561e1595f89e6efd4a10b490c973017fe9c7dde3", "content_id": "275c6b4261cfd61e5e12106fbb41a5ccdcea2825", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3992, "license_type": "no_license", "max_line_length": 123, "num_lines": 107, "path": "/gesture_server/src/Collect Data.py", "repo_name": "arjun-rao/Hand_Motion_Recognizer", "src_encoding": "UTF-8", "text": "from microbit import display, Image, sleep, button_a, button_b, running_time, accelerometer\nfrom micropython import const\n# States\nGET_DATA_NUM = const(0)\nREADY_TO_SAMPLE = const(1)\nSAMPLE_DATA = const(2)\nEXIT = const(-1)\n# Global constants\nCHANNEL = const(13)\nMAX_FILES_NUM = const(12)\nMAX_ATTEMPT = const(3)\nSAMPLE_PERIOD = const(10) # Sample period in ms\nSAMPLE_DURATION = const(3000) # Total sample time in ms\nACK = const(\"0\")\nNACK = 
const(\"1\")\n# LED Numbers; 3, 2, 1\nNUM_IMGS = const([Image(\"90909:90909:90909:99999:99999\"),\n Image(\"99909:90909:90909:90909:90999\"),\n Image(\"00000:00090:99999:99999:00000\")])\n\ndef countdown(t=3):\n \"\"\"\n Display countdown animation of given second(s)\n @param t : Time (in seconds)\n @returns : Nothing\n \"\"\"\n max_t = len(NUM_IMGS)\n if (t > max_t):\n t = max_t\n elif (t < 1):\n t = 1\n display.show(NUM_IMGS[-t:], loop=False, delay=1000)\n\ndef setPixelTill(cx, cy, value=9):\n \"\"\"\n Sets all pixel from (0, 0) till the given (x, y) coordinate to given value\n @param cx : X-coordiante of the final pixel\n @param cy : Y-coordinate of the final pixel\n @param value : Value to set for the pixels\n @returns : Nothing\n \"\"\"\n for y in range(5):\n for x in range(5):\n display.set_pixel(x, y, value)\n if x==cx and y==cy:\n return\n\ndef main():\n data_sent = 0\n state = GET_DATA_NUM\n while True:\n # State 0\n if state == GET_DATA_NUM:\n data_num = 1\n attempt = 0\n while True:\n cy, cx = divmod(data_num-1, 5) # Cursor x and y depending on the data_num\n display.set_pixel(cx, cy, 9)\n if button_a.is_pressed() and button_b.is_pressed():\n state = READY_TO_SAMPLE # TODO: Change state to some other state\n data_sent = 0\n sleep(500)\n break\n elif button_a.is_pressed():\n if data_num > 1:\n display.set_pixel(cx, cy, 0) # Clear LED pixel if data_num > 1\n data_num = data_num - 1 if (data_num > 1) else 1\n elif button_b.is_pressed():\n data_num = data_num + 1 if (data_num < MAX_FILES_NUM) else MAX_FILES_NUM\n sleep(200)\n # State 1\n elif state == READY_TO_SAMPLE:\n while True:\n if button_a.is_pressed():\n state = SAMPLE_DATA\n break\n elif button_b.is_pressed():\n display.clear()\n cy, cx = divmod(data_num-data_sent-1, 5)\n setPixelTill(cx, cy, 9)\n else:\n display.show(Image.ARROW_W)\n sleep(200)\n # State 2\n elif state == SAMPLE_DATA:\n countdown(3)\n display.show(Image.TARGET)\n with open(\"file_{}.csv\".format(data_sent), \"w\") as data_file:\n data_file.write(\"x,y,z\\n\")\n initial_time = running_time()\n while (running_time()-initial_time) < SAMPLE_DURATION:\n t0 = running_time()\n data_file.write(\"{},{},{}\\n\".format(*accelerometer.get_values()))\n t_diff = running_time()-t0\n sleep(0 if (SAMPLE_PERIOD-t_diff)<0 else SAMPLE_PERIOD-t_diff)\n data_sent += 1\n if (data_num-data_sent)>0:\n state = READY_TO_SAMPLE\n else:\n state = EXIT\n # State 3\n elif state == EXIT:\n display.show(Image.HAPPY)\n break\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.6090957522392273, "alphanum_fraction": 0.6149353981018066, "avg_line_length": 35.701297760009766, "blob_id": "1994d87d00f6c8a74c0cc4baff16beb3b4194c67", "content_id": "b61beb370cca5b186adf78b67f991fa86b6acb19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5651, "license_type": "no_license", "max_line_length": 116, "num_lines": 154, "path": "/gesture_server/server.py", "repo_name": "arjun-rao/Hand_Motion_Recognizer", "src_encoding": "UTF-8", "text": "from flask import Flask, request, redirect, jsonify, render_template\nimport os\nimport json\nimport csv\nfrom collections import defaultdict\nimport numpy as np\nimport traceback\nfrom threading import Thread\nimport pickle\nimport tensorflow as tf\nimport keras\n\nimport ml_model\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\napp = Flask(__name__)\n\nevents = defaultdict(int)\npreds = defaultdict(int)\nmodel = None\nfirstLoad = False\nmode_file_path = None\nlabel_encoder = 
None\nfeature_names = ['x', 'y', 'z', 'p', 'r', 'f', 'flx']\nmax_review_length = 120\nbackground_thread = None\nsession = tf.Session(graph=tf.Graph())\n\ndef try_loading_models():\n global model, label_encoder, mode_file_path\n mode_file_path = os.path.join(\"Models\", \"latest.HDF5\")\n with session.graph.as_default():\n keras.backend.set_session(session)\n if (model is None or firstLoad==False) and os.path.isfile(mode_file_path):\n print('Loading saved model')\n model = ml_model.load_trained_model(mode_file_path)\n label_encoder = pickle.load(open('./Models/latest_le.pkl', 'rb'))\n return model, label_encoder\n return model, label_encoder\n\ndef train_model():\n global model, mode_file_path, label_encoder, background_thread\n with session.graph.as_default():\n keras.backend.set_session(session)\n try:\n x_train, y_train, f = ml_model.get_train_data(events, feature_names, max_review_length)\n x_train, y_train = ml_model.shuffle_train_data(x_train, y_train, len(feature_names))\n labels_train, le = ml_model.encode_labels(y_train)\n NUM_SAMPLES = len(labels_train)\n NUM_LABELS = len(np.unique(y_train))\n data_train = ml_model.parse_train_data(x_train, NUM_SAMPLES, max_review_length)\n print(f'Training model for dataset: {data_train.shape}')\n print(f'Labels: {np.unique(y_train)}')\n model_binary, model_path = ml_model.train_model(data_train, labels_train, max_review_length, NUM_LABELS)\n model = model_binary\n mode_file_path = model_path\n label_encoder = le\n pickle.dump(label_encoder, open('./Models/latest_le.pkl', 'wb'))\n json.dump(events, open('events.json', 'w'))\n print(\"Training Complete\")\n return True\n except:\n traceback.print_exc()\n return False\n\n\ndef predict(fname):\n global model, mode_file_path, label_encoder, firstLoad\n with session.graph.as_default():\n keras.backend.set_session(session)\n if not firstLoad:\n model, label_encoder = try_loading_models()\n firstLoad = True\n if not os.path.isfile(mode_file_path):\n if background_thread is not None:\n background_thread.join()\n if model is not None:\n if label_encoder is None:\n label_encoder = pickle.load(open('./Models/latest_le.pkl', 'rb'))\n try:\n result = ml_model.predict_from_file(fname, model, feature_names, max_review_length, label_encoder)\n except:\n try_loading_models()\n result = ml_model.predict_from_file(fname, model, feature_names, max_review_length, label_encoder)\n print(result)\n return result\n print('Model Not Trained Yet')\n return ''\n\[email protected]('/api/upload_train', methods = ['POST'])\ndef upload_train():\n body = request.form.to_dict()\n g_id = body['g_id']\n data = json.loads(body['data'])\n print(f'Received data for training: {g_id}')\n with open(f'./RAW_data/{g_id}_{events[g_id]}.csv', 'a') as f:\n writer = csv.writer(f)\n writer.writerow(feature_names)\n writer.writerows(data)\n events[g_id] += 1\n return jsonify({'status' : f'OK:{events[g_id]}'}), 200\n\n\[email protected]('/api/start_train', methods = ['POST'])\ndef start_train():\n global background_thread\n print('Received train request')\n\n if len(events) != 0:\n if background_thread is not None and background_thread.isAlive():\n return jsonify({'status' : f'Training request in progress...'}), 200\n background_thread = Thread(target=train_model)\n background_thread.start()\n # background_thread.join()\n # train_model()\n return jsonify({'status' : f'New Training request received'}), 200\n else:\n return jsonify({'status' : f'Not enough data to train'}), 200\n\[email protected]('/api/get_prediction', methods = ['POST'])\ndef 
get_prediction():\n    body = request.form.to_dict()\n    g_id = body['g_id']\n    data = json.loads(body['data'])\n    print(f'Received data for prediction: {g_id}')\n    file_name = f'./RAW_data/predict/{g_id}_{preds[g_id]}.csv'\n    with open(file_name, 'a') as f:\n        writer = csv.writer(f)\n        writer.writerow(feature_names)\n        writer.writerows(data)\n    preds[g_id] += 1\n    if background_thread is not None and background_thread.is_alive():\n        background_thread.join()\n    result = predict(file_name)\n    if result != '':\n        return jsonify({'status' : f'{result}'}), 200\n    else:\n        return jsonify({'status' : f'No Model Trained For Prediction'}), 200\n\n\[email protected]('/')\ndef index():\n    user_agent = request.headers.get('User-Agent')\n    return '<p>Your browser is %s</p>' % user_agent\n\nif __name__ == '__main__':\n    if os.path.isfile('events.json'):\n        event_list = json.load(open('events.json', 'r'))\n        for key, value in event_list.items():\n            events[key] = value\n    print(f'Data: {events}')\n    try_loading_models()\n    app.run(host = '0.0.0.0', port = 5001, threaded=False)" }, { "alpha_fraction": 0.6235277056694031, "alphanum_fraction": 0.6286986470222473, "avg_line_length": 43.06962203979492, "blob_id": "1363328136335f4ce8d290e929533fe247415a35", "content_id": "4edbcd8c9bfb89bdb99671a64afffb23e57c2677", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6962, "license_type": "no_license", "max_line_length": 146, "num_lines": 158, "path": "/gesture_server/src/Utils.py", "repo_name": "arjun-rao/Hand_Motion_Recognizer", "src_encoding": "UTF-8", "text": "import os.path\nimport numpy as np\n\nLABELS = (\"file\", \"down\")\n\ndef getLabel(file_name):\n    \"\"\"\n    Returns the label index from the file name\n    @param file_name : Name of file with acceleration data\n    @return          : Integer value: -1 for invalid file_name, else the index of the matching label in LABELS\n    \"\"\"\n    file_name = os.path.split(file_name)[-1].lower()\n    for i, label in enumerate(LABELS):\n        if file_name.startswith(label):\n            return i\n    return -1\n\ndef normalize(data):\n    \"\"\"\n    Normalize a 1D numpy array to the value range 0-1\n    @param data : 1D numpy array with values\n    @return     : 1D numpy array with normalized values\n    \"\"\"\n    min_val, max_val = min(data), max(data)\n    return (data - min_val)/(max_val - min_val)\n\ndef movingAvg(data, window=5):\n    \"\"\"\n    Moving average function for smoothing the data with given window width\n    @param data   : 1D array\n    @param window : Window size for calculating average\n    @return       : Smooth 1D numpy array\n    \"\"\"\n    return np.array([np.ma.average(data[i : i+window]) for i in range(len(data))])\n\ndef getFileNames(dataDir, extension=\".csv\"):\n    \"\"\"\n    Returns the list of files of given format from the given directory and all of its subdirectories using recursion\n    @param dataDir   : Root directory path to search for files\n    @param extension : Extension of the file format\n    @return          : List of file names (with subdirectory addresses)\n    \"\"\"\n    file_names = []\n    for name in os.listdir(dataDir):\n        abs_path = os.path.join(dataDir, name)   # Absolute address of current file/directory\n        if os.path.isdir(abs_path):\n            file_names += getFileNames(abs_path, extension=extension)\n        elif abs_path.endswith(extension):\n            file_names.append(abs_path)\n    return file_names\n\ndef pad_constant(array, max_length, pad_pos=\"end\"):\n    \"\"\"\n    Pad given array with a constant value to achieve given maximum length\n    @param array      : Array to pad\n    @param max_length : Maximum length of the array after padding\n    @param pad_pos    : 
\"start\" - Pad at the beginning with initial value\n \"end\" - Pad at the end with ending value\n @return : Padded array\n \"\"\"\n pad_pos = pad_pos.lower()\n padLen = max_length-len(array) if (len(array) < max_length) else 0\n if pad_pos == \"start\":\n padValue = array[0]\n padWidth = (padLen, 0)\n else:\n padValue = array[-1]\n padWidth = (0, padLen)\n array = np.pad(array, padWidth, mode=\"constant\", constant_values=padValue)\n return array\n\ndef execute_layers(inputs, layers):\n \"\"\"\n Computes each input to all given layers and returns their outputs in a list\n @param inputs : List of inputs to be passed through the given layer(s)\n @param layers : List of layers though which each input will be passed\n @return : List of output generated from each input\n \"\"\"\n outputs = []\n for _input in inputs:\n _output = _input\n for layer in layers:\n _output = layer(_output)\n outputs.append(_output)\n return outputs\n\ndef load_processed_data(f_name, review_length, pad_pos=\"end\", train_ratio=0.75):\n \"\"\"\n Loads acceleration data from processed file\n @param f_name : File name\n @para review_length : Max number of sample points for acceleration of each axis; padding length\n @param pad_pos : \"start\" - Pad at the beginning with initial value\n \"end\" - Pad at the end with ending value\n @param train_ratio : Percentage of total data to use for training\n @return : (train data, train labels, test data, test labels) where data = acceleration x, y, z and labels = one-hot-encoded labels\n \"\"\"\n from keras.utils import to_categorical\n # Load the csv file, extract data and labels from it and format them\n with open(f_name, \"r\") as csv_file:\n data = [[], [], []]\n labels = []\n for row in csv_file:\n _data = list(map(float, row.split(\",\"))) # Load acceleration reading as float\n _data, _label = np.array(_data[: -1]), _data[-1]\n _data = uniform_split(_data, parts=3) # Split acceleration data into 3 equal parts\n # Pad axis readings and reshape them\n for i, axis_array in enumerate(_data):\n axis_array = pad_constant(axis_array, max_length=review_length, pad_pos=pad_pos)\n axis_array = np.reshape(axis_array, (1, axis_array.shape[0]))\n data[i].append(axis_array)\n labels.append(_label)\n # Split the data and labels for training and testing\n label_counter = {label:0 for label in set(labels)} # Counter for each label saved for training; default count value is zero\n train_amount = round(train_ratio*len(labels)/len(set(labels))) # Amount of training data\n train_indexes = np.zeros(len(labels)) # Index value of 1 refers to training data\n for i, label in enumerate(labels):\n if label_counter[label] < train_amount:\n label_counter[label] += 1\n train_indexes[i] = 1\n if all(count>=train_amount for count in label_counter.values()):\n # All labels have required amount of training data\n break\n train_indexes = (train_indexes == 1)\n data = np.array(data)\n labels = to_categorical(labels)\n return data[:,train_indexes], labels[train_indexes], data[:,~train_indexes], labels[~train_indexes]\n\ndef uniform_split(array, parts=1):\n \"\"\"\n Splits a given array into given equal parts\n @param array : Array to split\n @param parts : Number of parts the array should be splitted to; (0 < parts <= length of array)\n @return : Splitted array (number of items = parts)\n \"\"\"\n ARRAY_LENGTH = len(array)\n assert parts <= ARRAY_LENGTH, \"Number of parts exceedes array langth; should be max {}, but got {}\".format(ARRAY_LENGTH, parts)\n if parts > 1:\n interval = round(ARRAY_LENGTH/parts)\n 
array = np.split(array, range(interval, ARRAY_LENGTH, interval))[: parts]\n    return array\n\ndef load_raw_data(dataframe, cols=None, movingAvgWindow=5, normalizeData=True):\n    \"\"\"\n    Load raw data from given pandas.DataFrame object\n    @param dataframe : Pandas dataframe with data\n    @param cols : Columns of the csv file to load\n    @param movingAvgWindow : Window width for the moving average\n    @param normalizeData : True to normalize data\n    @return : Numpy array after smoothing (and normalizing) the raw data\n    \"\"\"\n    if cols is None:\n        cols = (\"x\",\"y\",\"z\")\n    # Perform columnwise moving window average on given columns\n    data = np.array([movingAvg(dataframe[col], window=movingAvgWindow) for col in cols])\n    data = data.flatten()\n    if normalizeData:\n        data = normalize(data)\n    return data" }, { "alpha_fraction": 0.619385838508606, "alphanum_fraction": 0.6494672894477844, "avg_line_length": 33.446044921875, "blob_id": "da2631378a96a42def975f05209f26f8ba96b68", "content_id": "2237982066e26e09e46d18e24253d53aeb63d642", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4787, "license_type": "no_license", "max_line_length": 116, "num_lines": 139, "path": "/gesture_server/src/Data Sender.py", "repo_name": "arjun-rao/Hand_Motion_Recognizer", "src_encoding": "UTF-8", "text": "from microbit import display, Image, sleep, button_a, button_b, running_time, accelerometer\nfrom micropython import const\nimport radio\n# States\nREADY = const(1)\nSAMPLE_DATA = const(2)\nSEND_DATA = const(3)\nEXIT = const(-1)\n# Global constants\nCHANNEL = const(25)\nMAX_ATTEMPT = const(1)\nSAMPLE_INTERVAL = const(10)    # Sample period in ms\nSAMPLE_DURATION = const(1500)    # Total sample time in ms\n# micropython.const() only folds integer values, so byte-string and list constants are plain globals\nACK = b\"0\"\nNACK = b\"1\"\n# LED Numbers (3,2,1) rotated 90 degrees clockwise\nNUM_IMGS = [Image(\"90909:90909:90909:99999:99999\"),\n\t\t\t\t  Image(\"99909:90909:90909:90909:90999\"),\n\t\t\t\t  Image(\"00000:00090:99999:99999:00000\")]\n\ndef retry(attempts=MAX_ATTEMPT, retry_interval=100, valid_responses=None, invalid_responses=None, invalid_msg=None):\n\t\"\"\"\n\tDecorator to call a function multiple times depending on its return value\n\t@param attempts\t\t    : Maximum number of attempts\n\t@param retry_interval   : Wait time (in milliseconds) between each retry\n\t@param valid_responses  : Valid responses as list/tuple\n\t@param invalid_responses: Invalid responses as list/tuple\n\t@param invalid_msg      : Message to display when invalid response received\n\t@returns                : Decorator function\n\t\"\"\"\n\tdef decorator(func):\n\t\tdef wrapper(*args, **kwargs):\n\t\t\tfor attempt in range(attempts):\n\t\t\t\tres = func(*args, **kwargs)\t\t\t\t\t\t\t\t\t# Call the function\n\t\t\t\tif valid_responses is None and invalid_responses is None:\n\t\t\t\t\treturn res \t\t\t\t\t\t\t\t\t\t\t\t# Return response\n\t\t\t\telse:\n\t\t\t\t\tif valid_responses is not None and res in valid_responses:\n\t\t\t\t\t\treturn res \t\t\t\t\t\t\t\t\t\t\t# Return received valid response\n\t\t\t\t\telif invalid_responses is not None and res not in invalid_responses:\n\t\t\t\t\t\treturn res \t\t\t\t\t\t\t\t\t\t\t# Return received response which was not invalid\n\t\t\t\t\tif invalid_msg is not None:\n\t\t\t\t\t\tprint(invalid_msg)\n\t\t\t\tsleep(retry_interval)\n\t\t\treturn res\n\t\treturn wrapper\n\treturn decorator\n\ndef countdown(t=3):\n\t\"\"\"\n\tDisplay countdown animation of given second(s)\n\t@param t : Time (in seconds)\n\t@returns : Nothing\n\t\"\"\"\n\tmax_t = 
len(NUM_IMGS)\n\tif (t > max_t):\n\t\tt = max_t\n\telif (t <= 0):\n\t\tt = 1\n\tdisplay.show(NUM_IMGS[-t:], loop=False, delay=1000)\n\ndef waitForACK(timer=2000, pooling_interval=10):\n\t\"\"\"\n\tWaits for acknowledgement for given amount of time\n\t@param timer : Maximum waiting time for the acknowledgement\n\t@param pooling_interval : Interval to check for acknowledgement\n\t@returns : Received acknowledgement\n\t\t\t\t\t\t\t NACK for timeout\n\t\"\"\"\n\tinitial_time = running_time()\n\twhile (running_time() - initial_time) < timer:\n\t\tresponse = radio.receive()\n\t\tif response is not None:\n\t\t\treturn response\n\t\tsleep(pooling_interval)\n\treturn NACK\n\n@retry(attempts=MAX_ATTEMPT, valid_responses=(ACK,), invalid_responses=(NACK,))\ndef sendMsg(data, wait_time=1000, pooling_interval=10):\n\t\"\"\"\n\tSends given data and waits for acknowledgement\n\t@param data : Data as string or bytes\n\t@param wait_time : Maximum waiting time for the acknowledgement\n\t@param pooling_interval : Interval to check for acknowledgement\n\t@returns : Received acknowledgement\n\t\"\"\"\n\tradio.send(data)\n\tresponse = waitForACK(timer=wait_time, pooling_interval=pooling_interval)\n\treturn response\n\ndef main():\n\tstate = READY\n\twhile True:\n\t\t# Ready state\n\t\tif state == READY:\n\t\t\twhile True:\n\t\t\t\tif button_a.is_pressed():\t\t\t\n\t\t\t\t\tstate = SAMPLE_DATA\n\t\t\t\t\tbreak\n\t\t\t\telif button_b.is_pressed():\n\t\t\t\t\tradio.on()\n\t\t\t\t\tradio.send(\"done\")\n\t\t\t\t\tradio.off()\n\t\t\t\telse:\n\t\t\t\t\tdisplay.show(Image.ARROW_W)\n\t\t\t\tsleep(100)\n\t\t# Sample Data state\n\t\telif state == SAMPLE_DATA:\n\t\t\tdata_sent = 0\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Reset data sent value\n\t\t\tcountdown(3)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Show countdown on the Microbit LED\t\n\t\t\tdisplay.show(Image.TARGET)\n\t\t\tradio.on()\n\t\t\tinitial_time = running_time()\n\t\t\twhile (running_time()-initial_time) < SAMPLE_DURATION:\n\t\t\t\tt0 = running_time()\n\t\t\t\tif data_sent == 0:\t\t\t\t\t\t\t\t\t\t\t\t\t# Turn off all Microbit LEDs\n\t\t\t\t\tdisplay.clear()\n\t\t\t\tcx, cy = divmod(data_sent, 5)\t\t\t\t\t\t\t\t\t\t# Get current LED pixel coordinate of the BBC Microbit\n\t\t\t\tradio.send(str(accelerometer.get_values()))\n\t\t\t\tdisplay.set_pixel(4-cx, cy, 9)\n\t\t\t\tdata_sent = 0 if data_sent >= 24 else data_sent+1\t\t\t\t\t# Increase and limit data_sent value within 0-24 range\n\t\t\t\twait_t = SAMPLE_INTERVAL-(running_time()-t0)\t\t\t\t\t\t# Time till next sample\n\t\t\t\tif (wait_t > 0):\n\t\t\t\t\tsleep(wait_t)\n\t\t\tradio.send(\"done\")\n\t\t\tradio.off()\n\t\t\tstate = READY\n\t\t# Exit state\n\t\telif state == EXIT:\n\t\t\tdisplay.show(Image.HAPPY)\n\t\t\treturn 0\n\t\tsleep(100)\n\nif __name__ == \"__main__\":\n\ttry:\n\t\tradio.config(channel=CHANNEL, data_rate=radio.RATE_2MBIT, power=7, queue=20)\n\t\tmain()\n\tfinally:\n\t\tradio.off()" }, { "alpha_fraction": 0.7456262707710266, "alphanum_fraction": 0.7607988715171814, "avg_line_length": 46.94696807861328, "blob_id": "8ac194f8b33bd39f5475f2a4ba2652d30643ed1a", "content_id": "25e2b05a4e738843f077a05efbcf327edb22a446", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6459, "license_type": "permissive", "max_line_length": 624, "num_lines": 132, "path": "/microbit-blue/README.md", "repo_name": "arjun-rao/Hand_Motion_Recognizer", "src_encoding": "UTF-8", "text": "# micro:bit Blue\r\n\r\n## Version: 1.5.4\r\n\r\n\r\n * Author: Martin Woolley\r\n * 
Twitter: @bluetooth_mdw\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n * http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n\r\n\r\n__Description:__ \r\n\r\nThis application contains a series of demonstrations which use the BBC micro:bit Bluetooth profile in various ways. Its purpose is both to act as a demo and to provide a source of example code which shows how to use the Bluetooth profile from Android. \r\n\r\n__Instructions:__<br>\r\n\r\nInstall the latest apk file from the releases folder. Note that the application requires at least version 2.0.0 of the micro:bit runtime, and the smartphone or tablet should be paired with the micro:bit before the application can be used. \r\n\r\n__Requirements:__<br>\r\n\r\nThe application should work on Android version 4.4 or later; version 5 or later is recommended. Limited testing has been carried out across Android versions, so please report any issues found.\r\n\r\n__Known Issues:__\r\n\r\n1. Needs a demo of analogue output\r\n2. Needs a demo of digital input\r\n3. Needs a demo of analogue input\r\n4. Sometimes the digital output demo is unable to determine the initial state of pin 0\r\n5. Animal Vegetable Mineral demo: If you disconnect from the micro:bit then reconnect, you will not receive indication messages from the micro:bit any more. Reset the micro:bit and reconnect to solve this. Awaiting new micro:bit API to avoid the underlying issue. \r\n\r\n__Version History:__\r\n\r\n__1.5.4__\r\n- It's now possible to switch off data smoothing in the Accelerometer screen to see the effect it has\r\n\r\n__1.5.3__\r\n- Bug fix: out of range x/y coordinates sometimes broke touch processing in game controller.\r\n\r\n__1.5.2__\r\n- Bug fix: bug processing touch events in game controller caused rare crashes.\r\n\r\n__1.5.1__\r\n- Bug fix: application crashed when launched if Bluetooth was not already switched on\r\n\r\n__1.5.0__\r\n- Added further information to the main help screen\r\n\r\n__1.4.9__\r\n- Changed application name to micro:bit Blue\r\n\r\n__1.4.8__\r\n- Added the Trivia Scoreboard demo\r\n\r\n__1.4.7__\r\n- Swapped UART TX and RX UUIDs to be in line with Nordic Semiconductor official documentation. Note that this was correct with respect to earlier versions of the micro:bit runtime code. An underlying issue regarding the UART service, its characteristics and their properties has since been fixed and so the Animal Vegetable Mineral demo will only work with a hex file built from this later code base. The fix in question went into github master on 25th May 2016.\r\n\r\n__1.4.6__\r\n- Added Refresh Services menu item to the demo Menu Activity. GATT services (and characteristics and descriptors) discovered when connecting to a micro:bit are cached by Android. There's currently no way to tell Android that the GATT services (etc.) have changed because your micro:bit code has changed, and so this menu item lets you manually initiate flushing of the cache and a full service (re)discovery process. 
If you get an error message saying a demo cannot be used because your micro:bit doesn't have a required Bluetooth service and you are 100% convinced it does have it, try using the Refresh Services menu option.\r\n\r\n__1.4.5__\r\n- Added UART service Animal Vegetable Mineral demo\r\n- Tightened up closing of BluetoothGatt object on disconnect in the hope that this will improve reliability on older Android versions\r\n- Guarded execution of some operations in BleAdapterService according to the connection state\r\n- Moved wrapping of UI text messages originating from the BleAdapterService to the Activity that receives them\r\n\r\n__1.4.4__\r\n- Added simple interpretation of Magnetometer Bearing data, displaying the nearest point of the compass\r\n\r\n__1.4.3__\r\n- Included device Bluetooth address in scan results list to make it possible to distinguish between multiple paired micro:bits\r\n\r\n__1.4.2__\r\n\r\n- Removed redundant DualDPadControllerActivity.java\r\n- Fixed game controller event code initialisation bug\r\n- Fixed bug which resulted in attempts to pair with an unpaired micro:bit when the main settings screen had deselected filtering of unpaired devices, i.e. we want to be able to work with unpaired micro:bits, probably because we're developing and testing.\r\n- Fixed bug in multi-touch handling in the dual d-pad game controller\r\n\r\n__1.4.1__\r\n\r\n- Display current heart rate instead of histogram every 10 seconds\r\n\r\n__1.4.0__\r\n\r\n- Added Heart Rate Histogram demo\r\n\r\n__1.3.1__\r\n\r\n- Improved D-Pad Controller screen with proper graphic\r\n- Introduced vibration for feedback when using the D-Pad Controller\r\n- Introduced multi-touch support for the D-Pad Controller\r\n- Event Code and Event Values used by the D-Pad Controller are now configurable in a Settings screen.\r\n- Tidied up layout of some screens to work better across Android device types\r\n\r\n__1.3.0__\r\n\r\n- Added Dual D-Pad Controller demo\r\n- Added screen orientation properties for all activities\r\n- Changed keep alive function to read the firmware revision string characteristic since the hardware revision string characteristic is being removed from the profile\r\n- Removed hardware revision string from the device information screen\r\n\r\n__1.2.0__\r\n\r\n- Added Bluetooth Services menu item to the menu page. Produces a report showing which Bluetooth services are present on or absent from the connected micro:bit\r\n\r\n__1.2.0__\r\n\r\n- Modified to work with the \"out of box\" general release of the micro:bit runtime which uses Bluetooth security including pairing and white listing.\r\n\r\n__1.1.0__\r\n\r\n- Uses Android 5 scanning APIs if on 5.0 or later, else uses old scanning APIs\r\n\r\n__1.0.0__ \r\n\r\n- Initial version which used the Android 4.x scanning APIs\r\n\r\n## Contributing\r\n\r\nPull Requests are not being accepted at this time. If you find a bug or have an idea for a new feature, please submit details in the issue tracker. In the case of new features, it may be possible to collaborate on the development. 
This will be assessed on a case-by-case basis.\r\n\r\nThank you" }, { "alpha_fraction": 0.6311407089233398, "alphanum_fraction": 0.6594504714012146, "avg_line_length": 26.953489303588867, "blob_id": "56d5de9c7f81983f9bcc0d5928c1cefdadb63bd7", "content_id": "6fb12fed4130f21eb44b85ab8a935d5ff99c032e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1201, "license_type": "no_license", "max_line_length": 85, "num_lines": 43, "path": "/gesture_server/src/Data Receiver.py", "repo_name": "arjun-rao/Hand_Motion_Recognizer", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport radio\nfrom micropython import const\nfrom microbit import display, Image, sleep, button_a, button_b, running_time, uart\n\n# Global constants\nCHANNEL = const(25)\n\ndef main():\n\tdata_received = 0\n\tdisplay.show(Image.HAPPY)\n\twhile True:\n\t\tresponse = radio.receive()\n\t\tif response is not None:\t\t\t\t\t\t\t# Some data received\n\t\t\tuart.write(response+\"\\n\")\t\t\t\t\t\t# Write received data with line break on serial bus\n\t\t\tif data_received == 0:\n\t\t\t\tdisplay.clear()\t\t\t\t\t\t\t\t# Clear Microbit LEDs\n\t\t\tif \",\" in response:\t\t\t\t\t\t\t\t# \"(x,y,z)\" data received\n\t\t\t\tcy, cx = divmod(data_received, 5)\n\t\t\t\tdisplay.set_pixel(cx, cy, 9)\n\t\t\t\t# Increase and limit data_received value within 0-24 range\n\t\t\t\tdata_received = 0 if data_received >= 24 else data_received+1\n\t\t\telif response == \"done\":\n\t\t\t\tdata_received = 0\n\t\t\t\tdisplay.show(Image.YES)\n\t\t\telif response == \"exit\":\n\t\t\t\tdisplay.show(Image.HAPPY)\n\t\t\t\tsleep(2000)\n\t\t\t\tdisplay.clear()\n\t\t\t\tbreak\n\t\telse:\t\t\t\t\t\t\t\t\t\t\t\t# No data received - idle briefly before polling again\n\t\t\tsleep(100)\n\nif __name__ == \"__main__\":\n\ttry:\n\t\tuart.init(baudrate=115200, bits=8, parity=None, stop=1)\n\t\tradio.config(channel=CHANNEL, data_rate=radio.RATE_2MBIT, power=7, queue=150)\n\t\tradio.on()\n\t\tmain()\n\tfinally:\n\t\tuart.close()\n\t\tradio.off()" }, { "alpha_fraction": 0.6735600829124451, "alphanum_fraction": 0.6809045076370239, "avg_line_length": 41.76033020019531, "blob_id": "e46a879e85de10e1058e9d04285fb223c27f849a", "content_id": "deecb6e3e9396c3596d59111462a27f527ad3435", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5174, "license_type": "no_license", "max_line_length": 114, "num_lines": 121, "path": "/gesture_server/ml_model.py", "repo_name": "arjun-rao/Hand_Motion_Recognizer", "src_encoding": "UTF-8", "text": "import os.path\nimport numpy as np\nfrom datetime import datetime\nfrom collections import Counter\nimport pandas as pd\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.utils import shuffle\n\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Dense, LSTM\nfrom tensorflow.keras import Input\nfrom sklearn.metrics import confusion_matrix\nfrom tensorflow.keras.models import load_model\n\n\ndef shuffle_train_data(x_train, y_train, num_features):\n    shuffled_x = shuffle(*x_train, y_train)\n    x_train = shuffled_x[:num_features]\n    y_train = shuffled_x[num_features]\n    return x_train, y_train\n\ndef get_train_data(events, feature_names, max_review_length):\n    features = []\n    y_train = []\n    num_features = len(feature_names)\n    for i in range(num_features):\n        features.append([])\n    for event in events.keys():\n        for fname in range(events[event]):\n            df = pd.read_csv(f'./RAW_Data/{event}_{fname}.csv')\n            
print(f'{event}_{fname}.csv')\n            for idx, name in enumerate(feature_names):\n                features[idx].append(np.array(df[name][:max_review_length]))\n            y_train.append(event)\n    return features, y_train, feature_names\n\ndef parse_train_data(features, num_samples, max_review_length):\n    feature_list = [np.array(feature).reshape(num_samples, 1, max_review_length) for feature in features]\n    data_train = np.array(feature_list)\n    return data_train\n\ndef parse_test_data(fname, feature_names, max_review_length):\n    df = pd.read_csv(fname)\n    features = []\n    num_features = len(feature_names)\n    for i in range(num_features):\n        features.append([])\n    for idx, name in enumerate(feature_names):\n        features[idx].append(np.array(df[name][:max_review_length]))\n    data = [np.array(np.array(feature).reshape(1, 1, max_review_length)) for feature in features]\n    return data\n\ndef encode_labels(y_train):\n    label_encoder = LabelEncoder()\n    integer_encoded = label_encoder.fit_transform(y_train)\n    # binary encode\n    onehot_encoder = OneHotEncoder(sparse=False)\n    integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)\n    onehot_encoded = onehot_encoder.fit_transform(integer_encoded)\n    return onehot_encoded, label_encoder\n\ndef invert_onehot(label, le):\n    inverted = le.inverse_transform([np.argmax(label)])\n    return inverted\n\ndef execute_layers(inputs, layers):\n    \"\"\"\n    Computes each input to all given layers and returns their outputs in a list\n    @param inputs : List of inputs to be passed through the given layer(s)\n    @param layers : List of layers through which each input will be passed\n    @return : List of outputs generated from each input\n    \"\"\"\n    outputs = []\n    for _input in inputs:\n        _output = _input\n        for layer in layers:\n            _output = layer(_output)\n        outputs.append(_output)\n    return outputs\n\ndef init_model(max_review_length, NUM_LABELS):\n    input_x = Input(shape=(1, max_review_length), name=\"Acceleration_x\")\n    input_y = Input(shape=(1, max_review_length), name=\"Acceleration_y\")\n    input_z = Input(shape=(1, max_review_length), name=\"Acceleration_z\")\n    input_p = Input(shape=(1, max_review_length), name=\"pitch\")\n    input_r = Input(shape=(1, max_review_length), name=\"roll\")\n    input_frc = Input(shape=(1, max_review_length), name=\"force\")\n    input_flx = Input(shape=(1, max_review_length), name=\"flex\")\n    shared_layers = (LSTM(max_review_length, activation=\"tanh\", name=\"Shared_LSTM\", dropout=0.25),\n                     Dense(NUM_LABELS*3*64, activation=\"relu\", name=\"Shared_Dense_1\"),\n                     Dense(NUM_LABELS*3*64, activation=\"relu\", name=\"Shared_Dense_2\"),\n                     Dense(NUM_LABELS*1*64, activation=\"relu\", name=\"Shared_Dense_3\"))\n    shared_output = execute_layers(\n        inputs=(input_x, input_y, input_z, input_p, input_r, input_frc, input_flx), layers=shared_layers)\n    concat = keras.layers.concatenate(shared_output,name=\"Concatenate\")\n    dense_1 = Dense(39, activation=\"relu\", name=\"Dense_1\")(concat)\n    main_output = Dense(NUM_LABELS, activation=\"softmax\", name=\"Classification_Layer\")(dense_1)\n    model = Model(inputs=(input_x, input_y, input_z, input_p, input_r, input_frc, input_flx), outputs=main_output)\n    model.compile(loss='categorical_crossentropy', optimizer=\"adam\", metrics=['accuracy'])\n    print(\"Model Summary\")\n    model.summary()    # summary() prints the architecture table itself and returns None\n    return model\n\n\ndef train_model(data_train, labels_train, max_review_length, num_labels):\n    model = init_model(max_review_length, num_labels)\n    history = model.fit(x=[*data_train], y=labels_train, epochs=10, batch_size=10)\n    if not os.path.exists(\"Models\"):\n        
os.makedirs(\"Models\")\n file_path = os.path.join(\"Models\", \"latest.HDF5\")\n model.save(file_path)\n return model, file_path\n\ndef load_trained_model(file_path):\n return load_model(file_path)\n\ndef predict_from_file(fname, model, feature_names, max_review_length, le):\n data = parse_test_data(fname, feature_names, max_review_length)\n return invert_onehot(model.predict(data)[0], le)[0]\n" }, { "alpha_fraction": 0.5787992477416992, "alphanum_fraction": 0.5844277739524841, "avg_line_length": 58.24074172973633, "blob_id": "af1385e8d017760b5ad8539d316a3c2efb2061a9", "content_id": "d4c80ed87fed7b9196366498973a344ef3a6f9c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3202, "license_type": "no_license", "max_line_length": 139, "num_lines": 54, "path": "/gesture_server/src/Process Data.py", "repo_name": "arjun-rao/Hand_Motion_Recognizer", "src_encoding": "UTF-8", "text": "import os.path\nimport argparse\nimport pandas as pd\nfrom time import time\nfrom random import shuffle\nfrom datetime import datetime\nfrom Utils import getLabel, normalize, movingAvg, getFileNames, load_raw_data\n\nTITLE = (\"\\n\\t\\t\\t\\t\\t################\" \n \"\\n\\t\\t\\t\\t\\t# Process Data #\" \n \"\\n\\t\\t\\t\\t\\t################\\n\")\n\ndef main(dataDir, movingAvgWindow, normalizeData):\n print(TITLE)\n rootDir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\t\t# Main directory of the project\n print(\" Moving Average Window: {}, Normalize Data: {}\".format(movingAvgWindow, normalizeData))\n dataDir = os.path.join(rootDir, dataDir) \t\t\t\t\t\t# Go one directory back from rootDir and go to /Data/%data_type%\n if not os.path.exists(dataDir):\n raise OSError(\"Invalid directory: {}\".format(dataDir))\n print(\" Data directory: {}\".format(dataDir))\n files = getFileNames(dataDir) # Get file names (with full address) from dataDir directory\n shuffle(files)\n out_f_name = \"Processed Data {}.csv\".format(datetime.now().strftime(\"%d.%m.%Y %H.%M\"))\n fileFullPath = os.path.join(rootDir, out_f_name) \t\t\t\t\t# Create full path of the output csv file\n with open(fileFullPath, \"w\") as csv_file:\n print(\" Saving data at: {}\".format(fileFullPath))\n total_files = len(files)\n for i, f_name in enumerate(files):\n label = getLabel(f_name) # Get label of the data from its file name\n df = pd.read_csv(f_name) # Load given csv file as pandas.DataFrame\n df.columns = pd.Index(i.strip() for i in df.columns) # Strip redundant spaces from column names\n data = load_raw_data(df, cols=(\"x\", \"y\", \"z\"), movingAvgWindow=movingAvgWindow, normalizeData=normalizeData)\n str_data = \",\".join(map(str, data)) # Convert values in data into string and join with ','\n csv_file.write(\"{},{}\\n\".format(str_data, label)) # Write data and label to the csv file \n progress_ratio = (i+1)/total_files # Calculate current progress\n print(\" Progress : {:░<30} {:>6.2f}% ({:>3}/{:<3})\".format(\n \t\"█\"*int(30*progress_ratio), 100*progress_ratio, i+1, total_files), end=\"\\r\")\n print(\"\\n\")\n\nif __name__ == \"__main__\":\n # Getting arguments from the command prompt\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \"--data\", help=\"Directory with data\", type=str, default=\"RAW_Data\")\n parser.add_argument(\"-w\", \"--width\", help=\"Moving average window width\", type=int, default=13)\n parser.add_argument(\"-n\", \"--normalize\", help=\"True to normalize data\", type=bool, default=True)\n args = parser.parse_args()\n t0 = time() 
\n try:\n main(dataDir=args.data, movingAvgWindow=args.width, normalizeData=args.normalize)\n except Exception as ex:\n print(\"\\n ERROR: {}\".format(ex.args))\n finally:\n print(\" Time taken: {:.2f} s\".format(time()-t0))\n input(\" Press Enter to exit \")" } ]
12
munchmonk/explorer
https://github.com/munchmonk/explorer
17c3f2f429e04228f5e54334927f81c4f74d8ec4
2e38f95025fe57a2540ce0e553bcf798d0e78fe2
adfd8411638483351cce5b107cdfefb309da0450
refs/heads/master
2020-06-18T17:17:37.033083
2020-04-05T17:30:03
2020-04-05T17:30:03
196,378,940
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6608046293258667, "alphanum_fraction": 0.6786316633224487, "avg_line_length": 28.492891311645508, "blob_id": "95c61bd9b34be9fa7703aa1adae4e3d5de7aeb64", "content_id": "a5d27b4ca4b92f18973cf112dfd31d4d0dad6491", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12458, "license_type": "no_license", "max_line_length": 151, "num_lines": 422, "path": "/bbb.py", "repo_name": "munchmonk/explorer", "src_encoding": "UTF-8", "text": "#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python2.7\n# coding: utf-8\n\n\n\"\"\"\n\tnext:\n\t\tvary water tile\n\t\tdo 2 missing water tiles\n\t\tenlarge water/grass tiles to make it clear whether player can or can't walk there\n\n\ttodo:\n\t\tlinks (e.g. enter a house, new path, etc.)\n\t\t(tile animations?)\n\t\tos agnostic image load path (hardcoded at the moment)\n\t\tsanitize input like for real\n\n\n\"\"\"\n\n# bbb -> Game class, main\n# ccc -> lots of smaller classes\n# ddd -> Player class\n\n\n\nimport pygame\nimport sys\nimport os\nimport time\nimport pickle\nimport copy\nimport random\n\nimport ccc\nimport const\nimport ddd\n\n\nclass Game:\n\tdef __init__(self):\n\t\tpygame.init()\n\n\t\t# Playing on Mac - fullscreen\n\t\tif (1280, 800) in pygame.display.list_modes():\n\t\t\t# self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n\t\t\tself.screen = pygame.display.set_mode((const.SCREENWIDTH, const.SCREENHEIGHT))\n\n\t\t# Playing on TV - 1024 x 768\n\t\tif (1280, 960) in pygame.display.list_modes():\n\t\t\tself.screen = pygame.display.set_mode((const.SCREENWIDTH, const.SCREENHEIGHT), pygame.FULLSCREEN)\n\n\t\t# self.screen = pygame.display.set_mode((const.SCREENWIDTH, const.SCREENHEIGHT), pygame.FULLSCREEN)\n\t\t# self.screen = pygame.display.set_mode((const.SCREENWIDTH, const.SCREENHEIGHT))\n\t\t# display_info = pygame.display.Info()\n\t\t# self.screen = pygame.display.set_mode((display_info.current_w, display_info.current_h))\n\t\t# self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n\n\t\tself.allplayers = pygame.sprite.Group()\n\t\tself.allpokemon = pygame.sprite.Group()\n\t\tself.allneedles = pygame.sprite.Group()\n\t\tself.allpokeballs = pygame.sprite.Group()\n\t\tself.allsprites = pygame.sprite.Group()\n\n\t\tself.player = ddd.Player(self, 0, 0)\n\n\t\tself.clock = pygame.time.Clock()\n\t\tself.dt = 0\n\n\t\tself.joysticks = []\n\n\t\tself.base_tiles = dict()\n\t\tself.map_width = 0\n\t\tself.map_height = 0\n\t\tself.horiz_tiles = 0\n\t\tself.vert_tiles = 0\n\t\tself.alltiles = pygame.sprite.Group()\n\n\t\tself.fight_mode = False\n\t\tself.fight_background = pygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/pokemon/fight_background.png'))\n\n\t\tself.pokedex = []\n\t\tself.pokedex_mode = False\n\t\tself.curr_pokedex_selection = None\n\t\tself.pokedex_background = pygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/pokemon/pokedex_background.png')) \n\t\tself.unknown_pokemon = pygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/pokemon/unknown.png'))\n\n\t\tself.font_size = 16\n\t\tself.myfont = pygame.font.Font(os.path.join(os.path.dirname(__file__), 'assets/misc/manti_sans_fixed.otf'), self.font_size)\n\n\t\tself.tile_layers = []\n\n\t\tself.curr_level = 'level_1'\n\t\t\n\t\tself.setup_joysticks()\n\t\tself.setup_level(1, 1, ddd.Player.DOWN)\n\t\t# self.setup_level(4, 11, ddd.Player.DOWN)\n\n\tdef spawn_pokemon(self, pokemon_tag):\n\t\tcommon_odds = 0\n\t\tuncommon_odds = 0\n\t\trare_odds = 
0\n\n\t\tif 'COMMON' in pokemon_tag:\n\t\t\tcommon_odds += 0.2\n\t\t\tuncommon_odds += 0.01\n\t\telif 'UNCOMMON' in pokemon_tag:\n\t\t\tcommon_odds += 0.1\n\t\t\tuncommon_odds += 0.1\n\t\telif 'RARE' in pokemon_tag:\n\t\t\tuncommon_odds += 0.2\n\t\t\trare_odds += 0.05\n\n\t\tcommon_pool, uncommon_pool, rare_pool = self.populate_spawn_pools()\n\n\t\tif random.random() < rare_odds and rare_pool:\n\t\t\tself.start_fight(random.choice(rare_pool))\n\t\telif random.random() < uncommon_odds and uncommon_pool:\n\t\t\tself.start_fight(random.choice(uncommon_pool))\n\t\telif random.random() < common_odds and common_pool:\n\t\t\tself.start_fight(random.choice(common_pool))\n\n\n\tdef populate_spawn_pools(self):\n\t\tcommon_pool = list()\n\t\tuncommon_pool = list()\n\t\trare_pool = list()\n\n\t\tif self.curr_level == 'forest':\n\t\t\tcommon_pool += [1, 4, 7]\n\t\t\tuncommon_pool += [25]\n\n\t\treturn common_pool, uncommon_pool, rare_pool\n\t\n\n\tdef start_fight(self, dex_id):\n\t\tself.fight_mode = True\n\t\tself.play_fight_transition()\n\t\tnew_mon = ccc.Pokemon(self, dex_id)\n\t\tself.player.curr_pokemon = new_mon\n\t\tself.player.curr_needle = new_mon.catch_needle\n\n\t\t# pokemon_seen stores dex ids, so compare against the id rather than the Pokemon object\n\t\tif dex_id not in self.player.pokemon_seen:\n\t\t\tself.player.pokemon_seen.append(dex_id)\n\t\t\tself.player.pokemon_seen.sort()\n\n\n\tdef play_fight_transition(self):\n\t\tthickness = 1\n\n\t\twhile thickness < min(const.SCREENWIDTH, const.SCREENHEIGHT):\n\t\t\tpygame.draw.rect(self.screen, (0, 0, 0), pygame.Rect(0, 0, const.SCREENWIDTH, const.SCREENHEIGHT), thickness)\n\t\t\tpygame.display.flip()\n\t\t\tthickness += 10\n\t\t\tpygame.time.wait(35)\n\n\n\n\n\tdef find_tile_by_coord(self, coord):\n\t\t# It has to return a list because there might be more than one tile in the same place due to different layers\n\t\ttiles = []\n\t\tfor tile in self.alltiles:\n\t\t\tif tile.x == coord[0] and tile.y == coord[1]:\n\t\t\t\ttiles.append(tile)\n\t\treturn tiles\n\n\tdef load_tiles(self):\n\t\tself.base_tiles = dict()\n\t\tself.alltiles = pygame.sprite.Group()\n\n\t\tmetadata_path = os.path.join('assets', self.curr_level)\n\t\tmetadata_path = os.path.join(metadata_path, 'metadata.txt')\n\t\tmetadata_path = os.path.join(os.path.dirname(__file__), metadata_path)\n\n\t\twith open(metadata_path, 'r') as in_file:\n\t\t\tfor line in in_file:\n\t\t\t\ttext = line.split()\n\n\t\t\t\tif text:\n\t\t\t\t\tself.base_tiles[text[0]] = ccc.BaseTile(text, self.curr_level)\n\n\tdef build_portals(self):\n\t\t# To be fed via file in the future\n\n\t\tif self.curr_level == 'level_1':\n\t\t\tself.add_portal(4, 10, 'level_2_PORTAL_3_4', ddd.Player.DOWN)\n\t\t\tself.add_portal(8, 12, 'level_2_PORTAL_9_8', ddd.Player.DOWN)\n\t\t\tself.add_portal(1, 0, 'forest_PORTAL_1_18', ddd.Player.UP)\n\n\t\tif self.curr_level == 'level_2':\n\t\t\tself.add_portal(3, 3, 'level_1_PORTAL_4_11', ddd.Player.DOWN)\n\t\t\tself.add_portal(9, 7, 'level_1_PORTAL_8_13', ddd.Player.DOWN)\n\n\t\tif self.curr_level == 'forest':\n\t\t\tself.add_portal(1, 19, 'level_1_PORTAL_1_1', ddd.Player.DOWN)\n\t\t\tself.add_portal(18, 0, 'cave_PORTAL_3_14', ddd.Player.UP)\n\n\t\tif self.curr_level == 'cave':\n\t\t\tself.add_portal(3, 15, 'forest_PORTAL_18_1', ddd.Player.DOWN)\n\n\tdef add_portal(self, x, y, portal_tag, facing):\n\t\t# N.B. 
use deep copies for tile tags\n\t\tportal_tag = portal_tag + '_' + str(facing)\n\n\t\ttile = [t for t in self.find_tile_by_coord((x, y)) if t.tile_data][0]\n\t\tnew_data = copy.deepcopy(tile.tile_data)\n\t\tnew_data.append(portal_tag)\n\t\ttile.tile_data = new_data\n\n\tdef build_map(self):\n\t\tself.tile_layers = []\n\n\t\ti = 0\n\t\twhile True:\n\t\t\tcurr_layer = []\n\t\t\tmap_file = 'metadata_' + str(i) + '.p'\n\t\t\tmap_path = os.path.join('assets', self.curr_level)\n\t\t\tmap_path = os.path.join(map_path, map_file)\n\t\t\t\n\t\t\ttry:\n\t\t\t\twith open(os.path.join(os.path.dirname(__file__), map_path), 'rb') as in_file:\n\t\t\t\t\tmetadata = pickle.load(in_file)\n\n\t\t\t\t\tself.horiz_tiles = len(metadata[0])\n\t\t\t\t\tself.vert_tiles = len(metadata)\n\t\t\t\t\tself.map_width = self.horiz_tiles * const.TILESIZE\n\t\t\t\t\tself.map_height = self.vert_tiles * const.TILESIZE\n\n\t\t\t\t\tfor y in range(len(metadata)):\n\t\t\t\t\t\tfor x in range(len(metadata[y])):\n\t\t\t\t\t\t\t# Below code was creating a bug where two different tiles overlap; I'll leave it commented for now in case there is a valid reason\n\t\t\t\t\t\t\t# I wanted to overwrite the tile. I can't think of any now\n\n\t\t\t\t\t\t\t# # Overwrites existing tiles\n\t\t\t\t\t\t\t# existing_tiles = self.find_tile_by_coord((x, y))\n\t\t\t\t\t\t\t# if existing_tiles:\n\t\t\t\t\t\t\t# \tfor existing_tile in existing_tiles:\n\t\t\t\t\t\t\t# \t\t# existing_tile.tile_data = None\n\n\t\t\t\t\t\t\tcurr_layer.append(ccc.Tile(self, x, y, metadata[y][x]))\n\n\t\t\t\tself.tile_layers.append(curr_layer)\n\t\t\t\ti += 1\n\n\t\t\texcept IOError:\n\t\t\t\t# No more layer files for this level - stop loading\n\t\t\t\tbreak\n\n\tdef quit(self):\n\t\tpygame.quit()\n\t\tsys.exit()\n\n\tdef setup_joysticks(self):\n\t\tpygame.joystick.init()\n\n\t\tfor i in range(0, pygame.joystick.get_count()):\n\t\t\tself.joysticks.append(pygame.joystick.Joystick(i))\n\t\t\tself.joysticks[i].init()\n\n\tdef setup_level(self, spawn_x, spawn_y, facing):\n\t\tself.load_tiles()\n\t\tself.build_map()\n\t\tself.build_portals()\n\t\tself.player.reset(spawn_x, spawn_y, facing)\n\t\tself.camera = ccc.Camera(self)\n\t\tself.fight_mode = False\n\t\tself.pokedex_mode = False\n\t\tself.pokedex = []\n\t\tself.curr_pokedex_selection = None\n\n\tdef show_curr_tile(self):\n\t\t# For debugging/mapbuilding only\n\t\tfor player in self.allplayers:\n\t\t\tprint(player.curr_tile)\n\n\n\tdef index_to_coord(self, indexes):\n\t\tx = indexes[0] * const.TILESIZE\n\t\ty = indexes[1] * const.TILESIZE\n\t\treturn x, y\n\n\n\tdef update(self):\n\t\tself.allsprites.update()\n\t\tself.camera.update(self.player)\n\n\t\t# Comment out if not debugging/mapbuilding\n\t\t# self.show_curr_tile()\n\n\tdef draw_fight_mode(self):\n\t\tself.screen.blit(self.fight_background, (0, 0))\n\n\t\tfor sprite in self.allpokemon:\n\t\t\tself.screen.blit(sprite.image, sprite.rect)\n\n\t\t\tbar_surf, bar_rect = sprite.get_catch_bar()\n\t\t\tself.screen.blit(bar_surf, bar_rect)\n\n\t\tfor sprite in self.allneedles:\n\t\t\tself.screen.blit(sprite.image, sprite.rect)\n\n\t\tfor sprite in self.allpokeballs:\n\t\t\tself.screen.blit(sprite.image, sprite.rect)\n\n\tdef get_pokedex(self):\n\t\tempty_string = '----------'\n\t\tpokedex = []\n\n\t\tif not self.player.pokemon_seen:\n\t\t\tpokedex.append((empty_string, False))\n\n\t\telse:\n\t\t\tfor i in range(1, self.player.pokemon_seen[-1] + 1):\n\t\t\t\tif i in self.player.pokemon_seen:\n\t\t\t\t\tif i in self.player.pokemon_caught:\n\t\t\t\t\t\tpokedex.append((self.get_pokemon_name(i), 
True))\n\t\t\t\t\telse:\n\t\t\t\t\t\tpokedex.append((self.get_pokemon_name(i), False))\n\t\t\t\telse:\n\t\t\t\t\tpokedex.append((empty_string, False))\n\n\t\tself.curr_pokedex_selection = 0\n\t\treturn pokedex\n\n\n\n\n\tdef draw_pokedex_mode(self):\n\t\tself.screen.blit(self.pokedex_background, (0, 0))\n\n\t\t# # 10 = longest pokemon name (kangaskhan); a manti_sans_fixed with font_size of 16 makes each character 11px wide\n\t\t# # this aligns to the left and rescales if the font has been enlarged\n\t\tleft = const.SCREENWIDTH - 10 * 11 * (self.font_size / 16)\n\t\tcentery = const.SCREENHEIGHT / 2\n\t\tpokeball_img = ccc.Pokeball.IMG[ccc.Pokeball.SMALL]\n\n\t\t# Print currently selected pokemon name\n\t\tsurf = self.myfont.render(self.pokedex[self.curr_pokedex_selection][0], False, (0, 0, 255), (0, 0, 0))\n\t\tself.screen.blit(surf, surf.get_rect(left=left, centery=centery))\n\t\tif self.pokedex[self.curr_pokedex_selection][1]:\n\t\t\tself.screen.blit(pokeball_img, pokeball_img.get_rect(left=left-16, centery=centery))\n\n\t\t# Print names above\n\t\tfor i in range(self.curr_pokedex_selection - 1, -1, -1):\n\t\t\tsurf = self.myfont.render(self.pokedex[i][0], False, (0, 0, 255))\n\t\t\tself.screen.blit(surf, surf.get_rect(left=left, centery=centery - (self.curr_pokedex_selection - i) * self.font_size / 16 * 20))\n\t\t\tif self.pokedex[i][1]:\n\t\t\t\tself.screen.blit(pokeball_img, pokeball_img.get_rect(left=left-16, centery=centery - (self.curr_pokedex_selection - i) * self.font_size / 16 * 20))\n\n\t\t# Print names below\n\t\tfor i in range(self.curr_pokedex_selection + 1, len(self.pokedex)):\n\t\t\tsurf = self.myfont.render(self.pokedex[i][0], False, (0, 0, 255))\n\t\t\tself.screen.blit(surf, surf.get_rect(left=left, centery=centery + (i - self.curr_pokedex_selection) * self.font_size / 16 * 20))\n\t\t\tif self.pokedex[i][1]:\n\t\t\t\tself.screen.blit(pokeball_img, pokeball_img.get_rect(left=left-16, centery=centery + (i - self.curr_pokedex_selection) * self.font_size / 16 * 20))\n\t\t\n\t\t# Print pokemon image\n\t\timg = self.unknown_pokemon\n\n\t\tif self.pokedex[self.curr_pokedex_selection][0][0] != '-':\n\t\t\timg = pygame.image.load(ccc.Pokemon.MAIN_PATH + ccc.Pokemon.INDIVIDUAL_PATH[self.curr_pokedex_selection + 1])\n\n\t\tself.screen.blit(img, img.get_rect(left=const.SCREENWIDTH / 6, centery=centery))\n\n\n\tdef get_pokemon_name(self, dex_id):\n\t\tall_pokemon = {1: 'Bulbasaur',\n\t\t\t\t\t\t4: 'Charmander',\n\t\t\t\t\t\t7: 'Squirtle',\n\t\t\t\t\t\t25: 'Pikachu'}\n\n\t\treturn all_pokemon[dex_id]\n\n\n\n\tdef draw_walking_mode(self):\n\t\t# Draw everything 'below' the player\n\t\tfor layer in self.tile_layers:\n\t\t\tfor tile in layer:\n\t\t\t\tif not tile.print_above_player and not tile.invisible:\n\t\t\t\t\tself.screen.blit(tile.image, self.camera.apply(tile))\t\n\n\t\t# Draw the player\n\t\tfor sprite in self.allplayers:\n\t\t\tself.screen.blit(sprite.image, self.camera.apply(sprite))\n\n\t\t# Draw everything 'above' the player\n\t\tfor layer in self.tile_layers:\n\t\t\tfor tile in layer:\n\t\t\t\tif tile.print_above_player and not tile.invisible:\n\t\t\t\t\tself.screen.blit(tile.image, self.camera.apply(tile))\n\n\n\tdef draw(self):\n\t\tself.screen.fill((0, 0, 0))\n\n\t\tif self.fight_mode:\n\t\t\tself.draw_fight_mode()\n\n\t\telif self.pokedex_mode:\n\t\t\tself.draw_pokedex_mode()\n\n\t\telse:\n\t\t\tself.draw_walking_mode()\n\n\t\tpygame.display.flip()\n\n\n\tdef play(self):\n\t\twhile True:\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tif event.type == 
pygame.QUIT:\n\t\t\t\t\tself.quit()\n\t\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\t\tif event.key == pygame.K_ESCAPE:\n\t\t\t\t\t\tself.quit()\n\n\t\t\tself.update()\n\t\t\tself.draw()\n\t\t\tself.dt = self.clock.tick(45)\n\nif __name__ == '__main__':\n\tGame().play()\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6378600597381592, "alphanum_fraction": 0.6707819104194641, "avg_line_length": 17.769229888916016, "blob_id": "480d709c4fdaa5ecfcb747a27e138bfe3b81d0e6", "content_id": "4c1ca189b59b32cd71849caba10aa67adbe6acfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 243, "license_type": "no_license", "max_line_length": 65, "num_lines": 13, "path": "/sest.py", "repo_name": "munchmonk/explorer", "src_encoding": "UTF-8", "text": "#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python2.7\n# coding: utf-8\n\nf = open('allpok.txt', 'r')\n\ntop = 0\nfor line in f.readlines():\n\tif len(line) > top:\n\t\tprint(line, top)\n\t\ttop = len(line)\n\n\n# answer = Kangaskhan, 10 characters" }, { "alpha_fraction": 0.6441717743873596, "alphanum_fraction": 0.7361963391304016, "avg_line_length": 26.33333396911621, "blob_id": "c6f4efd40c37a5b109eaef2f138cfdea95975a31", "content_id": "cd8b1f24653717fece05866e683d1a8769d93af5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 163, "license_type": "no_license", "max_line_length": 65, "num_lines": 6, "path": "/const.py", "repo_name": "munchmonk/explorer", "src_encoding": "UTF-8", "text": "#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python2.7\n# coding: utf-8\n\nTILESIZE = 32\nSCREENWIDTH = TILESIZE * 11 # 11\nSCREENHEIGHT = TILESIZE * 10 # 10" }, { "alpha_fraction": 0.6637045741081238, "alphanum_fraction": 0.6790536642074585, "avg_line_length": 31.63018798828125, "blob_id": "f1f6344c9e678be49d3995b4c3dcdf4c2e7cb777", "content_id": "8f4e8a8c22c5ded3e223306f536c81504bae1a6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8670, "license_type": "no_license", "max_line_length": 161, "num_lines": 265, "path": "/ccc.py", "repo_name": "munchmonk/explorer", "src_encoding": "UTF-8", "text": "#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python2.7\n# coding: utf-8\n\nimport pygame\nimport os\nimport time\n\nimport const\n\n\nclass Camera:\n\tdef __init__(self, game):\n\t\t# Hint: think of the camera as a moving rectangle, draw it on paper, and the lower/upper bounds will make sense - you have to keep the rectangle inside the map\n\t\tself.game = game\n\t\tself.screen_width = game.screen.get_width()\n\t\tself.screen_height = game.screen.get_height()\n\t\tself.map_width = self.game.map_width\n\t\tself.map_height = self.game.map_height\n\t\tself.x = 0\n\t\tself.y = 0\n\n\tdef apply(self, target):\n\t\treturn pygame.Rect(target.rect.x - self.x, target.rect.y - self.y, target.rect.width, target.rect.height)\n\n\tdef update(self, target):\n\t\t# Center camera on target\n\t\tself.x = target.rect.centerx - self.screen_width / 2\n\t\tself.y = target.rect.centery - self.screen_height / 2\n\n\t\t# Move camera left/top half a tile if the map width/height has an even number of tiles - avoid cutting tiles in half\n\t\tif not(self.screen_width / const.TILESIZE % 2):\n\t\t\tself.x += const.TILESIZE / 2\n\t\tif not(self.screen_height / const.TILESIZE % 2):\n\t\t\tself.y += const.TILESIZE / 2\n\n\t\t# Stop updating if target is too close to left/top edges - don't see black bits outside 
of map\n\t\tself.x = max(self.x, 0)\n\t\tself.y = max(self.y, 0)\n\n\t\t# Stop updating if target is too close to right/bottom edges - don't see black bits outside of map\n\t\tself.x = min(self.x, self.map_width - self.screen_width)\n\t\tself.y = min(self.y, self.map_height - self.screen_height)\n\n\t\t# Center the camera if the map is smaller than the screen (note: camera values will be negative)\n\t\tif self.map_width <= self.screen_width:\n\t\t\tself.x = -(self.screen_width - self.map_width) / 2\n\t\tif self.map_height <= self.screen_height:\n\t\t\tself.y = -(self.screen_height - self.map_height) / 2\n\t\t\t\nclass BaseTile:\n\tdef __init__(self, tile_data, level):\n\t\tself.tile_data = tile_data\n\t\tlevel_path = os.path.join('assets', level)\n\t\ttile_path = os.path.join(level_path, self.tile_data[0])\n\t\tself.image = pygame.image.load(os.path.join(os.path.dirname(__file__), tile_path)).convert_alpha()\n\n\nclass Tile(pygame.sprite.Sprite):\n\tANIMFRAME_COOLDOWN = 1\n\n\tdef __init__(self, game, x, y, tile_name):\n\t\tself.game = game\n\t\tself.groups = game.allsprites, game.alltiles\n\t\tpygame.sprite.Sprite.__init__(self, self.groups)\n\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.coord = self.game.index_to_coord((self.x, self.y))\n\t\tself.tile_data = self.game.base_tiles[tile_name].tile_data\n\t\tself.print_above_player = True if 'TOP_LAYER' in self.tile_data else False\n\t\tself.invisible = True if 'INVISIBLE' in self.tile_data else False\n\n\t\tself.images = list()\n\t\tself.find_images()\n\t\tself.img_index = 0\n\t\tself.last_anim = 0\n\t\tself.image = self.images[self.img_index]\n\t\tself.rect = self.image.get_rect(topleft=(self.coord))\n\n\n\n\tdef find_images(self):\n\t\t# Add the default one\n\t\tself.images.append(self.game.base_tiles[self.tile_data[0]].image)\n\n\t\t# Add any more tiles with the format TILE_NAME_ID ANIM_TILE_NAME ID\n\t\tif self.tile_data[1][:4] == 'ANIM':\n\t\t\tfor base_tile in self.game.base_tiles.values():\n\t\t\t\t# Don't add itself and don't add a tile more than once\n\t\t\t\tif base_tile.tile_data[0] != self.tile_data[0] and base_tile.tile_data[1] == self.tile_data[1] and base_tile.image not in self.images:\n\t\t\t\t\tself.images.insert(int(base_tile.tile_data[2]), base_tile.image)\n\n\tdef update(self):\n\t\tif len(self.images) > 1:\n\t\t\tif time.time() - self.last_anim > Tile.ANIMFRAME_COOLDOWN:\n\t\t\t\tself.img_index = (self.img_index + 1) % len(self.images)\n\t\t\t\tself.image = self.images[self.img_index]\n\t\t\t\tself.last_anim = time.time()\n\n\nclass CatchNeedle(pygame.sprite.Sprite):\n\tdef __init__(self, game, difficulty, success_edges):\n\t\tself.game = game\n\t\tself.groups = game.allsprites, game.allneedles\n\t\tpygame.sprite.Sprite.__init__(self, self.groups)\n\n\t\tself.image = pygame.Surface((10, 45))\n\t\tself.image.fill((255, 0, 128))\n\t\tself.rect = self.image.get_rect(topleft=(0, const.SCREENHEIGHT - 65))\n\t\tself.success_edges = success_edges\n\n\t\tself.dir = 1\n\t\t# self.speed = difficulty + 10\n\t\tself.speed = 1\n\t\tself.stopped = False\n\n\tdef update(self):\n\t\tif self.stopped:\n\t\t\treturn\n\n\t\tdx = self.dir * self.speed * self.game.dt\n\t\tif abs(dx) < 1:\n\t\t\tdx = self.dir\n\t\tself.rect.x += dx\n\n\t\tif (self.rect.x <= 0 and self.dir < 0) or (self.rect.x >= const.SCREENWIDTH - self.rect.width and self.dir > 0):\n\t\t\tself.dir *= -1\n\n\tdef stop(self):\n\t\tself.stopped = True\n\n\tdef success(self):\n\t\tif self.rect.right < self.success_edges[0]:\n\t\t\treturn False\n\t\tif self.rect.left > 
self.success_edges[1]:\n\t\t\treturn False\n\t\treturn True\n\n\n\nclass Pokemon(pygame.sprite.Sprite):\n\tMAIN_PATH = os.path.join(os.path.dirname(__file__), 'assets/pokemon/')\n\tINDIVIDUAL_PATH = {0: 'transparent.png',\n\t\t\t\t\t\t1: 'bulbasaur.png',\n\t\t\t\t\t\t4: 'charmander.png',\n\t\t\t\t\t\t7: 'squirtle.png',\n\t\t\t\t\t\t25: 'pikachu.png'\n\t\t\t\t\t\t}\n\n\tdef __init__(self, game, dex_id):\n\t\tself.game = game\n\t\tself.groups = game.allsprites, game.allpokemon\n\t\tpygame.sprite.Sprite.__init__(self, self.groups)\n\n\t\tself.dex_id = dex_id\n\t\tself.image = pygame.image.load(Pokemon.MAIN_PATH + Pokemon.INDIVIDUAL_PATH[self.dex_id])\n\t\tself.rect = self.image.get_rect(centerx=(const.SCREENWIDTH / 2), centery=(const.SCREENHEIGHT / 3))\n\t\tself.transparent = False\n\n\t\tself.difficulty = 0\n\t\tself.green_bar_edges = (0, 0)\n\t\tself.set_difficulty()\n\n\t\tself.catch_needle = CatchNeedle(self.game, self.difficulty, self.green_bar_edges)\n\n\tdef update(self):\n\t\tif self.transparent:\n\t\t\tself.image = pygame.image.load(Pokemon.MAIN_PATH + Pokemon.INDIVIDUAL_PATH[0])\n\n\tdef set_transparent(self):\n\t\tself.transparent = True\n\t\t\n\n\tdef set_difficulty(self):\n\t\tif self.dex_id in (1, 4, 7):\n\t\t\tself.difficulty = 0\n\t\t\tself.green_bar_edges = (const.SCREENWIDTH * 1 / 3, const.SCREENWIDTH * 2 / 3)\n\t\telif self.dex_id in (25,):\n\t\t\tself.difficulty = 1\n\t\t\tself.green_bar_edges = (const.SCREENWIDTH * 3 / 7, const.SCREENWIDTH * 4 / 7)\n\n\tdef get_catch_bar(self):\n\t\tgreen_bar_width = self.green_bar_edges[1] - self.green_bar_edges[0]\n\t\tred_bar_width = (const.SCREENWIDTH - green_bar_width) / 2\n\t\tbar_height = 15\n\t\tbar_top = const.SCREENHEIGHT - 50\n\n\t\tgreen_bar = pygame.Surface((green_bar_width, bar_height))\n\t\tgreen_bar.fill((0, 255, 0))\n\t\tleft_red_bar = pygame.Surface((red_bar_width, bar_height))\n\t\tleft_red_bar.fill((255, 0, 0))\n\t\tright_red_bar = pygame.Surface((red_bar_width, bar_height))\n\t\tright_red_bar.fill((255, 0, 0))\n\n\t\tret_surface = pygame.Surface((const.SCREENWIDTH, bar_height))\n\t\tret_surface.blit(left_red_bar, (0, 0))\n\t\tret_surface.blit(green_bar, (red_bar_width, 0))\n\t\tret_surface.blit(right_red_bar, (red_bar_width + green_bar_width, 0))\n\n\t\tret_rect = pygame.Rect(0, bar_top, const.SCREENWIDTH, bar_height)\n\n\t\treturn ret_surface, ret_rect\n\n\nclass Pokeball(pygame.sprite.Sprite):\n\tOPEN, HALF_OPEN, CLOSED, TRANSPARENT, SMALL = 'open', 'half_open', 'closed', 'transparent', 'small'\n\n\tIMG = {OPEN: pygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/pokeballs/pokeball_open.png')),\n\t\t\tHALF_OPEN: pygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/pokeballs/pokeball_half_open.png')),\n\t\t\tCLOSED: pygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/pokeballs/pokeball_closed.png')),\n\t\t\tTRANSPARENT: pygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/pokeballs/transparent.png')),\n\t\t\tSMALL: pygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/pokeballs/pokeball_small.png')),}\n\n\tdef __init__(self, game, pokemon, success):\n\t\tself.game = game\n\t\tself.groups = game.allsprites, game.allpokeballs\n\t\tpygame.sprite.Sprite.__init__(self, self.groups)\n\n\t\tself.success = success\n\t\tself.state = Pokeball.OPEN\n\t\tself.image = None\n\t\tself.set_image()\n\t\tself.rect = self.image.get_rect(centerx=const.SCREENWIDTH / 2, top=const.SCREENHEIGHT)\n\t\tself.pokemon = pokemon\n\n\t\tself.speed = -0.6\n\t\tself.dead = 
False\n\t\tself.kill_timer = 0\n\n\tdef set_image(self):\n\t\tself.image = Pokeball.IMG[self.state]\n\n\tdef update(self):\n\t\t# Movement\n\t\tdy = 0\n\t\tif self.speed > 0:\n\t\t\tdy = max(self.speed * self.game.dt, 1)\n\t\telif self.speed < 0:\n\t\t\tdy = min(self.speed * self.game.dt, -1)\n\n\t\tself.rect.y += dy\n\n\t\t# Going up\n\t\tif self.state == Pokeball.OPEN and self.rect.y <= 15:\n\t\t\tself.state = Pokeball.HALF_OPEN\n\t\t\tself.set_image()\n\t\t\tself.speed = 0.15\n\n\t\t# Going down\n\t\tif self.state == Pokeball.HALF_OPEN and self.rect.y >= 100:\n\t\t\tif self.success:\n\t\t\t\tself.state = Pokeball.CLOSED\n\t\t\t\tself.set_image()\n\t\t\t\tself.speed = 0\n\t\t\t\tself.pokemon.set_transparent()\n\t\t\telse:\n\t\t\t\tself.state = Pokeball.TRANSPARENT\n\t\t\t\tself.set_image()\n\n\t\t\tself.kill_timer = time.time()\n\n\t\tif self.kill_timer > 0 and time.time() - self.kill_timer > 2.5:\n\t\t\tself.dead = True\n\t\t\tself.kill()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.640275239944458, "alphanum_fraction": 0.6547145843505859, "avg_line_length": 30.820987701416016, "blob_id": "190b2d3297bc174a2ee6de2511c017946c1fc172", "content_id": "2b039eb9ab0320a80342d586d3e96ca6e5aa6953", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10322, "license_type": "no_license", "max_line_length": 139, "num_lines": 324, "path": "/ddd.py", "repo_name": "munchmonk/explorer", "src_encoding": "UTF-8", "text": "#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python2.7\n# coding: utf-8\n\nimport pygame\nimport os\nimport time\n\nimport ccc\nimport const\n\n\nclass Player(pygame.sprite.Sprite):\n\tRIGHT, LEFT, UP, DOWN = range(4)\n\n\tIMG = {RIGHT: [pygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/player/player_right_0.png')),\n\t\t\t\t\tpygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/player/player_right_1.png'))],\n\n\t\t\tLEFT: [pygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/player/player_left_0.png')),\n\t\t\t\t\tpygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/player/player_left_1.png'))],\n\n\t\t\tUP: [pygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/player/player_up_0.png')),\n\t\t\t\t\tpygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/player/player_up_1.png')),\n\t\t\t\t\tpygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/player/player_up_2.png'))],\n\n\t\t\tDOWN: [pygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/player/player_down_0.png')),\n\t\t\t\t\tpygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/player/player_down_1.png')),\n\t\t\t\t\tpygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/player/player_down_2.png'))]}\n\n\tPOKEBALL_IMG = {'open': pygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/pokeballs/pokeball_open.png')),\n\t\t\t\t\t'half_open': pygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/pokeballs/pokeball_half_open.png')),\n\t\t\t\t\t'closed': pygame.image.load(os.path.join(os.path.dirname(__file__), 'assets/pokeballs/pokeball_closed.png'))}\n\n\tMOVEMENT_COOLDOWN = 0.1\n\tSPEED = 0.1\n\tANIMFRAME_COOLDOWN = 0.2\n\t\n\tdef __init__(self, game, spawn_x, spawn_y):\n\t\tself.game = game\n\t\tself.groups = game.allsprites, game.allplayers\n\t\tpygame.sprite.Sprite.__init__(self, self.groups)\n\n\t\t# This below is a copy of reset() but I'd rather it be clear what attributes Player has instead of 
declaring them outside of init()\n\t\tself.curr_tile = [spawn_x, spawn_y]\n\t\tself.facing = Player.DOWN\n\t\tself.anim_frame = 0\n\t\tself.image = Player.IMG[self.facing][self.anim_frame]\n\t\tself.rect = self.image.get_rect(topleft=(self.game.index_to_coord(self.curr_tile)))\n\n\t\tself.last_anim = 0\n\t\tself.last_movement = 0\n\t\tself.dir = [0, 0]\n\t\tself.target_tile = None\n\t\tself.target_x = None\n\t\tself.target_y = None\n\n\t\tself.curr_pokemon = None\n\t\tself.curr_needle = None\n\t\tself.curr_pokeball = None\n\t\tself.pokemon_caught = list()\n\t\tself.pokemon_seen = list()\n\n\t\tself.last_pokedex_scroll = 0\n\n\tdef get_input(self):\n\t\t# Don't accept input if player is already moving\n\t\tif self.dir != [0, 0]:\n\t\t\treturn\n\n\t\tkeys = pygame.key.get_pressed()\n\n\t\t# Fight mode\n\t\tif self.game.fight_mode:\n\t\t\tif keys[pygame.K_q] and not self.curr_pokeball:\n\t\t\t\tself.exit_fight_mode()\n\t\t\telif keys[pygame.K_SPACE] and not self.curr_pokeball:\n\t\t\t\tself.throw_pokeball()\n\n\t\t# Pokedex mode (elif keeps the three modes mutually exclusive, so fight mode doesn't fall through to walking input)\n\t\telif self.game.pokedex_mode:\n\t\t\tif keys[pygame.K_q]:\n\t\t\t\tself.exit_pokedex_mode()\n\t\t\telif keys[pygame.K_w] and time.time() - self.last_pokedex_scroll > 0.15:\n\t\t\t\tself.move_up_pokedex()\n\t\t\telif keys[pygame.K_s] and time.time() - self.last_pokedex_scroll > 0.15:\n\t\t\t\tself.move_down_pokedex()\n\n\n\t\t# Walking mode\n\t\telse:\n\t\t\tif keys[pygame.K_w] or keys[pygame.K_UP]:\n\t\t\t\tself.dir = [0, -1]\n\t\t\telif keys[pygame.K_a] or keys[pygame.K_LEFT]:\n\t\t\t\tself.dir = [-1, 0]\n\t\t\telif keys[pygame.K_s] or keys[pygame.K_DOWN]:\n\t\t\t\tself.dir = [0, 1]\n\t\t\telif keys[pygame.K_d] or keys[pygame.K_RIGHT]:\n\t\t\t\tself.dir = [1, 0]\n\t\t\telif keys[pygame.K_p]:\n\t\t\t\tself.game.pokedex_mode = True\n\n\t\t\tif self.game.joysticks:\n\t\t\t\tif self.game.joysticks[0].get_axis(0) > 0.5:\n\t\t\t\t\tself.dir = [1, 0]\n\t\t\t\tif self.game.joysticks[0].get_axis(0) < -0.5:\n\t\t\t\t\tself.dir = [-1, 0]\n\t\t\t\tif self.game.joysticks[0].get_axis(1) > 0.5:\n\t\t\t\t\tself.dir = [0, 1]\n\t\t\t\tif self.game.joysticks[0].get_axis(1) < -0.5:\n\t\t\t\t\tself.dir = [0, -1]\n\n\t\t\tself.set_facing()\n\n\tdef set_facing(self):\n\t\tif self.dir == [1, 0]:\n\t\t\tself.facing = Player.RIGHT\n\t\telif self.dir == [-1, 0]:\n\t\t\tself.facing = Player.LEFT\n\t\telif self.dir == [0, -1]:\n\t\t\tself.facing = Player.UP\n\t\telif self.dir == [0, 1]:\n\t\t\tself.facing = Player.DOWN\n\n\tdef is_tile_legal(self, tile):\n\t\t# Check edges\n\t\tif tile[0] < 0 or tile[1] < 0:\n\t\t\treturn False\n\t\t# if tile[0] > self.game.map.get_horiz_tiles() - 1 or tile[1] > self.game.map.get_vert_tiles() - 1:\n\t\tif tile[0] > self.game.horiz_tiles - 1 or tile[1] > self.game.vert_tiles - 1:\n\t\t\treturn False\n\n\t\t# Custom rules - to be processed by an external function and potentially level by level in the future\n\t\ttags = []\n\n\t\tfor t in self.game.find_tile_by_coord(tile):\n\t\t\tif t.tile_data:\n\t\t\t\tfor tag in t.tile_data:\n\t\t\t\t\ttags.append(tag)\n\n\t\tif any(tag in ('WATER', 'BOULDER', 'ROCK') for tag in tags):\n\t\t\treturn False\n\n\t\tif 'TREE' in tags and 'BOTTOM' in tags:\n\t\t\treturn False\n\n\t\tif 'CASA' in tags:\n\t\t\tif any(tag in ('BOTTOM_LEFT', 'BOTTOM_RIGHT', 'TOP_LEFT', 'MID_TOP', 'TOP_RIGHT') for tag in tags):\n\t\t\t\treturn False\n\n\t\treturn True\n\n\n\tdef throw_pokeball(self):\n\t\tself.curr_needle.stop()\n\t\tself.curr_pokeball = ccc.Pokeball(self.game, self.curr_pokemon, self.curr_needle.success())\n\n\n\tdef 
update_fight(self):\n\t\tif not self.game.fight_mode:\n\t\t\treturn \n\n\t\tif self.curr_needle.stopped:\n\t\t\tif self.curr_needle.success():\n\t\t\t\tif self.curr_pokemon.dex_id not in self.pokemon_caught:\n\t\t\t\t\tself.pokemon_caught.append(self.curr_pokemon.dex_id)\n\t\t\t\t\tself.pokemon_caught.sort()\n\t\t\tif self.curr_pokeball.dead:\n\t\t\t\tself.curr_pokeball = None\n\t\t\t\tself.exit_fight_mode()\n\n\tdef update_pokedex(self):\n\t\tif not self.game.pokedex_mode:\n\t\t\treturn\n\n\t\tif not self.game.pokedex:\n\t\t\tself.game.pokedex = self.game.get_pokedex()\n\n\n\tdef move_up_pokedex(self):\n\t\tif self.game.curr_pokedex_selection > 0:\n\t\t\tself.game.curr_pokedex_selection -= 1\n\t\t\tself.last_pokedex_scroll = time.time()\n\n\n\n\tdef move_down_pokedex(self):\n\t\tif self.game.curr_pokedex_selection < len(self.game.pokedex) - 1:\n\t\t\tself.game.curr_pokedex_selection += 1\n\t\t\tself.last_pokedex_scroll = time.time()\n\n\n\tdef exit_fight_mode(self):\n\t\tself.game.fight_mode = False\n\t\tself.curr_pokemon.kill()\n\t\tself.curr_pokemon = None\n\t\tself.curr_needle.kill()\n\t\tself.curr_needle = None\n\n\tdef exit_pokedex_mode(self):\n\t\tself.game.pokedex_mode = False\n\t\tself.game.pokedex = []\n\n\n\tdef move(self):\n\t\tif self.game.fight_mode:\n\t\t\treturn\n\n\t\tif self.dir != [0, 0] and time.time() - self.last_movement >= Player.MOVEMENT_COOLDOWN and not self.target_tile:\n\t\t\tself.target_tile = [0, 0]\n\t\t\tself.target_tile[0] = self.curr_tile[0] + self.dir[0]\n\t\t\tself.target_tile[1] = self.curr_tile[1] + self.dir[1]\n\n\t\t\tif not self.is_tile_legal(self.target_tile):\n\t\t\t\tself.target_tile = None\n\t\t\t\tself.dir = [0, 0]\n\t\t\telse:\n\t\t\t\tself.last_movement = time.time()\n\t\t\t\tself.target_x = self.rect.x + self.dir[0] * const.TILESIZE\n\t\t\t\tself.target_y = self.rect.y + self.dir[1] * const.TILESIZE\t\n\n\t\tif self.target_tile:\n\t\t\t# Calculate base dx and dy tied to FPS\n\t\t\tdx = self.dir[0] * Player.SPEED * self.game.dt\n\t\t\tdy = self.dir[1] * Player.SPEED * self.game.dt\n\n\t\t\t# Make sure dx and dy are at least 1 if the player is moving - it can round down to 0 keeping it still if not\n\t\t\tif self.dir[0] and abs(dx) < 1:\n\t\t\t\tdx = self.dir[0]\n\t\t\tif self.dir[1] and abs(dy) < 1:\n\t\t\t\tdy = self.dir[1]\n\n\t\t\t# Move player, making sure not to move them past the target tile\n\t\t\tif dx > 0:\n\t\t\t\tself.rect.x = min(self.rect.x + dx, self.target_x)\n\t\t\tif dy > 0:\n\t\t\t\tself.rect.y = min(self.rect.y + dy, self.target_y)\n\t\t\tif dx < 0:\n\t\t\t\tself.rect.x = max(self.rect.x + dx, self.target_x)\n\t\t\tif dy < 0:\n\t\t\t\tself.rect.y = max(self.rect.y + dy, self.target_y)\n\n\t\t\tif self.rect.x == self.target_x and self.rect.y == self.target_y:\n\t\t\t\tself.curr_tile = self.target_tile\n\t\t\t\tself.target_tile = None\n\t\t\t\tself.target_x = None\n\t\t\t\tself.target_y = None\n\n\t\t\t\t# This helps keep the walking animation smooth instead of restarting it every tile\n\t\t\t\told_dir = self.dir\n\t\t\t\tself.dir = [0, 0]\n\t\t\t\tself.get_input()\n\t\t\t\tif self.dir != old_dir:\n\t\t\t\t\tself.anim_frame = 0\n\n\t\t\t\t# Special actions\n\t\t\t\tfor tile in self.game.find_tile_by_coord(self.curr_tile):\n\t\t\t\t\tif tile.tile_data:\n\t\t\t\t\t\tfor tag in tile.tile_data:\n\t\t\t\t\t\t\tif 'PORTAL' in tag:\n\t\t\t\t\t\t\t\tself.go_through_portal(tag)\n\n\t\t\t\t\t\t\tif 'POKEMON_SPAWN_POINT' in tag:\n\t\t\t\t\t\t\t\t# This avoids a bug where, by having a nonzero dir, it \"moves\" to the current tile that 
spawned a pokemon thus spawning a second one\n\t\t\t\t\t\t\t\t# it also prevents the player from moving after fighting a pokemon\n\t\t\t\t\t\t\t\tself.dir = [0, 0]\n\t\t\t\t\t\t\t\tself.game.spawn_pokemon(tile.tile_data)\n\n\n\tdef go_through_portal(self, portal_tag):\n\t\t# Portal tag format: NEWLEVELNAME_PORTAL_SPAWNX_SPAWNY_FACING\n\t\ttag_string = portal_tag.split('_')\n\n\t\tspawn_x = int(tag_string[-3])\n\t\tspawn_y = int(tag_string[-2])\n\t\tfacing = int(tag_string[-1])\n\t\tnew_level = '_'.join(tag_string[:-4])\n\n\t\tself.game.curr_level = new_level\n\t\tself.game.setup_level(spawn_x, spawn_y, facing)\n\n\n\n\tdef reset(self, spawn_x, spawn_y, facing):\n\t\tself.curr_tile = [spawn_x, spawn_y]\n\t\tself.facing = facing\n\t\tself.anim_frame = 0\n\t\tself.image = Player.IMG[self.facing][self.anim_frame]\n\t\tself.rect = self.image.get_rect(topleft=(self.game.index_to_coord(self.curr_tile)))\n\n\t\tself.last_anim = 0\n\t\tself.last_movement = 0\n\t\tself.dir = [0, 0]\n\t\tself.target_tile = None\n\t\tself.target_x = None\n\t\tself.target_y = None\n\n\n\tdef update_sprite(self):\n\t\tif self.dir == [0, 0]:\n\t\t\tself.anim_frame = 0\n\t\telif self.dir == [0, 1]:\n\t\t\tif time.time() - self.last_anim > Player.ANIMFRAME_COOLDOWN:\n\t\t\t\tself.anim_frame = (self.anim_frame + 1) % len(Player.IMG[Player.DOWN])\n\t\t\t\tself.last_anim = time.time()\n\t\telif self.dir == [0, -1]:\n\t\t\tif time.time() - self.last_anim > Player.ANIMFRAME_COOLDOWN:\n\t\t\t\tself.anim_frame = (self.anim_frame + 1) % len(Player.IMG[Player.UP])\n\t\t\t\tself.last_anim = time.time()\n\t\telif self.dir == [1, 0]:\n\t\t\tif time.time() - self.last_anim > Player.ANIMFRAME_COOLDOWN:\n\t\t\t\tself.anim_frame = (self.anim_frame + 1) % len(Player.IMG[Player.RIGHT])\n\t\t\t\tself.last_anim = time.time()\n\t\telif self.dir == [-1, 0]:\n\t\t\tif time.time() - self.last_anim > Player.ANIMFRAME_COOLDOWN:\n\t\t\t\tself.anim_frame = (self.anim_frame + 1) % len(Player.IMG[Player.LEFT])\n\t\t\t\tself.last_anim = time.time()\n\n\t\tself.image = Player.IMG[self.facing][self.anim_frame]\n\n\tdef update(self):\n\t\tself.get_input()\n\t\tself.move()\n\t\tself.update_fight()\n\t\tself.update_pokedex()\n\t\tself.update_sprite()\n\n\n\n\n\n\n\n\t\t" }, { "alpha_fraction": 0.6443001627922058, "alphanum_fraction": 0.6875901818275452, "avg_line_length": 29.799999237060547, "blob_id": "329940c9f860dbeb382d81ff19d9ba60e4fb8f4a", "content_id": "12b7f124d3faebd7742fe5c26973634f2de8e864", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1386, "license_type": "no_license", "max_line_length": 90, "num_lines": 45, "path": "/test.py", "repo_name": "munchmonk/explorer", "src_encoding": "UTF-8", "text": "#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python2.7\n# coding: utf-8\n\nimport pygame\nimport sys\npygame.init()\n\nwidth, height = 800, 600\n\nscreen = pygame.display.set_mode((width, height))\npath = 'assets/pokeballs/pokeball_closed.png'\norig = pygame.image.load(path)\nlarge = pygame.image.load(path)\nlarge = pygame.transform.scale(large, (large.get_width() * 2, large.get_height() * 2))\nlarger = pygame.image.load(path)\nlarger = pygame.transform.scale(larger, (larger.get_width() * 3, larger.get_height() * 3))\n\n# print(orig.get_width(), orig.get_height())\n# print(large.get_width(), large.get_height())\n# print(larger.get_width(), larger.get_height())\n\n# myfont = pygame.font.Font(None, 30)\n# myfont = pygame.font.Font('assets/misc/lemonmilk_font.otf', 30)\ns = 
'0123456789'\n# s = '0'\nsize = 16 # 32-> 22 per character, 16-> 11 per character\nmyfont = pygame.font.Font('assets/misc/manti_sans_fixed.otf', size)\nmyfontsurf = myfont.render(s, False, (0, 255, 0))\nprint(myfontsurf.get_rect().width)\n\nwhile True:\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tpygame.quit()\n\t\t\tsys.exit()\n\n\t\tscreen.fill((0, 0, 0))\n\t\tcentery = height / 2\n\t\tleft = width - len(s) * 11\n\t\tscreen.blit(myfontsurf, myfontsurf.get_rect(left=left, centery=centery))\n\t\t# screen.blit(orig, (0, 0))\n\t\t# screen.blit(large, (0, 64))\n\t\t# screen.blit(larger, (0, 256))\n\n\tpygame.display.flip()\n" } ]
6
k0kishima/machine_learning_hands_on
https://github.com/k0kishima/machine_learning_hands_on
e2a389e999f571880894f9f9b913f496e7cd93bd
3eaf8443f11b5d2e91622dc16bbf0bd869b85a18
47a1987f9808fc4ca86caf37efd0f75d691e0d38
refs/heads/master
2023-06-16T06:48:37.271226
2021-07-12T10:59:53
2021-07-12T10:59:53
356,713,039
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6035503149032593, "alphanum_fraction": 0.6232741475105286, "avg_line_length": 29.727272033691406, "blob_id": "d24c385a33c008df3c4923062a5e17b4a127ffa7", "content_id": "199af64927d577670cc64b53a82a686fd905be36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1074, "license_type": "no_license", "max_line_length": 128, "num_lines": 33, "path": "/keiba_machine_learning/netkeiba/models.py", "repo_name": "k0kishima/machine_learning_hands_on", "src_encoding": "UTF-8", "text": "import os\nfrom typing import IO\nfrom keiba_machine_learning.models import Race as Base\nfrom keiba_machine_learning.netkeiba.constants import DATABASE_PAGE_BASE_URL, RACE_DATA_DIR, ENCODING_OF_WEB_PAGE\n\n\nclass Race(Base):\n def __hash__(self) -> int:\n return int(f'{self.year}{self.race_track.value:02d}{self.series_number:02d}{self.day_number:02d}{self.race_number:02d}')\n\n @property\n def id(self) -> int:\n return self.__hash__()\n\n @property\n def url(self) -> str:\n \"\"\"\n 以下のようなnetkeibaでのレース結果ページを返す\n https://db.netkeiba.com/race/201901010101\n\n Returns:\n str: netkeibaでのレース結果ページのURL\n \"\"\"\n return '/'.join([str(url_parts)\n for url_parts in [DATABASE_PAGE_BASE_URL, \"race\", self.id]])\n\n @property\n def file_path(self) -> str:\n return os.path.join(RACE_DATA_DIR, f'{self.id}.html')\n\n @property\n def file(self) -> IO:\n return open(self.file_path, mode='r', encoding=ENCODING_OF_WEB_PAGE)\n" }, { "alpha_fraction": 0.68360435962677, "alphanum_fraction": 0.7018970251083374, "avg_line_length": 26.849056243896484, "blob_id": "2e7244b83547c6270558fe3445cbe0a6a45bcde0", "content_id": "d9b14797e930ea121a1b19b8ecb4d90e3bcca33c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1960, "license_type": "no_license", "max_line_length": 111, "num_lines": 53, "path": "/scripts/netkeiba/download_race_pages.py", "repo_name": "k0kishima/machine_learning_hands_on", "src_encoding": "UTF-8", "text": "\"\"\"netkeibaからレースファイルをダウンロードするスクリプト\n\n 例えば以下のURLのようなものがレースファイルである\n https://db.netkeiba.com/race/201901010101\n\n このスクリプトでは指定された年のレースのファイルを全てダウンロードする\n\n netkeiba側の仕様でレースデータが存在しないページにアクセスしても404をHTTPステータスコードとしてレスポンスしないので、\n ここでは内容を気にせず保存を行う\n (データが存在しないことによる異常の処理はパーサーの責務とする)\n\n\n Args:\n year (int): コマンドライン引数としてダウンロード対象とするレースが開催された年を指定する\n\n Examples:\n ※ 実行時はパスを通すこと\n ※ 以下はコマンドライン上にて\n\n source venv/bin/activate\n export PYTHONPATH=\".:$PYTHONPATH\"\n python scripts/netkeiba/download_race_pages.py 2019\n\"\"\"\nimport os\nimport time\nimport sys\nimport urllib.request\nfrom tqdm import tqdm\n\nfrom keiba_machine_learning.netkeiba.constants import RACE_DATA_DIR\nfrom keiba_machine_learning.models import RaceTrac\nfrom keiba_machine_learning.netkeiba.models import Race\n\n\nos.makedirs(RACE_DATA_DIR, exist_ok=True)\n\nargs = sys.argv\nYEAR = int(args[1])\n\nraces = []\nfor race_track in RaceTrac:\n for series_number in range(1, Race.MAX_SERIES_NUMBER + 1):\n for day_number in range(1, Race.MAX_DAY_NUMBER + 1):\n for race_number in range(1, Race.MAX_RACE_NUMBER + 1):\n races.append(Race(year=YEAR, race_track=race_track,\n series_number=series_number, day_number=day_number, race_number=race_number))\n\nfor race in tqdm(races):\n if os.path.exists(race.file_path):\n continue\n else:\n urllib.request.urlretrieve(race.url, race.file_path)\n time.sleep(1)\n" }, { "alpha_fraction": 0.6925343871116638, "alphanum_fraction": 0.7210215926170349, "avg_line_length": 13.54285717010498, "blob_id": 
"2d0ebf0f4e6319f8a38ba43bc9c2f6057b256097", "content_id": "e8fb922786c89f4c006b8c7a78d2c4fd1378ba7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1448, "license_type": "no_license", "max_line_length": 98, "num_lines": 70, "path": "/README.md", "repo_name": "k0kishima/machine_learning_hands_on", "src_encoding": "UTF-8", "text": "## 概要\n\n機械学習の自習用リポジトリ\n<br>\nテーマを競馬予想としてロジスティック回帰を行う\n\n## セットアップ\n\n### リポジトリを clone\n\n```bash\ngit clone [email protected]:k0kishima/machine_learning_hands_on.git\n```\n\n### 仮想環境構築\n\n```bash\ncd /path/to/project\npython3 -m venv venv\n```\n\n### パッケージインストール\n\n```bash\nsource venv/bin/activate\npip install -r requirements.txt\n```\n\n## 運用\n\n- 事前に `venv` を有効化しておくこと\n- パスを通しておくこと\n\n```bash\nsource venv/bin/activate\nexport PYTHONPATH=\".:$PYTHONPATH\"\n```\n\n### データの入手\n\n引数は対象の年\n\n```bash\npython scripts/netkeiba/download_race_pages.py 2019\n```\n\nDataFrame を pickle で保存(素振りなので移植性や再利用性は特に気にしない)\n\n```bash\npython scripts/netkeiba/create_race_result_data_frame.py\n```\n\n### 予想の実施\n\n`jupyter lab` を起動しておく\n\n```bash\njupyter lab\n```\n\n[http://localhost:8888/](http://localhost:8888/) へアクセスし、以下のNoteBookを実行する\n\n[logistic_regression_exam.ipynb](./logistic_regression_exam.ipynb)\n\n## その他\n\n### 参考資料\n\n* [競馬予想で始める機械学習〜完全版〜](https://zenn.dev/dijzpeb/books/848d4d8e47001193f3fb)\n* [【競馬予想AI】Pythonで正規表現を使って競馬データを加工する【機械学習】 - YouTube](https://www.youtube.com/watch?v=FPnzEgKBy8w)\n" }, { "alpha_fraction": 0.6600732803344727, "alphanum_fraction": 0.7018315196037292, "avg_line_length": 27.4375, "blob_id": "3647680871715b40ec10a576b6d4f926789da50c", "content_id": "f79ee69b626b87a96e043586a90999de377a8e87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1945, "license_type": "no_license", "max_line_length": 113, "num_lines": 48, "path": "/keiba_machine_learning/types.py", "repo_name": "k0kishima/machine_learning_hands_on", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom typing import TypedDict\nfrom keiba_machine_learning.models import RaceTrac, TrackKind, TrackDirection, TrackSurface, Weather, HorseGender\n\n\nclass RaceInformation(TypedDict):\n \"\"\"レース情報をスクレイピングした結果として返すべき dict の構造を定義するクラス\"\"\"\n title: str\n race_track: RaceTrac\n track_kind: TrackKind\n track_direction: TrackDirection\n race_distance_by_meter: int\n track_surface: TrackSurface\n weather: Weather\n race_number: int\n starts_at: datetime\n\n\nclass RaceRecord(TypedDict):\n \"\"\"レース結果の1行として返すべき dict の構造を定義するクラス\n\n 各々の着に対応\n\n 例えば以下のページだと9頭立てなので1〜9着まであり、各々の着にこのdictが対応する\n https://db.netkeiba.com/race/201901010101\n\n > 1 1 1 ゴルコンダ 牡2 54 ルメール 1:48.3 ** 1-1-1-1 36.5 1.4 1 518(-16) [東]木村哲也 サンデーレーシング 500.0\n\n 上記のようなレース結果の表の1行を保持するデータ構造\n ※ 全項目を保持するわけではない\n \"\"\"\n # TODO 重要指標である「着差」を入れる\n # アタマ、クビ、ハナ、n馬身など競馬特有の尺度をどう保持するのが適切なのかは一考する必要がある\n order_of_placing: int # 着順\n bracket_number: int # 枠番\n horse_number: int # 馬番\n horse_id: int\n horse_name: str\n horse_age: int\n horse_gender: HorseGender\n impost: float # 斤量\n jockey_id: str # ※ \"05203\"のような0埋めで5桁が大半だが引退した騎手だと\"z0004\"みたいな変則的な書式も存在している\n jockey_name: str\n race_time: float # タイム\n win_betting_ratio: float # 単勝倍率\n favorite_order: int # 人気\n horse_weight: float # 馬体重\n weight_change: float # 体重変化\n" }, { "alpha_fraction": 0.5433850884437561, "alphanum_fraction": 0.5528455376625061, "avg_line_length": 34.41884994506836, "blob_id": "d11996a0a4669fdd1865d8d2e55bba6212894a26", "content_id": 
"04ee57c70039be19a76828ab0e8ac303c4cbf76e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7581, "license_type": "no_license", "max_line_length": 169, "num_lines": 191, "path": "/keiba_machine_learning/netkeiba/scrapers.py", "repo_name": "k0kishima/machine_learning_hands_on", "src_encoding": "UTF-8", "text": "import re\nfrom datetime import datetime\nfrom typing import IO, List\nfrom bs4 import BeautifulSoup\nfrom keiba_machine_learning.models import RaceTracFactory, TrackKindFactory, TrackDirectionFactory, TrackSurfaceFactory, WeatherFactory, HorseGenderFactory, TrackSurface\nfrom keiba_machine_learning.types import RaceInformation, RaceRecord\n\n# NOTE: バージョニングは必要に応じて行う\n# 例えばスクレイピング先がリニューアルされてDOMががらりと変わってしまったらこのスクリプトは使えなくなる\n# その場合、コード自体は残しておいてリニューアルされたサイトに対応するものは新しいバージョンで実装を行う\n# その際に名前空間も切る\n# (YAGNIの原則に則って今の段階では作らない)\n\n\nclass DataNotFound(Exception):\n pass\n\n\nclass IncompatibleDataDetected(Exception):\n pass\n\n\nclass RaceInformationScraper:\n @staticmethod\n def scrape(file: IO) -> RaceInformation:\n \"\"\"\n Args:\n file (IO): netkeibaのレース結果ページのHTMLファイル\n\n Returns:\n RaceInformation\n \"\"\"\n soup = BeautifulSoup(file, 'html.parser')\n\n # netkeiba側の仕様でレースデータが存在しないページにアクセスしても404をHTTPステータスコードとしてレスポンスしないので、\n # データが存在しない掲載されていないファイルが渡ってくることも想定してここでデータがない場合の制御をする\n race_result_table = soup.find('table', attrs={'summary': 'レース結果'})\n if race_result_table is None:\n raise DataNotFound\n\n title_element = soup.select_one(\n '#main > div > div > div > diary_snap > div > div > dl > dd > h1')\n text_under_the_title = soup.select_one(\n '#main > div > div > div > diary_snap > div > div > dl > dd > p > diary_snap_cut > span').get_text()\n race_number_element = soup.select_one(\n '#main > div > div > div > diary_snap > div > div > dl > dt')\n\n if s := re.search(r'(\\d{4})m', text_under_the_title):\n race_distance_by_meter = int(s.group(1))\n else:\n raise ValueError(\"can't parse race distance.\")\n\n if s := re.search(r'\\d+', race_number_element.get_text()):\n race_number = int(s.group())\n else:\n raise ValueError(\"can't parse race number.\")\n\n race_track_name = soup.select_one(\n '#main > div > div > div > ul > li > a.active').get_text()\n\n track_kind_mark = text_under_the_title[0]\n if track_kind_mark == '障':\n raise IncompatibleDataDetected\n\n track_direction_mark = text_under_the_title[1]\n if track_direction_mark == '直':\n raise IncompatibleDataDetected\n\n if s := re.search(r'天候 : (\\w+)', text_under_the_title):\n weather_mark = s.group(1)\n else:\n raise ValueError(\"can't parse weather.\")\n\n return {\n 'title': title_element.get_text(),\n 'race_track': RaceTracFactory.create(race_track_name),\n 'track_kind': TrackKindFactory.create(track_kind_mark),\n 'track_direction': TrackDirectionFactory.create(track_direction_mark),\n 'race_distance_by_meter': race_distance_by_meter,\n 'track_surface': TrackSurfaceParser.parse(text_under_the_title),\n 'weather': WeatherFactory.create(weather_mark),\n 'race_number': race_number,\n 'starts_at': datetime(2019, 7, 27, 9, 50),\n }\n\n\nclass RaceResultScraper:\n @staticmethod\n def scrape(file: IO) -> List[RaceRecord]:\n \"\"\"\n Args:\n file (IO): netkeibaのレース結果ページのHTMLファイル\n\n Returns:\n List[RaceRecord]\n \"\"\"\n soup = BeautifulSoup(file, 'html.parser')\n race_result_table = soup.find('table', attrs={'summary': 'レース結果'})\n if race_result_table is None:\n raise DataNotFound\n race_result_table_rows = race_result_table.find_all('tr')\n\n race_records = []\n\n # 
最初の要素は項目行(header)なのでスキップ\n        for row in race_result_table_rows[1:]:\n            cells = row.find_all('td')\n\n            try:\n                order_of_placing = int(cells[0].get_text())\n            except ValueError:\n                # 正常な結果として入ってくる自然数以外に\n                # 2(降) 、中、除、取 などが入ってくる\n                # 最初のものは降着とわかるが、それ以外のものはまだ意味がわかってないのでいったん記録しない\n                continue\n\n            bracket_number = int(cells[1].get_text())\n            horse_number = int(cells[2].get_text())\n\n            if s := re.search(r'horse/(\\d+)', cells[3].find('a')['href']):\n                horse_id = int(s.group(1))\n            else:\n                raise ValueError(\"can't parse horse id.\")\n\n            horse_name = cells[3].get_text().strip()\n            horse_age = int(cells[4].get_text()[1])\n            horse_gender = HorseGenderFactory.create(cells[4].get_text()[0])\n            impost = float(cells[5].get_text())\n\n            if s := re.search(r'jockey/(\\d+)', cells[6].find('a')['href']):\n                jockey_id = s.group(1)\n            else:\n                raise ValueError(\"can't parse jockey id.\")\n\n            jockey_name = cells[6].get_text().strip()\n\n            if f := re.findall(r'^(\\d{1}):(\\d{2})\\.(\\d{1})', cells[7].get_text()):\n                minute, second, split_second = [\n                    int(time_data) for time_data in f[0]]\n                race_time = (minute * 60) + second + (split_second * 0.1)\n            else:\n                raise ValueError(\"can't parse race time.\")\n\n            win_betting_ratio = float(cells[12].get_text())\n            favorite_order = int(cells[13].get_text())\n\n            if f := re.findall(r'(\\d{3})\\(([+-]?\\d{1,2})\\)', cells[14].get_text()):\n                horse_weight, weight_change = [\n                    int(weight_data) for weight_data in f[0]]\n            else:\n                raise ValueError(\"can't parse weight data.\")\n\n            race_record: RaceRecord = {\n                'order_of_placing': order_of_placing,\n                'bracket_number': bracket_number,\n                'horse_number': horse_number,\n                'horse_id': horse_id,\n                'horse_name': horse_name,\n                'horse_age': horse_age,\n                'horse_gender': horse_gender,\n                'impost': impost,\n                'jockey_id': jockey_id,\n                'jockey_name': jockey_name,\n                'race_time': race_time,\n                'win_betting_ratio': win_betting_ratio,\n                'favorite_order': favorite_order,\n                'horse_weight': horse_weight,\n                'weight_change': weight_change,\n            }\n            race_records.append(race_record)\n\n        return race_records\n\n\nclass TrackSurfaceParser:\n    @staticmethod\n    def parse(text: str) -> TrackSurface:\n        \"\"\"\n        Args:\n            text (str): レース情報のテキスト\n\n            例:\n            ダ左1200m / 天候 : 曇 / ダート : 良 / 発走 : 13:10\n\n        Returns:\n            TrackSurface\n        \"\"\"\n        if s := re.search(r'(芝|ダート) : (\\w+)', text):\n            return TrackSurfaceFactory.create(s.group(2))\n        else:\n            raise ValueError(\"can't parse track surface.\")\n" }, { "alpha_fraction": 0.3609439432621002, "alphanum_fraction": 0.4098828136920929, "avg_line_length": 34.07777786254883, "blob_id": "0ef8afd450a2d9b2b578d87128864cc281e898b8", "content_id": "455551107266fdbb9471c028c8ae1b488021fb9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6520, "license_type": "no_license", "max_line_length": 84, "num_lines": 180, "path": "/keiba_machine_learning/netkeiba/tests/test_race_result_scraping.py", "repo_name": "k0kishima/machine_learning_hands_on", "src_encoding": "UTF-8", "text": "import pytest\nimport os\n\nfrom keiba_machine_learning.models import HorseGender\nfrom keiba_machine_learning.netkeiba.scrapers import RaceResultScraper, DataNotFound\nfrom keiba_machine_learning.netkeiba.constants import ENCODING_OF_WEB_PAGE\n\nbase_path = os.path.dirname(os.path.abspath(__file__))\n\n\ndef test_to_scrape_general_race_result():\n    file_path = os.path.normpath(os.path.join(\n        base_path, \"./fixtures/201901010101.html\"))\n\n    with open(file_path, mode=\"r\", encoding=ENCODING_OF_WEB_PAGE) as file:\n        expect_data = [\n            {\n                
'order_of_placing': 1,\n 'bracket_number': 1,\n 'horse_number': 1,\n 'horse_id': 2017105318,\n 'horse_name': 'ゴルコンダ',\n 'horse_age': 2,\n 'horse_gender': HorseGender.MALE,\n 'impost': 54,\n 'jockey_id': '05339',\n 'jockey_name': 'ルメール',\n 'race_time': 108.3,\n 'win_betting_ratio': 1.4,\n 'favorite_order': 1,\n 'horse_weight': 518,\n 'weight_change': -16,\n },\n {\n 'order_of_placing': 2,\n 'bracket_number': 3,\n 'horse_number': 3,\n 'horse_id': 2017104612,\n 'horse_name': 'プントファイヤー',\n 'horse_age': 2,\n 'horse_gender': HorseGender.MALE,\n 'impost': 54,\n 'jockey_id': '05203',\n 'jockey_name': '岩田康誠',\n 'race_time': 110.1,\n 'win_betting_ratio': 3.5,\n 'favorite_order': 2,\n 'horse_weight': 496,\n 'weight_change': -8,\n },\n {\n 'order_of_placing': 3,\n 'bracket_number': 4,\n 'horse_number': 4,\n 'horse_id': 2017103879,\n 'horse_name': 'ラグリマスネグラス',\n 'horse_age': 2,\n 'horse_gender': HorseGender.MALE,\n 'impost': 51,\n 'jockey_id': '01180',\n 'jockey_name': '団野大成',\n 'race_time': 110.9,\n 'win_betting_ratio': 46.6,\n 'favorite_order': 6,\n 'horse_weight': 546,\n 'weight_change': 6,\n },\n {\n 'order_of_placing': 4,\n 'bracket_number': 8,\n 'horse_number': 9,\n 'horse_id': 2017106259,\n 'horse_name': 'キタノコドウ',\n 'horse_age': 2,\n 'horse_gender': HorseGender.MALE,\n 'impost': 51,\n 'jockey_id': '01179',\n 'jockey_name': '菅原明良',\n 'race_time': 111.5,\n 'win_betting_ratio': 56.8,\n 'favorite_order': 7,\n 'horse_weight': 458,\n 'weight_change': -8,\n },\n {\n 'order_of_placing': 5,\n 'bracket_number': 5,\n 'horse_number': 5,\n 'horse_id': 2017104140,\n 'horse_name': 'ネモフィラブルー',\n 'horse_age': 2,\n 'horse_gender': HorseGender.MALE,\n 'impost': 54,\n 'jockey_id': '01062',\n 'jockey_name': '川島信二',\n 'race_time': 111.7,\n 'win_betting_ratio': 140.3,\n 'favorite_order': 9,\n 'horse_weight': 436,\n 'weight_change': 0,\n },\n {\n 'order_of_placing': 6,\n 'bracket_number': 8,\n 'horse_number': 8,\n 'horse_id': 2017101930,\n 'horse_name': 'マイネルラクスマン',\n 'horse_age': 2,\n 'horse_gender': HorseGender.MALE,\n 'impost': 54,\n 'jockey_id': '01091',\n 'jockey_name': '丹内祐次',\n 'race_time': 112.1,\n 'win_betting_ratio': 9.7,\n 'favorite_order': 3,\n 'horse_weight': 480,\n 'weight_change': 8,\n },\n {\n 'order_of_placing': 7,\n 'bracket_number': 2,\n 'horse_number': 2,\n 'horse_id': 2017100184,\n 'horse_name': 'サンモンテベロ',\n 'horse_age': 2,\n 'horse_gender': HorseGender.FEMALE,\n 'impost': 54,\n 'jockey_id': '01109',\n 'jockey_name': '黛弘人',\n 'race_time': 112.5,\n 'win_betting_ratio': 114.7,\n 'favorite_order': 8,\n 'horse_weight': 450,\n 'weight_change': 2,\n },\n {\n 'order_of_placing': 8,\n 'bracket_number': 7,\n 'horse_number': 7,\n 'horse_id': 2017102953,\n 'horse_name': 'エスカレーション',\n 'horse_age': 2,\n 'horse_gender': HorseGender.FEMALE,\n 'impost': 54,\n 'jockey_id': '01093',\n 'jockey_name': '藤岡佑介',\n 'race_time': 112.5,\n 'win_betting_ratio': 26.1,\n 'favorite_order': 5,\n 'horse_weight': 448,\n 'weight_change': -4,\n },\n {\n 'order_of_placing': 9,\n 'bracket_number': 6,\n 'horse_number': 6,\n 'horse_id': 2017102421,\n 'horse_name': 'セイウンジュリア',\n 'horse_age': 2,\n 'horse_gender': HorseGender.FEMALE,\n 'impost': 54,\n 'jockey_id': '01032',\n 'jockey_name': '池添謙一',\n 'race_time': 112.6,\n 'win_betting_ratio': 16.4,\n 'favorite_order': 4,\n 'horse_weight': 470,\n 'weight_change': 0,\n },\n ]\n assert RaceResultScraper.scrape(file) == expect_data\n\n\ndef test_to_scrape_empty_page():\n file_path = os.path.normpath(os.path.join(\n base_path, \"./fixtures/empty_page.html\"))\n\n with open(file_path, mode=\"r\", 
encoding=ENCODING_OF_WEB_PAGE) as file:\n with pytest.raises(DataNotFound):\n assert RaceResultScraper.scrape(file)\n" }, { "alpha_fraction": 0.5042997598648071, "alphanum_fraction": 0.7014741897583008, "avg_line_length": 15.958333015441895, "blob_id": "52ef903a48bc44bf7a3cb3968f59a65de514019e", "content_id": "801a613427f0f7350e628ba764c7c5f18c5b8f2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1628, "license_type": "no_license", "max_line_length": 26, "num_lines": 96, "path": "/requirements.txt", "repo_name": "k0kishima/machine_learning_hands_on", "src_encoding": "UTF-8", "text": "anyio==2.2.0\nappnope==0.1.2\nargon2-cffi==20.1.0\nasync-generator==1.10\nattrs==20.3.0\nautopep8==1.5.6\nBabel==2.9.0\nbackcall==0.2.0\nbeautifulsoup4==4.9.3\nbleach==3.3.0\nblessings==1.7\nbpython==0.21\ncertifi==2020.12.5\ncffi==1.14.5\nchardet==4.0.0\ncurtsies==0.3.5\ncwcwidth==0.1.4\ndecorator==5.0.6\ndefusedxml==0.7.1\nentrypoints==0.3\nflake8==3.9.0\ngreenlet==1.0.0\nidna==2.10\nimbalanced-learn==0.8.0\nimblearn==0.0\niniconfig==1.1.1\nipykernel==5.5.3\nipython==7.22.0\nipython-genutils==0.2.0\njedi==0.18.0\nJinja2==2.11.3\njoblib==1.0.1\njson5==0.9.5\njsonschema==3.2.0\njupyter-client==6.1.12\njupyter-core==4.7.1\njupyter-packaging==0.7.12\njupyter-server==1.6.0\njupyterlab==3.0.13\njupyterlab-pygments==0.1.2\njupyterlab-server==2.4.0\nMarkupSafe==1.1.1\nmccabe==0.6.1\nmistune==0.8.4\nmypy==0.812\nmypy-extensions==0.4.3\nnbclassic==0.2.7\nnbclient==0.5.3\nnbconvert==6.0.7\nnbformat==5.1.3\nnest-asyncio==1.5.1\nnotebook==6.3.0\nnumpy==1.20.2\npackaging==20.9\npandas==1.2.3\npandocfilters==1.4.3\nparso==0.8.2\npexpect==4.8.0\npickleshare==0.7.5\npluggy==0.13.1\nprometheus-client==0.10.1\nprompt-toolkit==3.0.18\nptyprocess==0.7.0\npy==1.10.0\npycodestyle==2.7.0\npycparser==2.20\npydantic==1.8.1\npyflakes==2.3.1\nPygments==2.8.1\npyparsing==2.4.7\npyrsistent==0.17.3\npytest==6.2.2\npython-dateutil==2.8.1\npytz==2021.1\npyxdg==0.27\npyzmq==22.0.3\nrequests==2.25.1\nscikit-learn==0.24.1\nscipy==1.6.2\nSend2Trash==1.5.0\nsix==1.15.0\nsklearn==0.0\nsniffio==1.2.0\nsoupsieve==2.2.1\nterminado==0.9.4\ntestpath==0.4.4\nthreadpoolctl==2.1.0\ntoml==0.10.2\ntornado==6.1\ntqdm==4.59.0\ntraitlets==5.0.5\ntyped-ast==1.4.2\ntyping-extensions==3.7.4.3\nurllib3==1.26.4\nwcwidth==0.2.5\nwebencodings==0.5.1\n" }, { "alpha_fraction": 0.5258266925811768, "alphanum_fraction": 0.5377134084701538, "avg_line_length": 21.352657318115234, "blob_id": "ba3dd75cf754dc8778ed3ef53fd92a1212cbba3a", "content_id": "bedf7dab1fb5ccaf306ebeafe47a7400a9e3f072", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5553, "license_type": "no_license", "max_line_length": 78, "num_lines": 207, "path": "/keiba_machine_learning/models.py", "repo_name": "k0kishima/machine_learning_hands_on", "src_encoding": "UTF-8", "text": "from enum import Enum\nfrom pydantic import Field\nfrom pydantic.dataclasses import dataclass\nimport datetime\n\n\nclass RaceTrac(Enum):\n \"\"\"競馬場に対応するモデル\n\n 東京競馬場・阪神競馬場など\n 全10場が個々のオブジェクトに対応\n\n \"\"\"\n SAPPORO = 1\n HAKODATE = 2\n FUKUSHIMA = 3\n NIGATA = 4\n TOKYO = 5\n NAKAYAMA = 6\n CHUKYO = 7\n KYOTO = 8\n HANSHIN = 9\n KOKURA = 10\n\n\nclass RaceTracFactory:\n @staticmethod\n def create(race_track_name: str) -> RaceTrac:\n \"\"\" 文字列からRaceTracオブジェクトを生成する\n\n Args:\n race_track_name (str): 競馬場の名前\n\n Returns:\n RaceTrac: \n \"\"\"\n NAMES_INDEXED_BY_MARK_STR = {\n '札幌': 'SAPPORO',\n '函館': 'HAKODATE',\n '福島': 
'FUKUSHIMA',\n '新潟': 'NIGATA',\n '東京': 'TOKYO',\n '中山': 'NAKAYAMA',\n '中京': 'CHUKYO',\n '京都': 'KYOTO',\n '阪神': 'HANSHIN',\n '小倉': 'KOKURA',\n }\n return RaceTrac[NAMES_INDEXED_BY_MARK_STR[race_track_name]]\n\n\n@dataclass\nclass Race:\n \"\"\" 必要最低限の属性のみを保持したレースの基底モデル \"\"\"\n # ブラウザURL直打ちして2着以下も取得できた年を暫定的に指定\n # 1985年はページ自体は閲覧できるが1着しか見れない(ログインすれば見れる旨は記載されていた)\n OLDEST_READABLE_YEAR = 1986\n\n # 2020年東京競馬場を基準にURL直打ちして確認したところ5までしかなかった\n # バッファ取って以下の値とした\n MAX_SERIES_NUMBER = 7\n\n # 上記と同様\n # 9日目まではあったがバッファを取って以下の値に\n MAX_DAY_NUMBER = 10\n\n MAX_RACE_NUMBER = 12\n\n race_track: RaceTrac\n year: int = Field(ge=OLDEST_READABLE_YEAR, le=datetime.date.today().year)\n series_number: int = Field(ge=1, le=MAX_SERIES_NUMBER)\n day_number: int = Field(ge=1, le=MAX_DAY_NUMBER)\n race_number: int = Field(ge=1, le=MAX_RACE_NUMBER)\n\n\nclass Weather(Enum):\n CLOUD = 1\n FINE = 2\n RAIN = 3\n LIGHT_RAIN = 4\n LIGHT_SNOW = 5\n SNOW = 6\n\n\nclass WeatherFactory:\n @staticmethod\n def create(weather_name: str) -> Weather:\n \"\"\" 文字列からWeatherオブジェクトを生成する\n\n Args:\n weather_name (str): 曇 | 晴 | 雨 | 小雨 | 小雪 | 雪\n\n Returns:\n Weather: \n \"\"\"\n NAMES_INDEXED_BY_MARK_STR = {\n '曇': 'CLOUD',\n '晴': 'FINE',\n '雨': 'RAIN',\n '小雨': 'LIGHT_RAIN',\n '小雪': 'LIGHT_SNOW',\n '雪': 'SNOW',\n }\n return Weather[NAMES_INDEXED_BY_MARK_STR[weather_name]]\n\n\nclass TrackDirection(Enum):\n LEFT = 1\n RIGHT = 2\n\n\nclass TrackDirectionFactory:\n @staticmethod\n def create(track_direction_name: str) -> TrackDirection:\n \"\"\"文字列からTrackDirectionオブジェクトを生成する\n\n Args:\n track_direction_name (str): 右 | 左\n\n Returns:\n TrackDirection: \n \"\"\"\n NAMES_INDEXED_BY_MARK_STR = {\n '左': 'LEFT',\n '右': 'RIGHT',\n }\n return TrackDirection[NAMES_INDEXED_BY_MARK_STR[track_direction_name]]\n\n\nclass TrackKind(Enum):\n GRASS = 1\n DIRT = 2\n JUMP = 3\n\n\nclass TrackKindFactory:\n @staticmethod\n def create(track_kind_name: str) -> TrackKind:\n \"\"\"文字列からTrackKindオブジェクトを生成する\n\n Args:\n track_kind_name (str): 芝 | ダート | 障害 \n\n Returns:\n TrackKind: \n \"\"\"\n NAMES_INDEXED_BY_MARK_STR = {\n '芝': 'GRASS',\n 'ダート': 'DIRT',\n 'ダ': 'DIRT',\n '障害': 'JUMP',\n '障': 'JUMP',\n }\n return TrackKind[NAMES_INDEXED_BY_MARK_STR[track_kind_name]]\n\n\nclass TrackSurface(Enum):\n GOOD_TO_FIRM = 1 # 馬場が芝だと \"GOOD_TO_FIRM\"で、ダートだと\"Standard\"らしいが前者で統一\n GOOD = 2\n YIELDING = 3 # これもダートだと \"Muddy\" らしいが芝の用語だけを使う\n SOFT = 4 # 同じくダートだと \"Sloppy\" らしいが芝の用語だけを使う\n\n\nclass TrackSurfaceFactory:\n @staticmethod\n def create(track_surface_name: str) -> TrackSurface:\n \"\"\"文字列からTrackSurfaceオブジェクトを生成する\n\n Args:\n track_surface_name (str): 良 | 稍重 | 重 | 不良 \n\n Returns:\n TrackSurface: \n \"\"\"\n NAMES_INDEXED_BY_MARK_STR = {\n '良': 'GOOD_TO_FIRM',\n '稍重': 'GOOD',\n '重': 'YIELDING',\n '不良': 'SOFT',\n }\n return TrackSurface[NAMES_INDEXED_BY_MARK_STR[track_surface_name]]\n\n\nclass HorseGender(Enum):\n MALE = 1\n FEMALE = 2\n CASTRATED = 3\n\n\nclass HorseGenderFactory:\n @staticmethod\n def create(gender_string: str) -> HorseGender:\n \"\"\"文字列からGenderオブジェクトを生成する\n\n Args:\n gender_string (str): 牡 | 牝 | セ\n ※ セ は騸馬(去勢された牡馬)を意味する\n\n Returns:\n HorseGender\n \"\"\"\n NAMES_INDEXED_BY_MARK_STR = {\n '牡': 'MALE',\n '牝': 'FEMALE',\n 'セ': 'CASTRATED',\n }\n return HorseGender[NAMES_INDEXED_BY_MARK_STR[gender_string]]\n" }, { "alpha_fraction": 0.593339741230011, "alphanum_fraction": 0.5939801335334778, "avg_line_length": 40.63999938964844, "blob_id": "f45b547465f2b9cfc099bb7f48ebe6983f60cd0f", "content_id": "d1bbd8d306375014594d4be6833c7cc0a8c8f762", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3277, "license_type": "no_license", "max_line_length": 134, "num_lines": 75, "path": "/scripts/netkeiba/create_race_result_data_frame.py", "repo_name": "k0kishima/machine_learning_hands_on", "src_encoding": "UTF-8", "text": "\"\"\"netkeibaからダウンロードしたHTMLからpandasのDataFrameを生成するスクリプト\n\n Examples:\n ※ 実行時はパスを通すこと\n ※ 事前にファイルが用意されていること(`./download_race_pages.py` が実行済みであること)\n ※ 以下はコマンドライン上にて\n\n source venv/bin/activate\n export PYTHONPATH=\".:$PYTHONPATH\"\n python scripts/netkeiba/create_race_result_data_frame.py\n\"\"\"\n\nimport os\nfrom tqdm import tqdm\nimport pandas as pd\n\nfrom keiba_machine_learning.netkeiba.constants import RACE_DATA_DIR, ENCODING_OF_WEB_PAGE\nfrom keiba_machine_learning.netkeiba.scrapers import RaceInformationScraper, RaceResultScraper, DataNotFound, IncompatibleDataDetected\n\nrace_records = []\nfor file_name in tqdm(os.listdir(RACE_DATA_DIR)):\n file_path = os.path.normpath(os.path.join(RACE_DATA_DIR, file_name))\n\n with open(file_path, mode=\"r\", encoding=ENCODING_OF_WEB_PAGE) as file:\n try:\n race_id, _ = file_name.split('.')\n race_id = int(race_id)\n except ValueError:\n continue\n\n try:\n race_information = RaceInformationScraper.scrape(file)\n file.seek(0)\n\n rows = [{\n 'race_id': race_id,\n 'race_track': race_information['race_track'].value,\n 'track_kind': race_information['track_kind'].value,\n 'track_direction': race_information['track_direction'].value,\n 'race_distance_by_meter': race_information['race_distance_by_meter'],\n 'track_surface': race_information['track_surface'].value,\n 'weather': race_information['weather'].value,\n 'race_number': race_information['race_number'],\n 'starts_at': race_information['starts_at'],\n 'order_of_placing': race_record['order_of_placing'],\n 'bracket_number': race_record['bracket_number'],\n 'horse_number': race_record['horse_number'],\n 'horse_id': race_record['horse_id'],\n 'horse_name': race_record['horse_name'],\n 'horse_age': race_record['horse_age'],\n 'horse_gender': race_record['horse_gender'].value,\n 'impost': race_record['impost'],\n 'jockey_id': race_record['jockey_id'],\n 'jockey_name': race_record['jockey_name'],\n 'race_time': race_record['race_time'],\n 'win_betting_ratio': race_record['win_betting_ratio'],\n 'favorite_order': race_record['favorite_order'],\n 'horse_weight': race_record['horse_weight'],\n 'weight_change': race_record['weight_change'],\n } for race_record in RaceResultScraper.scrape(file)]\n\n race_records.extend(rows)\n except DataNotFound:\n pass\n except IncompatibleDataDetected:\n pass\n except Exception as e:\n print(f\"race_id: {race_id} can't parse.\")\n raise e\n\n\ndf = pd.DataFrame([], columns=race_records[0].keys())\ndf = pd.concat([df, pd.DataFrame.from_dict(race_records)])\ndf.to_pickle(os.path.normpath(os.path.join(\n RACE_DATA_DIR, 'race_results_data_frame.pickle')))\n" }, { "alpha_fraction": 0.6568291187286377, "alphanum_fraction": 0.6760466694831848, "avg_line_length": 38.378379821777344, "blob_id": "ca30f8ae09c59832f37965b35f25bff7819aba8b", "content_id": "474495accdf87aee36c994a21db0eb86b2b0d24b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1465, "license_type": "no_license", "max_line_length": 115, "num_lines": 37, "path": "/keiba_machine_learning/netkeiba/tests/test_race_information_scraping.py", "repo_name": "k0kishima/machine_learning_hands_on", "src_encoding": "UTF-8", "text": "import 
pytest\nimport os\nfrom datetime import datetime\n\nfrom keiba_machine_learning.models import RaceTrac, TrackKind, TrackDirection, TrackSurface, Weather\nfrom keiba_machine_learning.netkeiba.scrapers import RaceInformationScraper, DataNotFound, IncompatibleDataDetected\nfrom keiba_machine_learning.netkeiba.constants import ENCODING_OF_WEB_PAGE\n\nbase_path = os.path.dirname(os.path.abspath(__file__))\n\n\ndef test_to_scrape_general_race_information():\n file_path = os.path.normpath(os.path.join(\n base_path, \"./fixtures/201901010101.html\"))\n\n with open(file_path, mode=\"r\", encoding=ENCODING_OF_WEB_PAGE) as file:\n expect_data = {\n 'title': '2歳未勝利',\n 'race_track': RaceTrac.SAPPORO,\n 'track_kind': TrackKind.GRASS,\n 'track_direction': TrackDirection.RIGHT,\n 'race_distance_by_meter': 1800,\n 'track_surface': TrackSurface.GOOD_TO_FIRM,\n 'weather': Weather.CLOUD,\n 'race_number': 1,\n 'starts_at': datetime(2019, 7, 27, 9, 50),\n }\n assert RaceInformationScraper.scrape(file) == expect_data\n\n\ndef test_to_scrape_disability_race_page():\n file_path = os.path.normpath(os.path.join(\n base_path, \"./fixtures/disability_race_page.html\"))\n\n with open(file_path, mode=\"r\", encoding=ENCODING_OF_WEB_PAGE) as file:\n with pytest.raises(IncompatibleDataDetected):\n assert RaceInformationScraper.scrape(file)\n" }, { "alpha_fraction": 0.659649133682251, "alphanum_fraction": 0.7263157963752747, "avg_line_length": 34.625, "blob_id": "c9ce9199fb71a22e4fa28c3fb196a07b7a2e9f2b", "content_id": "2de8ed7e7d007b8b21d621c2d8699d7195d27af0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 285, "license_type": "no_license", "max_line_length": 61, "num_lines": 8, "path": "/keiba_machine_learning/netkeiba/tests/test_race.py", "repo_name": "k0kishima/machine_learning_hands_on", "src_encoding": "UTF-8", "text": "from keiba_machine_learning.models import RaceTrac\nfrom keiba_machine_learning.netkeiba.models import Race\n\n\ndef test_identifier():\n race = Race(year=2020, race_track=RaceTrac.SAPPORO,\n series_number=1, day_number=1, race_number=1)\n assert race.id == 202001010101\n" }, { "alpha_fraction": 0.7189189195632935, "alphanum_fraction": 0.7189189195632935, "avg_line_length": 25.428571701049805, "blob_id": "b2b4983bc3e558621dd74617ffa9bbcc7415dbef", "content_id": "aa3ab0a7746dddb7351530e76a421531c6e66e29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 185, "license_type": "no_license", "max_line_length": 58, "num_lines": 7, "path": "/keiba_machine_learning/netkeiba/constants.py", "repo_name": "k0kishima/machine_learning_hands_on", "src_encoding": "UTF-8", "text": "import os\nfrom constants import DATA_DIR\n\nDATABASE_PAGE_BASE_URL = \"https://db.netkeiba.com\"\nENCODING_OF_WEB_PAGE = \"EUC-JP\"\n\nRACE_DATA_DIR = os.path.join(DATA_DIR, \"netkeiba\", \"race\")\n" } ]
12
Babatunde-Odunlami/fizzbuzz
https://github.com/Babatunde-Odunlami/fizzbuzz
997c4f8419fb2a3a5b861e453c5c6525f007b76c
944f72736bc814d620c6c26565575a2632903b7e
16182f0ed24bb66138c63a9d0caa3286d0a6666f
refs/heads/master
2023-06-23T15:14:10.696032
2021-07-25T20:40:41
2021-07-25T20:40:41
389,433,928
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6521739363670349, "alphanum_fraction": 0.6763284802436829, "avg_line_length": 23.352941513061523, "blob_id": "64f09abdf265c82ae88023fd4391e1dfcc19e020", "content_id": "aa92bb34c2a8bd34de0343e6132bcb5ae96678ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 417, "license_type": "no_license", "max_line_length": 82, "num_lines": 17, "path": "/FizzBuzz_challenge.py", "repo_name": "Babatunde-Odunlami/fizzbuzz", "src_encoding": "UTF-8", "text": "#Write your code below this row 👇\n\n#The Fizzbuzz game challenge\nn = input(\"Enter the maximum number for the range in the fizzbuzz competition \\n\")\n#converting the user input to integer\nnum = int(n) +1\nfor number in range (1, num):\n\tif number%3 == 0 and number%5 == 0:\n\t\tprint(\"FizzBuzz\")\n\telif number % 3 == 0:\n\t\tprint(\"Fizz\")\n\telif number % 5 == 0:\n\t\tprint(\"Buzz\")\n\telse:\n\t\tprint(number)\n\n#This is just a comment\n" } ]
1
cbf02000/handwriting-ocr
https://github.com/cbf02000/handwriting-ocr
bd92daaace0a15f49355f7d8d584a8eddc4a1dbe
5f91e39d7708afd75806ffa5dba4469c82cc06b4
099409f9f6bf716d5cda66ac8b4e4f7a859bf4a1
refs/heads/master
2021-01-23T14:56:04.376760
2014-01-30T17:55:26
2014-01-30T17:55:26
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6047143936157227, "alphanum_fraction": 0.6269265413284302, "avg_line_length": 28.399999618530273, "blob_id": "51ccd8dc625b827b4b72c2873144332bbd9f1107", "content_id": "bce6d35f3e71f44e1a14ed9795f265ff6626e5be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2206, "license_type": "no_license", "max_line_length": 116, "num_lines": 75, "path": "/tesseract-test.py", "repo_name": "cbf02000/handwriting-ocr", "src_encoding": "UTF-8", "text": "#!/opt/local/bin/python2.7\n# -*- coding: utf-8 -*-\n\nimport tesseract\nimport tornado.ioloop\nimport tornado.web\nimport tornado.options\nimport time\nimport StringIO\nimport sys\nimport os\nfrom PIL import Image\nimport cv2\nimport cv2.cv as cv\nimport json\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self):\n self.write(\"Hello, world\")\n\nclass OcrHandler(tornado.web.RequestHandler):\n def post(self):\n\n fileBody = self.request.files['image'][0]['body']\n img = Image.open(StringIO.StringIO(fileBody))\n seq = 0\n path = \"./ocr-images/\" + str(int(time.time())) + \"-\" + str(seq) + \".\" + str(img.format).lower()\n\n while os.path.exists(path):\n seq += 1\n path = \"./ocr-images/\" + str(int(time.time())) + \"-\" + str(seq) + \".\" + str(img.format).lower()\n\n img.save(path, img.format)\n api = tesseract.TessBaseAPI()\n api.Init(\".\",\"eng\",tesseract.OEM_DEFAULT)\n api.SetVariable(\"tessedit_char_whitelist\", \"0123456789ABCDEFGHIJKLMNOPQRSTUVMXYZabcdefghijklmnopqrstuvwxyz\")\n api.SetPageSegMode(tesseract.PSM_AUTO)\n\n image0=cv2.imread(path, cv2.CV_LOAD_IMAGE_GRAYSCALE)\n \n threshold = 127\n image0 = cv2.threshold(image0, threshold, 255, cv2.THRESH_BINARY)[1]\n height,width = image0.shape\n\n #cv2.namedWindow(\"Test\")\n #cv2.imshow(\"Test\", image0)\n #cv2.waitKey(0)\n #cv2.destroyWindow(\"Test\")\n\n iplimage = cv.CreateImageHeader((width,height), cv.IPL_DEPTH_8U, 1)\n cv.SetData(iplimage, image0.tostring(),image0.dtype.itemsize * 1 * (width))\n tesseract.SetCvImage(iplimage,api)\n\n tesseract.SetCvImage(iplimage,api)\n resObj = dict()\n\n resObj['text'] = api.GetUTF8Text().replace(\"\\n\", \" \")\n resObj['confidence'] = api.MeanTextConf()\n\n\n print resObj\n\n self.set_header(\"Content-Type\", \"application/json\")\n self.write(json.dumps(resObj))\n self.finish()\n\napplication = tornado.web.Application([\n (r\"/\", MainHandler),\n (r\"/ocr\", OcrHandler),\n])\n\nif __name__ == \"__main__\":\n application.listen(8888)\n tornado.options.parse_command_line()\n tornado.ioloop.IOLoop.instance().start()\n\n" }, { "alpha_fraction": 0.7616387605667114, "alphanum_fraction": 0.7895717024803162, "avg_line_length": 32.4375, "blob_id": "512ec8d522a33a708d5490ae308b46af263c08fc", "content_id": "6ec4380a3a2a594023dc36c199020ff97aa15704", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 537, "license_type": "no_license", "max_line_length": 82, "num_lines": 16, "path": "/python-tesseract/test_.py", "repo_name": "cbf02000/handwriting-ocr", "src_encoding": "UTF-8", "text": "import tesseract\napi = tesseract.TessBaseAPI()\napi.Init(\".\",\"eng\",tesseract.OEM_DEFAULT)\napi.SetVariable(\"tessedit_char_whitelist\", \"0123456789abcdefghijklmnopqrstuvwxyz\")\napi.SetPageSegMode(tesseract.PSM_AUTO)\n\nmImgFile = \"eurotext.jpg\"\nmBuffer=open(mImgFile,\"rb\").read()\nresult = tesseract.ProcessPagesBuffer(mBuffer,len(mBuffer),api)\nprint 
\"result(ProcessPagesBuffer)=\",result\nintPtr=api.AllWordConfidences()\nprint str(intPtr)\npyPtr=tesseract.cdata(intPtr,100)\nfor i in range(10):\n\tprint ord(pyPtr[i])\ntesseract.delete_intp(intPtr) \n\n" }, { "alpha_fraction": 0.6557376980781555, "alphanum_fraction": 0.6680327653884888, "avg_line_length": 16.428571701049805, "blob_id": "b8ad030a333484836985d9886aa95ab51e691260", "content_id": "0426a989cfabc7befb3217cc64f9f74543d412ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 244, "license_type": "no_license", "max_line_length": 32, "num_lines": 14, "path": "/python-tesseract/config.h", "repo_name": "cbf02000/handwriting-ocr", "src_encoding": "UTF-8", "text": "#pragma once\n#ifndef __darwin__\n\t#define __darwin__\n#endif\n#include \"fmemopen.h\"\n#ifndef __opencv2__\n\t#define __opencv2__\n#endif\n#include <opencv2/core/core_c.h>\n#ifndef __opencv__\n\t#define __opencv__\n#endif\n#include <cv.h>\n#include <Python.h>\n" }, { "alpha_fraction": 0.60317462682724, "alphanum_fraction": 0.60317462682724, "avg_line_length": 14.75, "blob_id": "a8ab3d35da64200d55c909c756c31d4b8be738d8", "content_id": "a719e1d83cc795f28d519258986d325554f44d28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 63, "license_type": "no_license", "max_line_length": 29, "num_lines": 4, "path": "/README.md", "repo_name": "cbf02000/handwriting-ocr", "src_encoding": "UTF-8", "text": "handwriting-ocr\n===============\n\nTesseract + python web server\n" } ]
4
codehering/data_challanges_2021
https://github.com/codehering/data_challanges_2021
d1ff55f285d2f5736b0c6caf76d658e72ace8e48
d2548a4e06f2514e269526fe926218d705009194
f1b23b6e2948d3bbe0305da7a18ce3fe1e28f8f8
refs/heads/main
2023-06-01T20:34:30.398970
2021-07-10T20:17:04
2021-07-10T20:17:04
361,468,012
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6602032780647278, "alphanum_fraction": 0.682877242565155, "avg_line_length": 27.941177368164062, "blob_id": "2207cbaaacec230f0fb7d6ed2850efb59c67ae41", "content_id": "cc9a97be2b754f1d10ab782a24af45f8805583cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6395, "license_type": "no_license", "max_line_length": 141, "num_lines": 221, "path": "/experiments/vanilla_python/explorative/dimension_reduction_algorithms.py", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 4 11:51:30 2021\nThis script was used to explore different dimension reduction algorithms like UMAP, truncated SVD, t-SNE or a neural network as autoencoder. \n@author: annalena, freddy\n\"\"\"\nimport pandas as pd\nimport umap.umap_ as umap\nimport umap.utils\nimport umap.plot\nfrom datetime import datetime\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\ndata = pd.read_csv('data\\\\analysis_dataset_w_material.csv', sep=\";\")\ndel data[\"mindiam\"]\ndel data[\"axis\"]\ndel data[\"Unnamed: 0\"]\ndata = data.dropna()\ndata.shape\ndel data[\"findspot\"]\ncoins = data[\"coin\"].to_list()\ndata[\"material\"] = data[\"material\"].astype(\"category\")\ndata[\"material\"] = data[\"material\"].cat.codes\nfor c in data.columns:\n try:\n data[c] = data[c].astype(float)\n except:\n print(c)\ndel data[\"coin\"]\nscaled_data = StandardScaler().fit_transform(data)\nscaled_data = pd.DataFrame(scaled_data)\nscaled_data[\"coin\"] = coins\ndesign_data = pd.read_csv(\"2021_06_01_DC_NLP_CNT\\\\design_dummys.csv\", sep=\";\")\nsvd = TruncatedSVD(n_components=2, n_iter=20, random_state=42)\ndesign_coins = design_data[\"id_coin\"].to_list()\ndel design_data[\"id_coin\"]\ndel design_data[\"Unnamed: 0\"]\nsvd.fit(design_data)\nnew = svd.transform(design_data)\nnew = pd.DataFrame(new)\nnew[\"id_coin\"] = design_coins\ncomplete_data = pd.merge(scaled_data, new, how=\"left\", left_on=\"coin\", right_on=\"id_coin\")\nprint(complete_data.shape)\ncomplete_data = complete_data.dropna()\ncoins = complete_data[\"coin\"]\ndel complete_data[\"coin\"]\n\ndel complete_data[\"id_coin\"]\nprint(complete_data.shape)\n\n\nfrom sklearn.decomposition import TruncatedSVD\n\n#mapper = umap.UMAP(metric='cosine', random_state=42, init='random').fit(complete_data) not working due to gpu\nsvd = TruncatedSVD(n_components=50, n_iter=10, random_state=42)\nsvd.fit(complete_data)\nnew = svd.transform(complete_data)\n\nimport matplotlib.pyplot as plt\nplt.scatter(\n new[:, 0],\n new[:, 1])\nplt.gca().set_aspect('equal', 'datalim')\n\nr2 = umap.UMAP(random_state=42)\nr2.fit(new)\nembedding2 = r.transform(new)\n\nplt.scatter(\n embedding2[:, 0],\n embedding2[:, 1])\nplt.gca().set_aspect('equal', 'datalim')\n\nx2, y2 = cross_validation(new)\nplt.scatter(\n x2,\n y2)\nplt.gca().set_aspect('equal', 'datalim')\n\nr = umap.UMAP(random_state=42)\nr.fit(complete_data)\nembedding = r.transform(complete_data)\nplt.scatter(\n embedding[:, 0],\n embedding[:, 1])\nplt.gca().set_aspect('equal', 'datalim')\n\nimport random\ndef cross_validation(d, k=40):\n seeds = random.sample(range(0, 100000), k)\n embedding_x = 0\n embedding_y = 0\n for seed in seeds:\n r = umap.UMAP(random_state=seed)\n r.fit(d)\n embedding = r.transform(d)\n embedding_x += embedding[:, 0]\n embedding_y += embedding[:, 1]\n embedding_x = embedding_x / k\n embedding_y = embedding_y / k\n return embedding_x, 
embedding_y\n\nx2, y2 = cross_validation(new)\nplt.scatter(\n    x2,\n    y2)\nplt.gca().set_aspect('equal', 'datalim')\n\nx, y = cross_validation(complete_data, k=20)\n\nplt.scatter(\n    x,\n    y)\nplt.gca().set_aspect('equal', 'datalim')\n\nfrom sklearn.cluster import DBSCAN\n\ncluster_dataset = pd.DataFrame({\"x\": x, \"y\": y})\nclustering = DBSCAN(eps=1, min_samples=10).fit(cluster_dataset)\nlabels = clustering.labels_\ncluster_dataset[\"label\"] = labels\n\nplt.scatter(\n    cluster_dataset[\"x\"],\n    cluster_dataset[\"y\"], c=cluster_dataset[\"label\"])\nplt.gca().set_aspect('equal', 'datalim')\n\n\n\n\nplt.scatter(\n    embedding[:, 0],\n    embedding[:, 1])\nplt.gca().set_aspect('equal', 'datalim')\n\n#try autoencoder\nfrom tensorflow.keras.layers import Dense, Input\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.callbacks import ModelCheckpoint\n\ndef get_autoencoder(dims, act='relu'):\n    n_stacks = len(dims) - 1\n    x = Input(shape=(dims[0],), name='input')\n\n    h = x\n    for i in range(n_stacks - 1):\n        h = Dense(dims[i + 1], activation=act, name='encoder_%d' % i)(h)\n\n    h = Dense(dims[-1], name='encoder_%d' % (n_stacks - 1))(h)\n    for i in range(n_stacks - 1, 0, -1):\n        h = Dense(dims[i], activation=act, name='decoder_%d' % i)(h)\n\n    h = Dense(dims[0], name='decoder_0')(h)\n\n    model = Model(inputs=x, outputs=h)\n    model.summary()\n    return model\nX = complete_data.values\nX_train, X_test = train_test_split(X, test_size=0.2, random_state=11)\nbatch_size = 32\npretrain_epochs = 10\nencoded_dimensions = 50\nshape = [X.shape[-1], 1000, 1000, 500, encoded_dimensions]\nautoencoder = get_autoencoder(shape)\nprint(shape)\n\nencoded_layer = f'encoder_{(len(shape) - 2)}'\n\nprint(f'taking the last encoder layer:{encoded_layer}')\n\nhidden_encoder_layer = autoencoder.get_layer(name=encoded_layer).output\nencoder = Model(inputs=autoencoder.input, outputs=hidden_encoder_layer)\nautoencoder.compile(loss='mse', optimizer='adam')\n#train\nmodel_series = 'CLS_MODEL_' + datetime.now().strftime(\"%h%d%Y-%H%M\")\n\ncheckpointer = ModelCheckpoint(filepath=f\"{model_series}-model.h5\", verbose=0, save_best_only=True)\n\nautoencoder.fit(\n    X_train,\n    X_train,\n    batch_size=batch_size,\n    epochs=pretrain_epochs,\n    verbose=1,\n    validation_data=(X_test, X_test),\n    callbacks=[checkpointer]\n)\n\nautoencoder = load_model(f\"{model_series}-model.h5\")\n\nweights_name = 'weights/' + model_series + \"-\" + str(pretrain_epochs) + '-ae_weights.h5'\nautoencoder.save_weights(weights_name)\n\nX_encoded = encoder.predict(X)\n\n\ndef learn_manifold(x_data, umap_min_dist=0.00, umap_metric='euclidean', umap_dim=10, umap_neighbors=30):\n    md = float(umap_min_dist)\n    return umap.UMAP(\n        random_state=0,\n        metric=umap_metric,\n        n_components=umap_dim,\n        n_neighbors=umap_neighbors,\n        min_dist=md).fit_transform(x_data)\n\nX_reduced = learn_manifold(X_encoded, umap_neighbors=30, umap_dim=2)#int(encoded_dimensions/2))\nplt.scatter(\n    X_reduced[:, 0],\n    X_reduced[:, 1])\nplt.gca().set_aspect('equal', 'datalim')\n\n\n#check tsne:\nfrom sklearn.manifold import TSNE \nimport time\ncat_dummys = pd.read_csv(\"2021_06_01_DC_NLP_CNT\\\\entity_cats_prepared.csv\", sep=\";\")\ndel cat_dummys[\"Unnamed: 0\"]\ndel cat_dummys[\"id_coin\"]\ntime_start = time.time()\ntsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300)\ntsne_results = tsne.fit_transform(cat_dummys)\nprint('t-SNE done! 
Time elapsed: {} seconds'.format(time.time()-time_start))\nplt.scatter(\n    tsne_results[:, 0],\n    tsne_results[:, 1])\nplt.gca().set_aspect('equal', 'datalim')" }, { "alpha_fraction": 0.7092651724815369, "alphanum_fraction": 0.7232428193092346, "avg_line_length": 27.79310417175293, "blob_id": "7dddb19b663822f1d12bd145bf3f39662e7809ff", "content_id": "81e180eaaf36c2d88f4787ebd6a9792196eb1421", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2504, "license_type": "no_license", "max_line_length": 87, "num_lines": 87, "path": "/experiments/vanilla_python/explorative/dbscan.py", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 20 11:23:45 2021\nThe aim of this script was to understand the DBSCAN clustering algorithm.\n@author: annalena, freddy\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.experimental import enable_iterative_imputer\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.impute import IterativeImputer\nfrom sklearn.decomposition import PCA\nfrom sklearn import preprocessing\nimport matplotlib.pyplot as plt\ndata = pd.read_csv(\"data\\\\analysis_dataset.csv\", sep=\";\")\ndel data[\"Unnamed: 0\"]\ncoins = data[\"coin\"].to_list()\ndel data[\"coin\"]\ndel data[\"axis\"]\ndel data[\"mindiam\"]\ndel data[\"findspot\"]\n\nfor col in data.columns:\n    data[col] = [str(x).replace(\",\",\".\") for x in data[col]]\n    data[col] = data[col].astype(float)\nimp = IterativeImputer(max_iter=10, random_state=0)\n#imp.fit(data)\n\n\nimp_data = data.copy()\nhead = data.columns\nfor col in data.columns:\n    imp.fit(np.array(data[col]).reshape(-1,1))\n    imp_data[col] = imp.transform(np.array(data[col]).reshape(-1,1))\n    #to do single transformation\n#imp_data = imp.transform(data)\nnormalize_cols = [\"maxdiam\", \"weight\", \"enddate\", \"startdate\"]\nimp_data = pd.DataFrame(imp_data, columns=head)\n\n#pca start\npca = PCA(n_components=5)\npca.fit(imp_data)\nX_pca = pca.transform(imp_data)\n\n\npca.explained_variance_ratio_\n\nimp_data_numerical = imp_data[normalize_cols]\nimp_data = imp_data.drop(normalize_cols, axis=1)\nscale = StandardScaler()\nimp_data_numerical = scale.fit_transform(imp_data_numerical)\nimp_data_numerical = pd.DataFrame(data=imp_data_numerical, columns=normalize_cols)\nimp_data = imp_data.join(imp_data_numerical, how='outer')\n\nimp_data_numerical.corr()\nimp_data_sample = imp_data[[\"maxdiam\", \"startdate\"]]\nclustering = DBSCAN(eps=3, min_samples=10).fit(X_pca)\n\nlabels = clustering.labels_\ndata[\"labels\"] = labels\nunique_sampels = set(labels.tolist())\ndata_notnan = data[data[\"labels\"]!=-1]\nimp_data[\"labels\"] = labels\ndata_nans = imp_data[imp_data[\"labels\"]==-1]\n# another round of clustering:\ndel data_nans[\"labels\"]\npca = PCA(n_components=5)\npca.fit(data_nans)\nX_pca_nans = pca.transform(data_nans)\nclustering = DBSCAN(eps=5, min_samples=10).fit(X_pca_nans)\nlabels = clustering.labels_\nunique_sampels_2 = set(labels.tolist())\n\nplt.scatter(data_notnan.maxdiam, data_notnan.weight, c=data_notnan.labels, alpha = 0.6)\nplt.show()\n\n\n\n\n\n\nsamples = dict()\nfor label in labels:\n    tmp = data[data[\"labels\"]==label]\n    samples[label] = tmp.describe()" }, { "alpha_fraction": 0.679088294506073, "alphanum_fraction": 0.6971780061721802, "avg_line_length": 29.373626708984375, "blob_id": "d44fd34fe8ed063212bc8ada8684e04d8bc8e4b9", "content_id": 
"fd5c1f8ca9b45f76b5acc435d2e0de86b903c645", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2764, "license_type": "no_license", "max_line_length": 173, "num_lines": 91, "path": "/experiments/vanilla_python/explorative/mixture_models.py", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 23 15:45:25 2021\nThe aim of this script was to understand Gaussian and Bayesian mixture models (GME/BME) for clustering.\nWith this script we were able to get a feel for GME/BME and we came to the conclusion that GME/BME offers no added value as a cluster algorithm compared to kmeans or dbscan.\nFor this reason, GME/BME was not used for the analysis part. \n@author: freddy, annalena\n\"\"\"\n\nimport pandas as pd\nfrom sklearn.mixture import GaussianMixture, BayesianGaussianMixture\nfrom sklearn.experimental import enable_iterative_imputer\nfrom sklearn.impute import IterativeImputer\nfrom sklearn.decomposition import PCA\nimport numpy as np\nimport matplotlib.pyplot as plt\ndata = pd.read_csv(\"data\\\\analysis_dataset.csv\", sep=\";\")\n\ndel data[\"Unnamed: 0\"]\ndel data[\"coin\"]\ndel data[\"axis\"]\ndel data[\"mindiam\"]\ndel data[\"findspot\"]\n\n\nimp = IterativeImputer(max_iter=10, random_state=0)\nimp_data = data.copy()\nhead = data.columns\nfor col in data.columns:\n imp.fit(np.array(data[col]).reshape(-1,1))\n imp_data[col] = imp.transform(np.array(data[col]).reshape(-1,1))\n \n\n# Set up a range of cluster numbers to try\nn_range = range(2,11)\n\n# Create empty lists to store the BIC and AIC values\nbic_score = []\naic_score = []\nX = imp_data.copy()\n# Loop through the range and fit a model\nfor n in n_range:\n gm = GaussianMixture(n_components=n, \n random_state=123, \n n_init=10)\n gm.fit(X)\n \n # Append the BIC and AIC to the respective lists\n bic_score.append(gm.bic(X))\n aic_score.append(gm.aic(X))\n \n# Plot the BIC and AIC values together\nfig, ax = plt.subplots(figsize=(12,8),nrows=1)\nax.plot(n_range, bic_score, '-o', color='orange')\nax.plot(n_range, aic_score, '-o', color='green')\nax.set(xlabel='Number of Clusters', ylabel='Score')\nax.set_xticks(n_range)\nax.set_title('BIC and AIC Scores Per Number Of Clusters')\n\nmodel = GaussianMixture(n_components=2)\nmodel.fit(X)\nyhat = model.predict(X)\nclusters = set(yhat)\nfrom numpy import where\nfor cluster in clusters:\n\t# get row indexes for samples with this cluster\n\trow_ix = where(yhat == cluster)\n\t# create scatter of these samples\n\tplt.scatter(X[row_ix, 0], X[row_ix, 1])\n# show the plot\nplt.show()\n\npca = PCA(n_components=2)\npca.fit(X)\nX_pca = pca.transform(X)\n\n\n\nbgm = BayesianGaussianMixture(n_components=6, n_init=10)\nbgm.fit(X_pca)\ntest = bgm.predict(X_pca)\ndf_pca = pd.DataFrame(X_pca, columns=[\"PCA1\", \"PCA2\"])\ndf_pca[\"labels\"] = test\ndata[\"labels\"] = test\nplt.scatter(df_pca.PCA1, df_pca.PCA2, c=df_pca.labels, alpha = 0.6)\nplt.show()\nfor i in range(6):\n tmp = data[data[\"labels\"]==i]\n plt.scatter(tmp.maxdiam, tmp.weight, c=tmp.labels, alpha = 0.6)\n plt.show()\nnp.round(bgm.weights_, 2)\n" }, { "alpha_fraction": 0.7136150002479553, "alphanum_fraction": 0.7183098793029785, "avg_line_length": 70, "blob_id": "67f9cf1984d319ff8b358126a631da0b6f73d1eb", "content_id": "23079bd9a222414817e58386a96fbc8910a76664", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 213, "license_type": "no_license", 
"max_line_length": 144, "num_lines": 3, "path": "/dashboard/install_all_packages.R", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "#this files installes all necessary packages for R shiny dashboard\n\ninstall.packages(c(\"shiny\", \"shinydashboard\", \"ggplot2\", \"plotly\", \"dplyr\", \"DT\", \"stringr\", \"randomForest\", \"shinyjs\", \"sodium\", \"colorspace\"))\n" }, { "alpha_fraction": 0.6242956519126892, "alphanum_fraction": 0.6909181475639343, "avg_line_length": 61.20618438720703, "blob_id": "89b53b8552dea33e37d61a8db221a0c014676ae6", "content_id": "6b21625a735dc73b0635285db069e9deadfb9bdb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 6034, "license_type": "no_license", "max_line_length": 162, "num_lines": 97, "path": "/dashboard/frontend/etl.R", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "#backend etl script for cnn dashboard\nlibrary(stringr)\nlibrary(dplyr)\ndata <- readRDS(\"cnn/cnn_full.rds\")\ngeo <- read.csv(\"cnn/mint_geo.csv\")\ngeo <- geo[c(\"mint\", \"lat\", \"lon\")]\nabs_path <- \"C:/Users/fredi/Desktop/Uni/Data Challanges/CN/\"\ndata$mint <- str_trim(data$mint)\ndata <- merge(data, geo, all.x=T, by=\"mint\")\n\n#data_400bc <- read.csv2(paste0(abs_path,\"timeperiod/new/\",\"data_400bc_labels.csv\"), dec = \".\")\n#data_200bc <- read.csv2(paste0(abs_path,\"timeperiod/new/\",\"data_200bc_labels.csv\"), dec = \".\")\n#data_0bc <- read.csv2(paste0(abs_path,\"timeperiod/new/\",\"data_0bc_labels.csv\"), dec = \".\")\n#data_0ad <- read.csv2(paste0(abs_path,\"timeperiod/new/\",\"data_0ad_labels.csv\"), dec = \".\")\n#data_200ad <- read.csv2(paste0(abs_path,\"timeperiod/new/\",\"data_200ad_labels.csv\"), dec = \".\")\n\ndata_400bc <- read.csv(paste0(abs_path,\"timeperiod/new/\",\"data_400bc_labels.csv\"))\ndata_200bc <- read.csv(paste0(abs_path,\"timeperiod/new/\",\"data_200bc_labels.csv\"))\ndata_0bc <- read.csv(paste0(abs_path,\"timeperiod/new/\",\"data_0bc_labels.csv\"))\ndata_0ad <- read.csv(paste0(abs_path,\"timeperiod/new/\",\"data_0ad_labels.csv\"))\ndata_200ad <- read.csv(paste0(abs_path,\"timeperiod/new/\",\"data_200ad_labels.csv\"))\n\n\ndata <- subset(data, select=-c(X.1,Unnamed..0, findsport, X))\n#data_400bc <- subset(data_400bc, select=-c(Unnamed..0,Unnamed..0.1))\n#data_200bc <- subset(data_200bc, select=-c(Unnamed..0))\n#data_0bc <- subset(data_0bc, select=-c(Unnamed..0))\n#data_0ad <- subset(data_0ad, select=-c(Unnamed..0))\n#data_200ad <- subset(data_200ad, select=-c(Unnamed..0))\n\ndata_full_400bc <- merge( data_400bc, data, by=\"coin\", all.x=T)\ndata_full_200bc <- merge( data_200bc, data, by=\"coin\", all.x=T)\ndata_full_0bc <- merge( data_0bc, data, by=\"coin\", all.x=T)\ndata_full_0ad <- merge( data_0ad, data, by=\"coin\", all.x=T)\ndata_full_200ad <- merge( data_200ad, data, by=\"coin\", all.x=T)\ndata_full_400bc$n <- 1:nrow(data_full_400bc)\ndata_full_200bc$n <- 1:nrow(data_full_200bc)\ndata_full_0bc$n <- 1:nrow(data_full_0bc)\ndata_full_0ad$n <- 1:nrow(data_full_0ad)\ndata_full_200ad$n <- 1:nrow(data_full_200ad)\n\nsaveRDS(data_full_400bc, file = \"cnn/data_full_400bc.rds\")\nsaveRDS(data_full_200bc, file = \"cnn/data_full_200bc.rds\")\nsaveRDS(data_full_0bc, file = \"cnn/data_full_0bc.rds\")\nsaveRDS(data_full_0ad, file = \"cnn/data_full_0ad.rds\")\nsaveRDS(data_full_200ad, file = \"cnn/data_full_200ad.rds\")\n\nentitys <- read.csv2(\"cnn/design_data2.csv\", dec= \".\", encoding = \"UTF-8\")\nentitys <- 
entitys[entitys$Label_Entity != \"VERBS\",]\n\nentity_400bc <- entitys[entitys$id_coin %in% unique(data_400bc$coin),]\nentity_400bc <- merge(entity_400bc, data_full_400bc[c(\"coin\", \"x\", \"y\", \"kmeans_label\", \"dbscan_label\", \"hierarchy_label\")], all.x=T, by.x=\"id_coin\", by.y=\"coin\")\nentity_200bc <- entitys[entitys$id_coin %in% unique(data_200bc$coin),]\nentity_200bc <- merge(entity_200bc, data_full_200bc[c(\"coin\", \"x\", \"y\", \"kmeans_label\", \"dbscan_label\", \"hierarchy_label\")], all.x=T, by.x=\"id_coin\", by.y=\"coin\")\nentity_0bc <- entitys[entitys$id_coin %in% unique(data_0bc$coin),]\nentity_0bc <- merge(entity_0bc, data_full_0bc[c(\"coin\", \"x\", \"y\", \"kmeans_label\", \"dbscan_label\", \"hierarchy_label\")], all.x=T, by.x=\"id_coin\", by.y=\"coin\")\nentity_0ad <- entitys[entitys$id_coin %in% unique(data_0ad$coin),]\nentity_0ad <- merge(entity_0ad, data_full_0ad[c(\"coin\", \"x\", \"y\", \"kmeans_label\", \"dbscan_label\", \"hierarchy_label\")], all.x=T, by.x=\"id_coin\", by.y=\"coin\")\nentity_200ad <- entitys[entitys$id_coin %in% unique(data_200ad$coin),]\nentity_200ad <- merge(entity_200ad, data_full_200ad[c(\"coin\", \"x\", \"y\", \"kmeans_label\", \"dbscan_label\", \"hierarchy_label\")], all.x=T, by.x=\"id_coin\", by.y=\"coin\")\n\nsaveRDS(entity_400bc, file = \"cnn/entity_400bc.rds\")\nsaveRDS(entity_200bc, file = \"cnn/entity_200bc.rds\")\nsaveRDS(entity_0bc, file = \"cnn/entity_0bc.rds\")\nsaveRDS(entity_0ad, file = \"cnn/entity_0ad.rds\")\nsaveRDS(entity_200ad, file = \"cnn/entity_200ad.rds\")\n\n\nentity_400bc <- entitys[entitys$id_coin %in% unique(data_400bc$coin),]\nentity_400bc_detail <- merge(entity_400bc, data_full_400bc[c(\"coin\", \"x\", \"y\", \"startdate\", \"enddate\")], all.x=T, by.x=\"id_coin\", by.y=\"coin\")\nentity_200bc<- entitys[entitys$id_coin %in% unique(data_200bc$coin),]\nentity_200bc_detail <- merge(entity_200bc, data_full_200bc[c(\"coin\", \"x\", \"y\", \"startdate\", \"enddate\")], all.x=T, by.x=\"id_coin\", by.y=\"coin\")\nentity_0bc <- entitys[entitys$id_coin %in% unique(data_0bc$coin),]\nentity_0bc_detail <- merge(entity_0bc, data_full_0bc[c(\"coin\", \"x\", \"y\", \"startdate\", \"enddate\")], all.x=T, by.x=\"id_coin\", by.y=\"coin\")\nentity_0ad <- entitys[entitys$id_coin %in% unique(data_0ad$coin),]\nentity_0ad_detail <- merge(entity_0ad, data_full_0ad[c(\"coin\", \"x\", \"y\", \"startdate\", \"enddate\")], all.x=T, by.x=\"id_coin\", by.y=\"coin\")\nentity_200ad <- entitys[entitys$id_coin %in% unique(data_200ad$coin),]\nentity_200ad_detail <- merge(entity_200ad, data_full_200ad[c(\"coin\", \"x\", \"y\", \"startdate\", \"enddate\")], all.x=T, by.x=\"id_coin\", by.y=\"coin\")\n\nentity_400bc_detail$d_startdate <- floor(entity_400bc_detail$startdate/10)*10\nentity_200bc_detail$d_startdate <- floor(entity_200bc_detail$startdate/10)*10\nentity_0bc_detail$d_startdate <- floor(entity_0bc_detail$startdate/10)*10\nentity_0ad_detail$d_startdate <- floor(entity_0ad_detail$startdate/10)*10\nentity_200ad_detail$d_startdate <- floor(entity_200ad_detail$startdate/10)*10\n\nentity_400bc_detail$d_enddate <- floor(entity_400bc_detail$enddate/10)*10\nentity_200bc_detail$d_enddate <- floor(entity_200bc_detail$enddate/10)*10\nentity_0bc_detail$d_enddate <- floor(entity_0bc_detail$enddate/10)*10\nentity_0ad_detail$d_enddate <- floor(entity_0ad_detail$enddate/10)*10\nentity_200ad_detail$d_enddate <- floor(entity_200ad_detail$enddate/10)*10\n\n\nsaveRDS(entity_400bc_detail, file = \"cnn/entity_400bc_detail.rds\")\nsaveRDS(entity_200bc_detail, 
file = \"cnn/entity_200bc_detail.rds\")\nsaveRDS(entity_0bc_detail, file = \"cnn/entity_0bc_detail.rds\")\nsaveRDS(entity_0ad_detail, file = \"cnn/entity_0ad_detail.rds\")\nsaveRDS(entity_200ad_detail, file = \"cnn/entity_200ad_detail.rds\")\n" }, { "alpha_fraction": 0.6478186249732971, "alphanum_fraction": 0.673937976360321, "avg_line_length": 35.66315841674805, "blob_id": "a090884bb979ed3bc49a07031da886941895d953", "content_id": "58184d442dfa82928b43884475ba2aaf1ce28d58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3484, "license_type": "no_license", "max_line_length": 235, "num_lines": 95, "path": "/dashboard/backend/timperiods_create_all_labels.py", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 2 18:58:24 2021\nThis script creates the different label variables for kmeans, dbscan and hierarchical cluster analysis. This is necessary for filtering the different clustering methods in the shiny dashboard. \n@author: annalena, freddy\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.cluster import DBSCAN\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import AgglomerativeClustering\nfrom scipy.cluster.hierarchy import dendrogram\ndirectory = \"timeperiod/old/\"\nold_files = [\"data_400bc_labels.csv\", \"data_200bc_labels.csv\", \"data_0bc_labels.csv\", \"data_0ad_labels.csv\", \"data_200ad_labels.csv\"]\ndata_files = [\"data_400bc.csv\", \"data_200bc.csv\", \"data_0bc.csv\", \"data_0ad.csv\", \"data_200ad.csv\"]\nfeature = ['weight', 'enddate', 'startdate', 'material_cat', 'denom_cat',\n 'mint_cat']\noutput = list()\n\n# rename old label variable and execute dbscan clustering\nfor old_file_name in old_files:\n data = pd.read_csv(f\"{directory+old_file_name}\", sep=\";\")\n data = data.rename(columns={\"label\": \"kmeans_label\"})\n # do dbscan analysis\n tmp = data[[\"x\", \"y\"]]\n dbscan = DBSCAN(eps=2, min_samples=1).fit(tmp)\n data[\"dbscan_label\"] = dbscan.labels_\n output.append(data)\n\n# load full dataset for hierarchical clustering (doesn't work with umap results)\ndata = pd.read_csv('data\\\\analysis_dataset_w_material.csv', sep=\";\")\n\n#helper method for plotting dendrograms for hierarchical cluster analysis\ndef plot_dendrogram(model, **kwargs):\n # Create linkage matrix and then plot the dendrogram\n\n # create the counts of samples under each node\n counts = np.zeros(model.children_.shape[0])\n n_samples = len(model.labels_)\n for i, merge in enumerate(model.children_):\n current_count = 0\n for child_idx in merge:\n if child_idx < n_samples:\n current_count += 1 # leaf node\n else:\n current_count += counts[child_idx - n_samples]\n counts[i] = current_count\n\n linkage_matrix = np.column_stack([model.children_, model.distances_,\n counts]).astype(float)\n\n # Plot the corresponding dendrogram\n dendrogram(linkage_matrix, **kwargs)\n\n\n\nfor c in data.columns:\n try:\n data[c] = data[c].astype(float)\n except:\n print(c)\ndel data[\"findspot\"]\ndel data[\"material\"]\ndata = data[feature]\n\n# create data subsets\nfull_datasets = [data[data[\"enddate\"] <= -400], data[(data[\"enddate\"]>-400) & (data[\"enddate\"]<=-200)], data[(data[\"enddate\"]>-200) & (data[\"enddate\"]<=0)], data[(data[\"enddate\"]>0) & (data[\"enddate\"]<=200)], data[data[\"enddate\"]>200]]\nfull_datasets = [x.dropna() for x in full_datasets]\ncounter = 1\n#first analyze agglomerative cluster results and set threshold for number of clusters\nfor dataset in full_datasets:\n model = AgglomerativeClustering(n_clusters=None, affinity='euclidean', distance_threshold=0,)\n model = model.fit(dataset)\n plt.title(f\"{counter}\")\n counter +=1\n plot_dendrogram(model)\n plt.show()\n#1: 11\n#2: 15\n#3: 26\n#4: 12\n#5: 16\ncounter = 0\n# n_cluster threshold from analysis above.\nclusters = [11, 15, 26, 12, 16]\nfor dataset in full_datasets:\n model = AgglomerativeClustering(n_clusters=clusters[counter], affinity='euclidean').fit(dataset)\n output[counter][\"hierarchy_label\"] = model.labels_\n counter +=1\ni = 0\n# export datasets\nfor out in output:\n out.to_csv(f\"timeperiod/{old_files[i]}\", sep=\";\", index=False)\n i += 1\n\n" }, { "alpha_fraction": 0.6453596949577332, "alphanum_fraction": 0.6632789373397827, "avg_line_length": 31.495651245117188, "blob_id": "9ba914b53bcb15fabf03be7a02cf018a9eadf388", "content_id": "87c92fe93e9c3a295525c87a3f15f999cfe545f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3739, "license_type": "no_license", "max_line_length": 194, "num_lines": 115, "path": "/data_prep/timperiods_clustering.py", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 2 18:58:24 2021\nThis file contains the clustering algorithms kmeans, dbscan and hierarchical clustering. Input from https://github.com/codehering/data_challanges_2021/blob/main/data_prep/dimension_reduction.py.\nThe results are stored in csv format and used by the dashboard for further analysis.\n@author: freddy, annalena\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.cluster import DBSCAN\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import AgglomerativeClustering\nfrom scipy.cluster.hierarchy import dendrogram\nfrom yellowbrick.cluster import KElbowVisualizer\nfrom sklearn.cluster import KMeans\ndirectory = \"../timeperiod/new/\"\ndata_files = [\"data_400bc.csv\", \"data_200bc.csv\", \"data_0bc.csv\", \"data_0ad.csv\", \"data_200ad.csv\"]\n\n# elbow analysis for kmeans clustering\nfor file in data_files:\n tmp_data = pd.read_csv(f\"{directory+file}\")\n tmp_data = tmp_data[[\"x\", \"y\"]]\n Sum_of_squared_distances = list()\n K = range(1,20)\n for k in K:\n km = KMeans(n_clusters=k)\n km = km.fit(tmp_data)\n Sum_of_squared_distances.append(km.inertia_)\n plt.plot(K, Sum_of_squared_distances, 'bx-')\n plt.xlabel('k')\n plt.ylabel('Sum_of_squared_distances')\n plt.title(f'{file} - Elbow Method For Optimal k')\n plt.show()\n\n# optimal k values\nk_values = [4,4, 4, 4, 5]\ni = 0\nk_labels = list()\nfor file in data_files:\n tmp_data = pd.read_csv(f\"{directory+file}\")\n tmp_data = tmp_data[[\"x\", \"y\"]]\n km = KMeans(n_clusters=k_values[i])\n i += 1\n km = km.fit(tmp_data)\n k_labels.append(km.labels_)\n\n#dbscan clustering analysis\ndbscan_labels = list()\nfor file in data_files:\n # do dbscan analysis\n tmp_data = pd.read_csv(f\"{directory+file}\")\n tmp_data = tmp_data[[\"x\", \"y\"]]\n dbscan = DBSCAN(eps=2, min_samples=1).fit(tmp_data)\n dbscan_labels.append(dbscan.labels_)\n\n\n\n\ndef plot_dendrogram(model, **kwargs):\n # Create linkage matrix and then plot the dendrogram\n\n # create the counts of samples under each node\n counts = np.zeros(model.children_.shape[0])\n n_samples = len(model.labels_)\n for i, merge in enumerate(model.children_):\n current_count = 0\n for child_idx in merge:\n if child_idx < n_samples:\n current_count += 1 # leaf node\n else:\n current_count += counts[child_idx - n_samples]\n counts[i] = current_count\n\n linkage_matrix = np.column_stack([model.children_, model.distances_,\n counts]).astype(float)\n\n # Plot the corresponding dendrogram\n dendrogram(linkage_matrix, **kwargs)\n\n\n\n#first analyze agglomerative cluster results and set threshold for number of clusters\nfor file in data_files:\n tmp_data = pd.read_csv(f\"../timeperiod/new/raw/{file}\")\n model = AgglomerativeClustering(n_clusters=None, affinity='euclidean', distance_threshold=0,)\n model = model.fit(tmp_data)\n plt.title(f\"{file}\")\n plot_dendrogram(model)\n plt.show()\n#1: 11\n#2: 15\n#3: 26\n#4: 12\n#5: 16\ncounter = 0\n#optimal number of classes for hierarchical clustering\nclusters = [8, 8, 6, 3, 6]\nhierarchy_labels = list()\nfor file in data_files:\n tmp_data = pd.read_csv(f\"../timeperiod/new/raw/{file}\")\n model = AgglomerativeClustering(n_clusters=clusters[counter], affinity='euclidean').fit(tmp_data)\n hierarchy_labels.append(model.labels_)\n counter +=1\nc = 0\n# output\nfor file in data_files:\n data = pd.read_csv(f\"{directory+file}\")\n data[\"kmeans_label\"] = k_labels[c]\n data[\"dbscan_label\"] = dbscan_labels[c]\n data[\"hierarchy_label\"] = hierarchy_labels[c]\n data.to_csv(f\"../timeperiod/new/{file.replace('.csv','')}_labels.csv\", index=False)\n \n \n c +=1\n\n\n" }, { "alpha_fraction": 0.6416009664535522, "alphanum_fraction": 0.6664645075798035, "avg_line_length": 31.343137741088867, "blob_id": "cccefb246ca8520b8cf9e857c62c5748f7965393", "content_id": "f0944c9048b8da95bad7d9380c58ec36d80265be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3298, "license_type": "no_license", "max_line_length": 193, "num_lines": 102, "path": "/experiments/vanilla_python/explorative/tsne_svd_entity_category.py", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 6 14:49:06 2021\nThis script was used to investigate different dimension reduction algorithms apart from UMAP. In particular, t-SNE and truncated SVD were used and explicitly investigated with the entity data. \n@author: annalena, freddy\n\"\"\"\nimport pandas as pd\nfrom sklearn.manifold import TSNE \nimport time\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.decomposition import TruncatedSVD\nimport random\nimport umap.umap_ as umap\nimport umap.utils\nimport umap.plot\n\n\ncat_dummys = pd.read_csv(\"2021_06_01_DC_NLP_CNT\\\\entity_cats_prepared.csv\", sep=\";\")\ndel cat_dummys[\"Unnamed: 0\"]\n\n#split categorys by side\ncat_dummys_obv = cat_dummys[cat_dummys[\"side\"]==0]\ncat_dummys_rev = cat_dummys[cat_dummys[\"side\"]==1]\ncat_dummys_sides = pd.merge(cat_dummys_obv, cat_dummys_rev, how=\"outer\", on=\"id_coin\")\ncat_coins = cat_dummys_sides[\"id_coin\"]\ndel cat_dummys_sides[\"id_coin\"]\ncat_dummys_sides = cat_dummys_sides.fillna(-1)\n\n#try t-SNE algorithm\ntime_start = time.time()\ntsne = TSNE(n_components=2, verbose=2, perplexity=40, n_iter=1000, learning_rate=10)\ntsne_results = tsne.fit_transform(cat_dummys_sides)\nprint('t-SNE done! 
Time elapsed: {} seconds'.format(time.time()-time_start))\nplt.scatter(\n    tsne_results[:, 0],\n    tsne_results[:, 1])\nplt.gca().set_aspect('equal', 'datalim')\n\n\n#try good old umap\ndef cross_validation(d, k=40):\n    seeds = random.sample(range(0, 100000), k)\n    embedding_x = 0\n    embedding_y = 0\n    counter = 0\n    for seed in seeds:\n        counter += 1\n        print(f\"{counter}/{k}\")\n        r = umap.UMAP(random_state=seed)\n        r.fit(d)\n        embedding = r.transform(d)\n        embedding_x += embedding[:, 0]\n        embedding_y += embedding[:, 1]\n    embedding_x = embedding_x / k\n    embedding_y = embedding_y / k\n    return embedding_x, embedding_y\nx, y = cross_validation(cat_dummys_sides, 10)\nplt.scatter(\n    x,\n    y)\numap_cat_df = pd.DataFrame({\"x\": x, \"y\": y})\n\n# use umap to cluster data\nclustering = DBSCAN(eps=1, min_samples=10).fit(umap_cat_df)\numap_cat_df[\"label\"] = clustering.labels_\nplt.scatter(\n    umap_cat_df[\"x\"],\n    umap_cat_df[\"y\"], c= umap_cat_df[\"label\"])\numap_cat_df.groupby(\"label\").count()\n\n\n# use SVD for sparse data\nsvd = TruncatedSVD(n_components=2, n_iter=100, random_state=42)\nsvd.fit(cat_dummys_sides)\nsvd_data = svd.transform(cat_dummys_sides)\nplt.scatter(\n    svd_data[:, 0],\n    svd_data[:, 1])\n\nd_tmp = pd.read_csv(\"2021_06_01_DC_NLP_CNT\\\\design_data2.csv\", sep=\";\")\ne_tmp = pd.read_csv(\"2021_06_01_DC_NLP_CNT\\\\entity_cats.csv\", sep=\";\")\n\ncomplete_data = pd.merge(d_tmp, e_tmp, how=\"left\", left_on=\"Entity\", right_on=\"name\")\n\ncat_cols = [x for x in complete_data if \"Cat\" in x]\ncomplete_data = complete_data[[\"id_coin\"]+cat_cols]\n# build a labelled frame from the SVD projection (svd_df was missing in the original draft of this script)\nsvd_df = pd.DataFrame(svd_data, columns=[\"x\", \"y\"])\nsvd_df[\"label\"] = DBSCAN(eps=1, min_samples=10).fit_predict(svd_df)\nsvd_df[\"coin\"] = cat_coins.values\ndata_list = list()\nfor cluster in svd_df[\"label\"].unique().tolist():\n    tmp = svd_df[svd_df[\"label\"]==cluster]\n    tmp_coins = tmp[\"coin\"].unique().tolist()\n    tmp_cats = complete_data[complete_data[\"id_coin\"].isin(tmp_coins)]\n    tmp_dict = dict()\n    for c in cat_cols:\n        tmp_dict[c] = tmp_cats[c].unique().tolist()\n        tmp_dict[c] = [x for x in tmp_dict[c] if str(x) != \"nan\"]\n    data_list.append(tmp_dict)\n\ntest = data_list[0]\ncatI_list =list()\nfor d in data_list:\n    catI_list.append(d[\"Cat_I\"])" }, { "alpha_fraction": 0.5555877089500427, "alphanum_fraction": 0.5810654163360596, "avg_line_length": 35.13978576660156, "blob_id": "9a4f1d0e28b6303e0d875a7f2ae0e0749295799b", "content_id": "0fb5cc6700b50d8b50f2db418bc17d00bdb0f4a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3454, "license_type": "no_license", "max_line_length": 141, "num_lines": 93, "path": "/data_prep/rdf/data_prep.py", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 26 21:45:29 2021\r\nThe idea of this script was to directly extract the data from RDF format via python.\r\nBut we have found that it makes more sense to access the data directly via sparql. 
For this reason this script is no longer relevant\r\n\r\n@author: freddy, annalena\r\n\"\"\"\r\n\r\nimport json\r\nimport time\r\nimport pandas as pd\r\nwith open(\"cn_output.json\", \"r\") as file:\r\n data = json.load(file)\r\nkeys = list(data.keys())[0]\r\ntest = data[keys]\r\ncoin_dict = dict()\r\nout_dict = dict()\r\nstart_time = time.time()\r\ndata_keys = list(data.keys())#[:1000]\r\n# flatten dictionaries\r\nfor coin in data_keys:\r\n coin_data = data[coin]\r\n for k in coin_data.keys():\r\n if isinstance(coin_data[k], dict):\r\n subdict = coin_data[k]\r\n for j in subdict.keys():\r\n print(j)\r\n if \"#obverse\" in k:\r\n n = j + \"#obverse\"\r\n elif \"#reverse\" in k:\r\n n = j + \"#reverse\"\r\n else:\r\n n = j\r\n if not j in coin_data.keys():\r\n coin_dict[n] = subdict[j]\r\n else:\r\n coin_dict[k] = coin_data[k]\r\n out_dict[coin] = coin_dict\r\n coin_dict = dict()\r\nprint(\"--- %s seconds ---\" % (time.time() - start_time)) \r\nwith open(\"cn_output_flatten.json\", \"w\") as file:\r\n json.dump(out_dict, file) \r\n#unlist the values\r\n\r\nwith open(\"cn_output_flatten.json\", \"r\") as file:\r\n out_dict = json.load( file) \r\nunlisted_outdict = dict()\r\n\r\ndef replace_prefixes(colname):\r\n if \"http://nomisma.org/ontology\" in colname:\r\n return colname.replace(\"http://nomisma.org/ontology\", \"nm\")\r\n if \"http://purl.org/dc/terms/\" in colname:\r\n return colname.replace(\"http://purl.org/dc/terms/\",\"purl#\")\r\n if \"http://www.w3.org/1999/02/22-rdf-syntax-ns\" in colname:\r\n return colname.replace(\"http://www.w3.org/1999/02/\", \"w3#\")\r\n if \"http://www.w3.org/2000/01/\" in colname:\r\n return colname.replace(\"http://www.w3.org/2000/01/\", \"w3#\")\r\n if \"http://www.w3.org/2004/02/skos/\" in colname:\r\n return colname.replace(\"http://www.w3.org/2004/02/skos/\", \"skos#\")\r\n if \"http://xmlns.com/foaf/0.1/\" in colname:\r\n return colname.replace(\"http://xmlns.com/foaf/0.1/\", \"#xmlns\")\r\n print(colname)\r\n return colname\r\n\r\n\r\nfor key in out_dict.keys():\r\n coin_dict = out_dict[key]\r\n unlisted_coindict = dict()\r\n for col in coin_dict.keys():\r\n if len(coin_dict[col]) > 1:\r\n for index, value in enumerate(coin_dict[col]):\r\n colname = col + \"_\" + str(index)\r\n colname = replace_prefixes(colname)\r\n unlisted_coindict[colname] = value\r\n else:\r\n unlisted_coindict[replace_prefixes(col)] = coin_dict[col][0]\r\n key = key.replace(\"file:///C:/Users/karsten/Documents/uni/ProgrammeWorkspace/D2RServer/d2rq-0.8.1_CNT/dump_2021_03_16.rdf#coins?id=\", \"\")\r\n unlisted_outdict[key] = unlisted_coindict\r\n\r\n\r\n# dataframe format: took 2h\r\nout_df = pd.DataFrame()\r\nstart_time = time.time()\r\ncounter = 0\r\nfor key in unlisted_outdict.keys():\r\n counter +=1\r\n df_temp = pd.DataFrame(unlisted_outdict[key], index = [0])\r\n df_temp[\"coin_dict_key\"] = key\r\n out_df = pd.concat([out_df, df_temp], axis=0, ignore_index=True)\r\n print(counter)\r\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\r\nout_df.to_csv(\"cn_output_df.csv\", sep=\";\")\r\n" }, { "alpha_fraction": 0.7187948226928711, "alphanum_fraction": 0.7431851029396057, "avg_line_length": 24.851852416992188, "blob_id": "ee0cba8cac795cada3ce4f294b1ede8f82e0eefb", "content_id": "8939c5da85e32931abc5e40a7c35662a036335b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 697, "license_type": "no_license", "max_line_length": 84, "num_lines": 27, "path": 
"/experiments/vanilla_python/explorative/hierarchical_clustering.py", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 18 10:23:57 2021\nThis script helped us to understand hierarchical clustering (with complete dataset).\n@author: annalena, freddy\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom sklearn.cluster import AgglomerativeClustering\nimport scipy.cluster.hierarchy as shc\n\ndata = pd.read_csv('data\\\\analysis_dataset_w_material.csv', sep=\";\")\n#del data[\"mindiam\"]\ndel data[\"axis\"]\ndel data[\"Unnamed: 0\"]\ndel data[\"findspot\"]\ndel data[\"material\"]\n\ndata = data.dropna()\ndata.shape\n\n# create dendrogram for analysis\nplt.figure(figsize=(10, 7))\nplt.title(\"Customer Dendograms\")\ndend = shc.dendrogram(shc.linkage(data, method='ward'))" }, { "alpha_fraction": 0.6221728324890137, "alphanum_fraction": 0.6754846572875977, "avg_line_length": 47.009708404541016, "blob_id": "62eca828dc5476b348e9b159f0b907827e8b2359", "content_id": "a2c1c77e488bfe3b1e5ba647121ffd7e4868c202", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 4952, "license_type": "no_license", "max_line_length": 105, "num_lines": 103, "path": "/dashboard/backend/rf_classifier.R", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "library(caret)\nlibrary(randomForest)\ndata_full_400bc <- readRDS(\"cnn/data_full_400bc.rds\")\ndata_full_200bc <- readRDS(\"cnn/data_full_200bc.rds\")\ndata_full_0bc <- readRDS(\"cnn/data_full_0bc.rds\")\ndata_full_0ad <- readRDS(\"cnn/data_full_0ad.rds\")\ndata_full_200ad <- readRDS(\"cnn/data_full_200ad.rds\")\n# filter necessary columns\nrf_method <- \"cforrest\"\ndata_full_400bc <- data_full_400bc[c(\"weight\", \"startdate\", \"enddate\", \"mint\", \"denom\", \"material\", \"y\")]\ndata_full_400bc$mint <- as.double(as.factor(data_full_400bc$mint))\ndata_full_400bc$denom <- as.double(as.factor(data_full_400bc$denom))\ndata_full_400bc$material <- as.double(as.factor(data_full_400bc$material))\nrf_fit <- randomForest(y ~ ., \n data = data_full_400bc, \n )\nrf_fit\nsaveRDS(rf_fit, \"cnn/model_400bc_y.rds\")\n\ndata_full_200bc <- data_full_200bc[c(\"weight\", \"startdate\", \"enddate\", \"mint\", \"denom\", \"material\", \"y\")]\ndata_full_200bc$mint <- as.double(as.factor(data_full_200bc$mint))\ndata_full_200bc$denom <- as.double(as.factor(data_full_200bc$denom))\ndata_full_200bc$material <- as.double(as.factor(data_full_200bc$material))\nrf_fit <- randomForest(y ~ ., \n data = data_full_200bc)\nrf_fit\nsaveRDS(rf_fit, \"cnn/model_200bc_y.rds\")\ndata_full_0bc <- data_full_0bc[c(\"weight\", \"startdate\", \"enddate\", \"mint\", \"denom\", \"material\", \"y\")]\ndata_full_0bc$mint <- as.double(as.factor(data_full_0bc$mint))\ndata_full_0bc$denom <- as.double(as.factor(data_full_0bc$denom))\ndata_full_0bc$material <- as.double(as.factor(data_full_0bc$material))\nrf_fit <- randomForest(y ~ ., \n data = data_full_0bc)\nrf_fit\nsaveRDS(rf_fit, \"cnn/model_0bc_y.rds\")\ndata_full_0ad <- data_full_0ad[c(\"weight\", \"startdate\", \"enddate\", \"mint\", \"denom\", \"material\", \"y\")]\ndata_full_0ad$mint <- as.double(as.factor(data_full_0ad$mint))\ndata_full_0ad$denom <- as.double(as.factor(data_full_0ad$denom))\ndata_full_0ad$material <- as.double(as.factor(data_full_0ad$material))\nrf_fit <- randomForest(y ~ ., \n data = data_full_0ad)\nrf_fit\nsaveRDS(rf_fit, 
\"cnn/model_0ad_y.rds\")\ndata_full_200ad <- data_full_200ad[c(\"weight\", \"startdate\", \"enddate\", \"mint\", \"denom\", \"material\", \"y\")]\ndata_full_200ad$mint <- as.double(as.factor(data_full_200ad$mint))\ndata_full_200ad$denom <- as.double(as.factor(data_full_200ad$denom))\ndata_full_200ad$material <- as.double(as.factor(data_full_200ad$material))\nrf_fit <- randomForest(y ~ ., \n data = data_full_200ad)\nrf_fit\nsaveRDS(rf_fit, \"cnn/model_200ad_y.rds\")\n\n\n\n\ndata_full_400bc <- readRDS(\"cnn/data_full_400bc.rds\")\ndata_full_200bc <- readRDS(\"cnn/data_full_200bc.rds\")\ndata_full_0bc <- readRDS(\"cnn/data_full_0bc.rds\")\ndata_full_0ad <- readRDS(\"cnn/data_full_0ad.rds\")\ndata_full_200ad <- readRDS(\"cnn/data_full_200ad.rds\")\n# filter necessary columns\n\ndata_full_400bc <- data_full_400bc[c(\"weight\", \"startdate\", \"enddate\", \"mint\", \"denom\", \"material\", \"x\")]\ndata_full_400bc$mint <- as.double(as.factor(data_full_400bc$mint))\ndata_full_400bc$denom <- as.double(as.factor(data_full_400bc$denom))\ndata_full_400bc$material <- as.double(as.factor(data_full_400bc$material))\nrf_fit <- randomForest(x ~ ., \n data = data_full_400bc)\nrf_fit\nsaveRDS(rf_fit, \"cnn/model_400bc_x.rds\")\n\ndata_full_200bc <- data_full_200bc[c(\"weight\", \"startdate\", \"enddate\", \"mint\", \"denom\", \"material\", \"x\")]\ndata_full_200bc$mint <- as.double(as.factor(data_full_200bc$mint))\ndata_full_200bc$denom <- as.double(as.factor(data_full_200bc$denom))\ndata_full_200bc$material <- as.double(as.factor(data_full_200bc$material))\nrf_fit <- randomForest(x ~ ., \n data = data_full_200bc)\nrf_fit\nsaveRDS(rf_fit, \"cnn/model_200bc_x.rds\")\ndata_full_0bc <- data_full_0bc[c(\"weight\", \"startdate\", \"enddate\", \"mint\", \"denom\", \"material\", \"x\")]\ndata_full_0bc$mint <- as.double(as.factor(data_full_0bc$mint))\ndata_full_0bc$denom <- as.double(as.factor(data_full_0bc$denom))\ndata_full_0bc$material <- as.double(as.factor(data_full_0bc$material))\nrf_fit <- randomForest(x ~ ., \n data = data_full_0bc)\nrf_fit\nsaveRDS(rf_fit, \"cnn/model_0bc_x.rds\")\ndata_full_0ad <- data_full_0ad[c(\"weight\", \"startdate\", \"enddate\", \"mint\", \"denom\", \"material\", \"x\")]\ndata_full_0ad$mint <- as.double(as.factor(data_full_0ad$mint))\ndata_full_0ad$denom <- as.double(as.factor(data_full_0ad$denom))\ndata_full_0ad$material <- as.double(as.factor(data_full_0ad$material))\nrf_fit <- randomForest(x ~ ., \n data = data_full_0ad)\nrf_fit\nsaveRDS(rf_fit, \"cnn/model_0ad_x.rds\")\ndata_full_200ad <- data_full_200ad[c(\"weight\", \"startdate\", \"enddate\", \"mint\", \"denom\", \"material\", \"x\")]\ndata_full_200ad$mint <- as.double(as.factor(data_full_200ad$mint))\ndata_full_200ad$denom <- as.double(as.factor(data_full_200ad$denom))\ndata_full_200ad$material <- as.double(as.factor(data_full_200ad$material))\nrf_fit <- randomForest(x ~ ., \n data = data_full_200ad)\nrf_fit\nsaveRDS(rf_fit, \"cnn/model_200ad_x.rds\")\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7039248943328857, "alphanum_fraction": 0.723549485206604, "avg_line_length": 30.675676345825195, "blob_id": "7b2a9fbcb1903d7bb11e6f0ba85a862d3f9d2227", "content_id": "7f37125ed0220b0d647cc83c9924d504ea763307", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1172, "license_type": "no_license", "max_line_length": 193, "num_lines": 37, "path": "/experiments/vanilla_python/explorative/outlier.py", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "# 
-*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 23 17:05:26 2021\nThis script was created to test an outlier detection algorithm. However, we could not gather any useful insights, so outlier detection was not pursued further in the project.\n@author: fredi\n\"\"\"\n\nfrom sklearn.ensemble import IsolationForest\nimport pandas as pd\nfrom sklearn.experimental import enable_iterative_imputer  # required before importing the experimental IterativeImputer\nfrom sklearn.impute import IterativeImputer\nimport numpy as np\n\ndata = pd.read_csv(\"data\\\\analysis_dataset.csv\", sep=\";\")\ndel data[\"Unnamed: 0\"]\ncoins = data[\"coin\"].to_list()\ndel data[\"coin\"]\ndel data[\"axis\"]\ndel data[\"mindiam\"]\ndel data[\"findspot\"]\nimp = IterativeImputer(max_iter=10, random_state=0)\nimp_data = data.copy()\nhead = data.columns\nfor col in data.columns:\n    imp.fit(np.array(data[col]).reshape(-1,1))\n    imp_data[col] = imp.transform(np.array(data[col]).reshape(-1,1))\n    #to do single transformation\n#imp_data = imp.transform(data)\nimp_data = pd.DataFrame(imp_data, columns=head)\n\n\n# use isolation forest for detecting outliers\nclf = IsolationForest()\npreds = clf.fit_predict(imp_data)\nimp_data[\"outlier\"] = preds\n\nimp_outlier = imp_data[imp_data[\"outlier\"]==-1]\nimp_normal = imp_data[imp_data[\"outlier\"]!=-1]\n" }, { "alpha_fraction": 0.5193428993225098, "alphanum_fraction": 0.635930061340332, "avg_line_length": 25.22222137451172, "blob_id": "236aeeba21c2e33d7219a9e85e3d9a9b17d4673c", "content_id": "b4b2b0204fa9542e82e2a08924d5bdf1b5822dcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1887, "license_type": "no_license", "max_line_length": 49, "num_lines": 72, "path": "/dashboard/frontend/data_loader.R", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "load_x_model <- reactive({\n  bc400 <- readRDS(\"cnn/model_400bc_x.rds\")\n  bc200 <- readRDS(\"cnn/model_200bc_x.rds\")\n  bc0 <- readRDS(\"cnn/model_0bc_x.rds\")\n  ad0 <- readRDS(\"cnn/model_0ad_x.rds\")\n  ad200 <-readRDS(\"cnn/model_200ad_x.rds\")\n  data <- list()\n  data$bc400 <- bc400\n  data$bc200 <- bc200\n  data$bc0 <- bc0\n  data$ad0 <- ad0\n  data$ad200 <- ad200\n  data\n})\nload_y_model <- reactive({\n  bc400 <- readRDS(\"cnn/model_400bc_y.rds\")\n  bc200 <- readRDS(\"cnn/model_200bc_y.rds\")\n  bc0 <- readRDS(\"cnn/model_0bc_y.rds\")\n  ad0 <- readRDS(\"cnn/model_0ad_y.rds\")\n  ad200 <-readRDS(\"cnn/model_200ad_y.rds\")\n  data <- list()\n  data$bc400 <- bc400\n  data$bc200 <- bc200\n  data$bc0 <- bc0\n  data$ad0 <- ad0\n  data$ad200 <- ad200\n  data\n})\n\nload_entity_data <- reactive({\n  bc400 <- readRDS(\"cnn/entity_400bc.rds\")\n  bc200 <- readRDS(\"cnn/entity_200bc.rds\")\n  bc0 <- readRDS(\"cnn/entity_0bc.rds\")\n  ad0 <- readRDS(\"cnn/entity_0ad.rds\")\n  ad200 <-readRDS(\"cnn/entity_200ad.rds\")\n  data <- list()\n  data$bc400 <- bc400\n  data$bc200 <- bc200\n  data$bc0 <- bc0\n  data$ad0 <- ad0\n  data$ad200 <- ad200\n  data\n})\nload_entity_detail_data <- reactive({\n  bc400 <- readRDS(\"cnn/entity_400bc_detail.rds\")\n  bc200 <- readRDS(\"cnn/entity_200bc_detail.rds\")\n  bc0 <- readRDS(\"cnn/entity_0bc_detail.rds\")\n  ad0 <- readRDS(\"cnn/entity_0ad_detail.rds\")\n  ad200 <-readRDS(\"cnn/entity_200ad_detail.rds\")\n  data <- list()\n  data$bc400 <- bc400\n  data$bc200 <- bc200\n  data$bc0 <- bc0\n  data$ad0 <- ad0\n  data$ad200 <- ad200\n  data\n})\n\nload_all_data <- reactive({\n  bc400 <- readRDS(\"cnn/data_full_400bc.rds\")\n  bc200 <- readRDS(\"cnn/data_full_200bc.rds\")\n  bc0 <- readRDS(\"cnn/data_full_0bc.rds\")\n  ad0 <- readRDS(\"cnn/data_full_0ad.rds\")\n  ad200 
<-readRDS(\"cnn/data_full_200ad.rds\")\n  data <- list()\n  data$bc400 <- bc400\n  data$bc200 <- bc200\n  data$bc0 <- bc0\n  data$ad0 <- ad0\n  data$ad200 <- ad200\n  data\n})" }, { "alpha_fraction": 0.6766712069511414, "alphanum_fraction": 0.7053205966949463, "avg_line_length": 33.904762268066406, "blob_id": "4b6029ff5ffc182de008180c0349338b4d074023", "content_id": "e6f44a9b594be415343c151c44b94d815d7f7ee1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 733, "license_type": "no_license", "max_line_length": 148, "num_lines": 21, "path": "/data_prep/entities/read_sql_dump.py", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 3 15:04:18 2021\nThis script filters all verbs out of the dataset and creates a dummy variable for every entity. We used this entity dummy dataset for our experiments.\n@author: freddy, annalena\n\"\"\"\n\nimport pandas as pd\ndesign_data = pd.read_csv(\"2021_06_01_DC_NLP_CNT\\\\design_data2.csv\", sep=\";\")\n#delete verbs\ndesign_data = design_data[design_data[\"Label_Entity\"]!=\"VERBS\"]\ndel design_data[\"DesignID\"]\n\n\n\nentity_dummys = pd.get_dummies(design_data[\"Entity\"], prefix=\"entity\")\nentity_dummys[\"id_coin\"] = design_data[\"id_coin\"]\ncols = [x for x in entity_dummys.columns if \"entity\" in x]\ng = entity_dummys.groupby(\"id_coin\")[cols].sum().reset_index()\n\ng.to_csv(\"design_dummys.csv\", sep=\";\")\n" }, { "alpha_fraction": 0.6617733240127563, "alphanum_fraction": 0.6987668871879578, "avg_line_length": 28.36206817626953, "blob_id": "a49ce1094dbdb76134bd01e566ed34b6006d2f5f", "content_id": "53a2e47e4a61a3d2f26e8134044230cb6296b1d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1703, "license_type": "no_license", "max_line_length": 124, "num_lines": 58, "path": "/experiments/vanilla_python/explorative/kmeans.py", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 26 21:31:32 2021\nThis script helped us to understand and get a feel for kmeans clustering. We also experimented with an imputer and PCA. \n@author: annalena, freddy\n\"\"\"\n\nimport pandas as pd\nfrom sklearn.cluster import KMeans\nfrom sklearn.experimental import enable_iterative_imputer\nfrom sklearn.impute import IterativeImputer\nfrom sklearn.decomposition import PCA\nimport numpy as np\nimport matplotlib.pyplot as plt\ndata = pd.read_csv(\"data\\\\analysis_dataset.csv\", sep=\";\")\n\ndel data[\"Unnamed: 0\"]\ndel data[\"coin\"]\ndel data[\"axis\"]\ndel data[\"mindiam\"]\ndel data[\"findspot\"]\n\n\n# impute values\nimp = IterativeImputer(max_iter=10, random_state=0)\nimp_data = data.copy()\nhead = data.columns\nfor col in data.columns:\n    imp.fit(np.array(data[col]).reshape(-1,1))\n    imp_data[col] = imp.transform(np.array(data[col]).reshape(-1,1))\n\n\n# split up intervals for better analysis:\ninterval_costs = list()\nintervals = [range(1,16), range(15,101) , range(100,200), range(200,300), range(300, 400)]\nfor i in intervals:\n    costs = list()\n    for k in i:\n        print(k)\n        kmeans = KMeans(n_clusters=k, random_state=0).fit(imp_data)\n        costs.append(kmeans.inertia_)\n    interval_costs.append(costs)\n\nfor j in range(len(interval_costs)):\n    plt.plot(intervals[j], interval_costs[j])\n    plt.show()\n\nkmeans = KMeans(n_clusters=500, random_state=0).fit(imp_data)\nimp_data[\"labels\"] = kmeans.labels_\n\n# PCA projection of the imputed features for visualising the clusters\npca = PCA(n_components=2)\nX_pca = pca.fit_transform(imp_data.drop(columns=[\"labels\"]))\nX_pca = pd.DataFrame(X_pca, columns=[\"PCA1\", \"PCA2\"])\nX_pca[\"labels\"] = kmeans.labels_\nplt.scatter(X_pca.PCA1, X_pca.PCA2, c=X_pca.labels, alpha = 0.6)\nplt.show()\ntest = imp_data.groupby(\"labels\")[\"maxdiam\"].count()\n" }, { "alpha_fraction": 0.489130437374115, "alphanum_fraction": 0.6902173757553101, "avg_line_length": 15.416666984558105, "blob_id": "24c644f4c9ab0a34af513593c9a44f2077adc820", "content_id": "35d8111626d2edf3b9d99516a35b08f883b95a82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 184, "license_type": "no_license", "max_line_length": 22, "num_lines": 12, "path": "/requirements.txt", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "pandas==0.25.3\nnumpy==1.17.2\nscipy==1.3.1\nsklearn\nmatplotlib==3.1.2\numap-learn==0.5.1\njupyter==1.0.0\nkeras==2.3.1\nautoviz==0.0.83\nfactor_analyzer==0.3.2\nseaborn==0.11.1\ntabulate==0.8.9" }, { "alpha_fraction": 0.5253505706787109, "alphanum_fraction": 0.5404530763626099, "avg_line_length": 39.30434799194336, "blob_id": "b8f9577a92a3540d4bc0ecbe12c35533a2a1f937", "content_id": "9b8ba7d5a9672d856ae14d00da3a5d6f72c8c559", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 927, "license_type": "no_license", "max_line_length": 107, "num_lines": 23, "path": "/dashboard/frontend/body.R", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "observe({ \n  if (USER$login == FALSE) {\n    if (!is.null(input$login)) {\n      if (input$login > 0) {\n        Username <- isolate(input$userName)\n        Password <- isolate(input$passwd)\n        if(length(which(credentials$username_id==Username))==1) { \n          pasmatch  <- credentials[\"passod\"][which(credentials$username_id==Username),]\n          pasverify <- password_verify(pasmatch, Password)\n          if(pasverify) {\n            USER$login <- TRUE\n          } else {\n            shinyjs::toggle(id = \"nomatch\", anim = TRUE, time = 1, animType = \"fade\")\n            shinyjs::delay(3000, shinyjs::toggle(id = \"nomatch\", anim = TRUE, time = 1, animType = \"fade\"))\n          }\n        } else {\n          shinyjs::toggle(id = \"nomatch\", anim = TRUE, time = 1, animType = \"fade\")\n          shinyjs::delay(3000, shinyjs::toggle(id = 
\"nomatch\", anim = TRUE, time = 1, animType = \"fade\"))\n }\n } \n }\n } \n})\n" }, { "alpha_fraction": 0.6840391159057617, "alphanum_fraction": 0.6970683932304382, "avg_line_length": 36.33783721923828, "blob_id": "7855fa62dffdba1d1fb4d26ec9808d33afcdbbf2", "content_id": "23bceefb0db707019dd821def14f9baf7ca75ec9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2763, "license_type": "no_license", "max_line_length": 198, "num_lines": 74, "path": "/data_prep/data_preperation_for_analysis.py", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 20 11:23:45 2021\nThis script loads the results from the sparql query and prepares them for further analysis. \n@author: annalena, freddy\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\ndata = pd.read_csv(\"data\\\\queryResults_semikolon.csv\", sep=\";\")\n\n#filter for interesting variables\nfilter_vars = [\"coin\", \"maxdiam\", \"mindiam\", \"weight\", \"material\", \"enddate\", \"startdate\", \"denom\", \"mint\", \"collection\", \"weightstand_engl\", \"findsport\", \"authority\", \"peculiarities_engl\", \"axis\"]\n\ndata = data[filter_vars]\n\n\n# replace unnecessary prefixes\ndata[\"mint\"] = [x.replace(\"http://nomisma.org/id/\", \"\") for x in data[\"mint\"]]\ndata[\"coin\"] = [x.split(\"#coins?id=\")[1] for x in data[\"coin\"]]\ndata[\"material\"] = [x.replace(\"http://nomisma.org/id/\", \"\") for x in data[\"material\"]]\ndata[\"denom\"] = [x.replace(\"http://nomisma.org/id/\", \"\") for x in data[\"denom\"]]\ndata[\"findspot\"] = [str(x).replace(\"file:///C:/Users/karsten/Documents/uni/ProgrammeWorkspace/D2RServer/d2rq-0.8.1_CNT/dump_2021_03_16.rdf#\", \"\") for x in data[\"findsport\"]]\ncategorial_vars = [\"material\", \"denom\", \"mint\", \"collection\", \"weightstand_engl\", \"findsport\", \"authority\", \"peculiarities_engl\" ]\n\n# create nan values for further filtering\nfor var in categorial_vars:\n data[var] = data[var].replace(\" \", np.nan)\n\nfill_ratio = 100 - data.isnull().sum(axis = 0)/len(data)*100\n\n#delete findsport, authority, pecularities_engl and weightstand_engl because fill ratio is less than 15%\ndel data[\"findsport\"]\ncategorial_vars.remove(\"findsport\")\ndel data[\"authority\"]\ncategorial_vars.remove(\"authority\")\ndel data[\"peculiarities_engl\"]\ncategorial_vars.remove(\"peculiarities_engl\")\ndel data[\"weightstand_engl\"]\ncategorial_vars.remove(\"weightstand_engl\")\n\n#check number of different observations\n\nfor var in categorial_vars:\n print(f\"{var} : {len(data[var].unique().tolist())}\")\n\n#only for material its possible to create dummy variables:\nprint(data[\"material\"].value_counts())\nmaterial_dummys = pd.get_dummies(data[\"material\"], prefix=\"material\")\ndata = data.join(material_dummys)\ncategorial_vars.remove(\"material\")\ndel data[\"material\"]\n# cat encoding for denom, mint and collection\nfor var in categorial_vars:\n data[var] = data[var].astype('category')\n data[f\"{var}_cat\"] = data[var].cat.codes\n#delete collection, mint and denom. Lot of missing values hard to inteprete.\ndel data[\"collection\"]\ndel data[\"mint\"]\ndel data[\"denom\"]\n#fix , . 
problem for float values\nfor col in data.columns:\n    data[col] = [str(x).replace(\",\",\".\") for x in data[col]]\n    try:\n        data[col] = data[col].astype(float)\n    except:\n        pass\n# create dataset for algorithms:\ndataset = data.copy()\n\n\n\n# export dataset\ndataset.to_csv(\"data\\\\analysis_dataset.csv\", sep=\";\")\n" }, { "alpha_fraction": 0.6201550364494324, "alphanum_fraction": 0.6395348906517029, "avg_line_length": 42.16666793823242, "blob_id": "50716a984bec0dbc3aab749c6bffb78e72c5db3", "content_id": "5ac1f1ada1964927a46b2ec25a177379437d1dc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 258, "license_type": "no_license", "max_line_length": 86, "num_lines": 6, "path": "/dashboard/frontend/credentials.R", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "credentials = data.frame(\n  username_id = c(\"cn_user1\", \"cn_user2\", \"annalena\", \"freddy\"),\n  passod = sapply(c(\"forelle9\", \"thunfisch2\", \"forelle4\", \"hering\"),password_store),\n  permission = c(\"admin\", \"admin\", \"admin\", \"admin\"), \n  stringsAsFactors = F\n)" }, { "alpha_fraction": 0.7013164162635803, "alphanum_fraction": 0.7194734215736389, "avg_line_length": 31.41176414489746, "blob_id": "7f68e8a9c12e7599466c20c66a4852dd2e2a30cc", "content_id": "7b3f3a9cf45e531c6a6b2f2e4a9aa9ca9d651b1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2203, "license_type": "no_license", "max_line_length": 134, "num_lines": 68, "path": "/experiments/vanilla_python/explorative/fca_autoviz.py", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 27 09:36:14 2021\nThe aim of this script was to understand the AutoViz library, which is a useful library for plotting and creating descriptive statistics.\n@author: fredi\n\"\"\"\nimport pandas as pd\nfrom autoviz.AutoViz_Class import AutoViz_Class\nimport numpy as np\nfrom factor_analyzer import FactorAnalyzer\nimport matplotlib.pyplot as plt\nAV = AutoViz_Class()\n\ndata = pd.read_csv('data\\\\analysis_dataset.csv', sep=\";\")\ndata[\"denom_cat\"] = data[\"denom_cat\"].astype(\"category\")\ndata[\"mint_cat\"] = data[\"mint_cat\"].astype(\"category\")\ndata[\"collection_cat\"] = data[\"collection_cat\"].astype(\"category\")\ndel data[\"Unnamed: 0\"]\ndel data[\"coin\"]\ndata.to_csv(\"data\\\\autoviz_prepared_data.csv\")\n\ndf = AV.AutoViz('data\\\\autoviz_prepared_data.csv')\n\n\ntest = data.copy()\ntest.dropna(inplace=True)\ndel test[\"findspot\"]\ntest.info()\nfor col in test.columns:\n    test[col] = test[col].astype(int)\n\nmat_cols = [x for x in test.columns.tolist() if \"material\" in x]\ntest[\"material\"] = 0\ni = 1\nfor c in mat_cols:\n    test[\"material\"] = np.where(test[c]==1, i, test[\"material\"])\n    i +=1\ntest = test[[\"maxdiam\", \"mindiam\", \"weight\", \"enddate\", \"startdate\", \"mint_cat\", \"denom_cat\", \"collection_cat\", \"material\"]]\n\nfrom factor_analyzer.factor_analyzer import calculate_bartlett_sphericity\nchi_square_value,p_value=calculate_bartlett_sphericity(test)\nchi_square_value, p_value\nfrom factor_analyzer.factor_analyzer import calculate_kmo\nkmo_all,kmo_model=calculate_kmo(test)\nkmo_model # kmo > 0.6 -> Factor analysis can be done\n\nfa = FactorAnalyzer()\nfa.set_params(n_factors=9, rotation=None)\nfa.fit(test)\nev, v = fa.get_eigenvalues()\nev\nplt.scatter(range(1,10), ev)\nplt.plot(range(1,10), ev)\nplt.xlabel(\"Factors\")\nplt.ylabel(\"Eigenvalue\")\nplt.show()\nfa2 = FactorAnalyzer()\nfa2.set_params(n_factors=4, rotation=\"varimax\")\nfa2.fit(test)\nev2, v2 = fa2.get_eigenvalues();ev2\n\nloadings = fa2.loadings_\nfactors_col = list()\nfor f in range(1,5):\n    factors_col.append(f\"Factor_{f}\")\nfactors = pd.DataFrame(fa2.loadings_, columns=factors_col, index=test.columns)\n\nvariance = pd.DataFrame(fa2.get_factor_variance(), columns=factors_col, index=[\"SS Loadings\", \"Proportion Var\", \"Cumulative Var\"])" }, { "alpha_fraction": 0.6395348906517029, "alphanum_fraction": 0.6564481854438782, "avg_line_length": 29.516128540039062, "blob_id": "31b847f613c72f71402b1f0c0406dc3bd838fd43", "content_id": "44667707e9eda075505ff9b96185db622142f513", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 946, "license_type": "no_license", "max_line_length": 180, "num_lines": 31, "path": "/dashboard/backend/get_mint_geo_coordinates.py", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 24 21:14:19 2021\nThis script sources geo coordinates for a given mint. We planned to integrate a geo map analysis. However, we decided to use a simple bar chart for visualizing mint distribution. \n@author: freddy, annalena\n\"\"\"\nimport requests\nimport json\nimport pandas as pd\n\ndata = pd.read_csv(\"../data/full_cnn_dataset.csv\" , sep=\";\")\nmints = set(data[\"mint\"].to_list())\nmints = [x.replace(\" \", \"\") for x in mints if x != \"\"]\n\n\n\noutlist = list()\nfor mint in mints:\n# use nomisma api request\n    r = requests.get(f\"http://nomisma.org/apis/getMints?id={mint}\")\n    text = r.text\n    data = json.loads(text)\n    try:\n        data= data[\"features\"][0][\"geometry\"][\"coordinates\"]\n        outlist.append({\"mint\": mint, \"lon\": data[0], \"lat\": data[1]})\n    except:\n        print(mint)\n        \n# olbia_city, eleutherion do not have valid geo coordinates\ndf = pd.DataFrame(outlist)\ndf.to_csv(\"mint_geo.csv\")\n" }, { "alpha_fraction": 0.6037873029708862, "alphanum_fraction": 0.6158047914505005, "avg_line_length": 31.690475463867188, "blob_id": "2148de21e2e8ecce51dafa1c3e7701792763df0e", "content_id": "d80cc8f3ae094edd80ebf8f5819f59e44fbc0fc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2746, "license_type": "no_license", "max_line_length": 132, "num_lines": 84, "path": "/data_prep/rdf/rdf_extract.py", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 23 12:47:07 2021\nThis script was developed to access the RDF data directly with python.\nBut we have found that it makes more sense to access the data directly via sparql. 
For this reason, this script is no longer relevant.\n@author: freddy, annalena\n\"\"\"\n\nimport rdflib\nimport time\nimport json\nstart_time = time.time()\ngraph = rdflib.Graph()\ngraph.open(\"store\", create=True)\ngraph.parse(\"dump_2021_03_16.rdf\")\n\n\nresult_dict = dict()\ncount = 0\n\n# get all triples in graph, this step takes a few minutes\nfor subject, predicate, obj in graph:\n    subject = str(subject)\n    predicate = str(predicate)\n    obj = str(obj)\n    if subject in result_dict.keys():\n        if predicate in result_dict[subject].keys():\n            result_dict[subject][predicate].append(obj)\n        else:\n            result_dict[subject][predicate] = [obj]\n    else:\n        result_dict[subject] = dict()\n        result_dict[subject][predicate] = [obj]\n# create a dict containing every coin\nkeys = list(result_dict.keys())\ncoins = list()\nfor k in keys:\n    k_split = k.split(\"/dump_2021_03_16.rdf#coins?id=\")\n    if len(k_split)>1:\n        coins.append(k)\n\nfinal_results = dict()\nresult = result_dict.copy()\nfor coin in coins:\n    final_results[coin] = dict()\n    coin_dict = result_dict[coin]\n    for coin_dict_key in coin_dict.keys():\n        for element in coin_dict[coin_dict_key]:\n            if element in result_dict.keys():\n                if not element in coins:\n                    final_results[coin][element] = result_dict[element]\n            else:\n                final_results[coin][coin_dict_key] = coin_dict[coin_dict_key]\nprint(\"--- %s seconds ---\" % (time.time() - start_time)) \n# read big data\n\n#search for additional metadata for every coin\nstart_time = time.time()\nadditional_keys = list()\ndic = final_results.copy()\ncounter = 0\nout_dict = dict()\nfor key in dic.keys():\n    sub_dic = dic[key]\n    for sub_key in sub_dic.keys():\n        if isinstance(sub_dic[sub_key], dict):\n            for n_key in sub_dic[sub_key].keys():\n                for e in sub_dic[sub_key][n_key]:\n                    if e in result_dict.keys():\n                        additional_keys.append(e)\n        if isinstance(sub_dic[sub_key], list):\n            for element in sub_dic[sub_key]:\n                if element in result_dict.keys():\n                    additional_keys.append(element)\n    for new_keys in additional_keys:\n        sub_dic[new_keys] = result_dict[new_keys]\n    out_dict[key] = sub_dic\n    counter +=1\n    additional_keys = list()\n    print(counter)\n#save the data\nwith open(f'data/cn_output.json', \"w\") as file:\n    json.dump(out_dict, file)\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n" }, { "alpha_fraction": 0.8283601999282837, "alphanum_fraction": 0.8308687806129456, "avg_line_length": 179.23809814453125, "blob_id": "8ad1bcc6c06f9decb260358d85a71400c333be6d", "content_id": "c21d4ec7a91376012c15f0c5a7ab933a2d38de0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7655, "license_type": "no_license", "max_line_length": 1125, "num_lines": 42, "path": "/README.md", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "# Data Challenges - Numismatic 2021 Repository\nThis repository contains all scripts and experiments created as part of the Data Challenges lecture. The individual directories are briefly described below.\nThe source code is freely available at https://github.com/codehering/data_challenges_2021 \n## Technical implementation\nThe project was implemented with the Python programming language and the statistical scripting language R. In a first step, Python with the Jupyter Notebook extension was used for the individual experiments and exploratory analyses. The interactive dashboard, in turn, was built with the deliberately simple high-level framework Shiny. \n## Data\nTo access the CN data, open the link in download_link_for_CN_data.txt (not on github) and download the csv data. Adjust the paths in the scripts accordingly before running them. \n\n## Directory structure:\n\n### dashboard:\nContains all scripts required for the dashboard, including the scripts that prepare the data so that it can be processed by the dashboard. A distinction is made between:\n\n**-Frontend:**\n\nFrontend contains only the scripts needed for the dashboard itself, i.e. all scripts required for the interactive presentation (including the login, data loading and data aggregation).\n\n**-Backend:**\n\nBackend contains all scripts needed to bring the data into a format suitable for the dashboard (rds format) (ETL.R). This directory also contains rf_classifer.R. This script trains the RandomForest models that the dashboard's Coin Finder needs in order to find similar coins based on given criteria.\n### data_prep:\ndata_prep holds all scripts that were necessary for preparing the data. There are entity-specific scripts as well as scripts that deal with preparing the RDF dataset. \n- data_preperation_for_analysis.py: Prepares the data retrieved via a SPARQL query for the further analysis steps. \n- dimension_reduction.py: Contains the complete steps for the UMAP dimension reduction. \n- timperiods_clustering.py: Stores the entire logic of the clustering procedures.\n### experiments:\nThe experiments directory contains all scripts that helped us understand either the data itself or various data-driven methods. Note that not all ideas and methods applied in this directory were pursued and extended later on. This directory can therefore be regarded as a collection point for everything that was essential to the project, even though much of it had no direct influence on the results of our project. We distinguish between vanilla Python and jupyter notebook experiments. The latter are much better prepared and are therefore easier to understand. Particularly interesting are the notebooks Dimesion_reduction_analysis.ipynb (discusses and compares the different dimension reduction algorithms), different_timeperiods_umap_analysis.ipynb (a first attempt to interpret the different kMeans clusters after the UMAP transformation) and predict_coin_data_with_entitys_experiments.ipynb (prediction of coin attributes, such as enddate, using only the entities).\n### plots:\nThe plots directory stores our figures (such as the correlation matrix, kMeans results, etc.).\n\n## The Dashboard\nBesides the exploratory analysis of the coin data, a central part of our work was an R Shiny dashboard, which can be seen as a self-service analysis tool. We introduced the dashboard because we had trouble analysing individual clusters or groups of coins with classic plots and tables. Studying different entities over time, in particular, is very difficult with a simple plot, given that more than 500 distinct entities exist. For these reasons we decided to program a simple dashboard with interactive elements and various filters. Numismatists may also benefit from this dashboard by carrying out their own analyses. The dashboard is divided into several tabs. These are supported by a Help tab that briefly explains how to use the tool. The tabs are:\n### Coin Explorer\nThe Coin Explorer presents the results of the cluster analysis and, in particular, of the UMAP dimension reduction. Its central element is the first plot, in which the results of the UMAP dimension reduction can be displayed with different clustering algorithms. Depending on which points are selected by left-clicking and dragging in the plot, the other elements, such as the top 10 entities, adapt accordingly. The Coin Explorer is meant for drilling down into the data and understanding how the distribution of the individual points and clusters arises. Keep in mind that the closer points (coins) are to each other, the more similar they are.\n### Entity Explorer\nThe Entity Explorer shows the development of entities over time. Individual entities can be selected with the filter at the top and compared with each other. The analysis via the time series plot on this tab in particular offers the unique possibility of analysing countless entities at the same time. That was also one of the main arguments for building this dashboard.\n### Coin Finder\nThe Coin Finder lets you enter the values of a coin for a given dataset and returns the most similar coins based on the UMAP dimension reduction. For this purpose, a classifier was trained for each time period that can predict the x and y coordinates of a coin (in the UMAP coordinate system, see Coin Explorer). In the resulting table, the \"dist\" feature indicates how \"far\" the entered values are from each coin listed in the table. Note: all values must be filled in for the classifier to handle them.\n## Accessing the dashboard\nThe dashboard is currently available at https://cnminerva.shinyapps.io/coin/. The credentials were sent by mail and can also be found in the submission under dashboard/frontend/credentials.R.\nHowever, the performance of the hosted dashboard is considerably worse than a local run. The Entity Explorer in particular takes a long time to load, because more than 150,000 data points have to be loaded. Also make sure that tables and plots have finished loading before making further changes. \nWe therefore recommend starting the dashboard locally. To install all required packages, run the script dashboard/install_all_packages.R. Next, run the file dashboard/frontend/app.R. Important: the required data still has to be copied into the directory dashboard/frontend/cnn. To obtain the data, open the link in download_link_for_dashboard_data.txt (not on github) and download all the data. 
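\n\n## Minimal pipeline sketch\nThe following example illustrates the core UMAP + clustering pipeline described above as one minimal script. It is a hypothetical sketch for orientation only: the file path, the feature list and the fixed n_clusters=4 are assumptions taken from our preparation scripts, not a drop-in replacement for dimension_reduction.py or timperiods_clustering.py.\n\n```python\n# Hedged sketch: assumes the prepared export data/analysis_dataset.csv with the\n# six feature columns used elsewhere in this repo; adjust names to your export.\nimport pandas as pd\nimport umap.umap_ as umap\nfrom sklearn.cluster import KMeans\n\nfeatures = ['weight', 'enddate', 'startdate', 'material_cat', 'denom_cat', 'mint_cat']\ndata = pd.read_csv('data/analysis_dataset.csv', sep=';')[features].dropna()\n\n# project the six features onto two dimensions, then cluster the 2D embedding\nembedding = umap.UMAP(random_state=42).fit_transform(data)\nlabels = KMeans(n_clusters=4, random_state=42).fit_predict(embedding)\n\nout = pd.DataFrame(embedding, columns=['x', 'y'])\nout['kmeans_label'] = labels\nprint(out.head())\n```\nCoins that end up close to each other in the x/y plane of this output are similar with respect to the six input features; this is exactly the property that the Coin Explorer and the Coin Finder build on. 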
\n\n\n\n" }, { "alpha_fraction": 0.4943923354148865, "alphanum_fraction": 0.5093362331390381, "avg_line_length": 46.53023910522461, "blob_id": "75630316bc4d813f353ad4f71349a702d08c9f16", "content_id": "34bf94e60f19966f191d2f373e0a620a83ba9928", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 33793, "license_type": "no_license", "max_line_length": 490, "num_lines": 711, "path": "/dashboard/frontend/app.R", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "library(shiny)\nlibrary(shinydashboard)\nlibrary(ggplot2)\nlibrary(plotly)\nlibrary(dplyr)\nlibrary(DT)\n#library(leaflet)\nlibrary(stringr)\nlibrary(randomForest)\nlibrary(shinyjs)\nlibrary(sodium)\n\n\n\nsource(\"credentials.R\")\nbox_height <- \"40em\"\nthreshold_mpg = 0.0\nthreshold_cyl = 0.0\nsource(\"ui.R\")\n\n\nserver <- function(input, output, session) {\n #current <- reactiveValues(file = c(\"\"), dataset = data.frame(), clusters = c(), count=0)\n # Change this variable if you plan to publish it to false\n login = TRUE\n USER <- reactiveValues(login = login)\n \n \n \n output$body <- renderUI({\n if (USER$login == TRUE ) {\n tabItems(\n # First tab content\n \n # Second tab content\n tabItem(tabName = \"explorer\",\n fluidRow(column(12, box(width=12, h1(\"Filter\"), selectInput(\"dataset\", \"Select a dataset (timeperiod)\", c(\"<400BC\",\"400-200BC\",\"200-0BC\", \"0-200AD\", \">200AD\")),\n selectInput(\"cluster_method\", \"Select the cluster algorithm\", c(\"kmeans\", \"dbscan\", \"hierarchy\"), selected = \"dbscan\"),\n selectInput(\"cluster_filter\", \"Select the cluster you want to filter\", c(\"All\"), selected = \"All\", multiple=T)))),\n fluidRow(\n column(12,\n # box(width=12,plotOutput(\"plot_explorer\", click = \"plot_click\",\n # dblclick = \"plot_dblclick\",\n # hover = \"plot_hover\",\n # brush = \"plot_brush\"), \n box(width=12,h1(\"Cluster analysis\"), plotlyOutput(\"plot_explorer\"),\n height = box_height))),\n fluidRow(column(12, box(width=12, h2(\"Coin details\"),\n textOutput(\"observations\"),\n textOutput(\"avg_weight\"),\n textOutput(\"avg_startdate\"),\n textOutput(\"avg_enddate\"),\n textOutput(\"avg_mindiam\"),\n textOutput(\"avg_maxdiam\"),\n textOutput(\"avg_denom\"),\n textOutput(\"avg_material\"),\n DT::dataTableOutput(\"explorer_table\")))),\n fluidRow(column(12, box(width=12, h2(\"Entity Top 10\"), plotlyOutput(\"plot_entity\")))),\n fluidRow(column(12, box(width=12, h2(\"Entity details\"), DT::dataTableOutput(\"entity_table\")))),\n #fluidRow(column(12, box(width=12, h2(\"Mint details\"), leafletOutput(\"mint_map\", ))))\n fluidRow(column(12, box(width=12, h2(\"Mint Top 15\"), plotlyOutput(\"mint_map\", ))))\n ),\n tabItem(tabName = \"entity\",\n fluidRow(\n column(12, box(width=12,h1(\"Entity analysis\"),\n selectInput(\"dataset_entity\", \"Select a dataset (timeperiod)\", c(\"All\", \"<400BC\",\"400-200BC\",\"200-0BC\", \"0-200AD\", \">200AD\"), selected = \"All\"),\n sliderInput(\"enddate\", \"Select enddate timeperiod\", min=0, max=100, value=c(40,60)),\n selectInput(\"entity_type\", \"Select entity type\", c(\"All\",\"Object\",\"Person\", \"Plant\"),selected=\"All\" ),\n selectInput(\"entity_items\", \"Select entity type\", c(\"All\"), selected=c(\"All\") , multiple = T)\n ))),\n fluidRow(\n column(12, box(width=12,h1(\"Entity analysis\"), plotlyOutput(\"plot_entity_explorer\"),\n height = box_height))),\n fluidRow(\n column(12, box(width=12,h1(\"Entity development\"), 
plotlyOutput(\"plot_entity_timeseries\"),\n height = box_height)))),\n \n tabItem(tabName = \"finder\",\n fluidRow(\n column(12, box(width=12, h1(\"Select timeperiod\"),selectInput(\"dataset_finder\", \"Select a dataset\", c( \"<400BC\",\"400-200BC\",\"200-0BC\", \"0-200AD\", \">200AD\"), selected = \"<400BC\")))),\n fluidRow(column(12,box(width=12, h1(\"Select feature\"),\n numericInput(\"weight\", \"Specify weight\", 0),\n numericInput(\"startdate\", \"Specify startdate\", 0),\n numericInput(\"enddate_finder\", \"Specify enddate\", 0),\n selectInput(\"mint\", label=\"Specify mint\", choices=c(\"\")),\n selectInput(\"material\", label=\"Specify material\", choices=c()),\n actionButton(\"calculate\", label=\"Calculate\", width=100)\n ))),\n fluidRow(column(12, box(width=12, h1(\"Results\"), DT::dataTableOutput(\"result_table\")))),\n fluidRow(column(12, box(width=12, h1(\"Entity Results\"), DT::dataTableOutput(\"result_entity_table\"))))\n \n \n ),\n tabItem(tabName = \"help\", fluidRow(column(12, box(width=12, h1(\"How to use this dashboard?\"),\n p(\"The key element of this dashboard is the UMAP cluster analysis. Therefore we used the UMAP dimension reduction to plot multidimensional features of coins in a 2D plot. For the transformation we used the features weight, enddate, startdate, mint, material and denom. We filtered the data based on different timeperiods and created data subsets. All plots (without bar charts) are interactive, so please play around :-).\"),\n h2(\"COIN EXPLORER:\"),\n h3(\"How to Select Coins?\"),\n p(\"1. Select a dataset based on time period\"),\n #br(),\n p(\"2. Select coins by clicking and dragging a selection box around coins of interest (default zoom) or use the selection tool(lasso or box selection). \"),\n #br(),\n p(\"3. De-select coins: double-click in plot resets your selection to all coins in time period\"),\n h3(\"How to explore Selected Coins?\"),\n p(\"1. Coin Details: See number of selected coins via entries (below table)\"),\n p(\"2. Entity Top: See 10 most frequent entities for selected coins in bar diagram. See frequency of individual entities on vertical axis or by hovering over respective bar\"),\n p(\"3. Entity Details: Find all available descriptions of coins for obverse and reverse separately. Search for any term in the table to find coins with that specific characteristic via Search, e.g. to see coins showing a head, type 'Kopf'.\"),\n \n h3(\"How to export Selected Coins Data?\"),\n p(\"Go to the table click on the download button. You can choose between different formats.\"),\n h2(\"ENTITY EXPLORER\"),\n h3(\"How to Select Coins?\"),\n p(\"1. Select a dataset based on time period\"),\n p(\"2. Specify time period by moving sliders: this reflects final date in the range assigned to the coins\"),\n p(\"3. Specify the entity type you are interested in by selection from the drop-down. By default all entity types are selected.\"),\n p(\"Hint: The loading of all timeperiod (datasets) take some time.\"),\n h3(\"How to explore Selected Coins?\"),\n p(\"1. Entity analysis: See 15 most frequent entities for coins in selected time period. See frequency of individual entities on vertical axis or by hovering over respective bar\"),\n p(\"2. Entity development: See all entities in selected time period by default. Select 1 specific entities by double-click on the entity name(right next to the plot). Add entities to the selection by single-click on the entity name(right next to the plot). 
De-Select specific entities by single-click on a selected entity name. Select all entities by double-click on a de-selected entity name.\"),\n h2(\"COIN FINDER\"),\n p(\"Coin finder uses Machine Learning classifier to find similar coins based on the used UMAP dimension reduction. Right now, not 100% implemented. \")\n )))))\n } else {\n loginpage\n }\n \n \n })\n \n \n output$logoutbtn <- renderUI({\n req(USER$login)\n tags$li(a(icon(\"fa fa-sign-out\"), \"Logout\", \n href=\"javascript:window.location.reload(true)\"),\n class = \"dropdown\", \n style = \"background-color: #eee !important; border: 0;\n font-weight: bold; margin:5px; padding: 10px;\")\n })\n \n observe({ \n if (USER$login == FALSE) {\n if (!is.null(input$login)) {\n if (input$login > 0) {\n Username <- isolate(input$userName)\n Password <- isolate(input$passwd)\n if(length(which(credentials$username_id==Username))==1) { \n pasmatch <- credentials[\"passod\"][which(credentials$username_id==Username),]\n pasverify <- password_verify(pasmatch, Password)\n if(pasverify) {\n USER$login <- TRUE\n } else {\n shinyjs::toggle(id = \"nomatch\", anim = TRUE, time = 1, animType = \"fade\")\n shinyjs::delay(3000, shinyjs::toggle(id = \"nomatch\", anim = TRUE, time = 1, animType = \"fade\"))\n }\n } else {\n shinyjs::toggle(id = \"nomatch\", anim = TRUE, time = 1, animType = \"fade\")\n shinyjs::delay(3000, shinyjs::toggle(id = \"nomatch\", anim = TRUE, time = 1, animType = \"fade\"))\n }\n } \n }\n } \n })\n \n \n output$sidebarpanel <- renderUI({\n if (USER$login == TRUE ){\n sidebarMenu(\n menuItem(\"Coin Explorer\", tabName = \"explorer\", icon = icon(\"wpexplorer\")),\n menuItem(\"Entity Explorer\", tabName = \"entity\", icon = icon(\"image\")),\n menuItem(\"Coin finder\", tabName = \"finder\", icon = icon(\"search\")),\n menuItem(\"Help\", tabName = \"help\", icon = icon(\"question\"))\n \n #selectInput(\"cluster\", \"Select a cluster\", c(0,1,2,3), multiple=T, selected =c(0,1,2))\n )\n \n }})\n \n \n \n \n \n \n \n table <- reactiveValues(data=\"\", entity=\"\")\n current <- reactiveValues(avg_weight=\"\", std_weight=\"\", avg_enddate=\"\", std_enddate=\"\", avg_startdate=\"\", std_startdate=\"\", observations=\"\", avg_mindiam=\"\", std_mindiam=\"\", avg_maxdiam=\"\", std_maxdiam=\"\", avg_material=\"\", avg_denom=\"\", avg_material_pc=\"\", avg_denom_pc=\"\", cluster=\"dbscan\", cluster_name=\"dbscan_label\", selected_cluster=\"All\")\n source(\"data_loader.R\")\n \n get_models <- reactive({\n req(load_x_model())\n req(load_y_model())\n x <- load_x_model()\n y <- load_y_model()\n model <- list()\n if (req(input$dataset_finder)==\"<400BC\"){\n model$x <- x$bc400\n model$y <- y$bc400\n }else if (req(input$dataset_finder)==\"400-200BC\"){\n model$x <- x$bc200\n model$y <- y$bc200\n } else if(req(input$dataset_finder)==\"200-0BC\") {\n model$x <- x$bc0\n model$y <- y$bc0\n } else if(req(input$dataset_finder)==\"0-200AD\"){\n\n model$x <- x$ad0\n model$y <- y$ad0\n } else {\n model$x <- x$ad200\n model$y <- y$ad200\n \n }\n model\n }\n )\n get_cluster_entity <- reactive({\n req(load_entity_data())\n data <- load_entity_data()\n if (req(input$dataset)==\"<400BC\"){\n dataset <- data$bc400\n }else if (req(input$dataset)==\"400-200BC\"){\n dataset <- data$bc200\n } else if(req(input$dataset)==\"200-0BC\") {\n dataset <- data$bc0\n } else if(req(input$dataset)==\"0-200AD\"){\n dataset <- data$ad0\n } else {\n dataset <- data$ad200\n }\n dataset\n })\n get_cluster_entity_detail <- reactive({\n req(load_entity_detail_data())\n data <- 
load_entity_detail_data()\n if(req(input$dataset_entity)==\"All\"){\n dataset <- rbind(data$bc400,data$bc200 )\n dataset <- rbind(dataset, data$bc0)\n dataset <- rbind(dataset, data$ad0)\n dataset <- rbind(dataset,data$ad200 )\n }\n else if (req(input$dataset_entity)==\"<400BC\"){\n dataset <- data$bc400\n }else if (req(input$dataset_entity)==\"400-200BC\"){\n dataset <- data$bc200\n } else if(req(input$dataset_entity)==\"200-0BC\") {\n dataset <- data$bc0\n } else if(req(input$dataset_entity)==\"0-200AD\"){\n dataset <- data$ad0\n } else {\n dataset <- data$ad200\n }\n dataset\n })\n \n get_cluster_entity_detail_finder <- reactive({\n req(load_entity_detail_data())\n data <- load_entity_detail_data()\n if (req(input$dataset_finder)==\"<400BC\"){\n dataset <- data$bc400\n }else if (req(input$dataset_finder)==\"400-200BC\"){\n dataset <- data$bc200\n } else if(req(input$dataset_finder)==\"200-0BC\") {\n dataset <- data$bc0\n } else if(req(input$dataset_finder)==\"0-200AD\"){\n dataset <- data$ad0\n } else {\n dataset <- data$ad200\n }\n dataset\n })\n get_cluster_dataset <- reactive({\n req(load_all_data())\n data <- load_all_data()\n if (req(input$dataset)==\"<400BC\"){\n dataset <- data$bc400\n }else if (req(input$dataset)==\"400-200BC\"){\n dataset <- data$bc200\n } else if(req(input$dataset)==\"200-0BC\") {\n dataset <- data$bc0\n } else if(req(input$dataset)==\"0-200AD\"){\n dataset <- data$ad0\n } else {\n dataset <- data$ad200\n }\n \n #updateSelectInput(session, \"cluster\", choices=unique(dataset$label),selected=unique(dataset$label))\n dataset\n \n })\n get_cluster_dataset_finder <- reactive({\n req(load_all_data())\n data <- load_all_data()\n if (req(input$dataset_finder)==\"<400BC\"){\n dataset <- data$bc400\n }else if (req(input$dataset_finder)==\"400-200BC\"){\n dataset <- data$bc200\n } else if(req(input$dataset_finder)==\"200-0BC\") {\n dataset <- data$bc0\n } else if(req(input$dataset_finder)==\"0-200AD\"){\n dataset <- data$ad0\n } else {\n dataset <- data$ad200\n }\n \n #updateSelectInput(session, \"cluster\", choices=unique(dataset$label),selected=unique(dataset$label))\n dataset\n \n })\n \n filter_coins <- reactive({\n req(get_cluster_dataset())\n data <- get_cluster_dataset()\n #data <- data[data$label %in% input$cluste)r,]\n if (\"All\" %in% current$selected_cluster){\n return(data)\n }\n if (current$cluster==\"kmeans\"){\n data <- data[data$kmeans_label %in% current$selected_cluster,]\n } else if (current$cluster==\"dbscan\"){\n data <- data[data$dbscan_label %in% current$selected_cluster,]\n } else {\n data <- data[data$hierarchy_label %in% current$selected_cluster,]\n }\n return(data)\n })\n \n output$plot_explorer <- renderPlotly({\n req(filter_coins())\n data <- filter_coins()\n \n if(current$cluster==\"dbscan\"){\n color_var <- data$dbscan_label\n } else if (current$cluster==\"kmeans\"){\n color_var <- data$kmeans_label\n } else {\n color_var <- data$hierarchy_label\n }\n if (! 
\"All\" %in% current$selected_cluster){\n data <- data[color_var %in% current$selected_cluster,]\n }\n \n \n p <- ggplot(data, aes(x=x, y=y, color=color_var)) + geom_point(size=2)\n g <- ggplotly(p, mode = \"markers\", type = \"scatter\", source=\"mysource\", colors=\"turbo\")\n g <- g %>% event_register(\"plotly_relayout\") %>% event_register(\"plotly_selected\")\n g\n })\n \n \n get_selected_coins <- reactive({\n event.data <- event_data(\"plotly_relayout\", source = \"mysource\")\n event.selection <- event_data(\"plotly_selected\", source=\"mysource\")\n data <- list()\n\n if (!is.null(nrow(event.selection))){\n data$min_x <- min(event.selection$x)\n data$min_y <- min(event.selection$y)\n data$max_x <- max(event.selection$x)\n data$max_y <- max(event.selection$y)\n return(data)\n } else{\n data$min_x <- as.numeric(event.data[1])\n data$max_x <- as.numeric(event.data[2])\n data$min_y <- as.numeric(event.data[3])\n data$max_y <- as.numeric(event.data[4])\n return(data)\n }\n })\n\n output$explorer_table <- DT::renderDataTable(server=FALSE,{\n req(get_selected_coins())\n selected_coins <- get_selected_coins()\n req(filter_coins())\n data <- filter_coins()\n \n #rows <- nearPoints(data, input$plot_click)#, threshold = 10, maxpoints = 1)\n #event.data <- event_data(\"plotly_click\", source = \"mysource\")\n #browser()\n result <- data[data$x<=selected_coins$max_x & data$x>=selected_coins$min_x & data$y<=selected_coins$max_y & data$y>=selected_coins$min_y , ]\n #result <- data\n if (nrow(result)==0){\n result <- data\n }\n result <- result[c(current$cluster_name, \"coin\", \"maxdiam\", \"mindiam\", \"weight\", \"startdate\", \"enddate\", \"material\", \"mint\", \"denom\")]\n DT::datatable(result, extensions = 'Buttons', options = list(\n dom = 'Bfrtip',\n buttons = \n list('copy', list(\n extend = 'collection',\n buttons = c('csv', 'excel', 'pdf'),\n text = 'Download'\n ))))\n \n #browser()\n # if (dim(rows)[1]<1){\n # subset(data, select=-c(x,y))\n # } else{\n # subset(rows, select=-c(x,y))\n # }\n }\n )\n \n output$entity_table <- DT::renderDataTable(server=FALSE,{\n req(coin_explorer_get_filtered_entity_data())\n result <- coin_explorer_get_filtered_entity_data()\n result <- result[c(\"id_coin\", \"design_de\", \"side\", \"Entity\")]\n DT::datatable(result, extensions = 'Buttons', options = list(\n dom = 'Bfrtip',\n buttons = \n list('copy', list(\n extend = 'collection',\n buttons = c('csv', 'excel', 'pdf'),\n text = 'Download'\n ))))\n \n \n }\n )\n \n coin_explorer_get_filtered_data <- reactive({\n req(get_selected_coins())\n selected_coins <- get_selected_coins()\n req(filter_coins())\n data <- filter_coins()\n #browser()\n result <- data[data$x<=selected_coins$max_x & data$x>=selected_coins$min_x & data$y<=selected_coins$max_y & data$y>=selected_coins$min_y , ]\n if (nrow(result)==0){\n result <- data\n }\n if(current$cluster==\"dbscan\"){\n color_var <- result$dbscan_label\n } else if (current$cluster==\"kmeans\"){\n color_var <- result$kmeans_label\n } else {\n color_var <- result$hierarchy_label\n }\n if (! 
\"All\" %in% current$selected_cluster){\n result <- result[color_var %in% current$selected_cluster,]\n }\n result\n })\n output$mint_map <- renderPlotly({\n req(coin_explorer_get_filtered_data())\n result <- coin_explorer_get_filtered_data()\n \n agg_result <- result %>%\n group_by(mint) %>%\n summarise(n=n())\n top15_mints <- head(agg_result[order(agg_result$n, decreasing=T),],15)\n #browser()\n p <- ggplot(data=top15_mints, aes(x=reorder(mint, -n), y=n)) + geom_bar(stat=\"identity\") + xlab(\"Mints\")\n g <- ggplotly(p, type = \"bar\", source=\"entity_plot\")\n g\n })\n # output$mint_map <- renderLeaflet({\n # req(get_selected_coins())\n # selected_coins <- get_selected_coins()\n # req(filter_coins())\n # data <- filter_coins()\n # data <- data[c(\"mint\", \"lat\", \"lon\", \"x\", \"y\")]\n # if(is.null(selected_coins)) { return(NULL)}\n # result <- data[data$x<=selected_coins$max_x & data$x>=selected_coins$min_x & data$y<=selected_coins$max_y & data$y>=selected_coins$min_y , ]\n # mymap <- leaflet() %>% addTiles() \n # mymap %>% \n # addMarkers(data = result, lng = ~lon, lat = ~lat, label=~mint,\n # icon = list(\n # group=\"tools\",\n # iconSize = c(75, 75)\n # ))\n # \n # })\n \n get_aggregated_entity_detail <- reactive({\n req(get_cluster_entity_detail())\n #browser()\n data <- get_cluster_entity_detail()\n input_slider <- input$enddate\n start_filter <- input_slider[1]\n end_filter <- input_slider[2]\n #browser()\n data <- data[data$d_enddate >= start_filter & data$d_enddate <= end_filter,]\n #browser()\n if (input$entity_type!=\"All\"){\n data <- data[data$Label_Entity==toupper(input$entity_type),]\n }\n if (! input$entity_items %in% c(\"All\")){\n data <- data[data$Entity %in% input$entity_items,]\n }\n data\n })\n \n coin_explorer_get_filtered_entity_data <- reactive({\n \n req(get_selected_coins())\n selected_coins <- get_selected_coins()\n req(get_cluster_entity())\n data <- get_cluster_entity()\n #browser()\n if(is.null(selected_coins)) { return(NULL)}\n result <- data[data$x<=selected_coins$max_x & data$x>=selected_coins$min_x & data$y<=selected_coins$max_y & data$y>=selected_coins$min_y , ]\n if (nrow(result)==0){\n result <- data\n }\n if(current$cluster==\"dbscan\"){\n color_var <- result$dbscan_label\n } else if (current$cluster==\"kmeans\"){\n color_var <- result$kmeans_label\n } else {\n color_var <- result$hierarchy_label\n }\n if (! 
\"All\" %in% current$selected_cluster){\n result <- result[color_var %in% current$selected_cluster,]\n }\n result\n })\n output$plot_entity <- renderPlotly({\n req(coin_explorer_get_filtered_entity_data())\n result <- coin_explorer_get_filtered_entity_data()\n \n agg_result <- result %>%\n group_by(Entity) %>%\n summarise(n=n())\n top10_entity <- head(agg_result[order(agg_result$n, decreasing=T),],10)\n #browser()\n p <- ggplot(data=top10_entity, aes(x=reorder(Entity, -n), y=n)) + geom_bar(stat=\"identity\") + xlab(\"Entitys\")\n g <- ggplotly(p, type = \"bar\", source=\"entity_plot\")\n g\n })\n \n \n output$plot_entity_explorer <- renderPlotly({\n req(get_aggregated_entity_detail())\n data <- get_aggregated_entity_detail()\n \n agg_result <- data %>%\n group_by(Entity) %>%\n summarise(n=n())\n \n \n #browser()\n top15_entity <- head(agg_result[order(agg_result$n, decreasing=T),],15)\n #browser()\n p <- ggplot(data=top15_entity, aes(x=reorder(Entity, -n), y=n)) + geom_bar(stat=\"identity\") + xlab(\"Entitys\")\n g <- ggplotly(p, type = \"bar\", source=\"entity_plot\")\n g\n })\n \n output$plot_entity_timeseries <- renderPlotly({\n req(get_aggregated_entity_detail())\n #browser()\n data <- get_aggregated_entity_detail()\n agg_result <- data %>%\n group_by(Entity, d_enddate) %>%\n summarise(n=n())\n agg_result <- agg_result[order(agg_result$d_enddate),]\n #top15_entity <- head(agg_result[order(agg_result$n, d),],15)\n #browser()\n p <- ggplot(data=agg_result, aes(x=d_enddate, y=n)) + geom_line(aes(color = Entity, linetype = Entity)) + geom_point(aes(color = Entity, linetype = Entity)) + xlab(\"Timeperiod\")\n g <- ggplotly(p, type = \"line\", source=\"entity_timeseries\")\n g\n })\n \n observe({\n req(get_cluster_entity_detail())\n data <- get_cluster_entity_detail()\n end_choices <- unique(data$d_enddate)\n min <- min(end_choices)\n max <- max(end_choices)\n entity_items <- str_sort(unique(data$Entity) )\n entity_items <- append(entity_items, \"All\", after=0)\n updateSelectInput(session, \"entity_items\", choices=entity_items, selected=\"All\")\n updateSliderInput(session, \"enddate\", value=c(min, max), min=min, max=max, step=10 )\n #end_choices <- append(end_choices, \"all\", after=0)\n #updateSelectInput(session, \"startdate\", choices=end_choices)\n #updateSelectInput(session, \"enddate\", choices=end_choices)\n \n })\n \n observeEvent(input$dataset_finder,\n {\n req(get_cluster_dataset_finder())\n data <- get_cluster_dataset_finder()\n mint <- str_sort(unique(data$mint))\n material <- str_sort(unique(data$material))\n denom <- str_sort(unique(data$denom))\n updateSelectInput(session, \"mint\", choices=mint)\n updateSelectInput(session, \"material\", choices=material)\n updateSelectInput(session, \"denom\", choices=denom)\n })\n \n observeEvent(input$calculate,{\n req(get_models())\n req(get_cluster_dataset_finder())\n req(get_cluster_entity_detail_finder)\n weight <- input$weight\n startdate <- input$startdate\n enddate <- input$enddate_finder\n material <- input$material\n mint <- input$mint\n data <- get_cluster_dataset_finder()\n entity <- get_cluster_entity_detail_finder()\n geo <- data[data$mint==mint,]\n lat <- as.numeric(unique(geo$lat))\n lon <- as.numeric(unique(geo$lon))\n material <- ifelse(material==\"av\", 6, ifelse(material==\"ar\", 5, ifelse(material==\"cu\", 4, ifelse(material==\"ae\", 3, ifelse(material==\"el\", 2, ifelse(material==\"pb\", 1, 0))))))\n new_data <- data.frame(weight, startdate, enddate, lon, lat , material)\n colnames(new_data) <- c(\"weight\", 
\"startdate\", \"enddate\", \"lon\", \"lat\", \"material\")\n model <- get_models()\n x_new <- predict(model$x, new_data)\n y_new <- predict(model$y, new_data)\n print(x_new)\n print(y_new)\n data$dist <- sqrt((data$x-x_new)^2+(data$y-y_new)^2)\n entity$dist <- sqrt(((entity$x-x_new)^2+(entity$y-y_new)^2))\n closest_data <- data[data$dist<=min(data$dist)*1.5,]\n closest_entity <- entity[entity$dist<=min(entity$dist)*1.1,]\n closest_data <- closest_data[c(\"x\", \"y\",\"dbscan_label\", \"coin\", \"maxdiam\", \"mindiam\", \"weight\", \"startdate\", \"enddate\", \"material\", \"mint\", \"denom\", \"dist\")]\n closest_entity <- closest_entity[c(\"x\", \"y\",\"id_coin\", \"design_de\", \"side\" ,\"Entity\", \"dist\")]\n table$data <- closest_data\n table$entity <- closest_entity\n \n \n })\n \n \n output$result_table <- DT::renderDataTable({\n if (table$data==\"\"){ return(NULL)}\n \n DT::datatable(table$data)\n })\n \n output$result_entity_table <- DT::renderDataTable({\n if (table$entity==\"\"){ return(NULL)}\n \n DT::datatable(table$entity)\n })\n output$observations <- renderText({ paste0(\"Number of observations: \",as.character(current$observations)) })\n output$avg_weight <- renderText({ paste0(\"Average weight: \",as.character(current$avg_weight), \" Std: \", as.character(current$std_weight)) })\n output$avg_enddate <- renderText({ paste0(\"Average enddate: \", as.character(current$avg_enddate), \" Std: \", as.character(current$std_enddate)) })\n output$avg_startdate <- renderText({ paste0(\"Average startdate: \", as.character(current$avg_startdate), \" Std: \", as.character(current$std_startdate)) })\n output$avg_mindiam <- renderText({ paste0(\"Average mindiam: \", as.character(current$avg_mindiam), \" Std: \", as.character(current$std_mindiam)) })\n output$avg_maxdiam <- renderText({ paste0(\"Average maxdiam: \", as.character(current$avg_maxdiam), \" Std: \", as.character(current$std_maxdiam)) })\n output$avg_material <- renderText({ paste0(\"Top material: \", as.character(current$avg_material), \" \", as.character(current$avg_material_pc), \" %\") })\n output$avg_denom <- renderText({ paste0(\"Top denom: \", as.character(current$avg_denom), \" \", as.character(current$avg_denom_pc), \" %\") })\n observe({\n req(get_selected_coins())\n selected_coins <- get_selected_coins()\n req(filter_coins())\n data <- filter_coins()\n \n #rows <- nearPoints(data, input$plot_click)#, threshold = 10, maxpoints = 1)\n #event.data <- event_data(\"plotly_click\", source = \"mysource\")\n #browser()\n result <- data[data$x<=selected_coins$max_x & data$x>=selected_coins$min_x & data$y<=selected_coins$max_y & data$y>=selected_coins$min_y , ]\n #result <- data\n if (nrow(result)==0){\n result <- data\n }\n current$avg_weight <- round(mean(as.numeric(result$weight), na.rm=T),2)\n current$std_weight <- round(sd(as.numeric(result$weight), na.rm=T),2)\n current$avg_enddate <- round(mean(as.numeric(result$enddate), na.rm=T),2)\n current$std_enddate <- round(sd(as.numeric(result$enddate), na.rm=T),2)\n current$avg_startdate <- round(mean(as.numeric(result$startdate), na.rm=T),2)\n current$std_startdate <- round(sd(as.numeric(result$startdate), na.rm=T),2)\n current$observations <- nrow(result)\n current$avg_mindiam <- round(mean(as.numeric(result$mindiam), na.rm=T),2)\n current$std_mindiam <- round(sd(as.numeric(result$mindiam), na.rm=T),2)\n current$avg_maxdiam <- round(mean(as.numeric(result$maxdiam), na.rm=T),2)\n current$std_maxdiam <- round(sd(as.numeric(result$maxdiam), na.rm=T),2)\n current$avg_material 
<- names(sort(table(result$material),decreasing=TRUE)[1])\n    current$avg_material_pc <- round((as.numeric(sort(table(result$material),decreasing=TRUE)[1]) / nrow(result))*100,2)\n    current$avg_denom <- names(sort(table(result$denom),decreasing=TRUE)[1])\n    current$avg_denom_pc <- round((as.numeric(sort(table(result$denom),decreasing=TRUE)[1]) / nrow(result))*100,2)\n    \n    \n    \n    \n  })\n  \n  observeEvent(input$cluster_method,{\n  #observe({ \n    current$cluster <- input$cluster_method\n    current$cluster_name <- paste0(input$cluster_method, \"_label\")\n    \n    \n    \n    data <- filter_coins()\n    if(current$cluster==\"dbscan\"){\n      color_var <- data$dbscan_label\n    } else if (current$cluster==\"kmeans\"){\n      color_var <- data$kmeans_label\n    } else {\n      color_var <- data$hierarchy_label\n    }\n    selection <- append(c(\"All\"), unique(color_var))\n    updateSelectInput(session, \"cluster_filter\", choices=selection, selected = \"All\")\n    \n    #r_ggplot$plot_explorer <- ggplot(data, aes(x=x, y=y, color=color_var)) + geom_point(size=2)\n    \n  })\n  \n  observeEvent(input$cluster_filter,{\n    cluster_method <- current$cluster\n    current$selected_cluster <- input$cluster_filter\n  }\n  )\n  \n}\n\n  \n  \n\nshinyApp(ui, server)" }, { "alpha_fraction": 0.6498161554336548, "alphanum_fraction": 0.6829044222831726, "avg_line_length": 25.216867446899414, "blob_id": "40b0d0817315b91c72e2f455146f9ef20ef9bd86", "content_id": "647dbbc9719d20bee92432a56aa76d6af7cd6ff5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2176, "license_type": "no_license", "max_line_length": 157, "num_lines": 83, "path": "/experiments/vanilla_python/explorative/regressions.py", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 30 16:32:22 2021\nThis script was created to get a better understanding of the relationships between individual features using OLS and logistic regression models. 
\n@author: freddy, annalena\n\"\"\"\nimport pandas as pd\nimport statsmodels.api as sm\n\n\ndata = pd.read_csv('data\\\\analysis_dataset.csv', sep=\";\")\ndata = data.dropna()\n\ndel data[\"Unnamed: 0\"]\ndel data[\"coin\"]\ndel data[\"axis\"]\ndel data[\"mindiam\"]\ndel data[\"findspot\"]\ndel data[\"enddate\"]\ny = data[\"startdate\"]\nX = data.drop(\"startdate\", axis=1)\nX = sm.add_constant(X)\nest = sm.OLS(y, X).fit()\nest.summary() # r^2 0.616\n\nweight = data[\"weight\"]\nweight = sm.add_constant(weight)\nsimple1 = sm.OLS(y, weight).fit()\nsimple1.summary() #r^2 0.15\nmaxdiam = data[\"maxdiam\"]\nmaxdiam = sm.add_constant(maxdiam)\nsimple2 = sm.OLS(y, maxdiam).fit()\nsimple2.summary() #r^2 0.33\n\nweightdiam = data[[\"weight\", \"maxdiam\"]]\nweightdiam = sm.add_constant(weightdiam)\nsimple3 = sm.OLS(y, weightdiam).fit()\nsimple3.summary() #r^2 0.41\n\nimport statsmodels.formula.api as smf\nweightdiam[\"y\"] = y\nsimple4 = smf.ols(formula=\"y ~ weight*maxdiam\", data=weightdiam).fit()\nsimple4.summary() #r^2 0.42\n\n#logit\ndata_not_dummy = data[[\"weight\", \"maxdiam\", \"startdate\", \"denom_cat\", \"mint_cat\", \"collection_cat\"]]\ny = data[\"material_ ae \"]\nX = data_not_dummy\nX = sm.add_constant(X)\nlogit1 = sm.Logit(y, X).fit()\nlogit1.summary() #r^2 0.68\n\ny = data[\"material_ ar \"]\nX = data_not_dummy\nX = sm.add_constant(X)\nlogit2 = sm.Logit(y, X).fit()\nlogit2.summary() #r^2 0.71\n\ny = data[\"material_ av \"]\nX = data_not_dummy\nX = sm.add_constant(X)\nlogit3 = sm.Logit(y, X).fit()\nlogit3.summary() #r^2: 0.32\n\ny = data[\"material_ cu \"]\nX = data_not_dummy\nX = sm.add_constant(X)\nlogit4 = sm.Logit(y, X).fit()\nlogit4.summary() #r^2 : 0.21\n#always maxdiam and denom_cat not significant\n\n# el and pb do not have enough observations\ny = data[\"material_ el \"]\nX = data_not_dummy\nX = sm.add_constant(X)\nlogit5 = sm.Logit(y, X).fit()\nlogit5.summary() #Perfect separation detected\n\ny = data[\"material_ pb \"]\nX = data_not_dummy\nX = sm.add_constant(X)\nlogit6 = sm.Logit(y, X).fit()\nlogit6.summary() # perfect separation detected\n" }, { "alpha_fraction": 0.6213254332542419, "alphanum_fraction": 0.6364585161209106, "avg_line_length": 36.575164794921875, "blob_id": "29d68c71e32149da87afeebb7f9db1771c83bb2d", "content_id": "90c737aea9ae38d4ae11e26b56d36f227d4d447a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5749, "license_type": "no_license", "max_line_length": 310, "num_lines": 153, "path": "/data_prep/dimension_reduction.py", "repo_name": "codehering/data_challanges_2021", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 6 19:40:03 2021\nThis file contains the UMAP dimension reduction and data preparation steps. Input from SPARQL query. 
Output: a separate csv for each time period.\n@author: freddy, annalena\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport umap.umap_ as umap\nimport umap.utils\nimport umap.plot\nimport json\nimport requests\nimport random\nfrom datetime import datetime\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\n\n\ndef prepare_data(data):\n    data[\"weight\"] = [float(str(x).replace(\",\",\".\")) for x in data[\"weight\"]]\n    data[\"mint\"] = [x.replace(\"http://nomisma.org/id/\", \"\") for x in data[\"mint\"]]\n    data[\"coin\"] = [x.split(\"#coins?id=\")[1] for x in data[\"coin\"]]\n    data[\"material\"] = [x.replace(\"http://nomisma.org/id/\", \"\") for x in data[\"material\"]]\n    data[\"denom\"] = [x.replace(\"http://nomisma.org/id/\", \"\") for x in data[\"denom\"]]\n    data[\"findspot\"] = [str(x).replace(\"file:///C:/Users/karsten/Documents/uni/ProgrammeWorkspace/D2RServer/d2rq-0.8.1_CNT/dump_2021_03_16.rdf#\", \"\") \\\n                        for x in data[\"findsport\"]]\n    data[\"authority\"] = [x.replace(\" http://nomisma.org/id/\", \"\") for x in data[\"authority\"]]\n    categorial_vars = [\"material\", \"denom\", \"mint\", \"collection\", \"weightstand_engl\", \"findsport\", \"authority\", \"peculiarities_engl\" ]\n    \n    for var in categorial_vars:\n        data[var] = data[var].astype('category')\n    \n    try:\n        data['weight'] = pd.to_numeric(data['weight'],errors='coerce')\n        try:\n            data['maxdiam'] = pd.to_numeric(data['maxdiam'],errors='coerce')\n            try: \n                data['mindiam'] = pd.to_numeric(data['mindiam'],errors='coerce')\n                try:\n                    data['weight'].astype(float)\n                except:\n                    pass\n            except:\n                pass\n        except:\n            pass\n    except:\n        pass\n    \n    return data\n\n# load data from query\ndata = pd.read_csv(\"C:\\\\Users\\\\fredi\\\\Desktop\\\\Uni\\\\Data Challanges\\\\CN\\\\data\\\\queryResults_semikolon.csv\", sep=\";\")\n# do data preparation\nprepared_data = prepare_data(data)\n# select specific variables\npreselection = [\"coin\", \"weight\", \"startdate\", \"enddate\", \"denom\", \"mint\", \"material\"]\nselected_data = prepared_data[preselection]\n\n# get geo coordinates for each mint\nselected_data[\"mint\"] = [x.replace(\" \", \"\") for x in selected_data[\"mint\"]]\nmints = selected_data[\"mint\"].unique().tolist()\nmint_list = list()\nfor mint in mints:\n    r = requests.get(f\"http://nomisma.org/apis/getMints?id={mint}\")\n    text = r.text\n    \n    try:\n        data_mint = json.loads(text)\n        data_mint = data_mint[\"features\"][0][\"geometry\"][\"coordinates\"]\n        mint_list.append({\"mint\": mint, \"lon\": data_mint[0], \"lat\": data_mint[1]})\n    except:\n        print(mint)\n    \n# no geocoords for:\n#eleutherion\n#olbia_city\nmint_geo = pd.DataFrame(mint_list)\n\n# merge geo coordinates with selected data\nselected_data = pd.merge(selected_data, mint_geo, how=\"left\", on=\"mint\")\n\n\n#weighting of the material variables:\n#bronze: ae\n#silver: ar\n#electrum (gold-silver alloy): el\n#gold: av\n#copper: cu\n#lead: pb\n#hierarchy:\n# av > ar > cu > ae > el > pb\n# 6 > 5 > 4 > 3 > 2 > 1\nselected_data[\"material\"] = [x.replace(\" \", \"\") for x in selected_data[\"material\"]]\nselected_data[\"material\"] = np.where(selected_data[\"material\"]==\"av\", 6, np.where(selected_data[\"material\"]==\"ar\", 5, np.where(selected_data[\"material\"]==\"cu\",4, np.where(selected_data[\"material\"]==\"ae\", 3, np.where(selected_data[\"material\"]==\"el\", 2, np.where(selected_data[\"material\"]==\"pb\", 1, np.nan))))) )\nselected_data = selected_data[[\"coin\", \"weight\", 
\"startdate\", \"enddate\", \"lon\", \"lat\", \"material\"]]\nselected_data = selected_data.dropna()\n\n\n#split up the datasets into differnt time periods\ndata_dict = dict()\ndata_dict[\"data_400bc\"] = selected_data[selected_data[\"enddate\"] <= -400]\ndata_dict[\"data_200bc\"] = selected_data[(selected_data[\"enddate\"]>-400) & (selected_data[\"enddate\"]<=-200)]\ndata_dict[\"data_0bc\"] = selected_data[(selected_data[\"enddate\"]>-200) & (selected_data[\"enddate\"]<=0)]\ndata_dict[\"data_0ad\"] = selected_data[(selected_data[\"enddate\"]>0) & (selected_data[\"enddate\"]<=200)]\ndata_dict[\"data_200ad\"] = selected_data[selected_data[\"enddate\"]>200]\nfor key in data_dict.keys():\n print(data_dict[key].shape)\n\ncoin_dict = dict()\nfor key in data_dict.keys():\n coin_dict[key] = data_dict[key][\"coin\"].to_list()\n del data_dict[key][\"coin\"]\n\n# umap dimension transformation\n\n# cross validation calculates the average dimension from UMAP transformation (stochastic process)\ndef cross_validation(d, k=40):\n seeds = random.sample(range(0, 100000), k)\n embedding_x = 0\n embedding_y = 0\n for seed in seeds:\n r = umap.UMAP(random_state=seed)\n r.fit(d)\n embedding = r.transform(d)\n embedding_x += embedding[:, 0]\n embedding_y += embedding[:, 1]\n embedding_x = embedding_x / k\n embedding_y = embedding_y / k\n return embedding_x, embedding_y\n\numap_results = dict()\n# run umap\nfor key in data_dict.keys():\n print(key)\n tmp_data = StandardScaler().fit_transform(data_dict[key])\n x, y = cross_validation(tmp_data, 20)\n plt.scatter(x,y)\n plt.gca().set_aspect('equal', 'datalim')\n plt.title(f\"{key}\")\n plt.show()\n umap_results[key] = pd.DataFrame({\"x\": x, \"y\": y})\n\n# save output\nfor key in umap_results.keys():\n umap_results[key][\"coin\"] = coin_dict[key]\nfor key in umap_results.keys():\n umap_results[key].to_csv(f\"../timeperiod/new/{key}.csv\", index=False)\nfor key in data_dict.keys():\n data_dict[key].to_csv(f\"../timeperiod/new/raw/{key}.csv\", index=False)\n" } ]
26
zaher-abb/thingiverse_scraper
https://github.com/zaher-abb/thingiverse_scraper
cfe9f6b77bdaf52a1d57c57f8981ff097d331aa3
0c70c8b74b654e3488a3c57ef8ccd72509cd96b4
1738d41f6a4dbf32ee2fb3fe3ed4da9048ba6503
refs/heads/master
2023-06-03T17:37:59.811802
2021-06-16T15:08:52
2021-06-16T15:08:52
363,144,706
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.5646243691444397, "alphanum_fraction": 0.5799057483673096, "avg_line_length": 35.374027252197266, "blob_id": "87706164f918b8c019ff2224c7ad48c738385eca", "content_id": "4d41d75c71217b0efcd42f9f63f5abb0a71ecc60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14004, "license_type": "no_license", "max_line_length": 214, "num_lines": 385, "path": "/main.py", "repo_name": "zaher-abb/thingiverse_scraper", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom time import sleep\nimport pandas as pd\nimport csv\nfrom selenium.webdriver.common.keys import Keys\nimport json\n\n\nfrom itertools import zip_longest\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\n\nfrom selenium.webdriver.support import expected_conditions as EC, wait\n\n# Chrom Driver\nfrom selenium.webdriver.support.wait import WebDriverWait\n\ndriver = webdriver.Chrome(\"D:\\ChromDriver\\chromedriver_win32\\chromedriver.exe\")\n\nurl_list = []\n\n\n\n# collect proudcts urls\n# # # Home Page\n# driver.get('https://www.thingiverse.com/')\n# sleep(3)\n# # chose the 3d printing\n# categorySort__dropdown = driver.find_element_by_xpath(\n# \"//button[contains(@class,'CategorySort__dropdownButton--gpHIi Dropdown__dropdownButton--1iEp1')]\").click()\n# categorySort__dropdown_select_3d_Printing = driver.find_element_by_xpath(\"//span[text()='3D Printing']\").click()\n#\n# sleep(9)\n# driver.find_element_by_xpath(\"//button[contains(@class,'Sort__dropdownButton--1myG8 Dropdown__dropdownButton--1iEp1')]\").click()\n#\n#\n# sleep(3)\n#\n#\n# url_list=[]\n# condition = True\n# testTemp = 0\n# temp=1\n# while condition:\n#\n# result = driver.find_elements_by_class_name('ThingCardBody__cardBodyWrapper--ba5pu')\n# # temp += 1\n# # print(temp)\n# for i in range(len(result)):\n# try:\n# # print(i)\n# url_list.append(result[i].get_property('href'))\n# print(result[i].get_property('href'))\n#\n# except Exception as e:\n# print(e)\n# try:\n# driver.find_element_by_xpath(\"//div[@class='Pagination__button--2X-2z Pagination__more--24exV']\").click()\n#\n# # testTemp+=1\n# # if testTemp >=1 :\n# # break\n# sleep(5)\n# except Exception as e:\n# condition = False\n# print(e)\n#\n# def scroll_down(driver):\n# global count\n# iter = 0\n# while 1:\n# scroll_top_num = str(iter * 1000)\n# iter += 1\n# driver.execute_script(\"window.scrollTo(0,document.body.scrollHeight)\")\n# try:\n# sleep(3)\n# WebDriverWait(driver, 30).until(check_difference_in_count)\n# except Exception as e:\n# print(e)\n# count = 0\n# break\n#\n# def check_difference_in_count(driver):\n# global count\n#\n# new_count = len(driver.find_elements_by_class_name('ThingCardBody__cardBodyWrapper--ba5pu'))\n#\n# if count != new_count:\n# count = new_count\n# return True\n# else:\n# return False\n\n# save all products urls from 3d printing in csv file\nwith open('3d_Designs2.csv','rt')as f:\n data = csv.reader(f)\n for row in data:\n url_list+=row\nprint(len(url_list))\n\ndf2=pd.DataFrame(url_list)\ndf2.to_csv('Products_urls.csv', index=False, header=False)\nusers_url_who_writes_comments = []\nlist_of_products = []\nfollowers_urls_list = []\nuser_Data = []\ndesigns_Data = []\ncount=0\nfor url in list(url_list[500:1000]) :\n\n try:\n\n count+=1\n print('count in ')\n print(count)\n driver.get(url)\n sleep(5)\n # name\n product_name = driver.find_element_by_class_name('ThingPage__modelName--3CMsV').text\n print(product_name)\n # Summary\n 
summary_of_product = driver.find_element_by_xpath(\"//div[@class='ThingPage__description--14TtH']//p[1]\").text\n created_at = driver.find_element_by_xpath(\n '//*[@id=\"react-app\"]/div/div/div/div[5]/div[1]/div/div[1]/div/div[2]').text\n created_at = created_at.split(\" \", 2)[2:][0]\n\n\n\n print_Settings=x=driver.find_elements_by_class_name('ThingPage__preHistory--312bi')\n\n words = [i.text for i in print_Settings]\n\n # print Settings\n print_Settings_string = ''.join(words)\n\n print_Settings_string=print_Settings_string.replace('\\n',' ')\n\n # owner\n owner_profile_url = driver.find_element_by_xpath(\n '//*[@id=\"react-app\"]/div/div/div/div[5]/div[1]/div/div[1]/div/div[2]/a').get_attribute('href')\n print('commenst')\n # get owner name\n owner_name= driver.find_element_by_xpath(\n '//*[@id=\"react-app\"]/div/div/div/div[5]/div[1]/div/div[1]/div/div[2]/a').text\n comment_url= str(url) + '/comments'\n print(comment_url)\n driver.get(comment_url)\n\n\n\n\n comments = []\n temp2 = 0\n sleep(4)\n condition = True\n\n # get throw all comments [View More Comments]\n while condition:\n try:\n driver.find_element_by_xpath(\"//button[text()='View More Comments']\").click()\n sleep(6)\n except Exception as e:\n condition = False\n sleep(4)\n # get the whole list of comments\n list_of_comments = driver.find_elements_by_class_name('ThingCommentsList__commentContainer--EjmOU')\n\n for c in list_of_comments:\n try:\n comment = c.find_element_by_class_name('ThingComment__commentBody--2xT45').text\n temp2 += 1\n user = c.find_element_by_xpath(\n \"(//div[@class='ThingComment__headerWrapper--3KNll'])[{}]\".format(temp2)).text.split(\"\\n\", 2)\n user_name = user[0]\n wrote_at = user[1]\n comments.append((comment, user_name, wrote_at))\n except Exception as e:\n pass\n print(' out of comments ')\n sleep(3)\n\n # create list of users url who wrote a comments on a product\n for user_comments in driver.find_elements_by_class_name('ThingComment__modelName--Vqvbz'):\n users_url_who_writes_comments.append(user_comments.get_property('href'))\n print(' out of comments 2')\n sleep(2)\n temp_url = str(str(url) + '/makes')\n print(temp_url)\n driver.get(temp_url)\n sleep(4)\n\n\n # makes_Number\n makes_num = int(driver.find_element_by_xpath(\"(//div[@class='MetricButton__tabButton--2rvo1 MetricButton__selected--BGAr0'])/div[1]\").text)\n print('makes')\n\n\n print(makes_num)\n\n sleep(4)\n\n # scroll down\n\n #makes\n while True:\n try:\n htmlelement = driver.find_element_by_tag_name('html')\n htmlelement.send_keys(Keys.END)\n sleep(4)\n htmlelement.send_keys(Keys.END)\n print(makes_num/10)\n if makes_num < 20 :\n makes_num= 100\n for num in range(int(makes_num / 10)):\n htmlelement.send_keys(Keys.END)\n sleep(2)\n htmlelement.send_keys(Keys.END)\n sleep(2)\n htmlelement.send_keys(Keys.END)\n sleep(1)\n all_makes_temp = driver.find_elements_by_class_name(\"ThingCardBody__cardBodyWrapper--ba5pu\")\n if(len(all_makes_temp) < 18 ):\n break\n if(len(all_makes_temp)==makes_num):\n break\n sleep(5)\n break\n\n\n except TimeoutException:\n print('Exception makes ')\n break # not more posts were loaded - exit the loop\n\n # makes\n all_makes_url = driver.find_elements_by_class_name(\"ThingCardBody__cardBodyWrapper--ba5pu\")\n print(\"all makes \")\n print(len(all_makes_url))\n print('test1')\n all_makes_user=driver.find_elements_by_class_name(\"ThingCardHeader__avatarWrapper--1Jliv\")\n print('test2')\n all_makes_created_at=driver.find_elements_by_class_name(\"ThingCardHeader__cardCreatedAfter--3xS2o\")\n 
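        # the three element lists above are paired positionally below; zip_longest pads the shorter lists with None\n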
print('test3')\n sleep(2)\n all_makes=[]\n for x , y, z in list(zip_longest(all_makes_user, all_makes_url, all_makes_created_at)) :\n try:\n\n mixed_by_url=x.get_attribute('href')\n mixed_by = str(mixed_by_url).rsplit('/',1)[1]\n\n make_created_at=z.text\n make_product_url = y.get_attribute('href')\n\n all_makes.append((owner_name, mixed_by,make_product_url,make_created_at))\n except IndexError as e:\n print(e)\n pass\n\n # print(all_makes)\n print('test4')\n\n sleep(2)\n # Remix_Number\n remixes_url=str(str(url) + '/remixes')\n print(remixes_url)\n driver.get(remixes_url)\n sleep(4)\n print('remix')\n\n\n test_temp=str(driver.find_element_by_xpath(\"//div[@class='MetricButton__tabButton--2rvo1 MetricButton__selected--BGAr0']/div[1]\").text)\n if(test_temp==''):\n print('remix number')\n remixes_num=int(driver.find_element_by_xpath(\"//div[@class='MetricButton__tabButton--2rvo1'][5]/div[1]\").text)\n print(remixes_num)\n else:\n remixes_num = int(driver.find_element_by_xpath(\"//div[@class='MetricButton__tabButton--2rvo1 MetricButton__selected--BGAr0']/div[1]\").text)\n\n print('remixes number')\n\n # remakes\n print(remixes_num)\n\n\n\n sleep(4)\n while True:\n try:\n htmlelement = driver.find_element_by_tag_name('html')\n htmlelement.send_keys(Keys.END)\n sleep(4)\n htmlelement.send_keys(Keys.END)\n print(remixes_num / 10)\n if remixes_num < 20:\n remixes_num = 100\n for num in range(int(remixes_num / 10)):\n htmlelement.send_keys(Keys.END)\n all_remixes_temp = driver.find_elements_by_class_name(\"ThingCardBody__cardBodyWrapper--ba5pu\")\n if (len(all_remixes_temp) < 18):\n break\n if (len(all_remixes_temp) == remixes_num):\n break\n sleep(5)\n break\n\n except TimeoutException:\n print('Exception remixes ')\n pass # not more posts were loaded - exit the loop\n\n # makes\n all_remixes_url = driver.find_elements_by_class_name(\"ThingCardBody__cardBodyWrapper--ba5pu\")\n print(\"all remixes \")\n print(len(all_remixes_url))\n all_remixes_user = driver.find_elements_by_class_name(\"ThingCardHeader__avatarWrapper--1Jliv\")\n all_remixes_created_at = driver.find_elements_by_class_name(\"ThingCardHeader__cardCreatedAfter--3xS2o\")\n sleep(2)\n all_remixes = []\n\n for x1, y1, z1 in list(zip_longest(all_remixes_user, all_remixes_url, all_remixes_created_at)):\n try:\n\n remixes_by_url = x1.get_attribute('href')\n remixes_by = str(remixes_by_url).rsplit('/', 1)[1]\n remixes_created_at = z1.text\n remixes_product_url = y1.get_attribute('href')\n all_remixes.append((owner_name, remixes_by, remixes_product_url, remixes_created_at))\n except Exception as e:\n print(e)\n pass\n\n print(all_remixes)\n\n sleep(2)\n print('test5')\n # get into the owner Profile\n driver.get(owner_profile_url)\n sleep(2)\n number_of_created_products=int(driver.find_element_by_xpath(\"//div[@id='react-app']/div[1]/div[1]/div[1]/div[4]/div[2]/div[1]/div[1]/div[2]/div[1]\").text)\n print('test6')\n sleep(2)\n\n # get list of urls of followers\n get_followers_url = driver.find_element_by_xpath(\n '//*[@id=\"react-app\"]/div/div/div/div[4]/div[1]/div[1]/div[3]/a[1]').get_attribute('href')\n sleep(1)\n print('test7')\n driver.get(get_followers_url)\n sleep(3)\n get_all_followers_urls = driver.find_elements_by_class_name('user-header')\n print('test8')\n for follower_url in get_all_followers_urls :\n followers_urls_list.append(follower_url.get_attribute('href'))\n\n\n # TODO : get user likes (products) from his Profile\n # # users_likes=driver.find_elements_by_class_name('ThingCardBody__cardBodyWrapper--ba5pu')\n #\n # for i in 
users_likes :\n # users_likes.append(i.get_attribute('href'))\n\n list_of_products.append((product_name,created_at,summary_of_product,print_Settings_string,owner_name,\n owner_profile_url,number_of_created_products,users_url_who_writes_comments,\n followers_urls_list,len(followers_urls_list),len(all_makes),all_makes,len(all_remixes),all_remixes))\n\n user_Data.append((owner_name,owner_profile_url,number_of_created_products,users_url_who_writes_comments,followers_urls_list,len(followers_urls_list)))\n\n designs_Data.append((product_name,created_at,summary_of_product,print_Settings_string,comments,len(all_makes),all_makes,len(all_remixes),all_remixes))\n except Exception as e :\n print(e)\n pass\n\nall_data_frame = pd.DataFrame(data=list_of_products,columns=['Product_name','created_at','summary','print_Settings','owner_name','owner_profile_url',\n 'number_of_created_products','users_Profile_who_writes_comments','followers_profile_urls','followers_count','number_of_makes','makes','number_of_remixes','remixes'])\n\n\n\nusers_data_frame=pd.DataFrame(data=user_Data,columns=['owner_name','owner_profile_url',\n 'number_of_created_products','users_Profile_who_writes_comments','followers','followers_count'])\n\ndesigns_Data_frame=pd.DataFrame(data=designs_Data,columns=['Product_name','created_at','summary','print_Settings','comments','number_of_makes','makes','number_of_remixes','remixes'])\n\n\ndesigns_Data_frame.to_json('designs_Data_500_1000.json', orient='index',default_handler=str)\nusers_data_frame.to_json('users_Data_500_1000.json', orient='index',default_handler=str)\nall_data_frame.to_json('all_data_500_1000.json', orient='index',default_handler=str)\n" } ]
1
AhmadIssaAlaa/SequentialHearstPatterns
https://github.com/AhmadIssaAlaa/SequentialHearstPatterns
1e94f97c9058a9783b44389f7dd821b2329c7044
89b45f09f95d2d96876f57a4c2f7fc92368a59cc
bf3eb0aee4b0cdd48d1c0bf210eb7cf146498bce
refs/heads/master
2020-04-19T16:47:00.049427
2019-02-06T17:05:25
2019-02-06T17:05:25
168,314,615
2
1
null
null
null
null
null
[ { "alpha_fraction": 0.490578293800354, "alphanum_fraction": 0.49187785387039185, "avg_line_length": 29.4489803314209, "blob_id": "e00e9a16f71001ff26c6172ba11c44a52226a53d", "content_id": "e8b1885c64caff40ed9fa24d17364c848bd7575b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1539, "license_type": "no_license", "max_line_length": 192, "num_lines": 49, "path": "/common/parsed_sentence.py", "repo_name": "AhmadIssaAlaa/SequentialHearstPatterns", "src_encoding": "UTF-8", "text": "class parsed_sentence:\r\n def __init__(self):\r\n self.words = []\r\n self.NPs = []\r\n\r\n def add_word(self, w, l, pos, i, par, parent_index, rel , ty):\r\n word = parsed_word(w, l, pos, i, par, parent_index, rel, ty)\r\n self.words.append(word)\r\n\r\n def add_NP(self, np, root, ri, start, end):\r\n np2 = noun_phrase(np, root, ri, start, end)\r\n self.NPs.append(np2)\r\n\r\n def __repr__(self):\r\n return str(self)\r\n\r\n def __str__(self):\r\n return \" \".join([word.word for word in self.words])\r\n\r\nclass parsed_word:\r\n def __init__(self, w, l, pos, i, par, parent_index, rel, ty):\r\n self.word = w\r\n self.lemma = l\r\n self.pos = pos\r\n self.index = i\r\n self.dep_rel = rel\r\n self.parent = par\r\n self.parent_index = parent_index\r\n self.type = ty\r\n\r\n def __repr__(self):\r\n return str(self)\r\n\r\n def __str__(self):\r\n return \"(\" + self.word + \", \" + self.lemma + \", \" + self.pos + \", \" + str(self.index) + \", \" + self.parent + \", \" + str(self.parent_index) + \", \" + self.dep_rel+ \", \" + self.type + \")\"\r\n\r\nclass noun_phrase:\r\n def __init__(self, np, root, ri, start, end):\r\n self.text = np\r\n self.root = root\r\n self.root_index = ri\r\n self.start = start\r\n self.end = end\r\n\r\n def __repr__(self):\r\n return str(self)\r\n\r\n def __str__(self):\r\n return \"(\" + self.text + \", \" + self.root + \", \" + str(self.root_index) + \", \" + str(self.start) + \", \" + str(self.end) + \")\"" }, { "alpha_fraction": 0.7418655157089233, "alphanum_fraction": 0.7700650691986084, "avg_line_length": 56.625, "blob_id": "1ac3054cac42cc56d91f80ee1f24df1e1e4589b4", "content_id": "94a67d7c0e8d561d1697d83d693b95891cdc512e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 461, "license_type": "no_license", "max_line_length": 250, "num_lines": 8, "path": "/corpus_preprocessing/readme.md", "repo_name": "AhmadIssaAlaa/SequentialHearstPatterns", "src_encoding": "UTF-8", "text": "main.java is a java code to preprocess corpus.\n\nTo apply this code: download and configure the Stanford parser (Standord CoreNLP 3.7.0).\n\nlink: https://stanfordnlp.github.io/CoreNLP/download.html\n\n# Reference:\nManning, Christopher D., Mihai Surdeanu, John Bauer, Jenny Finkel, Steven J. Bethard, and David McClosky. 2014. The Stanford CoreNLP Natural Language Processing Toolkit In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics: System Demonstrations, pp. 
55-60.\n" }, { "alpha_fraction": 0.5461389422416687, "alphanum_fraction": 0.552484393119812, "avg_line_length": 44.71356964111328, "blob_id": "93cd2cc2107a7efd4824249355acdf6a609f95f7", "content_id": "1c729f8a6668cc747f43c47baba09a92eb2af686", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9298, "license_type": "no_license", "max_line_length": 139, "num_lines": 199, "path": "/dependency_Hearst_patterns/DHP.py", "repo_name": "AhmadIssaAlaa/SequentialHearstPatterns", "src_encoding": "UTF-8", "text": "# The dependency patterns:\r\n# (\"nsubj(hyperHead, hypoHead), cop(hyperHead, was|were|is|are)\"),\r\n# (\"case(hypoHead, such), mwe(such, as), nmod:such_as(hyperHead, hypoHead)\"),\r\n# (\"case(hypoHead, including), nmod:including(hyperHead, hypoHead)\"),\r\n# (\"amod(hyperHead, such), case(hypoHead, as), nmod:as(hyperHead, hypoHead)\"),\r\n# (\"cc(hypoHead, and/or), amod(hyperHead, other), conj:and/or(hypoHead, hyperHead)\"),\r\n# (\"advmod(hyperHead, especially), dep(hyperHead, hypoHead)\")\r\n\r\nfrom common import HyperHypoCouple as HH\r\nimport common.core_functions as cf\r\n\r\ndef get_NP(NPs, index):\r\n for np in NPs:\r\n if int(index) in range(int(np.start), int(np.end) + 1):\r\n return np.text\r\n return \"\"\r\n\r\ndef get_couples(parsed_sentence, hyper_index, hypo_index):\r\n hyper_np = cf.remove_first_occurrences_stopwords(get_NP(parsed_sentence.NPs, hyper_index))\r\n hypo_np = cf.remove_first_occurrences_stopwords(get_NP(parsed_sentence.NPs, hypo_index))\r\n couples = []\r\n if hyper_np != \"\" and hypo_np != \"\" and hypo_np != hyper_np:\r\n hh = HH.HHCouple(hypo_np, hyper_np)\r\n couples.append(hh)\r\n parsed_words = parsed_sentence.words\r\n for i in range(int(hypo_index) + 1, len(parsed_words)):\r\n parsed_word = parsed_words[i]\r\n if str(parsed_word.dep_rel).__contains__(\"conj\") and parsed_word.parent_index == hypo_index:\r\n new_hypo_index = parsed_word.index\r\n new_hypo_np = get_NP(parsed_sentence.NPs, new_hypo_index)\r\n if hyper_np != \"\" and hypo_np != \"\" and new_hypo_np != hyper_np:\r\n new_hh = HH.HHCouple(new_hypo_np, hyper_np)\r\n couples.append(new_hh)\r\n return couples\r\n\r\n\r\ndef such_A_as_B(parsed_sentence):\r\n parsed_words = parsed_sentence.words\r\n for i in range(len(parsed_words)):\r\n parsed_word = parsed_words[i] #(\"amod(hyperHead, such), case(hypoHead, as), nmod:as(hyperHead, hypoHead)\"),\r\n if str(parsed_word.dep_rel).__contains__(\"nmod:as\"):\r\n hypo_index = parsed_word.index\r\n hyper_index = parsed_word.parent_index\r\n flag1 = False\r\n flag2 = False\r\n for j in range(i - 1, max(-1, i-10), -1):\r\n pre_word = parsed_words[j]\r\n if str(pre_word.dep_rel).__contains__(\"case\") and pre_word.word == \"as\" and pre_word.parent_index == hypo_index:\r\n flag1 = True\r\n elif str(pre_word.dep_rel).__contains__(\"amod\") and pre_word.word == \"such\" and pre_word.parent_index == hyper_index:\r\n flag2 = True\r\n if flag1 and flag2:\r\n couples = get_couples(parsed_sentence, hyper_index, hypo_index)\r\n if len(couples) > 0:\r\n return True, couples\r\n return False, []\r\n\r\ndef A_is_a_B(parsed_sentence):\r\n vtb = [\"is\", \"are\", \"was\", \"were\"]\r\n parsed_words = parsed_sentence.words\r\n for i in range(len(parsed_words)):\r\n parsed_word = parsed_words[i] #(\"nsubj(hyperHead, hypoHead), cop(hyperHead, was|were|is|are)\"),\r\n if str(parsed_word.dep_rel).__contains__(\"nsubj\"):\r\n hypo_index = parsed_word.index\r\n hyper_index = parsed_word.parent_index\r\n for j in range(i 
+ 1, min(len(parsed_words), i + 10)):\r\n next_word = parsed_words[j]\r\n if str(next_word.dep_rel).__contains__(\"cop\") and next_word.word in vtb and next_word.parent_index == hyper_index:\r\n couples = get_couples(parsed_sentence, hyper_index, hypo_index)\r\n if len(couples) > 0:\r\n return True, couples\r\n return False, []\r\n\r\n\r\ndef A_and_other_B(parsed_sentence):\r\n conj = [\"or\", \"and\"]\r\n parsed_words = parsed_sentence.words\r\n for i in range(len(parsed_words)):\r\n parsed_word = parsed_words[i] #(\"cc(hypoHead, and/or), amod(hyperHead, other), conj:and/or(hypoHead, hyperHead)\"),\r\n if str(parsed_word.dep_rel).__contains__(\"conj\"):\r\n hyper_index = parsed_word.index\r\n hypo_index = parsed_word.parent_index\r\n flag1 = False\r\n flag2 = False\r\n for j in range(i - 1, max(-1, i - 10), -1):\r\n pre_word = parsed_words[j]\r\n if str(pre_word.dep_rel).__contains__(\"amod\") and pre_word.word == \"other\" and pre_word.parent_index == hyper_index:\r\n flag1 = True\r\n elif str(pre_word.dep_rel).__contains__(\r\n \"cc\") and pre_word.word in conj and pre_word.parent_index == hypo_index:\r\n flag2 = True\r\n if flag1 and flag2:\r\n couples = get_couples(parsed_sentence, hyper_index, hypo_index)\r\n if len(couples) > 0:\r\n return True, couples\r\n return False, []\r\n\r\n\r\ndef A_especially_B(parsed_sentence):\r\n parsed_words = parsed_sentence.words\r\n for i in range(len(parsed_words)):\r\n parsed_word = parsed_words[i] #(\"advmod(hyperHead, especially), dep(hyperHead, hypoHead)\")\r\n if str(parsed_word.dep_rel).__contains__(\"dep\"):\r\n hypo_index = parsed_word.index\r\n hyper_index = parsed_word.parent_index\r\n for j in range(i - 1, max(-1, i - 10), -1):\r\n pre_word = parsed_words[j]\r\n if str(pre_word.dep_rel).__contains__(\"advmod\") and pre_word.word == \"especially\" and pre_word.parent_index == hyper_index:\r\n couples = get_couples(parsed_sentence, hyper_index, hypo_index)\r\n if len(couples) > 0:\r\n return True, couples\r\n return False, []\r\n\r\ndef A_including_B(parsed_sentence):\r\n parsed_words = parsed_sentence.words\r\n for i in range(len(parsed_words)):\r\n parsed_word = parsed_words[i] #(\"case(hypoHead, including), nmod:including(hyperHead, hypoHead)\"),\r\n if str(parsed_word.dep_rel).__contains__(\"nmod:including\"):\r\n hypo_index = parsed_word.index\r\n hyper_index = parsed_word.parent_index\r\n for j in range(i - 1, max(-1, i - 10), -1):\r\n pre_word = parsed_words[j]\r\n if str(pre_word.dep_rel).__contains__(\"case\") and pre_word.word == \"including\" and pre_word.parent_index == hypo_index:\r\n couples = get_couples(parsed_sentence, hyper_index, hypo_index)\r\n if len(couples) > 0:\r\n return True, couples\r\n return False, []\r\n\r\ndef A_such_as_B(parsed_sentence):\r\n parsed_words = parsed_sentence.words\r\n for i in range(len(parsed_words)):\r\n parsed_word = parsed_words[i]\r\n if str(parsed_word.dep_rel).__contains__(\"nmod:such_as\"):\r\n hypo_index = parsed_word.index\r\n hyper_index = parsed_word.parent_index\r\n flag1 = False\r\n flag2 = False\r\n for j in range(i - 1, max(-1, i-10), -1):\r\n pre_word = parsed_words[j]\r\n if str(pre_word.dep_rel).__contains__(\"mwe\") and pre_word.word == \"as\" and pre_word.parent == \"such\":\r\n flag1 = True\r\n elif str(pre_word.dep_rel).__contains__(\"case\") and pre_word.word == \"such\" and pre_word.parent_index == hypo_index:\r\n flag2 = True\r\n if flag1 and flag2:\r\n couples = get_couples(parsed_sentence, hyper_index, hypo_index)\r\n if len(couples) > 0:\r\n return True, couples\r\n 
return False, []\r\n\r\ndef sentence_couples_annotation(sentence, couples):\r\n    sentence = sentence.replace(\"_hypo\", \"\").replace(\"_hyper\", \"\").replace(\"_\", \" \")\r\n    for couple in couples:\r\n        hyper = couple.hypernym\r\n        hyper2 = hyper.replace(\" \", \"_\")\r\n        sentence = sentence.replace(\" \" + hyper + \" \", \" \" + hyper2 + \"_hyper \").strip()\r\n        hypo = couple.hyponym\r\n        hypo2 = hypo.replace(\" \", \"_\")\r\n        try:\r\n            sentence = sentence.replace(\" \" + hypo + \" \", \" \" + hypo2 + \"_hypo \").strip()\r\n        except:\r\n            sentence = sentence\r\n    return sentence\r\n\r\ndef DHP_matching(parsed_sentence, sentence = \"\"):\r\n    couples = []\r\n    patterns = []\r\n    # NP such as NP\r\n    flag, co = A_such_as_B(parsed_sentence)\r\n    if flag:\r\n        couples.extend(co)\r\n        patterns.append(\"NP such as NP\")\r\n    # NP including NP\r\n    flag, co = A_including_B(parsed_sentence)\r\n    if flag:\r\n        couples.extend(co)\r\n        patterns.append(\"NP including NP\")\r\n    # NP is a NP\r\n    flag, co = A_is_a_B(parsed_sentence)\r\n    if flag:\r\n        couples.extend(co)\r\n        patterns.append(\"NP is a NP\")\r\n    # NP and other NP\r\n    flag, co = A_and_other_B(parsed_sentence)\r\n    if flag:\r\n        couples.extend(co)\r\n        patterns.append(\"NP and other NP\")\r\n    # NP especially NP\r\n    flag, co = A_especially_B(parsed_sentence)\r\n    if flag:\r\n        couples.extend(co)\r\n        patterns.append(\"NP especially NP\")\r\n    # such NP as NP\r\n    flag, co = such_A_as_B(parsed_sentence)\r\n    if flag:\r\n        couples.extend(co)\r\n        patterns.append(\"such NP as NP\")\r\n    if len(couples) == 0:\r\n        return False, \"\", \"\", \"\"\r\n    return True, couples, patterns, sentence_couples_annotation(sentence, couples)\r\n\r\n" }, { "alpha_fraction": 0.6995798349380493, "alphanum_fraction": 0.7731092572212219, "avg_line_length": 59.5, "blob_id": "b486ea0dad9d3b013a8a59315b64545c18c49600", "content_id": "6a21f4adc8ffb2f315358d94608214ee5877ccdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 476, "license_type": "no_license", "max_line_length": 124, "num_lines": 8, "path": "/dependency_Hearst_patterns/readme.md", "repo_name": "AhmadIssaAlaa/SequentialHearstPatterns", "src_encoding": "UTF-8", "text": "This package corresponds to the work done in our previous paper (redefining Hearst patterns by using dependency relations).\n\n# Reference: \nAldine, A.I.A., Harzallah, M., Berio, G., Béchet, N., Faour, A.: Redefining Hearst\npatterns by using dependency relations. In: Proceedings of the 10th International\nJoint Conference on Knowledge Discovery, Knowledge Engineering and Knowledge\nManagement - Volume 2: KEOD. pp. 148-155. 
INSTICC, SciTePress (2018).\nhttps://doi.org/10.5220/0006962201480155\n" }, { "alpha_fraction": 0.5370036363601685, "alphanum_fraction": 0.5406137108802795, "avg_line_length": 32.57575607299805, "blob_id": "020309b08d5db85a38e1cd1612037b3d53a56728", "content_id": "8ffc77734c7811022d9a53a31491d5da16ce570e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1108, "license_type": "no_license", "max_line_length": 86, "num_lines": 33, "path": "/dependency_Hearst_patterns/extracted_couples_validation.py", "repo_name": "AhmadIssaAlaa/SequentialHearstPatterns", "src_encoding": "UTF-8", "text": "from os import listdir\nfrom os.path import isfile, join\nfrom common import core_functions as cf\n\ndef main():\n \"\"\"\n Goal: validate the extracted couples using DHP\n inputs:\n -res_files_directory: a directory path for the DHP matching result files\n \"\"\"\n res_files_directory = r\"..\\matching_DHP_subcorpora\"\n\n allfiles = [join(res_files_directory, f) for f in listdir(res_files_directory) if\n isfile(join(res_files_directory, f))]\n dataset_path = r\"..\\datasets\\Music.txt\"\n\n for file in allfiles:\n s = \"\"\n for res in cf.get_result_sentences(file):\n s += \"<s>\\n\"\n s += str(res[0]).strip() + \"\\n\"\n s += str(res[1]) + \"\\n\"\n s += \"Label: \" + str(res[2]).strip() + \"\\n\"\n predicted, predicted_by = cf.check_extracted_couples(res[1], dataset_path)\n s += \"Validated: \" + str(predicted).strip() + \"\\n\"\n s += \"Validated by: \" + str(predicted_by).strip() + \"\\n\"\n s += \"</s>\\n\"\n f = open(file, \"w\")\n f.write(s)\n f.close()\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5995085835456848, "alphanum_fraction": 0.6109746098518372, "avg_line_length": 38.3870964050293, "blob_id": "0f93df7ade55306dc8f4db00ee3ad21e2a7df1b2", "content_id": "9df92ba39b4cc2d4d7f0222d21f764132cb41d7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1221, "license_type": "no_license", "max_line_length": 123, "num_lines": 31, "path": "/common/evaluation.py", "repo_name": "AhmadIssaAlaa/SequentialHearstPatterns", "src_encoding": "UTF-8", "text": "from os import listdir\nfrom os.path import isfile, join\nimport core_functions as cf\n\ndef evaluate(res_files_directory, sem_pos_labeled_corpus_file):\n \"\"\"\n Evaluate precision, recall, and F-measure from a set of files corresponding the results of matching patterns\n :param res_files_directory: the directory of result files\n :param sem_pos_labeled_corpus_file: the file path of semantically positive labeled corpus\n :return: the measures: precision, recall, and F-measure\n \"\"\"\n all_pos = len(open(sem_pos_labeled_corpus_file).readlines())\n TM = 0\n PTM = 0\n FM = 0\n allfiles = [join(res_files_directory, f) for f in listdir(res_files_directory) if isfile(join(res_files_directory, f))]\n for file in allfiles:\n for res in cf.get_result_sentences(file):\n label = res[2]\n predicted = res[3]\n if predicted == \"True\":\n TM += 1\n if label == \"positive\":\n PTM += 1\n else:\n FM += 1\n FNM = all_pos - PTM\n precision = TM*1.0 / (TM + FM)\n recall = TM * 1.0 / (TM + FNM)\n f_measure = 2.0 * precision * recall / (precision + recall)\n return precision, recall, f_measure\n" }, { "alpha_fraction": 0.5835790634155273, "alphanum_fraction": 0.5865275859832764, "avg_line_length": 30.659259796142578, "blob_id": "691388884ee24a34baa5a18f6d9fae8e7f7731e5", "content_id": "fde185658e436210f2ac42b3fda87765a9b75900", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4409, "license_type": "no_license", "max_line_length": 153, "num_lines": 135, "path": "/corpus_labeling/cleaning_pos_labeled_sentences.py", "repo_name": "AhmadIssaAlaa/SequentialHearstPatterns", "src_encoding": "UTF-8", "text": "import re\r\nimport nltk\r\nimport spacy\r\nfrom Hearst_patterns import HearstPattern\r\nHP = HearstPattern.HearstPatterns()\r\n\r\n\r\ndef main():\r\n '''\r\n Goal: remove non semantically positive sentences from positive labeled sentences and select the same number of negative sentences as negative samples\r\n inputs:\r\n -posSentFile: a file path for the labeled positive sentences by the sentence labeling process\r\n -semPosSentOutputFile: an output file path of semantically positive labeled sentences\r\n -negSentFile: a file path for the labeled negative sentences by the sentence labeling process\r\n -samplesNegSentOutputFile: an output file path of samples of negative labeled sentences\r\n '''\r\n\r\n # inputs\r\n posSentFile = r\"..\\labeled_corpus\\Music_Pos.txt\"\r\n semPosSentOutputFile = r\"..\\labeled_corpus\\Music_Sem_Pos.txt\"\r\n negSentFile = r\"..\\labeled_corpus\\Music_Neg.txt\"\r\n samplesNegSentOutputFile = r\"..\\labeled_corpus\\Music_Neg_Samples.txt\"\r\n\r\n # open output file\r\n ofsp = open(semPosSentOutputFile, \"wb\")\r\n\r\n # process positive sentences\r\n i = 0\r\n count = 0\r\n with open(posSentFile, \"rb\") as f:\r\n for line in f:\r\n annSent = line.decode(\"ascii\", \"ignore\")\r\n i += 1\r\n print i\r\n if not is_non_sem_pos(annSent):\r\n count += 1\r\n ofsp.write(annSent.strip() + \"\\n\")\r\n\r\n ofsp.close()\r\n f.close()\r\n\r\n # open output file\r\n ofsn = open(samplesNegSentOutputFile, \"wb\")\r\n\r\n # process negative sentences\r\n i = 0\r\n with open(negSentFile, \"rb\") as f:\r\n for line in f:\r\n sent = line.decode(\"ascii\", \"ignore\")\r\n i += 1\r\n print i\r\n ofsn.write(sent.strip() + \"\\n\")\r\n if i == count:\r\n break\r\n\r\n ofsn.close()\r\n f.close()\r\n\r\ndef is_non_sem_pos(annotatedSentence):\r\n # check if couples occur between brackets and not in the same brackets\r\n if btw_brackets(annotatedSentence):\r\n print \"btw brackets\"\r\n return True\r\n # check if there is conjunction relation between couple terms\r\n if is_conjunction(annotatedSentence):\r\n return True\r\n return False\r\n\r\ndef is_conjunction(sent):\r\n res = HP.label_cohyponyms(sent)\r\n if not res:\r\n return False\r\n cohyponymCouples = res[1]\r\n hypoFlag = False\r\n hyperFlag = False\r\n for cop in cohyponymCouples:\r\n if str(cop.hyponym).__contains__(\"hypo\") or str(cop.hypernym).__contains__(\"hypo\"):\r\n hypoFlag = True\r\n if str(cop.hyponym).__contains__(\"hyper\") or str(cop.hypernym).__contains__(\"hyper\"):\r\n hyperFlag = True\r\n if (hypoFlag and hyperFlag):\r\n return True\r\n return False\r\n\r\ndef btw_brackets(sent):\r\n brackets = re.findall(r'\\((.*?)\\)', sent)\r\n if len(brackets) == 0:\r\n return False\r\n hypoBrackets = []\r\n hyperBrackets = []\r\n i = 0\r\n for bracket in brackets:\r\n if str(bracket).__contains__(\"_hypo\"):\r\n hypoBrackets.append(i)\r\n if str(bracket).__contains__(\"_hyper\"):\r\n hyperBrackets.append(i)\r\n i += 1\r\n if len(hyperBrackets)==0 or len(hypoBrackets)==0:\r\n return False\r\n return not any(x in hyperBrackets for x in hypoBrackets)\r\n\r\ndef remove_HH_annotation(annSent):\r\n return annSent.replace(\"_hypo\", \"\").replace(\"_hyper\", \"\").replace(\"_\", \" 
\")\r\n\r\ndef get_hypo_hyper(annSent):\r\n words = str(annSent).strip().split()\r\n hypo = \"\"\r\n hyper = \"\"\r\n for word in words:\r\n if word.__contains__(\"_hypo\"):\r\n hypo = word.replace(\"_hypo\", \"\").replace(\"_\", \" \")\r\n elif word.__contains__(\"_hyper\"):\r\n hyper = word.replace(\"_hyper\", \"\").replace(\"_\", \" \")\r\n return hypo, hyper\r\n\r\n# def is_conjunction(annSent):\r\n# sent = remove_HH_annotation(annSent)\r\n# hypo, hyper = get_hypo_hyper(annSent)\r\n# res = HP.label_cohyponyms(sent)\r\n# if not res:\r\n# return False\r\n# cohyponymCouples = res[1]\r\n# hypoFlag = False\r\n# hyperFlag = False\r\n# for cop in cohyponymCouples:\r\n# if str(cop.hyponym) == hypo or str(cop.hypernym) == hypo:\r\n# hypoFlag = True\r\n# if str(cop.hyponym) == hyper or str(cop.hypernym) == hyper:\r\n# hyperFlag = True\r\n# if (hypoFlag and hyperFlag):\r\n# return True\r\n# return False\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" }, { "alpha_fraction": 0.6069881319999695, "alphanum_fraction": 0.628645658493042, "avg_line_length": 49.18840408325195, "blob_id": "4344f463db90609dd542015ec5866286e4d6488c", "content_id": "09bf8af57b52ee2ca9d87a08be9c02f5b2507bce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3463, "license_type": "no_license", "max_line_length": 142, "num_lines": 69, "path": "/dependency_Hearst_patterns/DHP_matching.py", "repo_name": "AhmadIssaAlaa/SequentialHearstPatterns", "src_encoding": "UTF-8", "text": "from common import core_functions as cf\nfrom DHP import DHP_matching\ndef main():\n \"\"\"\n Goal: Match DHP and output the couples extracted (with sentence annotation) by a specific pattern into a corresponding output file\n inputs:\n -sem_pos_file: a file path for the semantically positive sentences (result of cleaning process)\n -sem_pos_processed_file: a file path for the semantically positive sentences after processing (result of java preprocessing step)\n -neg_samples_file: a file path for samples of negative sentences (same number of semantically positive sentences may be selected randomly)\n -sem_pos_processed_file: a file path for samples of negative sentences after processing (result of java preprocessing step)\n -output_files: a list of output files, each one corresponds for a specific DHP\n \"\"\"\n\n #inputs\n sem_pos_file = r\"..\\labeled_corpus\\Music_Test_Sem_Pos.txt\"\n sem_pos_processed_file = r\"..\\processed_corpus\\Music_Test_Sem_Pos_processed.txt\"\n neg_samples_file = r\"..\\labeled_corpus\\Music_Test_Neg_samples.txt\"\n neg_samples_processed_file = r\"..\\processed_corpus\\Music_Test_Neg_Samples_processed.txt\"\n output_files = [r\"..\\matching_DHP_subcorpora\\matching_such_as.txt\",\n r\"..\\matching_DHP_subcorpora\\matching_including.txt\",\n r\"..\\matching_DHP_subcorpora\\matching_is_a.txt\",\n r\"..\\matching_DHP_subcorpora\\matching_and_other.txt\",\n r\"..\\matching_DHP_subcorpora\\matching_especially.txt\",\n r\"..\\matching_DHP_subcorpora\\matching_such_NP_as.txt\"]\n\n patterns = [\"NP such as NP\", \"NP including NP\", \"NP is a NP\", \"NP and other NP\", \"NP especially NP\",\n \"such NP as NP\"]\n f0 = open(output_files[0], \"w\")\n f1 = open(output_files[1], \"w\")\n f2 = open(output_files[2], \"w\")\n f3 = open(output_files[3], \"w\")\n f4 = open(output_files[4], \"w\")\n f5 = open(output_files[5], \"w\")\n\n matching_DHP_and_write_into_files(sem_pos_file, sem_pos_processed_file, patterns, f0, f1, f2, f3, f4, f5, \"positive\")\n 
matching_DHP_and_write_into_files(neg_samples_file, neg_samples_processed_file, patterns, f0, f1, f2, f3, f4, f5, \"negative\",)\n f0.close()\n f1.close()\n f2.close()\n f3.close()\n f4.close()\n f5.close()\n\ndef matching_DHP_and_write_into_files(file, processed_file, patterns, f0, f1, f2, f3, f4, f5, label):\n sentences = open(file).readlines()\n i = 0\n for parsed_sentence in cf.get_sentences(processed_file):\n sentence = sentences[i]\n print i\n i += 1\n res = DHP_matching(parsed_sentence, sentence)\n if res[0]:\n if res[2][0] == patterns[0]:\n cf.write_sentence_matching_result_into_file(f0, res[3], res[1], label)\n elif res[2][0] == patterns[1]:\n cf.write_sentence_matching_result_into_file(f1, res[3], res[1], label)\n elif res[2][0] == patterns[2]:\n cf.write_sentence_matching_result_into_file(f2, res[3], res[1], label)\n elif res[2][0] == patterns[3]:\n cf.write_sentence_matching_result_into_file(f3, res[3], res[1], label)\n elif res[2][0] == patterns[4]:\n cf.write_sentence_matching_result_into_file(f4, res[3], res[1], label)\n elif res[2][0] == patterns[5]:\n cf.write_sentence_matching_result_into_file(f5, res[3], res[1], label)\n\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5188037157058716, "alphanum_fraction": 0.5258775353431702, "avg_line_length": 33.560508728027344, "blob_id": "8f8272e5f82831e01148cbb630c503564c8e849e", "content_id": "112ce93777bdfb5c7cf6b9d12e8b6d2b519ef31b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11168, "license_type": "no_license", "max_line_length": 124, "num_lines": 314, "path": "/common/core_functions.py", "repo_name": "AhmadIssaAlaa/SequentialHearstPatterns", "src_encoding": "UTF-8", "text": "from nltk import word_tokenize\r\nfrom nltk import pos_tag\r\nfrom nltk.corpus import wordnet as wn\r\nfrom nltk import WordNetLemmatizer\r\nfrom common import HyperHypoCouple as HH\r\nimport spacy\r\nimport gzip\r\nimport shutil\r\nimport parsed_sentence as ps\r\nnlp = spacy.load('en_core_web_sm')\r\nfrom spacy.lang.en.stop_words import STOP_WORDS\r\nstopWords = set(STOP_WORDS)\r\nlemma = WordNetLemmatizer()\r\n\r\n\r\ndef write_sentence_matching_result_into_file(f, ann_sent, couples, label):\r\n f.write(\"<s>\\n\")\r\n f.write(ann_sent + \"\\n\")\r\n f.write(str(couples) + \"\\n\")\r\n f.write(\"Label: \" + label + \"\\n\")\r\n f.write(\"</s>\\n\")\r\n\r\ndef check_extracted_couples(extracted_couples, dataset_path):\r\n dataset_couples = get_couples(dataset_path)\r\n for extC in extracted_couples:\r\n if extC in dataset_couples:\r\n return True, \"dataset\"\r\n if check_wordNet_hypernymy(extC.hyponym, extC.hypernym):\r\n return True, \"wordNet\"\r\n lemma_extC = HH.HHCouple(HeadWithLemma(extC.hyponym), HeadWithLemma(extC.hypernym))\r\n if lemma_extC in dataset_couples:\r\n return True, \"dataset\"\r\n if check_wordNet_hypernymy(lemma_extC.hyponym, lemma_extC.hypernym):\r\n return True, \"wordNet\"\r\n if check_structural_hypernym_relation(extC.hyponym, extC.hypernym):\r\n return True, \"structural\"\r\n return False, \"None\"\r\n\r\ndef check_structural_hypernym_relation(hypo, hyper):\r\n if len(hypo.split()) == 1 or len(hypo) <= len(hyper) or not str(hypo + \" \").endswith(\" \" + hyper + \" \"):\r\n return False\r\n tokens = word_tokenize(hypo)\r\n tags = pos_tag(tokens)\r\n print tags\r\n hypo2 = str(hypo).replace(\" \" + hyper, \"\")\r\n hypos = hypo2.split()\r\n for tag in tags:\r\n if tag[0] == hypos[len(hypos) - 1] and (tag[1].__contains__(\"NN\") or 
tag[1].__contains__(\"JJ\")):\r\n return True\r\n return False\r\n\r\ndef check_wordNet_hypernymy(hypo, hyper):\r\n hypos = wn.synsets(hypo)\r\n if len(hypos) == 0:\r\n return False\r\n hypo = hypos[0]\r\n hypers = set([i for i in hypo.closure(lambda s:s.hypernyms())])\r\n hypers2 = wn.synsets(hyper)\r\n if len(hypers2) == 0:\r\n return False\r\n hyper = hypers2[0]\r\n if hyper in hypers:\r\n return True\r\n else:\r\n return False\r\n\r\ndef get_couples_from_string(couples_string): #[(sonatas, works), (symphonies, works)]\r\n \"\"\"\r\n get a string of couples and return them as list of HH couples\r\n :param couples_string: the string representing a list of couples\r\n :return: HH couples list\r\n \"\"\"\r\n couples = []\r\n couples_temp = couples_string.replace(\"[\", \"\").replace(\"]\", \"\").split(\"),\")\r\n for co in couples_temp:\r\n hypo, hyper = str(co).replace(\"(\", \"\").replace(\")\", \"\").split(\",\")\r\n hh = HH.HHCouple(hypo.strip(), hyper.strip())\r\n couples.append(hh)\r\n return couples\r\n\r\ndef get_result_sentences(result_file):\r\n \"\"\"\r\n Returns all the content of a matched corpus file\r\n :param result_file: the processed corpus file (.gz)\r\n :return: the next sentence result (yield)\r\n \"\"\"\r\n sent = ps.parsed_sentence()\r\n # Read all the sentences in the file\r\n with open(result_file, 'r') as f_in:\r\n i = 0\r\n ann_sent = \"\"\r\n couples = []\r\n label = \"\"\r\n predicted = \"\"\r\n predicted_by = \"\"\r\n for line in f_in:\r\n line = line.decode('ISO-8859-2')\r\n # Ignore start and end of doc\r\n if '<s>' in line:\r\n i += 1\r\n continue\r\n # End of sentence\r\n elif '</s>' in line:\r\n yield ann_sent, couples, label, predicted, predicted_by\r\n i = 0\r\n ann_sent = \"\"\r\n couples = []\r\n label = \"\"\r\n predicted = \"\"\r\n predicted_by = \"\"\r\n else:\r\n if i == 1:\r\n ann_sent = line\r\n elif i == 2:\r\n couples = get_couples_from_string(line)\r\n elif i == 3:\r\n label = line.split(\":\")[1].strip()\r\n elif i == 4:\r\n predicted = line.split(\":\")[1].strip()\r\n elif i == 5:\r\n predicted_by = line.split(\":\")[1].strip()\r\n i += 1\r\n \r\ndef get_sentences(corpus_file):\r\n \"\"\"\r\n Returns all the (content) sentences in a processed corpus file\r\n :param corpus_file: the processed corpus file (may be compressed or not)\r\n :return: the next sentence (yield)\r\n \"\"\"\r\n sent = ps.parsed_sentence()\r\n # Read all the sentences in the file\r\n if str(corpus_file).endswith(\".gz\"):\r\n f_in = gzip.open(corpus_file, 'r')\r\n elif str(corpus_file).endswith(\".txt\"):\r\n f_in = open(corpus_file, 'r')\r\n else:\r\n print \"wrong input file.\"\r\n # with gzip.open(corpus_file, 'r') as f_in:\r\n s = []\r\n isNP = False\r\n is_root = False\r\n root = \"\"\r\n ri = 0\r\n np = \"\"\r\n np_indexes = []\r\n for line in f_in:\r\n line = line.decode('ISO-8859-2')\r\n # Ignore start and end of doc\r\n if '<text' in line or '</Text' in line or '<s>' in line:\r\n continue\r\n # End of sentence\r\n elif '</s>' in line:\r\n yield sent\r\n s = []\r\n isNP = False\r\n is_root = False\r\n root = \"\"\r\n ri = 0\r\n np = \"\"\r\n np_indexes = []\r\n sent = ps.parsed_sentence()\r\n elif '<NP>' in line:\r\n isNP = True\r\n elif '</NP>' in line:\r\n isNP = False\r\n sent.add_NP(np.strip(), root, ri, min(np_indexes), max(np_indexes))\r\n np = \"\"\r\n np_indexes = []\r\n elif '<root>' in line:\r\n is_root = True\r\n elif '</root>' in line:\r\n is_root = False\r\n else:\r\n try:\r\n word, lemma, pos, index, parent, parent_index, dep, type = 
line.split(\"\\t\")\r\n if is_root:\r\n root = word\r\n ri = int(index)\r\n if isNP:\r\n np_indexes.append(int(index))\r\n np = np + \" \" + word\r\n sent.add_word(word, lemma, pos, int(index), parent, int(parent_index), dep, type.strip())\r\n # One of the items is a space - ignore this token\r\n except Exception, e:\r\n print str(e)\r\n continue\r\n\r\ndef remove_first_occurrences_stopwords(text):\r\n \"\"\"\r\n :param text: text string\r\n :return: the text after removing the first occurrences of stop words in the text\r\n \"\"\"\r\n if text == \"\":\r\n return text\r\n words = text.split()\r\n if words[0] in stopWords:\r\n text = str(\" \" + text + \" \").replace(\" \" + words[0] + \" \", \"\").strip()\r\n return remove_first_occurrences_stopwords(text)\r\n else:\r\n return text\r\n\r\ndef noun_phrase_chunker(sentence):\r\n \"\"\"\r\n :param sentence: a sentence string\r\n :return: a list of sentence noun phrases\r\n \"\"\"\r\n nps = []\r\n sentParsing = nlp(sentence.decode(\"ascii\", \"ignore\"))\r\n for chunk in sentParsing.noun_chunks:\r\n np = chunk.text.lower().encode(\"ascii\", \"ignore\")\r\n np = remove_first_occurrences_stopwords(np)\r\n nps.append(np)\r\n return nps\r\n\r\n\r\ndef label_sentence(sentence, couples, min_gap = 1, max_gap = 10):\r\n \"\"\"\r\n :param sentence: a sentence string\r\n :param couples: list of dataset HH-couples\r\n :param min_gap: the minimum gap between the index of occurrence of hypernym and hyponym (default = 1)\r\n :param max_gap: the maximum gap between the index of occurrence of hypernym and hyponym (default = 7)\r\n :return: tuple (occur boolean, annotated sentence), the occur boolean is true if any of the couple occur at the sentence\r\n \"\"\"\r\n nps = noun_phrase_chunker(sentence)\r\n nps.sort(key=lambda s: len(s), reverse=True)\r\n sentence1 = \" \" + sentence\r\n for np in nps:\r\n np_ann = str(np).replace(\" \", \"_\") + \"_np\"\r\n sentence1 = sentence1.lower().replace(\" \" + np + \" \", \" \" + np_ann + \" \")\r\n for hh in couples:\r\n hypo = hh.hyponym\r\n hyper = hh.hypernym\r\n if hypo.lower() in nps and hyper.lower() in nps:\r\n hypo_np = str(hypo).replace(\" \", \"_\") + \"_np\"\r\n hypo2 = hypo_np.replace(\"_np\", \"_hypo\")\r\n sentence2 = str(sentence1).replace(\" \" + hypo_np + \" \", \" \" + hypo2 + \" \")\r\n hyper_np = str(hyper).replace(\" \", \"_\") + \"_np\"\r\n hyper2 = hyper_np.replace(\"_np\", \"_hyper\")\r\n sentence3 = str(sentence2).replace(\" \" + hyper_np + \" \", \" \" + hyper2 + \" \")\r\n hypoIndexes = get_indexes(sentence3, hypo2)\r\n hyperIndexes = get_indexes(sentence3, hyper2)\r\n for index1 in hypoIndexes:\r\n for index2 in hyperIndexes:\r\n if abs(index2 - index1) > min_gap and abs(index2 - index1) <= max_gap:\r\n for np in nps:\r\n np_ann = str(np).replace(\" \", \"_\") + \"_np\"\r\n sentence3 = sentence3.replace(\" \" + np_ann + \" \", \" \" + np + \" \")\r\n return True, sentence3.strip()\r\n return False, sentence.strip()\r\n\r\ndef get_indexes(sentence, token):\r\n \"\"\"\r\n :param sentence: a string sentence\r\n :param token: a string token (such as word)\r\n :return: a list of all indexes where the token occurs in the sentence\r\n \"\"\"\r\n tokens = word_tokenize(sentence)\r\n indexes = []\r\n while True:\r\n try:\r\n ind = tokens.index(token)\r\n indexes.append(ind)\r\n tokens[ind] = \"_\"\r\n except:\r\n break\r\n return indexes\r\n\r\ndef get_couples(datasetPath):\r\n \"\"\"\r\n :param datasetPath: dataset file path (dataset format --> hyponym\\thypernym\\n)\r\n :return: return a list of 
dataset HH-couples\r\n    \"\"\"\r\n    couples = []\r\n    with open(datasetPath, \"rb\") as f:\r\n        for line in f:\r\n            hypo, hyper = line.split(\"\\t\")\r\n            hh = HH.HHCouple(hypo.strip(), hyper.strip())\r\n            couples.append(hh)\r\n    f.close()\r\n    return couples\r\n\r\ndef compressFile(output_file):\r\n    with open(output_file, 'rb') as f_in, gzip.open(output_file + '.gz', 'wb') as f_out:\r\n        shutil.copyfileobj(f_in, f_out)\r\n\r\ndef HeadWithLemma(couple_term):\r\n    if len(str(couple_term).split(\" \")) == 1:\r\n        try:\r\n            return lemma.lemmatize(couple_term)\r\n        except:\r\n            # fall back to the original term if lemmatization fails\r\n            print \"exception\"\r\n            return couple_term\r\n    nn = 0\r\n    text = word_tokenize(couple_term)\r\n    tags = pos_tag(text)\r\n    allTags = [tag[1] for tag in tags]\r\n    ConjFlag = False\r\n    if \"CC\" in allTags:\r\n        ConjFlag = True\r\n    i = 0\r\n    word = \"\"\r\n    for tag in tags:\r\n        if str(tag[1]).__eq__(\"IN\") or (ConjFlag and str(tag[1]).__eq__(\",\")) or (ConjFlag and str(tag[1]).__eq__(\"CC\")):\r\n            break\r\n        if str(tag[1]).__contains__(\"NN\"):\r\n            word = tag[0]\r\n            nn += 1\r\n    try:\r\n        word = lemma.lemmatize(word)\r\n    except:\r\n        print \"exception\"\r\n    if word == \"\":\r\n        return couple_term\r\n    return word\r\n\r\n" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 66, "blob_id": "04947ef9e3087358bb53e165f3c1bbe08449a377", "content_id": "fba66971676f59d9a507fc4858111dace524c94e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 67, "license_type": "no_license", "max_line_length": 66, "num_lines": 1, "path": "/datasets/readme.md", "repo_name": "AhmadIssaAlaa/SequentialHearstPatterns", "src_encoding": "UTF-8", "text": "Tab-separated hyponym hypernym file (hyponym \\t hypernym \\n)\n" }, { "alpha_fraction": 0.6845386624336243, "alphanum_fraction": 0.6845386624336243, "avg_line_length": 35.45454406738281, "blob_id": "5fbb46c5ca3faedd9fab18c52a512867dabb2d0c", "content_id": "fd070e7359642819086f1208b9aeceb93b2ae048", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 802, "license_type": "no_license", "max_line_length": 127, "num_lines": 22, "path": "/dependency_Hearst_patterns/evaluate.py", "repo_name": "AhmadIssaAlaa/SequentialHearstPatterns", "src_encoding": "UTF-8", "text": "from os import listdir\nfrom os.path import isfile, join\nfrom common import evaluation as ev\n\ndef main():\n    \"\"\"\n    Goal: evaluate precision, recall, F-measure of DHP\n    inputs:\n        -res_files_directory: a directory path for the DHP matching result files\n        -sem_pos_labeled_corpus: a file path for the semantically positive labeled sentences (all positive used to evaluate recall)\n    \"\"\"\n    res_files_directory = r\"..\\matching_DHP_subcorpora\"\n    sem_pos_labeled_corpus = r\"..\\labeled_corpus\\Music_Test_Sem_Pos.txt\"\n\n    precision, recall, f_measure = ev.evaluate(res_files_directory, sem_pos_labeled_corpus)\n    print \"DHP evaluation:\"\n    print \"precision : \" + str(precision)\n    print \"recall : \" + str(recall)\n    print \"F-measure : \" + str(f_measure)\n\nif __name__ == '__main__':\n    main()\n" }, { "alpha_fraction": 0.7989130616188049, "alphanum_fraction": 0.8097826242446899, "avg_line_length": 60.33333206176758, "blob_id": "3bc7f719b3362a2c235e920cfc9effcfc07c96dc", "content_id": "67f344006d4a53d1d5228733360af3d1fd411085", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 184, "license_type": "no_license", "max_line_length": 82, "num_lines": 3, 
"path": "/corpus_labeling/readme.md", "repo_name": "AhmadIssaAlaa/SequentialHearstPatterns", "src_encoding": "UTF-8", "text": "This pickage consists of two processes:\n 1- label a corpus between positive and negative sentences;\n 2- clean the positive sentences by removing non semantically positive sentences.\n" }, { "alpha_fraction": 0.5634328126907349, "alphanum_fraction": 0.5634328126907349, "avg_line_length": 29.647058486938477, "blob_id": "9e86022724c726ed3e084c764903fcb16dd9b9ba", "content_id": "970f52840266cee7b34e193119d82aeaf95bd14f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 536, "license_type": "no_license", "max_line_length": 80, "num_lines": 17, "path": "/common/HyperHypoCouple.py", "repo_name": "AhmadIssaAlaa/SequentialHearstPatterns", "src_encoding": "UTF-8", "text": "class HHCouple:\r\n def __init__(self, hypo, hyper):\r\n self.hypernym = hyper\r\n self.hyponym = hypo\r\n\r\n def __repr__(self):\r\n return str(self)\r\n\r\n def __str__(self):\r\n return \"(\" + self.hyponym + \", \" + self.hypernym + \")\"\r\n\r\n def __eq__(self, other):\r\n return self.hypernym == other.hypernym and self.hyponym == other.hyponym\r\n\r\n def __ne__(self, other):\r\n \"\"\"Override the default Unequal behavior\"\"\"\r\n return self.hypernym != other.hypernym or self.hyponym != other.hyponym" }, { "alpha_fraction": 0.8295454382896423, "alphanum_fraction": 0.8295454382896423, "avg_line_length": 87, "blob_id": "d168e12ce1407e8f7ed4ca2ab8ea410bba0f8236", "content_id": "b75adc035c50c6c5fc25572fa97c492db069b502", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 88, "license_type": "no_license", "max_line_length": 87, "num_lines": 1, "path": "/common/readme.md", "repo_name": "AhmadIssaAlaa/SequentialHearstPatterns", "src_encoding": "UTF-8", "text": "This is a commong package that contain common classes and functions for other packages.\n" }, { "alpha_fraction": 0.7944492697715759, "alphanum_fraction": 0.8005203604698181, "avg_line_length": 59.578948974609375, "blob_id": "490f7642d2424bc09104cd41f8fb42ba509e6051", "content_id": "e4f8da2af1f9439c2c52e85059787643c05f4969", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1153, "license_type": "no_license", "max_line_length": 191, "num_lines": 19, "path": "/README.md", "repo_name": "AhmadIssaAlaa/SequentialHearstPatterns", "src_encoding": "UTF-8", "text": "# SequentialHearstPatterns\nA new formalization of Hearst's patterns as dependency patterns (DHP), and a mining method to learn sequential Hearst's patterns (SHP).\n\n# Requirements\nWe are using python 2.7 (Anaconda framework)\n\nWe are using nltk: pip install nltk\n\nWe are using spacy: pip install spacy\n\nTake care to change the input of each process (such as paths for input and output files).\n\n# Modules Structure\n\n* M1 corpus labeling (corpus_labeling\\sentence_labeling_pos_neg.py): module to label sentences of a corpus into positive and negative sentences.\n* M2 corpus cleaning (corpus_labeling\\cleaning_pos_labeled_sentences.py: module to remove non-semantically positive sentences.\n* M3 DHP matching (dependency_Hearst_patterns\\DHP_matching.py): module to match DHP on positive and negative sentences and identify HH couples.\n* M4 extracted couples validation (dependency_Hearst_patterns\\extracted_couples_validation.py): module to validate the extracted couples using a given dataset, WordNet, and structural 
method.\n* M5 evaluate DHP (dependency_Hearst_patterns\\evaluate.py): module to evaluate DHP by measuring precision, recall, and F-measure.\n\n\n" }, { "alpha_fraction": 0.5947940945625305, "alphanum_fraction": 0.5998445749282837, "avg_line_length": 37.599998474121094, "blob_id": "593aab72be355199101fe50543aaaafd3a2a3787", "content_id": "96deedca355e83071559dc9212c500e3d2cb59d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2574, "license_type": "no_license", "max_line_length": 114, "num_lines": 65, "path": "/corpus_labeling/sentence_labeling_pos_neg.py", "repo_name": "AhmadIssaAlaa/SequentialHearstPatterns", "src_encoding": "UTF-8", "text": "from nltk import word_tokenize\r\nfrom nltk import WordNetLemmatizer\r\nfrom spacy.lang.en import English\r\nnlp = English()\r\nnlp.add_pipe(nlp.create_pipe('sentencizer'))\r\nfrom spacy.lang.en.stop_words import STOP_WORDS\r\nstopWords = set(STOP_WORDS)\r\nlemma = WordNetLemmatizer()\r\nfrom common import core_functions as cf\r\n\r\n\r\ndef main():\r\n    \"\"\"\r\n    Goal: Take a list of corpus text files and label the sentences as positive and negative according to a dataset,\r\n    after filtering out sentences whose token count falls outside [minTokens, maxTokens]\r\n    inputs:\r\n    -corpusFilesInput: A list of paths for corpus text files\r\n    -posSentOutputFile: an output file path for positive labeled sentences\r\n    -negSentOutputFile: an output file path for negative labeled sentences\r\n    -datasetFilePath: a dataset file path\r\n    -minTokens: minimum number of tokens in a sentence\r\n    -maxTokens: maximum number of tokens in a sentence\r\n    \"\"\"\r\n    #inputs\r\n    corpusFilesInput = [r\"E:\\SemEvalData\\SemEval18-Task9\\corpuses\\2B_music_bioreviews_tokenized_Training.txt\",\r\n                        r\"E:\\SemEvalData\\SemEval18-Task9\\corpuses\\2B_music_bioreviews_tokenized_Testing.txt\"]\r\n    posSentOutputFile = r\"..\\labeled_corpus\\Music_Pos.txt\"\r\n    negSentOutputFile = r\"..\\labeled_corpus\\Music_Neg.txt\"\r\n    datasetFilePath = r\"..\\datasets\\Music.txt\"\r\n    minTokens = 5\r\n    maxTokens = 50\r\n\r\n    #get dataset couples\r\n    couples = cf.get_couples(datasetFilePath)\r\n\r\n    #open output files\r\n    ofp = open(posSentOutputFile, \"wb\")\r\n    ofn = open(negSentOutputFile, \"wb\")\r\n\r\n    #process each corpus file\r\n    for cFile in corpusFilesInput:\r\n        with open(cFile, \"rb\") as f:\r\n            i = 0\r\n            for line in f:\r\n                line = line.decode(\"ascii\", \"ignore\")\r\n                i += 1\r\n                print i\r\n                sentences = nlp(line)  # line was already decoded above\r\n                for sentence in sentences.sents:\r\n                    sentence = sentence.string.strip()\r\n                    tokens = word_tokenize(sentence)\r\n                    if len(tokens) < minTokens or len(tokens) > maxTokens:\r\n                        continue\r\n                    else:\r\n                        label, resSent = cf.label_sentence(sentence, couples)\r\n                        if label:\r\n                            ofp.write(resSent.encode(\"ascii\", \"ignore\").strip()+\"\\n\")\r\n                        else:\r\n                            ofn.write(resSent.encode(\"ascii\", \"ignore\").strip()+\"\\n\")\r\n    ofp.close()\r\n    ofn.close()\r\n    f.close()\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n" } ]
16
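(A quick aside on the SequentialHearstPatterns entry that ends here: its datasets/readme.md only names the tab-separated format, and get_couples in common/core_functions.py is its reader. The short sketch below is not repository code; it writes and re-reads a two-pair sample in that format, the way get_couples parses it. The file name sample_dataset.txt is invented, and the pairs echo the (sonatas, works) example in the repo's own comments.)

# Minimal sketch (not from the repository) of the hyponym\thypernym dataset
# format named in datasets/readme.md, parsed the same way as
# common/core_functions.get_couples. File name and pairs are illustrative only.
pairs = [("sonatas", "works"), ("symphonies", "works")]

with open("sample_dataset.txt", "w") as f:
    for hypo, hyper in pairs:
        f.write(hypo + "\t" + hyper + "\n")

couples = []
with open("sample_dataset.txt") as f:
    for line in f:
        hypo, hyper = line.split("\t")  # one tab-separated pair per line
        couples.append((hypo.strip(), hyper.strip()))

print(couples)  # [('sonatas', 'works'), ('symphonies', 'works')]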
mhshohag-ice/IoT-Based-Cold-Storage-Management-System
https://github.com/mhshohag-ice/IoT-Based-Cold-Storage-Management-System
f88bcc50856d44768c72c35f0db5e78271a063a2
0607c62c1d9d6044866f45e032c70867c90a5000
47ff1cd641366c0d3f51f074834dd7f27e4fd539
refs/heads/master
2021-08-19T14:10:57.378500
2017-11-26T15:33:26
2017-11-26T15:33:26
110,701,890
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6479647755622864, "alphanum_fraction": 0.6941694021224976, "avg_line_length": 21.674999237060547, "blob_id": "2ba98fa1ec6c115cda444baba081845b8403ed2d", "content_id": "d50694c92ec74d91d8a1d6197e191f9632b8e329", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 909, "license_type": "no_license", "max_line_length": 95, "num_lines": 40, "path": "/ColdStorage.py", "repo_name": "mhshohag-ice/IoT-Based-Cold-Storage-Management-System", "src_encoding": "UTF-8", "text": "\nimport sys\nimport RPi.GPIO as GPIO\nimport time\nimport Adafruit_DHT\nimport urllib2\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setwarnings(False)\n\nMotor1A = 16\nMotor1B = 18\nMotor1E = 22\nGPIO.setup(Motor1A,GPIO.OUT)\nGPIO.setup(Motor1B,GPIO.OUT)\nGPIO.setup(Motor1E,GPIO.OUT)\n\nmyAPI = \"NSWUGLOJIDCEKSUE\"\nmyDelay = 15\nbaseURL = 'https://api.thingspeak.com/update?api_key=%s' % myAPI\nLight = 1000\n\nGPIO.setup(11,GPIO.IN)\nGasPin = 11\nDHT_1 = 11\nDHT_2 = 4\nFireDetection = 10\nwhile True:\n FireDetection = GPIO.input(GasPin)\n hum , temp = Adafruit_DHT.read_retry(DHT_1,DHT_2)\n f = urllib2.urlopen(baseURL +\"&field1=%s&field2=%s&field3=%s\" % (temp, hum, FireDetection))\n print(temp)\n print(hum)\n print(FireDetection)\n if temp>30:\n GPIO.output(Motor1A,GPIO.HIGH)\n GPIO.output(Motor1B,GPIO.LOW)\n GPIO.output(Motor1E,GPIO.HIGH)\n else:\n GPIO.output(Motor1E,GPIO.LOW)\nGPIO.cleanup();\n\n" }, { "alpha_fraction": 0.7325102686882019, "alphanum_fraction": 0.7325102686882019, "avg_line_length": 59.75, "blob_id": "4676d9a504abb54d9f942923fa20ceeb0eedb736", "content_id": "ae1dbd689ac940fe1267943cdc6c111bb5efd91b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 243, "license_type": "no_license", "max_line_length": 114, "num_lines": 4, "path": "/ReadMe.txt", "repo_name": "mhshohag-ice/IoT-Based-Cold-Storage-Management-System", "src_encoding": "UTF-8", "text": "This is a project based on paper from INTERNATIONAL JOURNAL OF RECENT TRENDS IN ENGINEERING & RESEARCH (IJRTER) by\nMr. Deepak Venkatesh, Ms. Megha Tatti, Ms. Prithvi G Hardikar, Mr. Syed Saqlain Ahmed, Mr. Sharavana K \ntitled by\n\"Cold Storage Management System for Farmers based on IOT\"\n" } ]
2
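(A note on the cold-storage entry that ends here: ColdStorage.py reports readings with a plain GET to ThingSpeak's update endpoint. The sketch below is only an illustration, not repository code: it reproduces the same URL shape in Python 3 with urllib.request in place of the script's urllib2, and hard-coded fake readings so it can run without a Raspberry Pi or sensors. YOUR_API_KEY is a placeholder, not the key from the file above.)

# Illustrative Python 3 version of the ThingSpeak update call in ColdStorage.py.
# Fake readings stand in for the DHT11 and gas sensor; YOUR_API_KEY is a placeholder.
import time
import urllib.request

BASE_URL = "https://api.thingspeak.com/update?api_key=YOUR_API_KEY"

def push_reading(temp, hum, fire):
    # Same field mapping as the original loop: field1=temp, field2=hum, field3=fire
    url = BASE_URL + "&field1=%s&field2=%s&field3=%s" % (temp, hum, fire)
    with urllib.request.urlopen(url) as resp:
        return resp.read()  # ThingSpeak answers with the new entry id, or 0 on failure

if __name__ == "__main__":
    for temp, hum, fire in [(28.5, 61.0, 1), (31.2, 58.0, 1)]:  # fake sensor values
        print(push_reading(temp, hum, fire))
        time.sleep(15)  # free ThingSpeak channels accept roughly one update per 15 s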
mikelaughton/reimagined-fiesta
https://github.com/mikelaughton/reimagined-fiesta
4350d0721d0d267e5cb38026028fce6aaccd2ec8
9dcdf0cfe583b66b5a6e56d6a5f381aec730e604
c7e403c7fb6cd6f5e5a980247afd72ccc154ed45
refs/heads/master
2021-01-20T13:56:18.408112
2017-03-16T21:58:48
2017-03-16T21:58:48
82,724,036
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7425650358200073, "alphanum_fraction": 0.7458178400993347, "avg_line_length": 32.625, "blob_id": "97998bf460574f085f501f2b4b6620a07a5bb872", "content_id": "1e5709bdfbb581e2279fc09705fec8388b17d183", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4304, "license_type": "no_license", "max_line_length": 169, "num_lines": 128, "path": "/reminders/views.py", "repo_name": "mikelaughton/reimagined-fiesta", "src_encoding": "UTF-8", "text": "from django.shortcuts import get_object_or_404, render\nfrom django.http import HttpResponseRedirect, HttpResponse, JsonResponse, HttpResponseForbidden\nfrom django.urls import reverse, reverse_lazy\nfrom django.views import generic\nfrom django.contrib.auth import authenticate, login\n#For @login_required\nfrom django.contrib.auth.decorators import login_required\n#For UserCreationForm\nfrom django.contrib.auth.forms import UserCreationForm\n#Because you can't decorate classes...\nfrom django.utils.decorators import method_decorator\n# Create your views here.\n\nfrom .models import *\nfrom reminders.forms import *\n\n#Reverse lazy to stop there being a circular import error.\n@method_decorator(login_required, name='dispatch')\nclass IndexView(generic.ListView):\n\ttemplate_name = 'reminders/index.html'\n\tcontext_object_name = 'tasks'\n\tnext = reverse_lazy(\"adulting:index\")\n\n\tdef get_queryset(self):\n\t\treturn Task.objects.filter(user=self.request.user)\n\n@method_decorator(login_required,name='dispatch')\nclass MasonryView(generic.ListView):\n\ttemplate_name = 'reminders/index_masonry.html'\n\tcontext_object_name = 'tasks'\n\tnext = reverse_lazy(\"adulting:index\")\n\tform_media = TaskForm()\n\tform_media = str(form_media.media)\n\tdef get_context_data(self,**kwargs):\n\t\t#Ask yourself the question, is this necessary?\n\t\t#Add in the form media for the datepicker widget\n\t\t#The 'create reminder' button depends on the media you supply to the form - if you change the form, you want the button to still work, because it's a pain in the arse.\n\t\tcontext = super(MasonryView,self).get_context_data(**kwargs)\n\t\tcontext['form']=str(self.form_media)\n\t\treturn context\n\tdef get_queryset(self):\n\t\treturn Task.objects.filter(user=self.request.user).order_by('-entry_date')\n\nclass RegisterView(generic.edit.CreateView):\n\ttemplate_name = 'reminders/register.html'\n\tform_class = UserCreationForm\n\tsuccess_url = '/'\n\nclass TaskDetailView(generic.DetailView):\n\tmodel = Task\n\n@method_decorator(login_required,name='dispatch')\nclass TaskDeleteView(generic.edit.DeleteView):\n\t#def ajax to send Json instead\n\tmodel = Task\n\tsuccess_url = reverse_lazy(\"reminders:index\")\n\n#Ajax response mixin.\nclass AJAXMixin(object):\n\tdef form_invalid(self,form):\n\t\tresponse = super(AJAXMixin,self).form_invalid(form)\n\t\tif self.request.is_ajax:\n\t\t\treturn JsonResponse(form.errors,status=400)\n\t\telse:\n\t\t\treturn response\n\t\t\t\n\tdef form_valid(self,form):\n\t\t#Redirects to success_url normally\n\t\tresponse = super(AJAXMixin,self).form_valid(form)\n\t\tif self.request.is_ajax():\n\t\t\t#Let the view object query the object's PK.\n\t\t\tdata = { 'pk': self.object.pk, }\n\t\t\treturn JsonResponse(data)\n\t\telse:\n\t\t\t#Defined elsewhere\n\t\t\treturn response\n\n@method_decorator(login_required, name='dispatch')\nclass TaskCreateView(AJAXMixin,generic.edit.CreateView):\n\tmodel = Task\n\ttemplate_name_suffix = '_create'\n\tsuccess_url = '/'\n\tform_class = 
TaskForm\n\n\tdef form_valid(self,form):\n\t\tform.instance.user = self.request.user\n\t\treturn super(TaskCreateView,self).form_valid(form)\n\n@login_required\ndef TaskCreateAjaxView(request):\n\tif request.is_ajax():\n\t\tform = TaskForm()\n\t\t#Pass the form to the JSON so it can be dynamically rendered.\n\t\tdata = { 'status':'200', 'form':form.as_p() }\n\t\treturn JsonResponse(data)\n\telse:\n\t\treturn HttpResponseForbidden(\"Maybe you meant to go to <a href='{0}'>{0}</a>?\".format(reverse_lazy(\"reminders:create\")))\n\nclass PerformanceCreateView(generic.edit.CreateView):\n\tmodel = Performance\n\tfields = ['perf_date']\n\ttemplate_name_suffix = '_update_form'\n\t\n\tdef get_context_data(self,**kwargs):\n\t\tcontext = super(PerformanceCreateView,self).get_context_data(**kwargs)\n\t\treturn context\n\ndef PerformView(request,task_id):\n\tthe_task = get_object_or_404(Task,pk=task_id)\n\tif request.method == \"POST\":\n\t\tnew_perf = Performance(perf_date=datetime.now(),task=the_task)\n\t\tif request.is_ajax():\n\t\t\tdata = {}\n\t\t\ttry:\n\t\t\t\tnew_perf.save()\n\t\t\t\tdata[\"message\"]=\"Success\"\n\t\t\t\tdata[\"is_countdown\"] = the_task.countdown\n\t\t\t\treturn JsonResponse(data)\n\t\t\texcept Exception:\n\t\t\t\tdata[\"message\"]=\"Failure\"\n\t\t\t\treturn JsonResponse(data)\n\t\telse:\n\t\t\treturn HttpResponseRedirect(reverse(\"adulting:index\"))\n\tif request.method == \"GET\":\n\t\tform = PerformanceForm()\n\t\textra_context = {}\n\t\textra_context['form'] = form\n\t\treturn render(request,\"reminders/performance_update_form.html\",extra_context)\n" }, { "alpha_fraction": 0.5146299600601196, "alphanum_fraction": 0.585197925567627, "avg_line_length": 28.049999237060547, "blob_id": "81975dc641062ddc42c8e545dc865ed95048986b", "content_id": "0d13ccad070712914c96c995537b869d45d15c1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 581, "license_type": "no_license", "max_line_length": 187, "num_lines": 20, "path": "/reminders/migrations/0007_auto_20170225_0041.py", "repo_name": "mikelaughton/reimagined-fiesta", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.5 on 2017-02-25 00:41\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('reminders', '0006_task_colour'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='task',\n            name='colour',\n            field=models.CharField(choices=[('#F44336', 'red'), ('#4CAF50', 'green'), ('#FFEB3B', 'yellow'), ('#2196F3', 'blue')], default='#F44336', max_length=7, verbose_name='Colour'),\n        ),\n    ]\n" }, { "alpha_fraction": 0.7343441247940063, "alphanum_fraction": 0.7343441247940063, "avg_line_length": 38.921051025390625, "blob_id": "5a7b66dbd17dd1da5f55e5596d5a2334e84b9120", "content_id": "46c183bc29bc9fd89baef66f8d7399ffc66ce9ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1517, "license_type": "no_license", "max_line_length": 101, "num_lines": 38, "path": "/TODO.txt", "repo_name": "mikelaughton/reimagined-fiesta", "src_encoding": "UTF-8", "text": "Refactoring:\n\t* Modularise jQuery (how in all sweet Mary I do not know) (done)\n\t* Split settings (done)\n\t* Change secret keys (done)\n\t* Shift all CSS to external CSS files (done)\n\t* Self host bootstrap and all that - move invocation to bottom, it's REALLY doing your loadtime hell\n\t* Add reasonable fallbacks if Google Fonts isn't 
available\n\t* Image-ify logo - will lower load on Pi\n\nFeatures:\n\t* Better theme-picker widget (you may need to build it yourself)\n\t* Date/time widget (pick one) (done)\n\t\t* It does however appear slightly off screen on mobile.\n\t* Archive task\n\t\t* Archive task without leaving page\n\t* Create task (done)\n\t\t* Create task without leaving page (done)\n\t\t* Make it toggle - create button inserted by jquery doesn't work.\n\t* Perform task with leaving page (done-ish)\n\t* Create recurring tasks\n\t* Notification when something needs doing\n\t\t* By email\n\t\t* Stretch: by text\n\t* Secret tasks (by extension, one will not need email)\n\t\t* Padlock, tap to swivel around with password field displayed? (Stupid, no idea what the task is.)\n\t\t* Button in top-right saying \"show all\"?\n\t\t* Button in top-right saying \"show archived\" to disambiguate?\n\t\t* Custom tag filter - \"Secret\" becomes \"s****t\"?\n\t* Show something more exciting in the history\n\t\t* Attach details? (GenericForeignKeyRelation)\n\t* Perform task within task detail page\n\nAppearance:\n\t* Get your own icons, the Unicode jazz ain't cutting it\n\t* Change create button\n\nRandom:\n\t* Get GitHub to stop calling my thing a javascript repo when it isn't.\n" }, { "alpha_fraction": 0.572139322757721, "alphanum_fraction": 0.5887230634689331, "avg_line_length": 30.736841201782227, "blob_id": "01de6c30ce5cb11ef5e7bc7e991bac0ba5f17182", "content_id": "fdffad6239bc4b0258001429b0965476024cb9d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 603, "license_type": "no_license", "max_line_length": 79, "num_lines": 19, "path": "/reminders/templates/reminders/performance_update_form.html", "repo_name": "mikelaughton/reimagined-fiesta", "src_encoding": "UTF-8", "text": "{% extends \"reminders/base.html\" %}\n{% load static i18n %}\n{% get_media_prefix as media_prefix %}\n{% block extra_head %}\n{{ form.media }}\n{% endblock %}\n{% block title %} Adulting: {{ task }} detail {% endblock %}\n{% block content %}\n\t\t<div class=\"row\" id=\"\">\n\t\t\t<div class=\"activity-single col-xs-12 col-sm-12 col-md-9\" id=\"1\">\n\t\t\t\t<h1 class=\"activity_title\">{{performance.task.label}}</h1>\n\t\t\t\t<form action=\"{% url 'reminders:perform' task_id=task.pk %}\" method=\"post\">\n\t\t\t\t\t{% csrf_token %}\n\t\t\t\t\t{{ form.as_p }}\n\t\t\t\t\t<input type=\"submit\" value=\"submit\">\n\t\t\t\t</form>\n\t\t\t</div>\n\t\t</div>\n{% endblock %}\n" }, { "alpha_fraction": 0.7051070928573608, "alphanum_fraction": 0.7116968631744385, "avg_line_length": 24.29166603088379, "blob_id": "b7280caba059119a3cd893c4fbe20a09ca40eee1", "content_id": "224302151e688f31d4c48cdf5cfefbddaca59ec2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 607, "license_type": "no_license", "max_line_length": 108, "num_lines": 24, "path": "/reminders/forms.py", "repo_name": "mikelaughton/reimagined-fiesta", "src_encoding": "UTF-8", "text": "from django import forms\nfrom bootstrap3_datetime.widgets import DateTimePicker\nfrom reminders.models import *\n\nmy_attrs = {\n\t\"inline\":True,\n\t\"sideBySide\":True,\n\t\"todayBtn\":\"linked\",\n\t\"bootstrap_version\":3,\n\t\"usel10n\":True,\n\t\"format\":\"YYYY-M-D H:mm\",\n}\n\nclass TaskForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Task\n\t\texclude = ['user']\n\t\twidgets = { 'deadline': DateTimePicker(options=my_attrs), 'entry_date': DateTimePicker(options=my_attrs) }\n\t\t\nclass 
PerformanceForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Performance\n\t\texclude = ('task',)\n\t\twidgets = { 'perf_date': DateTimePicker(options=my_attrs) }\n" }, { "alpha_fraction": 0.7184586524963379, "alphanum_fraction": 0.7259167432785034, "avg_line_length": 34.75555419921875, "blob_id": "33ea94f7e0c8dbff141df37495f4fa039709daea", "content_id": "de0dd5d237581ff2e635c00c054ad8bde73f4329", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1609, "license_type": "no_license", "max_line_length": 104, "num_lines": 45, "path": "/reminders/models.py", "repo_name": "mikelaughton/reimagined-fiesta", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import AbstractUser\nfrom django.conf import settings\nfrom datetime import datetime\ndef _(arg):\n\t#Dummy _ function pending i18n, which you've forgotten.\n\treturn arg\n\n# Create your models here.\n\nclass Icon(models.Model):\n\t#Sysadmin only model, so no need to isolate by user.\n\ticon = models.ImageField(upload_to='uploads/%Y/%m/%d/')\n\tdescription = models.CharField(_('Description'),blank=True,null=True,max_length=100)\n\tdef __str__(self):\n\t\treturn self.description\n\nCOLOUR_CHOICES = (\n\t('red','red'),\n\t('yellow','yellow'),\n\t('blue','blue'),\n\t('green','green'),\n)\n\nclass Task(models.Model):\n\tuser = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE,blank=True,null=True)\n\tlabel = models.CharField(max_length=200)\n\tentry_date = models.DateTimeField(_('Date entered'))\n\tdeadline = models.DateTimeField(_('Deadline'),blank=True,null=True)\n\ticon = models.ForeignKey(Icon,null=True,blank=True)\n\tis_secret = models.BooleanField(_('Secret?'))\n\t#Mebs change the widget on this.\n\tcountdown = models.BooleanField(_('Countdown'),help_text=_('Countdown or countup?'),default=True)\n\tcolour = models.CharField(_('Colour'),choices=COLOUR_CHOICES,default=COLOUR_CHOICES[0][0],max_length=7)\n\tdef last_performed(self):\n\t\treturn self.performance_set.order_by('-perf_date')[0].perf_date\n\tdef __str__(self):\n\t\treturn self.label\n\nclass Performance(models.Model):\n\t#For when a task is performed.\n\ttask = models.ForeignKey(Task)\n\tperf_date = models.DateTimeField(_('Date performed'))\n\tdef __str__(self):\n\t\treturn \"{}: {}\".format(self.task.pk,self.perf_date)\n" }, { "alpha_fraction": 0.657616913318634, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 46.35714340209961, "blob_id": "ab67fc7d670abc46f6d8ae00e449a8cc162b27f8", "content_id": "927578aa3b8dd77d572cc6a286ba1dc4f0f2f898", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 663, "license_type": "no_license", "max_line_length": 81, "num_lines": 14, "path": "/reminders/urls.py", "repo_name": "mikelaughton/reimagined-fiesta", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom . 
import views\n\napp_name = \"reminders\"\nurlpatterns = [\n\turl(r'^$',views.MasonryView.as_view(),name='index'),\n\t#url(r'^masonry$',views.MasonryView.as_view(),name='masonry'),\n\turl(r'^register/?$', views.RegisterView.as_view(),name='register'),\n\turl(r'^detail/(?P<pk>[0-9]*)/?$', views.TaskDetailView.as_view(),name='detail'),\n\turl(r'^perform/(?P<task_id>[0-9]*)/?$', views.PerformView, name='perform'),\n\turl(r'^create/?$',views.TaskCreateView.as_view(),name='create'),\n\turl(r'^create_task_ajax/?', views.TaskCreateAjaxView, name='create_ajax'),\n\turl(r'^delete/(?P<pk>[0-9]*)/?',views.TaskDeleteView.as_view(),name='delete'),\n]\n" }, { "alpha_fraction": 0.5139665007591248, "alphanum_fraction": 0.7206704020500183, "avg_line_length": 16.899999618530273, "blob_id": "7ffcba73a379c46fa24b59f80470acc565aeadd1", "content_id": "a0667588421e404294e0d4477ac2a752a6a8cd3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 179, "license_type": "no_license", "max_line_length": 39, "num_lines": 10, "path": "/requirements_common.txt", "repo_name": "mikelaughton/reimagined-fiesta", "src_encoding": "UTF-8", "text": "appdirs==1.4.0\nDjango==1.10.5\ndjango-bootstrap3-datetimepicker==2.2.3\nolefile==0.44\npackaging==16.8\nPillow==4.0.0\npkg-resources==0.0.0\npyparsing==2.1.10\npytz==2016.10\nsix==1.10.0\n" }, { "alpha_fraction": 0.6234177350997925, "alphanum_fraction": 0.6360759735107422, "avg_line_length": 30.600000381469727, "blob_id": "2b4568829aa2cbfddd2a6f0e41b078b12007db20", "content_id": "db56ff4134594a6d7109f3318af2aea07d824ff5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 316, "license_type": "no_license", "max_line_length": 49, "num_lines": 10, "path": "/reminders/templatetags/remindertags.py", "repo_name": "mikelaughton/reimagined-fiesta", "src_encoding": "UTF-8", "text": "from django import template\nregister = template.Library()\n\[email protected](name='starout')\ndef starout(value):\n\t''' Expects 'string', returns 's****g' '''\n\tv_space_list = value.split(\" \")\n\tstar = lambda x: x[0] + \"*\"*len(x[1:-1]) + x[-1]\n\tstarred_out = [star(v) for v in v_space_list]\n\treturn \" \".join(starred_out)\n" }, { "alpha_fraction": 0.54347825050354, "alphanum_fraction": 0.6146245002746582, "avg_line_length": 24.299999237060547, "blob_id": "56d6ecb92eb82c9e788673acf05226de122d793f", "content_id": "1cbd37b47bf68afc1b3987ed465cf36c924ac59a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 506, "license_type": "no_license", "max_line_length": 102, "num_lines": 20, "path": "/reminders/migrations/0003_icon_description.py", "repo_name": "mikelaughton/reimagined-fiesta", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.5 on 2017-02-18 19:58\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('reminders', '0002_auto_20170218_1956'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='icon',\n name='description',\n field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Description'),\n ),\n ]\n" }, { "alpha_fraction": 0.646379828453064, "alphanum_fraction": 0.6505771279335022, "avg_line_length": 34.62616729736328, "blob_id": "54f09f704a38e4b8936f247712f2367811d91da9", "content_id": "94429fc5f52acd59e89a3faa098a8a603f19af25", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3812, "license_type": "no_license", "max_line_length": 148, "num_lines": 107, "path": "/static/js/asynctasks.js", "repo_name": "mikelaughton/reimagined-fiesta", "src_encoding": "UTF-8", "text": "function getCookie(name) {\n\t//Pull a cookie. Thanks Django docs!\n\tvar cookieValue = null;\n\tif (document.cookie && document.cookie !== '') {\n\t\tvar cookies = document.cookie.split(';');\n\t\tfor (var i = 0; i < cookies.length; i++) {\n\t\t\tvar cookie = jQuery.trim(cookies[i]);\n\t\t\t// Does this cookie string begin with the name we want?\n\t\t\tif (cookie.substring(0, name.length + 1) === (name + '=')) {\n\t\t\t cookieValue = decodeURIComponent(cookie.substring(name.length + 1));\n\t\t\t break;\n\t\t\t}\n\t\t}\n\t}\n\treturn cookieValue;\n}\nvar csrftoken = getCookie('csrftoken');\n\nvar resettimer = function(obj){\n\t//Find the nearest time element and reset it to zero. \n\tdiv = $( obj ).closest(\"div.activity-group\");\n\tconsole.log( div );\n\ttime_el = div.children(\"time\").first();\n\ttime_el.text(\"0 {% trans 'seconds' noop %}\");\n };\n\tdismiss_task = function(obj){ return false; }\n\tchange_timer = function(obj,new_text){ \n\ttimer = $(obj).parents(\".activity-group\").find(\"time\").first();\n\tconsole.log(timer);\n\ttimer.text(new_text);\n}\n\n//You could make this work too.\nperform_task = function(e){\n\t//Send AJAX request to relevant URL.\n\turl = e.target.href;\n\ttask_no = e.target.href.split(\"perform/\")[1];\n\tconsole.log(\"Task no:\" + task_no);\n\tdata = {the_task:task_no, \n\t\tcsrfmiddlewaretoken: csrftoken };\n\tt = $.post(url,data);\n\t\treturn t;\n\t\t}\nperform_task_handler = function(req,e){\n\t//e is the event, req is the ajax obj.\n\tJSON = req.responseJSON;\n\trtext = req.responseJSON.message;\n\triscd = req.responseJSON.is_countdown;\n\tnew_text = \"0 seconds\";\n\tif(!riscd & rtext==\"Success\"){ change_timer(e.target,new_text); $(e.target).addClass(\"btn-success\");$(e.target).removeClass(\"btn-default\"); }\n\tif(!riscd & rtext!=\"Success\"){ $(e.target).addClass(\"btn-warning\");$(e.target).removeClass(\"btn-default\"); console.log(\"AJAX says: \" + rtext); }\n\tif(riscd) { dismiss_task(e.target); }\n}\n\n//You could make this more generic but let's run with it for now.\ndisable_buttons = function(){\n\t//Tee up the buttons. 
Class should really be called perform_task.\t\n\t$(\"a.perform_action\").click(function(e){\n\t\te.preventDefault();\n\t\t$(e.target).removeClass(\"btn-success\");\n\t\t$(e.target).addClass(\"btn-default\");\n\t\tp = perform_task(e);\n\t\tp.done(function(){perform_task_handler(p,e);});\n\t\t});\n\t\t\n}\n\ninit_done_buttons = function(){\n\tdisable_buttons();\n}\nvar task_create_clone = $('<div class=\"activity-group well task-create\">' + \"<h1>Create reminder</h1>\" +\n'<p id=\"create_button\" style=\"font-size:400%;\"><a href=\"/create\" class=\"create_task\">+</a></p>' +'</div>');\nrestore_task_create = function(){\n\tconsole.log(task_create_clone);\n\t$(\".task-create\").replaceWith(task_create_clone);\n\t$grid.masonry('layout');\n}\n\nbuild_create_form = function(obj,get_url,post_url){\n\tform_req = $.getJSON(get_url,function(resp){\n\t\tform = $( \"<form/>\",{action:post_url,method:\"POST\"});\n\t\th1 = $(\"<h1>Create</h1>\");\n\t\t$(form).append(h1);\n\t\t$(form).append($(resp.form));\n\t\t$(form).append($('<input type=\"submit\" value=\"Submit\" class=\"btn btn-success\"> <a class=\"btn btn-warning\" id=\"cancel_create_button\">Cancel</a>'));\n\t\t$grid.on('click','#cancel_create_button',restore_task_create);\n\t\t$(form).append('<input type=\"hidden\" value=\"'+csrftoken+'\" name=\"csrfmiddlewaretoken\">');\n\t\t$(obj).append(form);\n\t\t//I do not know why this works, but it does.\n\t\t$grid.masonry();\n\t\t});\n}\n\ninit_create_buttons = function(action_url) { \n\t$(\"a.create_task\").click(function(e){\n\t\tconsole.log(\"Clicked.\");\n\t\te.preventDefault();\n\t\tdiv = $(\"<div/>\",{'class':'grid-item','id':'newForm'});\n\t\tform_container = $(\"<div/>\",{'class':'well activity-group task-create',});\n\t\t$(div).append(form_container)\n\t\tform = build_create_form(form_container,action_url,e.target.href);\n\t\t$div = $(div);\n\t\t$(\".task-create\").first().replaceWith(form_container);\n\t\t//$(\"#create_button\").hide();\n\t\t$grid.masonry('layout');\n\t});\n }\n" }, { "alpha_fraction": 0.5745097994804382, "alphanum_fraction": 0.615686297416687, "avg_line_length": 24.5, "blob_id": "df455072523ff3744e276ba49a7029c47d27fd16", "content_id": "2d273f844c8f113a5c9f607b1e370f3de2366a55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 510, "license_type": "no_license", "max_line_length": 113, "num_lines": 20, "path": "/reminders/migrations/0005_task_countdown.py", "repo_name": "mikelaughton/reimagined-fiesta", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.5 on 2017-02-23 21:58\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('reminders', '0004_task_deadline'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='task',\n name='countdown',\n field=models.BooleanField(default=True, help_text='Countdown or countup?', verbose_name='Countdown'),\n ),\n ]\n" }, { "alpha_fraction": 0.5658436417579651, "alphanum_fraction": 0.6090534925460815, "avg_line_length": 23.299999237060547, "blob_id": "809e376d2e77e764cddedd94809957f38e202fe7", "content_id": "ac3fe2071524d8b7674772552d3b4b4933a3cc2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 486, "license_type": "no_license", "max_line_length": 87, "num_lines": 20, "path": "/reminders/migrations/0004_task_deadline.py", "repo_name": "mikelaughton/reimagined-fiesta", "src_encoding": 
"UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.5 on 2017-02-18 23:41\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('reminders', '0003_icon_description'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='task',\n name='deadline',\n field=models.DateTimeField(blank=True, null=True, verbose_name='Deadline'),\n ),\n ]\n" }, { "alpha_fraction": 0.8100558519363403, "alphanum_fraction": 0.8100558519363403, "avg_line_length": 24.571428298950195, "blob_id": "441708e1e0fef498b97e779e5d3fdc7569959df5", "content_id": "0a0f658fc7dd7e5e4bc2031adba1bc742c7ba503", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 179, "license_type": "no_license", "max_line_length": 32, "num_lines": 7, "path": "/reminders/admin.py", "repo_name": "mikelaughton/reimagined-fiesta", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom reminders.models import *\n\n# Register your models here.\nadmin.site.register(Icon)\nadmin.site.register(Task)\nadmin.site.register(Performance)\n" } ]
14
wavecrasher/-3-W2-questions-
https://github.com/wavecrasher/-3-W2-questions-
580081348f34272374f13659c27cf06c0e9a8bb5
a1ae9760f4f00965b0e64289601a983a4fb429b1
c6a2fbfa43d4221fc55e46c473a8a72d1bc329b3
refs/heads/master
2022-12-05T14:26:55.776383
2020-07-27T17:22:45
2020-07-27T17:22:45
282,966,851
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6683937907218933, "alphanum_fraction": 0.7098445892333984, "avg_line_length": 37.70000076293945, "blob_id": "4c30a5734df8ca9cc34838998c7b9163d851ba27", "content_id": "3268d2cace26878bea28929b70f382bdb1363daa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 386, "license_type": "no_license", "max_line_length": 172, "num_lines": 10, "path": "/main.py", "repo_name": "wavecrasher/-3-W2-questions-", "src_encoding": "UTF-8", "text": "#Write a program that takes a number and tells the user if the number is greater than 10, less than 10, or equal to 10. Don't forget to convert the string into an integer. \n\nuser_num = int(input(\"enter enter a number: \"))\n\nif user_num > 10:\n print(\"Your number is greater than 10!\")\nelif user_num < 10:\n print(\"Your number is less than 10!\")\nelse:\n print(\"Your numbers equal to 10!\")" } ]
1
ananya299/Library-Management-System-Project
https://github.com/ananya299/Library-Management-System-Project
9ac9e9ad823cfb8cc293b51a6f7656e41d94176b
4824ed878ecfc085902a445abbfca646bad14df9
fdc5c9986d35da7ad95be64b15539ab146680952
refs/heads/master
2020-06-24T17:47:18.482223
2019-07-26T14:56:40
2019-07-26T14:56:40
199,034,937
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4646051526069641, "alphanum_fraction": 0.4717411994934082, "avg_line_length": 35.88070297241211, "blob_id": "ff44489fd51d0163e1e06c60e74a5861ff528bf2", "content_id": "c5512dd73dbbb649159eca069b08aace3ef30b2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10510, "license_type": "no_license", "max_line_length": 86, "num_lines": 285, "path": "/Library Management System/methods.py", "repo_name": "ananya299/Library-Management-System-Project", "src_encoding": "UTF-8", "text": "from classes import *\nimport pickle\n \n# Function to add a book into the pickle file (Book_data).\ndef addBook(): \n l=[]\n b_name=input(\"Enter name of the book: \")\n quantity=int(input(\"Number of copies: \"))\n isbn=int(input(\"Enter ISBN number: \"))\n with open(\"Book_data.pkl\",'rb') as f:\n l=pickle.load(f)\n with open(\"Book_data.pkl\",'wb') as f:\n s=Book_details(b_name,quantity,isbn)\n l.append(s)\n pickle.dump(l,f)\n\n# Function to display all the books from Book_data.pkl.\ndef showBook():\n with open(\"Book_data.pkl\",'rb') as f:\n obj=pickle.load(f)\n for i in obj:\n i.display()\n\n# Function to add a student into the pickle file (student_data.pkl).\ndef addStudent():\n student_name=input(\"Enter student name: \")\n student_year=(input(\"Enter year of admission: 20\"))\n student_id=(input(\"Student id number: \"))\n student_branch=int(input(\"Select one-\\n1.CSE\\n2.EC\\n3.EX\\n4.ME\\n5.CE\\n\"))\n if student_branch==1:\n roll_no='0187'+'CS'+student_year+'1'+student_id\n if student_branch==2:\n roll_no='0187'+'EC'+student_year+'1'+student_id\n if student_branch==3:\n roll_no='0187'+'EX'+student_year+'1'+student_id\n if student_branch==4:\n roll_no='0187'+'ME'+student_year+'1'+student_id\n if student_branch==5:\n roll_no='0187'+'CE'+student_year+'1'+student_id\n with open(\"student_data.pkl\",'rb') as f1:\n l1=pickle.load(f1)\n for i in l1:\n if i.roll_no==roll_no:\n print(\"Student already exists\")\n return \n else:\n with open(\"student_data.pkl\",'wb') as f:\n s=Student(student_name,student_year,student_id,student_branch,roll_no)\n l1.append(s)\n pickle.dump(l1,f)\n \n# Function to display all the students from student_data.pkl.\ndef showStudents():\n with open(\"student_data.pkl\",'rb') as f:\n obj=pickle.load(f)\n for i in obj:\n i.display()\n \n# Function to add a faculty into the pickle file (faculty_data.pkl). \ndef addFaculty():\n fname=input(\"Enter Faculty name: \")\n fid=int(input(\"Enter Faculty Id: \"))\n with open(\"faculty_data.pkl\",'rb') as f1:\n l1 = pickle.load(f1)\n for i in l1:\n if i.fid==fid:\n print(\"Faculty already exists\")\n else:\n with open(\"faculty_data.pkl\",'wb') as f:\n fobj=Faculty(fname,fid)\n l1.append(fobj)\n pickle.dump(l1,f)\n \n# Function to display all the faculties from faculty_data.pkl. 
\ndef showFaculties():\n with open(\"faculty_data.pkl\",'rb') as f:\n obj=pickle.load(f)\n #obj.display()\n for i in obj:\n i.display()\n\n# Function to issue a book (Student).\ndef Book_issue_S(issued_books,issued_rolls):\n s_roll=input(\"Enter enrollment number: \")\n with open(\"student_data.pkl\",'rb') as f1:\n l1 = pickle.load(f1)\n for i in l1:\n if i.roll_no==s_roll:\n showBook()\n isb=int(input(\"Enter ISBN of required book: \"))\n with open(\"Book_data.pkl\",'rb') as f:\n obj=pickle.load(f)\n for i in obj:\n if i.isbn==isb:\n if i.quantity==0:\n print(\"Copies exhausted\") \n issued_books.append(isb)\n issued_rolls.append(s_roll)\n i.quantity-=1\n with open(\"Book_data.pkl\",'wb') as f:\n pickle.dump(obj,f)\n break\n else:\n print(\"Student doesn't exist\")\n\n# Function to issue a book (Faculty).\ndef Book_issue_F(issued_fbooks,issued_fid):\n f_id=int(input(\"Enter Employee ID: \"))\n with open(\"faculty_data.pkl\",'rb') as f1:\n l1 = pickle.load(f1)\n for i in l1:\n if i.fid==f_id:\n showBook()\n isb=int(input(\"Enter ISBN of required book: \"))\n with open(\"Book_data.pkl\",'rb') as f:\n obj=pickle.load(f)\n for i in obj:\n if i.isbn==isb:\n if i.quantity==0:\n print(\"Copies exhausted\")\n issued_fbooks.append(i.isbn)\n issued_fid.append(f_id)\n i.quantity-=1\n with open(\"Book_data.pkl\",'wb') as f:\n pickle.dump(obj,f)\n break\n else:\n print(\"Faculty doesn't exist\") \n\n#Function to return a book (Student).\ndef return_book_S(issued_books,issued_rolls):\n s_roll=input(\"Enter enrollment number: \")\n with open(\"student_data.pkl\",'rb') as f1:\n l1 = pickle.load(f1)\n for i in l1:\n if i.roll_no==s_roll:\n showBook()\n isb=int(input(\"Enter ISBN of book to be returned: \"))\n for i in issued_books:\n for j in issued_rolls:\n if i==isb and j==s_roll:\n issued_books.remove(i)\n issued_rolls.remove(j)\n with open(\"Book_data.pkl\",'rb') as f:\n obj=pickle.load(f)\n for z in obj:\n if z.isbn==isb:\n z.quantity+=1\n with open(\"Book_data.pkl\",'wb') as f:\n pickle.dump(obj,f) \n break\n else:\n print(\"Student doesn't exist\")\n\n# Function to return a book (Faculty).\ndef return_book_F(issued_fbooks,issued_fid):\n eid=int(input(\"Enter Employee ID: \"))\n with open(\"faculty_data.pkl\",'rb') as f1:\n l1 = pickle.load(f1)\n for i in l1:\n if i.fid==eid:\n showBook()\n isb=int(input(\"Enter ISBN of book to be returned: \"))\n for i in issued_fbooks:\n for j in issued_fid:\n if i==isb and j==eid:\n issued_fbooks.remove(i)\n issued_fid.remove(j)\n with open(\"Book_data.pkl\",'rb') as f:\n obj=pickle.load(f)\n for z in obj:\n if z.isbn==isb:\n z.quantity+=1\n with open(\"Book_data.pkl\",'wb') as f:\n pickle.dump(obj,f) \n break\n else:\n print(\"Faculty doesn't exist\")\n \n# Function to display number of books issued by a particular student. 
\ndef display_records_S(issued_books,issued_rolls):\n s_roll=input(\"Enter roll no.: \")\n with open(\"student_data.pkl\",'rb') as f:\n obj=pickle.load(f)\n for i in obj:\n if i.roll_no==s_roll:\n i.display()\n for z in range(0,len(issued_books)):\n for j in range(0,len(issued_rolls)):\n if issued_rolls[j]==s_roll:\n z=j\n print(f\"Book Issued (ISBN): {issued_books[z]}\")\n break\n break\n else:\n print(\"Not Found\")\n\n# Function to display number of books issued by a particular faculty.\ndef display_records_F(issued_fbooks,issued_fid):\n eid=int(input(\"Enter Emlpoyee ID: \"))\n with open(\"faculty_data.pkl\",'rb') as f:\n obj=pickle.load(f)\n for i in obj:\n if i.fid==eid:\n i.display()\n for z in range(0,len(issued_fbooks)):\n for j in range(0,len(issued_fid)):\n if issued_fid[j]==eid:\n z=j\n print(f\"Book Issued (ISBN): {issued_fbooks[z]}\")\n break\n break\n else:\n print(\"Not Found\")\n\n# Function to search a paricular student from student_data.pkl.\ndef search_S():\n s_roll=input(\"Enter roll no.: \")\n with open(\"student_data.pkl\",'rb') as f:\n obj=pickle.load(f)\n for i in obj:\n if i.roll_no==s_roll:\n print(\"Student Found\")\n i.display()\n break\n else:\n print(\"Student not Found\")\n \n# Function to search a paricular faculty from faculty_data.pkl.\ndef search_F():\n eid=int(input(\"Enter Faculty ID: \"))\n with open(\"faculty_data.pkl\",'rb') as f:\n obj=pickle.load(f)\n for i in obj:\n if i.fid==eid:\n print(\"Faculty Found\")\n i.display()\n break\n else:\n print(\"Faculty not Found\")\n\n# Function to search a paricular book from Book_data.pkl.\ndef search_B():\n isb=int(input(\"Enter Book ISBN: \"))\n with open(\"Book_data.pkl\",'rb') as f:\n obj=pickle.load(f)\n for i in obj:\n if i.isbn==isb:\n print(\"Book Found\")\n i.display()\n break\n else:\n print(\"Book not Found\")\n \n# Function to remove a paricular student from student_data.pkl.\ndef remove_S():\n s_roll=input(\"Enter roll no.: \")\n with open(\"student_data.pkl\",'rb') as f:\n obj=pickle.load(f)\n for i in obj:\n if i.roll_no==s_roll:\n obj.remove(i)\n print(\"Student removed\")\n i.display()\n with open(\"student_data.pkl\",'wb') as f:\n pickle.dump(obj,f) \n break\n else:\n print(\"Student not Found\")\n \n# Function to remove a paricular faculty from faculty_data.pkl. 
\ndef remove_F():\n eid=int(input(\"Enter Employee ID: \"))\n with open(\"faculty_data.pkl\",'rb') as f:\n obj=pickle.load(f)\n for i in obj:\n if i.fid==eid:\n obj.remove(i)\n print(\"Faculty removed\")\n i.display()\n with open(\"faculty_data.pkl\",'wb') as f:\n pickle.dump(obj,f) \n break\n else:\n print(\"Faculty not Found\")" }, { "alpha_fraction": 0.6172972917556763, "alphanum_fraction": 0.6172972917556763, "avg_line_length": 30.931034088134766, "blob_id": "eefd30400ba7a2c8c5a89b2274b7511f34dbe9b1", "content_id": "a7f36ccd2fb78cdb74c41719f3516c1e42e9a024", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 925, "license_type": "no_license", "max_line_length": 100, "num_lines": 29, "path": "/Library Management System/classes.py", "repo_name": "ananya299/Library-Management-System-Project", "src_encoding": "UTF-8", "text": "class Student:\n def __init__(self,student_name,student_year,student_id,student_branch,roll_no):\n self.student_name=student_name\n self.student_year=student_year\n self.student_id=student_id\n self.student_branch=student_branch\n self.roll_no=roll_no\n def display(self):\n print(f\"Name: {self.student_name},\\nRoll no: {self.roll_no}\\n**\")\n \nclass Book_details:\n def __init__(self,b_name,quantity,isbn):\n self.b_name=b_name\n self.quantity=quantity\n self.isbn=isbn\n def display(self):\n print(f\"Title: {self.b_name},\\n Number of copies: {self.quantity},\\n ISBN: {self.isbn}\\n**\")\n\nclass Faculty:\n def __init__(self,fname,fid):\n self.fname=fname\n self.fid=fid\n def display(self):\n print(f\"Faculty Name: {self.fname},\\nFaculty ID: {self.fid}\\n**\")\n \nissued_books=[]\nissued_rolls=[]\nissued_fbooks=[]\nissued_fid=[]" }, { "alpha_fraction": 0.3753872215747833, "alphanum_fraction": 0.3979160785675049, "avg_line_length": 29.8869571685791, "blob_id": "b3121bad455f8371ae85fd45706cc9335432b3cf", "content_id": "3e289641ac816f4c33cedb1f295aa6d26b268178", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3551, "license_type": "no_license", "max_line_length": 64, "num_lines": 115, "path": "/Library Management System/frontend.py", "repo_name": "ananya299/Library-Management-System-Project", "src_encoding": "UTF-8", "text": "from methods import *\nimport getpass\nimport sys\n\ndef main():\n done=False\n while done==False:\n print(\"\"\" ======LIBRARY MANAGEMENT SYSTEM=======\n 1. ADMIN\n 2. FACULTY\n 3. STUDENT\n 4. Exit\n \"\"\")\n ch=int(input(\"Enter choice: \"))\n if ch==1:\n print(\"Enter Password:\")\n p = getpass.getpass()\n if p == '123':\n print('Welcome User\\n')\n choice=int(input(\"\"\"Select any one: \n1. Add Student\n2. Display all Students\n3. Add Faculty\n4. Display all Faculties\n5. Add Book\n6. Show all Books\n7. Remove Student\n8. Remove Faculty\n9. Search Student \n10. Search Faculty\n11. Search Book\n12. Issue Book Student\n13. Issue Book Faculty\n14. Return Book Student\n15. Return Book Faculty\n16. Display Student Records\n17. Display Faculty Records\n18. 
EXIT\n**************************\n**************************\n\"\"\"))\n if choice==1:\n addStudent()\n elif choice==2:\n showStudents()\n elif choice==3:\n addFaculty()\n elif choice==4:\n showFaculties()\n elif choice==5:\n addBook()\n elif choice==6:\n showBook()\n elif choice==7:\n remove_S()\n elif choice==8:\n remove_F()\n elif choice==9:\n search_S()\n elif choice==10:\n search_F()\n elif choice==11:\n search_B()\n elif choice==12:\n Book_issue_S(issued_books,issued_rolls)\n elif choice==13:\n Book_issue_F(issued_fbooks,issued_fid)\n elif choice==14:\n return_book_S(issued_books,issued_rolls)\n elif choice==15:\n return_book_F(issued_fbooks,issued_fid)\n elif choice==16:\n display_records_S(issued_books,issued_rolls)\n elif choice==17:\n display_records_F(issued_fbooks,issued_fid)\n else:\n sys.exit(0)\n else: \n print('Invalid Password. Try Again!')\n \n elif ch==2:\n choice=int(input(\"\"\"Select any one:\n 1. Display Records\n 2. Show Available Books\n 3. Search Book\n 4. EXIT\n \"\"\"))\n if choice==1:\n display_records_F(issued_fbooks,issued_fid)\n elif choice==2:\n showBook()\n elif choice==3:\n search_B()\n else:\n sys.exit(0)\n \n elif ch==3:\n choice=int(input(\"\"\"Select any one: \n 1. Display Records\n 2. Show Available Books\n 3. Search Book \n 4. EXIT\n \"\"\"))\n if choice==1:\n display_records_S(issued_books,issued_rolls)\n elif choice==2:\n showBook()\n elif choice==3:\n search_B()\n else:\n sys.exit(0) \n\n else:\n sys.exit(0)\nmain()" } ]
3
tomelf/CS530-Projects-Scientific-Visualization
https://github.com/tomelf/CS530-Projects-Scientific-Visualization
99cd9e675c8d14e8488923e1f2e71fb47f130cea
cc6e28d0b776f34ccd8757c894fab8ee5a6bebea
7c7de9d01b405716863cf18d6c44c26959a31677
refs/heads/master
2020-03-08T01:25:55.548567
2018-04-03T01:09:22
2018-04-03T01:09:22
127,830,023
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5494841933250427, "alphanum_fraction": 0.5999355316162109, "avg_line_length": 33.85393142700195, "blob_id": "3fd3c94b97b8ab59a5cc202b94f8fa065561d240", "content_id": "b82a9ab1efc4dfe2d2ff19566a869c4ca4c8ea45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6204, "license_type": "no_license", "max_line_length": 100, "num_lines": 178, "path": "/project5/src/three_planes.py", "repo_name": "tomelf/CS530-Projects-Scientific-Visualization", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport sys\nimport vtk\n\ndef print_camera_settings(obj, event):\n global ren\n # ---------------------------------------------------------------\n # Print out the current settings of the camera\n # ---------------------------------------------------------------\n camera = ren.GetActiveCamera()\n print \"Camera settings:\"\n print \" * position: %s\" % (camera.GetPosition(),)\n print \" * focal point: %s\" % (camera.GetFocalPoint(),)\n print \" * up vector: %s\" % (camera.GetViewUp(),)\n print \" * clipping range: %s\" % (camera.GetClippingRange(),)\n \ndef after_print_camera_settings(obj, event):\n print \"\"\n\ndef Main():\n global ren, sliders, planes, planeCuts, origins\n # Create the RenderWindow, Renderer and both Actors\n ren = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.SetMultiSamples(0)\n renWin.AddRenderer(ren)\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n iren.RemoveObservers('RightButtonPressEvent')\n iren.AddObserver('RightButtonPressEvent', print_camera_settings, 1.0)\n iren.AddObserver('RightButtonPressEvent', after_print_camera_settings, -1.0)\n \n print \"data: %s %s\" % (sys.argv[1], sys.argv[2])\n \n cfdreader = vtk.vtkStructuredPointsReader()\n cfdreader.SetFileName(sys.argv[1])\n\n # setup wing data\n wingReader = vtk.vtkUnstructuredGridReader()\n wingReader.SetFileName(sys.argv[2])\n wingReader.Update()\n wingMapper = vtk.vtkDataSetMapper()\n wingMapper.SetInputConnection(wingReader.GetOutputPort())\n wingActor = vtk.vtkActor()\n wingActor.SetMapper(wingMapper)\n wingActor.GetProperty().SetColor(.4, .4, .4)\n \n planes = [\n vtk.vtkPlane(),\n vtk.vtkPlane(),\n vtk.vtkPlane()\n ]\n \n planeCuts = [\n vtk.vtkCutter(),\n vtk.vtkCutter(),\n vtk.vtkCutter()\n ]\n \n normals = [\n [1, 0, 0],\n [1, 0, 0],\n [1, 0, 0]\n ]\n origins = [\n [20, 0, 0],\n [100, 0, 0],\n [190, 0, 0]\n ]\n \n sliders = [\n vtk.vtkSliderRepresentation2D(),\n vtk.vtkSliderRepresentation2D(),\n vtk.vtkSliderRepresentation2D()\n ]\n \n sliderWidgets = [\n vtk.vtkSliderWidget(),\n vtk.vtkSliderWidget(),\n vtk.vtkSliderWidget()\n ]\n \n bPlaneToActor = [True, True, True]\n bWingToActor = True\n \n datamin = 0\n datamax = 230\n lut = vtk.vtkColorTransferFunction()\n lut.SetColorSpaceToHSV()\n lut.AddHSVPoint(datamin, 0, 1, 1)\n dis = (datamax - datamin) / 7\n for i in range(0, 8):\n lut.AddHSVPoint(datamin + dis * i, 0.1 * i, 1, 1)\n \n for i in range(0, len(planes)):\n planes[i].SetOrigin(origins[i])\n planes[i].SetNormal(normals[i])\n planeCuts[i].SetInputConnection(cfdreader.GetOutputPort())\n planeCuts[i].SetCutFunction(planes[i])\n \n arrowSource = vtk.vtkArrowSource()\n arrowSource.SetTipLength(0.3)\n arrowSource.SetShaftRadius(0.001)\n \n vectorGlyph = vtk.vtkGlyph3D()\n vectorGlyph.SetInputConnection(0, planeCuts[i].GetOutputPort())\n vectorGlyph.SetInputConnection(1, arrowSource.GetOutputPort())\n vectorGlyph.ScalingOn()\n vectorGlyph.SetScaleModeToScaleByVector()\n 
vectorGlyph.SetScaleFactor(0.35)\n vectorGlyph.OrientOn()\n vectorGlyph.ClampingOff()\n vectorGlyph.SetVectorModeToUseVector()\n vectorGlyph.SetIndexModeToOff()\n \n cutMapper = vtk.vtkDataSetMapper()\n cutMapper.SetLookupTable(lut)\n cutMapper.SetScalarRange(vectorGlyph.GetRange())\n cutMapper.SetInputConnection(vectorGlyph.GetOutputPort())\n \n cutActor = vtk.vtkActor()\n cutActor.SetMapper(cutMapper)\n cutActor.GetProperty().SetOpacity(0.4);\n cutActor.GetProperty().SetColor(0, 1, 0)\n \n if bPlaneToActor[i]:\n ren.AddActor(cutActor)\n \n sliders[i].SetMinimumValue(-50)\n sliders[i].SetMaximumValue(230)\n sliders[i].SetValue(origins[i][0])\n sliders[i].SetTitleText(\"x-axis of plane %d\" % i)\n sliders[i].GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\n sliders[i].GetPoint1Coordinate().SetValue(0.0, 1 - 0.1 * (i + 1))\n sliders[i].GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\n sliders[i].GetPoint2Coordinate().SetValue(0.2, 1 - 0.1 * (i + 1))\n sliders[i].SetSliderLength(0.02)\n sliders[i].SetSliderWidth(0.03)\n sliders[i].SetEndCapLength(0.01)\n sliders[i].SetEndCapWidth(0.03)\n sliders[i].SetTubeWidth(0.005)\n sliders[i].SetLabelFormat(\"%3.0lf\")\n sliders[i].SetTitleHeight(0.02)\n sliders[i].SetLabelHeight(0.02)\n sliderWidgets[i].SetInteractor(iren)\n sliderWidgets[i].SetRepresentation(sliders[i])\n sliderWidgets[i].KeyPressActivationOff()\n sliderWidgets[i].SetAnimationModeToAnimate()\n sliderWidgets[i].SetEnabled(False)\n sliderWidgets[i].AddObserver(\"InteractionEvent\", sliderHandler)\n\n if bWingToActor:\n ren.AddActor(wingActor)\n \n ren.SetBackground(0, 0, 0)\n renWin.SetSize(1600, 900)\n ren.ResetCamera()\n ren.GetActiveCamera().SetClippingRange(203.2899494251721, 731.8103494457274)\n ren.GetActiveCamera().SetFocalPoint(118.72183980792761, 0.00012969970703125, 36.469017028808594)\n ren.GetActiveCamera().SetPosition(300.86018729049954, -5.765715551063601, 435.4418666873332)\n ren.GetActiveCamera().SetViewUp(-0.802117714199773, -0.005112780752923929, 0.5971440630533839)\n \n # Render\n iren.Initialize()\n renWin.Render()\n iren.Start()\n\ndef sliderHandler(obj, event):\n global origins, sliders, planes, planeCuts\n \n for i in range(0, len(planes)):\n origins[i][0] = sliders[i].GetValue()\n planes[i].SetOrigin(origins[i])\n planeCuts[i].Update()\n\nif __name__ == \"__main__\":\n Main()\n" }, { "alpha_fraction": 0.606601893901825, "alphanum_fraction": 0.6617605090141296, "avg_line_length": 33.1365852355957, "blob_id": "c4dc9834ed9ad81dfa62d7947f0f1dd98f9b1aba", "content_id": "5f1ce991096360ca3ac6c518da4ebec600695993", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6998, "license_type": "no_license", "max_line_length": 100, "num_lines": 205, "path": "/project5/src/combined.py", "repo_name": "tomelf/CS530-Projects-Scientific-Visualization", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport sys\nimport vtk\n\ndef isovalueSliderHandler(obj, event):\n global isovalue, contours\n isovalue = obj.GetRepresentation().GetValue()\n contours.SetValue(0, isovalue)\n print \"Change Isovalue: %f\" % isovalue\n\ndef print_camera_settings(obj, event):\n global ren\n # ---------------------------------------------------------------\n # Print out the current settings of the camera\n # ---------------------------------------------------------------\n camera = ren.GetActiveCamera()\n print \"Camera settings:\"\n print \" * position: %s\" % (camera.GetPosition(),)\n print \" * focal point: 
%s\" % (camera.GetFocalPoint(),)\n print \" * up vector: %s\" % (camera.GetViewUp(),)\n print \" * clipping range: %s\" % (camera.GetClippingRange(),)\n \ndef after_print_camera_settings(obj, event):\n print \"\"\n\ndef Main():\n global isovalue, contours, ren\n \n # Create the RenderWindow, Renderer and both Actors\n ren = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.SetMultiSamples(0)\n renWin.AddRenderer(ren)\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n iren.RemoveObservers('RightButtonPressEvent')\n iren.AddObserver('RightButtonPressEvent', print_camera_settings, 1.0)\n iren.AddObserver('RightButtonPressEvent', after_print_camera_settings, -1.0)\n\n print \"data: %s %s %s\" % (sys.argv[1], sys.argv[2], sys.argv[3])\n \n cfdreader = vtk.vtkStructuredPointsReader()\n cfdreader.SetFileName(sys.argv[1])\n \n # setup wing data\n wingReader = vtk.vtkUnstructuredGridReader()\n wingReader.SetFileName(sys.argv[3])\n wingReader.Update()\n wingMapper = vtk.vtkDataSetMapper()\n wingMapper.SetInputConnection(wingReader.GetOutputPort())\n wingActor = vtk.vtkActor()\n wingActor.SetMapper(wingMapper)\n wingActor.GetProperty().SetColor(.4, .4, .4)\n \n bRakesToActor = [True, True, False]\n bWingToActor = True\n \n datamin = 0\n datamax = 230\n \n lut = vtk.vtkColorTransferFunction()\n lut.SetColorSpaceToHSV()\n lut.AddHSVPoint(datamin, 0, 1, 1)\n dis = float(datamax - datamin) / 7\n for i in range(0, 8):\n lut.AddHSVPoint(float(datamin + dis * i), 0.1 * i, 1, 1)\n \n colorBar = vtk.vtkScalarBarActor()\n colorBar.SetLookupTable(lut)\n colorBar.SetTitle(\"\")\n colorBar.SetNumberOfLabels(6)\n colorBar.SetLabelFormat(\"%4.0f\")\n colorBar.SetPosition(0.9, 0.1)\n colorBar.SetWidth(0.05)\n colorBar.SetHeight(0.4)\n ren.AddActor(colorBar)\n \n rakes = [\n vtk.vtkLineSource(),\n vtk.vtkLineSource(),\n vtk.vtkLineSource()\n ]\n rakes[0].SetPoint1(-230, -230, 0)\n rakes[0].SetPoint2(230, 230, 0)\n rakes[0].SetResolution(50)\n \n rakes[1].SetPoint1(230, -230, 0)\n rakes[1].SetPoint2(-230, 230, 0)\n rakes[1].SetResolution(50)\n \n rakes[2].SetPoint1(0, -100, 10)\n rakes[2].SetPoint2(0, 100, 10)\n rakes[2].SetResolution(60)\n \n rakeColors = [\n [0, 1, 0],\n [0, 1, 0],\n [0, 1, 0],\n ]\n \n for i in range(0, len(rakes)):\n integ = vtk.vtkRungeKutta4()\n streamLine = vtk.vtkStreamLine()\n streamLine.SetInputConnection(cfdreader.GetOutputPort())\n streamLine.SetSourceConnection(rakes[i].GetOutputPort())\n streamLine.SetMaximumPropagationTime(50);\n streamLine.SetIntegrationStepLength(1);\n streamLine.SetStepLength(0.01);\n streamLine.SetIntegrationDirectionToForward();\n streamLine.SetIntegrator(integ)\n streamLine.SpeedScalarsOn()\n \n streamLineMapper = vtk.vtkPolyDataMapper()\n streamLineMapper.SetInputConnection(streamLine.GetOutputPort())\n streamLineMapper.SetLookupTable(lut)\n \n streamLineActor = vtk.vtkActor()\n streamLineActor.SetMapper(streamLineMapper)\n streamLineActor.GetProperty().SetColor(rakeColors[i])\n streamLineActor.GetProperty().SetOpacity(0.9);\n \n if bRakesToActor[i]:\n ren.AddActor(streamLineActor)\n \n if bWingToActor:\n ren.AddActor(wingActor)\n \n isoreader = vtk.vtkStructuredPointsReader()\n isoreader.SetFileName(sys.argv[2])\n isoreader.Update()\n \n# r = isoreader.GetOutput().GetScalarRange()\n# datamin = r[0]\n# datamax = r[1]\n# isovalue = (datamax+datamin)/2.0\n isovalue = 300\n datamin = 0\n datamax = 300\n \n contours = vtk.vtkContourFilter()\n contours.SetInputConnection(isoreader.GetOutputPort());\n contours.ComputeNormalsOn()\n 
contours.SetValue(0, isovalue)\n \n lut = vtk.vtkColorTransferFunction()\n lut.SetColorSpaceToHSV()\n lut.AddHSVPoint(datamin, 0, 1, 1)\n dis = (datamax - datamin) / 7\n for i in range(0, 8):\n lut.AddHSVPoint(datamin + dis * i, 0.1 * i, 1, 1)\n \n isoMapper = vtk.vtkPolyDataMapper()\n isoMapper.SetLookupTable(lut)\n isoMapper.SetInputConnection(contours.GetOutputPort())\n\n isoActor = vtk.vtkActor()\n isoActor.GetProperty().SetRepresentationToWireframe()\n isoActor.SetMapper(isoMapper)\n isoActor.GetProperty().SetOpacity(0.08);\n\n ren.AddActor(wingActor)\n ren.AddActor(isoActor)\n \n ren.SetBackground(0, 0, 0)\n renWin.SetSize(1600, 900)\n \n isovalueSlider = vtk.vtkSliderRepresentation2D()\n isovalueSlider.SetMinimumValue(datamin)\n isovalueSlider.SetMaximumValue(datamax)\n isovalueSlider.SetValue(isovalue)\n isovalueSlider.SetTitleText(\"isovalue\")\n isovalueSlider.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\n isovalueSlider.GetPoint1Coordinate().SetValue(0.0, 0.4)\n isovalueSlider.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\n isovalueSlider.GetPoint2Coordinate().SetValue(0.2, 0.4)\n isovalueSlider.SetSliderLength(0.02)\n isovalueSlider.SetSliderWidth(0.03)\n isovalueSlider.SetEndCapLength(0.01)\n isovalueSlider.SetEndCapWidth(0.03)\n isovalueSlider.SetTubeWidth(0.005)\n isovalueSlider.SetLabelFormat(\"%3.0lf\")\n isovalueSlider.SetTitleHeight(0.02)\n isovalueSlider.SetLabelHeight(0.02)\n SliderWidget1 = vtk.vtkSliderWidget()\n SliderWidget1.SetInteractor(iren)\n SliderWidget1.SetRepresentation(isovalueSlider)\n SliderWidget1.KeyPressActivationOff()\n SliderWidget1.SetAnimationModeToAnimate()\n SliderWidget1.SetEnabled(False)\n SliderWidget1.AddObserver(\"InteractionEvent\", isovalueSliderHandler)\n \n ren.ResetCamera()\n ren.GetActiveCamera().SetClippingRange(417.55784439078775, 1491.5763714138557)\n ren.GetActiveCamera().SetFocalPoint(118.72183980792761, 0.00012969970703125, 36.469017028808594)\n ren.GetActiveCamera().SetPosition(680.0192576650034, 16.65944318371372, 790.5781258299678)\n ren.GetActiveCamera().SetViewUp(-0.802117714199773, -0.005112780752923929, 0.5971440630533839)\n\n # Render\n iren.Initialize()\n renWin.Render()\n iren.Start()\n\nif __name__ == \"__main__\":\n Main()\n" }, { "alpha_fraction": 0.6374008655548096, "alphanum_fraction": 0.6682413816452026, "avg_line_length": 32.897281646728516, "blob_id": "e55a4434b2eeb5687ff39be5bb5caf8bdb415653", "content_id": "c6f1265fabc1335411245b082d867dae26cef95f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11219, "license_type": "no_license", "max_line_length": 160, "num_lines": 331, "path": "/project3/src/isogm.py", "repo_name": "tomelf/CS530-Projects-Scientific-Visualization", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport sys\nimport re\nimport vtk\nfrom vtk import vtkScalarBarActor\n\ncontinuousType = \"HSV\"\ndiscreateType = \"HSV\"\ncontinuousSize = 0\ndiscreateSize = 0\ncontinuousData = []\ndiscreteData = []\n\ndef loadContinuousFile(filename):\n global continuousType, continuousSize\n f = open(filename, \"r\")\n first_flag = True\n for line in f:\n line = line.lstrip()\n if line[0] != '#' and first_flag:\n first_flag = False\n \n m = re.match('(\\d+)[ \\t]*([A-Za-z]+)', line)\n if m:\n continuousSize = m.group(1)\n continuousType = m.group(2)\n \n elif line[0] != '#' and not first_flag:\n m = re.match('([-\\./\\d]+)[ \\t]*([-\\./\\d]+)[ \\t]*([-\\./\\d]+)[ \\t]*([-\\./\\d]+)', line)\n if m:\n 
continuousData.append([m.group(1), m.group(2), m.group(3), m.group(4)])\n f.close()\n\ndef loadDiscreteFile(filename):\n global discreateType, discreateSize\n f = open(filename, \"r\")\n first_flag = True\n for line in f:\n line = line.lstrip()\n if line[0] != '#' and first_flag:\n first_flag = False\n m = re.match('(\\d+)[ \\t]*([A-Za-z]+)', line)\n if m:\n discreateSize = m.group(1)\n discreateType = m.group(2)\n \n elif line[0] != '#' and not first_flag:\n m = re.match('([-\\./\\d]+)[ \\t]*([-\\./\\d]+)[ \\t]*([-\\./\\d]+)[ \\t]*([-\\./\\d]+)', line)\n if m:\n discreteData.append([m.group(1), m.group(2), m.group(3), m.group(4)])\n f.close()\n\ndef loadIsovalueFile(filename):\n f = open(filename, \"r\")\n isovalues = []\n for line in f:\n isovalues.append(float(line.strip()))\n f.close()\n \n return isovalues\n\ndef updateUI(mode=0):\n lut.RemoveAllPoints()\n print \"[Color Map]\"\n if mode==0: #continuous\n print continuousType\n if continuousType == \"HSV\":\n lut.SetColorSpaceToHSV()\n for p in continuousData:\n print p\n lut.AddHSVPoint(float(p[0]),float(p[1]),float(p[2]),float(p[3]),0.5,0)\n elif continuousType == \"RGB\":\n lut.SetColorSpaceToRGB()\n for p in continuousData:\n print p\n lut.AddRGBPoint(float(p[0]),float(p[1]),float(p[2]),float(p[3]),0.5,0)\n elif mode==1: #discrete\n print discreateType\n if discreateType == \"HSV\":\n lut.SetColorSpaceToHSV()\n for p in discreteData:\n print p\n lut.AddHSVPoint(float(p[0]),float(p[1]),float(p[2]),float(p[3]),0.5,1)\n elif discreateType == \"RGB\":\n lut.SetColorSpaceToRGB()\n for p in discreteData:\n print p\n lut.AddRGBPoint(float(p[0]),float(p[1]),float(p[2]),float(p[3]),0.5,1)\n\ndef showContinuous():\n updateUI(0)\n renWin.Render()\n\ndef showDiscrete():\n updateUI(1)\n renWin.Render()\n\ndef clipXSliderHandler(obj, event):\n global clipX\n clipX = obj.GetRepresentation().GetValue()\n updateCT()\n\ndef clipYSliderHandler(obj, event):\n global clipY\n clipY = obj.GetRepresentation().GetValue()\n updateCT()\n\ndef clipZSliderHandler(obj, event):\n global clipZ\n clipZ = obj.GetRepresentation().GetValue()\n updateCT()\n\ndef updateCT():\n global clipX, clipY, clipZ, plane1, plane2, plane3, planeSource1, planeSource2, planeSource3, clipper1, clipper2, clipper3\n planeSource1.SetOrigin(clipX,0,0)\n planeSource2.SetOrigin(0,clipY,0)\n planeSource3.SetOrigin(0,0,clipZ)\n plane1.SetOrigin(planeSource1.GetOrigin())\n plane2.SetOrigin(planeSource2.GetOrigin())\n plane3.SetOrigin(planeSource3.GetOrigin())\n clipper1.Update()\n clipper2.Update()\n clipper3.Update()\n print \"clip (%f,%f,%f)\" %(clipX,clipY,clipZ)\n\ndef Main():\n global isovalues, contours, planeSource1, planeSource2, planeSource3, plane1, plane2, plane3, clipper1, clipper2, clipper3, clipX, clipY, clipZ, lut, renWin\n\n print \"data: %s\" % sys.argv[1]\n reader = vtk.vtkStructuredPointsReader()\n reader.SetFileName(sys.argv[1])\n reader.Update()\n \n print \"gradientmag: %s\" % sys.argv[2]\n gmreader = vtk.vtkStructuredPointsReader()\n gmreader.SetFileName(sys.argv[2])\n gmreader.Update()\n \n clipX = 0\n clipY = 0\n clipZ = 0\n \n r = reader.GetOutput().GetScalarRange()\n datamin = r[0]\n datamax = r[1]\n \n print \"isoval: %s\" % sys.argv[3]\n isovalues = loadIsovalueFile(sys.argv[3])\n \n lut = vtk.vtkColorTransferFunction()\n lut.SetColorSpaceToHSV()\n lut.AddHSVPoint(0,0,1,1)\n \n for i in range(4, len(sys.argv)):\n if sys.argv[i] == \"--cmap\":\n print \"colors file: %s\" % sys.argv[i+1]\n loadContinuousFile(sys.argv[i+1])\n updateUI(0)\n if sys.argv[i] == 
\"--clip\":\n print \"clip (%s,%s,%s)\" % (sys.argv[i+1],sys.argv[i+2],sys.argv[i+3])\n clipX = float(sys.argv[i+1])\n clipY = float(sys.argv[i+2])\n clipZ = float(sys.argv[i+3])\n \n contours = vtk.vtkContourFilter()\n contours.SetInputConnection(reader.GetOutputPort());\n contours.ComputeNormalsOn()\n \n for i in range(0, len(isovalues)):\n contours.SetValue(i, isovalues[i])\n\n planeSource1 = vtk.vtkPlaneSource()\n planeSource1.SetNormal(1,0,0)\n planeSource1.SetOrigin(clipX,0,0)\n planeSource2 = vtk.vtkPlaneSource()\n planeSource2.SetNormal(0,1,0)\n planeSource2.SetOrigin(0,clipY,0)\n planeSource3 = vtk.vtkPlaneSource()\n planeSource3.SetNormal(0,0,1)\n planeSource3.SetOrigin(0,0,clipZ)\n\n plane1 = vtk.vtkPlane()\n plane1.SetNormal(planeSource1.GetNormal())\n plane1.SetOrigin(planeSource1.GetOrigin())\n clipper1 = vtk.vtkClipPolyData()\n clipper1.SetClipFunction(plane1)\n clipper1.SetInputConnection(contours.GetOutputPort())\n clipper1.Update()\n\n plane2 = vtk.vtkPlane()\n plane2.SetNormal(planeSource2.GetNormal())\n plane2.SetOrigin(planeSource2.GetOrigin())\n clipper2 = vtk.vtkClipPolyData()\n clipper2.SetClipFunction(plane2)\n clipper2.SetInputConnection(clipper1.GetOutputPort())\n clipper2.Update()\n\n plane3 = vtk.vtkPlane()\n plane3.SetNormal(planeSource3.GetNormal())\n plane3.SetOrigin(planeSource3.GetOrigin())\n clipper3 = vtk.vtkClipPolyData()\n clipper3.SetClipFunction(plane3)\n clipper3.SetInputConnection(clipper2.GetOutputPort())\n clipper3.Update()\n\n probeFilter = vtk.vtkProbeFilter()\n probeFilter.SetInputConnection(0, clipper3.GetOutputPort())\n probeFilter.SetInputConnection(1, gmreader.GetOutputPort())\n probeFilter.Update()\n\n clipperMapper = vtk.vtkPolyDataMapper()\n clipperMapper.SetLookupTable(lut)\n clipperMapper.SetInputConnection(probeFilter.GetOutputPort())\n clipperMapper.SetScalarRange(probeFilter.GetOutput().GetScalarRange())\n\n colorBar = vtkScalarBarActor()\n colorBar.SetLookupTable(clipperMapper.GetLookupTable())\n colorBar.SetTitle(\"gradient magnitude\")\n colorBar.SetNumberOfLabels(6)\n colorBar.SetLabelFormat(\"%4.0f\")\n colorBar.SetPosition(0.9, 0.1)\n colorBar.SetWidth(0.1)\n colorBar.SetHeight(0.7)\n\n clipperActor=vtk.vtkActor()\n clipperActor.GetProperty().SetRepresentationToWireframe()\n clipperActor.SetMapper(clipperMapper)\n\n backFaces = vtk.vtkProperty()\n backFaces.SetSpecular(0)\n backFaces.SetDiffuse(0)\n backFaces.SetAmbient(0)\n backFaces.SetAmbientColor(1,0,0)\n clipperActor.SetBackfaceProperty(backFaces)\n\n ren = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(ren)\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n\n ren.AddActor(clipperActor)\n ren.AddActor(colorBar)\n ren.ResetCamera()\n ren.SetBackground(0.2,0.3,0.4)\n ren.ResetCameraClippingRange()\n renWin.SetSize(1200, 600)\n\n clipXSlider = vtk.vtkSliderRepresentation2D()\n clipXSlider.SetMinimumValue(0)\n clipXSlider.SetMaximumValue(300)\n clipXSlider.SetValue(clipX)\n clipXSlider.SetTitleText(\"X\")\n clipXSlider.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\n clipXSlider.GetPoint1Coordinate().SetValue(0.0, 0.3)\n clipXSlider.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\n clipXSlider.GetPoint2Coordinate().SetValue(0.2, 0.3)\n clipXSlider.SetSliderLength(0.02)\n clipXSlider.SetSliderWidth(0.03)\n clipXSlider.SetEndCapLength(0.01)\n clipXSlider.SetEndCapWidth(0.03)\n clipXSlider.SetTubeWidth(0.005)\n clipXSlider.SetLabelFormat(\"%1.2lf\")\n clipXSlider.SetTitleHeight(0.02)\n 
clipXSlider.SetLabelHeight(0.02)\n SliderWidget2 = vtk.vtkSliderWidget()\n SliderWidget2.SetInteractor(iren)\n SliderWidget2.SetRepresentation(clipXSlider)\n SliderWidget2.KeyPressActivationOff()\n SliderWidget2.SetAnimationModeToAnimate()\n SliderWidget2.SetEnabled(True)\n SliderWidget2.AddObserver(\"InteractionEvent\", clipXSliderHandler)\n\n clipYSlider = vtk.vtkSliderRepresentation2D()\n clipYSlider.SetMinimumValue(0)\n clipYSlider.SetMaximumValue(300)\n clipYSlider.SetValue(clipY)\n clipYSlider.SetTitleText(\"Y\")\n clipYSlider.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\n clipYSlider.GetPoint1Coordinate().SetValue(0.0, 0.2)\n clipYSlider.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\n clipYSlider.GetPoint2Coordinate().SetValue(0.2, 0.2)\n clipYSlider.SetSliderLength(0.02)\n clipYSlider.SetSliderWidth(0.03)\n clipYSlider.SetEndCapLength(0.01)\n clipYSlider.SetEndCapWidth(0.03)\n clipYSlider.SetTubeWidth(0.005)\n clipYSlider.SetLabelFormat(\"%1.2lf\")\n clipYSlider.SetTitleHeight(0.02)\n clipYSlider.SetLabelHeight(0.02)\n SliderWidget3 = vtk.vtkSliderWidget()\n SliderWidget3.SetInteractor(iren)\n SliderWidget3.SetRepresentation(clipYSlider)\n SliderWidget3.KeyPressActivationOff()\n SliderWidget3.SetAnimationModeToAnimate()\n SliderWidget3.SetEnabled(True)\n SliderWidget3.AddObserver(\"InteractionEvent\", clipYSliderHandler)\n\n clipZSlider = vtk.vtkSliderRepresentation2D()\n clipZSlider.SetMinimumValue(0)\n clipZSlider.SetMaximumValue(300)\n clipZSlider.SetValue(clipZ)\n clipZSlider.SetTitleText(\"Z\")\n clipZSlider.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\n clipZSlider.GetPoint1Coordinate().SetValue(0.0, 0.1)\n clipZSlider.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\n clipZSlider.GetPoint2Coordinate().SetValue(0.2, 0.1)\n clipZSlider.SetSliderLength(0.02)\n clipZSlider.SetSliderWidth(0.03)\n clipZSlider.SetEndCapLength(0.01)\n clipZSlider.SetEndCapWidth(0.03)\n clipZSlider.SetTubeWidth(0.005)\n clipZSlider.SetLabelFormat(\"%1.2lf\")\n clipZSlider.SetTitleHeight(0.02)\n clipZSlider.SetLabelHeight(0.02)\n SliderWidget4 = vtk.vtkSliderWidget()\n SliderWidget4.SetInteractor(iren)\n SliderWidget4.SetRepresentation(clipZSlider)\n SliderWidget4.KeyPressActivationOff()\n SliderWidget4.SetAnimationModeToAnimate()\n SliderWidget4.SetEnabled(True)\n SliderWidget4.AddObserver(\"InteractionEvent\", clipZSliderHandler)\n\n # Render\n iren.Initialize()\n renWin.Render()\n iren.Start()\n\nif __name__==\"__main__\":\n Main()" }, { "alpha_fraction": 0.5953470468521118, "alphanum_fraction": 0.6136537194252014, "avg_line_length": 29.852941513061523, "blob_id": "3ba50f06c43bc576c5e1fd469d1f4b570cee91d3", "content_id": "48acf35457732fd84a8436be41ace6cc65a0c520", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5244, "license_type": "no_license", "max_line_length": 139, "num_lines": 170, "path": "/project2/src/colormap.py", "repo_name": "tomelf/CS530-Projects-Scientific-Visualization", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport sys\nimport re\nimport vtk\nfrom vtk import vtkScalarBarActor, vtkTextProperty\n\nimport Tkinter\nfrom vtk.tk.vtkTkRenderWindowInteractor import vtkTkRenderWindowInteractor\n\ncontinuousType = \"HSV\"\ndiscreateType = \"HSV\"\ncontinuousSize = 0\ndiscreateSize = 0\ncontinuousData = []\ndiscreteData = []\n\ndef loadContinuousFile(filename):\n global continuousType, continuousSize\n f = open(filename, \"r\")\n first_flag = True\n 
for line in f:\n line = line.lstrip()\n if line[0] != '#' and first_flag:\n first_flag = False\n\n m = re.match('(\\d+)[ \\t]*([A-Za-z]+)', line)\n if m:\n continuousSize = m.group(1)\n continuousType = m.group(2)\n\n elif line[0] != '#' and not first_flag:\n m = re.match('([-\\./\\d]+)[ \\t]*([-\\./\\d]+)[ \\t]*([-\\./\\d]+)[ \\t]*([-\\./\\d]+)', line)\n if m:\n continuousData.append([m.group(1), m.group(2), m.group(3), m.group(4)])\n f.close()\n\ndef loadDiscreteFile(filename):\n global discreateType, discreateSize\n f = open(filename, \"r\")\n first_flag = True\n for line in f:\n line = line.lstrip()\n if line[0] != '#' and first_flag:\n first_flag = False\n m = re.match('(\\d+)[ \\t]*([A-Za-z]+)', line)\n if m:\n discreateSize = m.group(1)\n discreateType = m.group(2)\n\n elif line[0] != '#' and not first_flag:\n m = re.match('([-\\./\\d]+)[ \\t]*([-\\./\\d]+)[ \\t]*([-\\./\\d]+)[ \\t]*([-\\./\\d]+)', line)\n if m:\n discreteData.append([m.group(1), m.group(2), m.group(3), m.group(4)])\n f.close()\n\ndef updateUI(mode=0):\n lut.RemoveAllPoints()\n\n if mode==0: #continuous\n print continuousType\n if continuousType == \"HSV\":\n for p in continuousData:\n print p\n lut.AddHSVPoint(float(p[0]),float(p[1]),float(p[2]),float(p[3]),0.5,0)\n elif continuousType == \"RGB\":\n for p in continuousData:\n print p\n lut.AddRGBPoint(float(p[0]),float(p[1]),float(p[2]),float(p[3]),0.5,0)\n elif mode==1: #discrete\n print discreateType\n if discreateType == \"HSV\":\n for p in discreteData:\n print p\n lut.AddHSVPoint(float(p[0]),float(p[1]),float(p[2]),float(p[3]),0.5,1)\n elif discreateType == \"RGB\":\n for p in discreteData:\n print p\n lut.AddRGBPoint(float(p[0]),float(p[1]),float(p[2]),float(p[3]),0.5,1)\n\ndef showContinuous():\n updateUI(0)\n renWin.Render()\n\ndef showDiscrete():\n updateUI(1)\n renWin.Render()\n\ndef Main():\n global datamin, datamax, lut, renWin\n\n # Load bathymetry dataset\n bathymetryReader = vtk.vtkStructuredPointsReader()\n bathymetryReader.SetFileName(sys.argv[1])\n bathymetryReader.Update()\n r = bathymetryReader.GetOutput().GetPointData().GetScalars().GetRange()\n datamin = r[0]\n datamax = r[1]\n\n loadContinuousFile(sys.argv[2])\n loadDiscreteFile(sys.argv[3])\n\n # Setup color mapping\n lut = vtk.vtkColorTransferFunction()\n lut.SetColorSpaceToHSV()\n\n # Load bathymetry data into Geometry Filter\n geometry = vtk.vtkImageDataGeometryFilter()\n geometry.SetInputConnection(bathymetryReader.GetOutputPort())\n\n mapper = vtk.vtkDataSetMapper()\n mapper.SetInputConnection(geometry.GetOutputPort())\n mapper.SetLookupTable(lut)\n mapper.ImmediateModeRenderingOff()\n\n # Setup color mapping bar\n colorBar = vtkScalarBarActor()\n colorBar.SetLookupTable(mapper.GetLookupTable())\n colorBar.SetTitle(\"color map\")\n colorBar.SetNumberOfLabels(6)\n colorBar.SetLabelFormat(\"%4.0f\")\n colorBar.SetPosition(0.9, 0.1)\n colorBar.SetWidth(0.07)\n colorBar.SetHeight(0.8)\n\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n\n # Create renderer stuff\n ren = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(ren)\n renWin.SetSize(1280, 800)\n\n # Add the actors to the renderer, set the background and size\n ren.AddActor(actor)\n ren.AddActor(colorBar)\n ren.ResetCamera()\n ren.SetBackground(0, 0, 0)\n ren.ResetCameraClippingRange()\n\n root = Tkinter.Tk()\n root.title('Task 2. 
MRI Data')\n frame = Tkinter.Frame(root)\n frame.pack(fill=Tkinter.BOTH, expand=\"false\", side=Tkinter.TOP)\n\n mode = Tkinter.IntVar()\n mode.set(1)\n Tkinter.Radiobutton(frame, text=\"Continuous color map\", padx=20, variable=mode, value=1, command=showContinuous).pack(anchor=Tkinter.W)\n Tkinter.Radiobutton(frame, text=\"Discrete color map\", padx=20, variable=mode, value=2, command=showDiscrete).pack(anchor=Tkinter.W)\n\n # Setup for rendering window interactor \n renWinInteract = vtkTkRenderWindowInteractor(frame,rw=renWin, width=1280, height=800)\n \n # Specify interaction with 2D image\n style = vtk.vtkInteractorStyleImage()\n style.SetInteractionModeToImage2D()\n renWinInteract.SetInteractorStyle(style)\n\n renWinInteract.Initialize()\n renWinInteract.pack(side='top', fill='both', expand=\"false\")\n renWinInteract.Start()\n\n updateUI(0)\n renWin.Render()\n\n root.mainloop()\n\nif __name__==\"__main__\":\n Main()" }, { "alpha_fraction": 0.5845464468002319, "alphanum_fraction": 0.6514557600021362, "avg_line_length": 34.72999954223633, "blob_id": "7b4517a3ef150b2444a23c90279a3c360eed2147", "content_id": "9c19074e1ed0e2d92c7709f8bc41a7f326333255", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3572, "license_type": "no_license", "max_line_length": 97, "num_lines": 100, "path": "/project4/src/dvr_cfd.py", "repo_name": "tomelf/CS530-Projects-Scientific-Visualization", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport sys\nimport vtk\nfrom vtk.util.misc import vtkGetDataRoot\n\ndef print_camera_settings(obj, event):\n global ren\n # ---------------------------------------------------------------\n # Print out the current settings of the camera\n # ---------------------------------------------------------------\n camera = ren.GetActiveCamera()\n print \"Camera settings:\"\n print \" * position: %s\" % (camera.GetPosition(),)\n print \" * focal point: %s\" % (camera.GetFocalPoint(),)\n print \" * up vector: %s\" % (camera.GetViewUp(),)\n print \" * clipping range: %s\" % (camera.GetClippingRange(),)\n\ndef after_print_camera_settings(obj, event):\n print \"\"\n\ndef CheckAbort(obj, event):\n if obj.GetEventPending() != 0:\n obj.SetAbortRender(1)\n\ndef Main():\n global ren\n print \"data: %s\" % sys.argv[1]\n reader = vtk.vtkStructuredPointsReader()\n reader.SetFileName(sys.argv[1])\n \n # Create the standard renderer, render window and interactor\n ren = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(ren)\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n \n iren.RemoveObservers('RightButtonPressEvent')\n iren.AddObserver('RightButtonPressEvent', print_camera_settings, 1.0)\n iren.AddObserver('RightButtonPressEvent', after_print_camera_settings, -1.0)\n \n datapoints = [\n [0,0.66,0,1,0],\n [900,0.66,0.1,1,0.009],\n [10000,0.66,0.3,1,0.01],\n [20000,0.66,0.5,1,0.3],\n [30000,0.66,1,1,1]\n ]\n \n # Create transfer mapping scalar value to opacity\n opacityTransferFunction = vtk.vtkPiecewiseFunction()\n for p in datapoints:\n opacityTransferFunction.AddPoint(p[0], p[4])\n\n # Create transfer mapping scalar value to color\n colorTransferFunction = vtk.vtkColorTransferFunction()\n for p in datapoints:\n colorTransferFunction.AddHSVPoint(p[0], p[1], p[2], p[3])\n\n # The property describes how the data will look\n volumeProperty = vtk.vtkVolumeProperty()\n volumeProperty.SetColor(colorTransferFunction)\n volumeProperty.SetScalarOpacity(opacityTransferFunction)\n 
volumeProperty.ShadeOn()\n volumeProperty.SetInterpolationTypeToLinear()\n \n # The mapper / ray cast function know how to render the data\n compositeFunction = vtk.vtkVolumeRayCastCompositeFunction()\n volumeMapper = vtk.vtkVolumeRayCastMapper()\n volumeMapper.SetVolumeRayCastFunction(compositeFunction)\n volumeMapper.SetInputConnection(reader.GetOutputPort())\n volumeMapper.SetSampleDistance(0.1)\n print \"sample distance: %f\"%volumeMapper.GetSampleDistance()\n \n # The volume holds the mapper and the property and\n # can be used to position/orient the volume\n volume = vtk.vtkVolume()\n volume.SetMapper(volumeMapper)\n volume.SetProperty(volumeProperty)\n \n ren.AddVolume(volume)\n ren.SetBackground(0, 0, 0)\n \n ren.GetActiveCamera().SetViewUp(0.2320640509325283, 0.6216278154231228, 0.748147803149258)\n ren.GetActiveCamera().SetPosition(-86.30842917477719, -55.80182530081589, 297.63908735650085)\n ren.GetActiveCamera().SetFocalPoint(199.18608617782593, 149.5, 38.5)\n ren.GetActiveCamera().SetClippingRange(4.197001562269691, 982.4499004768599)\n \n renWin.SetSize(1600, 900)\n renWin.Render()\n \n renWin.AddObserver(\"AbortCheckEvent\", CheckAbort)\n \n iren.Initialize()\n renWin.Render()\n iren.Start()\n\nif __name__==\"__main__\":\n Main()" }, { "alpha_fraction": 0.7609934210777283, "alphanum_fraction": 0.8170425891876221, "avg_line_length": 31.036497116088867, "blob_id": "e089707b4ff1a32896ab58cc58370075508150c3", "content_id": "8dea17f6e551a4f80534c78c0fe638330f4ddff0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4389, "license_type": "no_license", "max_line_length": 76, "num_lines": 137, "path": "/project1/src/colormap.py", "repo_name": "tomelf/CS530-Projects-Scientific-Visualization", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport sys\nimport vtk\nfrom vtk import vtkScalarBarActor, vtkTextProperty\n\n# Callback function for vtkSliderWidgets\ndef vtkSliderCallback1(obj, event):\n sliderRepres1 = obj.GetRepresentation()\n pos = sliderRepres1.GetValue()\n global colorVar1\n lut.RemovePoint(colorVar1)\n colorVar1 = pos\n lut.AddRGBPoint(colorVar1,0,0.25,0.5);\n\ndef vtkSliderCallback2(obj, event):\n sliderRepres2 = obj.GetRepresentation()\n pos = sliderRepres2.GetValue()\n global colorVar2\n lut.RemovePoint(colorVar2)\n colorVar2 = pos\n lut.AddRGBPoint(colorVar2,1,1,0.5);\n\n# Load bathymetry dataset\nbathymetryReader = vtk.vtkStructuredPointsReader()\nbathymetryReader.SetFileName(sys.argv[1])\n\ncolorVar1 = -4500\ncolorVar2 = 2000\n\n# Setup color mapping\nlut = vtk.vtkColorTransferFunction()\nlut.SetColorSpaceToRGB()\nlut.AddRGBPoint(-9000,0,0,0);\nlut.AddRGBPoint(colorVar1,0,0.25,0.5);\nlut.AddRGBPoint(-1,0,1,1);\nlut.AddRGBPoint(0,0.25,0.75,0);\nlut.AddRGBPoint(colorVar2,1,1,0.5);\nlut.AddRGBPoint(4000,1,1,1);\n\n# Load bathymetry data into Geometry Filter\ngeometry = vtk.vtkImageDataGeometryFilter()\ngeometry.SetInputConnection(bathymetryReader.GetOutputPort())\n\nmapper = vtk.vtkDataSetMapper()\nmapper.SetInputConnection(geometry.GetOutputPort())\nmapper.SetLookupTable(lut)\nmapper.SetScalarRange(0, 255)\nmapper.ImmediateModeRenderingOff()\n\n# Setup color mapping bar\ncolorBar = vtkScalarBarActor()\ncolorBar.SetLookupTable(mapper.GetLookupTable())\ncolorBar.SetTitle(\"color map\")\ncolorBar.SetNumberOfLabels(6)\ncolorBar.SetLabelFormat(\"%6.0f\")\ncolorBar.SetPosition(0.89, 0.1)\ncolorBar.SetWidth(0.08)\ncolorBar.SetHeight(0.7)\n\nactor = vtk.vtkActor()\nactor.SetMapper(mapper)\n\n# Create 
renderer stuff\nren = vtk.vtkRenderer()\nrenWin = vtk.vtkRenderWindow()\nrenWin.AddRenderer(ren)\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\n\n# Add the actors to the renderer, set the background and size\nren.AddActor(actor)\nren.AddActor(colorBar)\nren.ResetCamera()\nren.SetBackground(0.1, 0.2, 0.4)\nren.ResetCameraClippingRange()\nrenWin.SetSize(800, 600)\n\n# Add vtkSliderWidget\nSliderRepres1 = vtk.vtkSliderRepresentation2D()\nmin = -8999 #ImageViewer.GetSliceMin()\nmax = -2 #ImageViewer.GetSliceMax()\nSliderRepres1.SetMinimumValue(min)\nSliderRepres1.SetMaximumValue(max)\nSliderRepres1.SetValue(-4500)\nSliderRepres1.SetTitleText(\"negetive scale\")\nSliderRepres1.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\nSliderRepres1.GetPoint1Coordinate().SetValue(0.5, 0.2)\nSliderRepres1.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\nSliderRepres1.GetPoint2Coordinate().SetValue(0.8, 0.2)\nSliderRepres1.SetSliderLength(0.02)\nSliderRepres1.SetSliderWidth(0.03)\nSliderRepres1.SetEndCapLength(0.01)\nSliderRepres1.SetEndCapWidth(0.03)\nSliderRepres1.SetTubeWidth(0.005)\nSliderRepres1.SetLabelFormat(\"%3.0lf\")\nSliderRepres1.SetTitleHeight(0.02)\nSliderRepres1.SetLabelHeight(0.02)\nSliderWidget1 = vtk.vtkSliderWidget()\nSliderWidget1.SetInteractor(iren)\nSliderWidget1.SetRepresentation(SliderRepres1)\nSliderWidget1.KeyPressActivationOff()\nSliderWidget1.SetAnimationModeToAnimate()\nSliderWidget1.SetEnabled(True)\nSliderWidget1.AddObserver(\"InteractionEvent\", vtkSliderCallback1)\n\n# Add vtkSliderWidget\nSliderRepres2 = vtk.vtkSliderRepresentation2D()\nmin = 1 #ImageViewer.GetSliceMin()\nmax = 3999 #ImageViewer.GetSliceMax()\nSliderRepres2.SetMinimumValue(min)\nSliderRepres2.SetMaximumValue(max)\nSliderRepres2.SetValue(2000)\nSliderRepres2.SetTitleText(\"positive scale\")\nSliderRepres2.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\nSliderRepres2.GetPoint1Coordinate().SetValue(0.5, 0.1)\nSliderRepres2.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\nSliderRepres2.GetPoint2Coordinate().SetValue(0.8, 0.1)\nSliderRepres2.SetSliderLength(0.02)\nSliderRepres2.SetSliderWidth(0.03)\nSliderRepres2.SetEndCapLength(0.01)\nSliderRepres2.SetEndCapWidth(0.03)\nSliderRepres2.SetTubeWidth(0.005)\nSliderRepres2.SetLabelFormat(\"%3.0lf\")\nSliderRepres2.SetTitleHeight(0.02)\nSliderRepres2.SetLabelHeight(0.02)\nSliderWidget2 = vtk.vtkSliderWidget()\nSliderWidget2.SetInteractor(iren)\nSliderWidget2.SetRepresentation(SliderRepres2)\nSliderWidget2.KeyPressActivationOff()\nSliderWidget2.SetAnimationModeToAnimate()\nSliderWidget2.SetEnabled(True)\nSliderWidget2.AddObserver(\"InteractionEvent\", vtkSliderCallback2)\n\niren.Initialize()\nrenWin.Render()\niren.Start()\n" }, { "alpha_fraction": 0.7082539200782776, "alphanum_fraction": 0.7427098751068115, "avg_line_length": 36.07853317260742, "blob_id": "54fab44c1aa1a9d140330d2e69fe38431549af51", "content_id": "f911555c72ee8f5128482d5b65c17ae04098bb78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14163, "license_type": "no_license", "max_line_length": 221, "num_lines": 382, "path": "/project3/src/iso2dtf.py", "repo_name": "tomelf/CS530-Projects-Scientific-Visualization", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport sys\nimport re\nimport vtk\nfrom vtk import vtkScalarBarActor\n\ndef isovalueSliderHandler(obj, event):\n global isovalue, contours\n isovalue = obj.GetRepresentation().GetValue()\n 
contours.SetValue(0, isovalue)\n print \"Change Isovalue: %f\"%isovalue\n\ndef gminSliderHandler(obj, event):\n global gmin, gmax, gminSlider, gmclipper1\n gmin = obj.GetRepresentation().GetValue()\n if gmin>=gmax:\n gmin = gmax-1\n gminSlider.SetValue(gmin)\n \n gmclipper1.SetValue(gmin)\n gmclipper1.Update()\n print \"gmin: %f, gmax: %f\"%(gmin,gmax)\n\ndef gmaxSliderHandler(obj, event):\n global gmin, gmax, gmaxSlider, gmclipper2\n gmax = obj.GetRepresentation().GetValue()\n if gmin>=gmax:\n gmax = gmin+1\n gmaxSlider.SetValue(gmax)\n \n gmclipper2.SetValue(gmax)\n gmclipper2.Update()\n print \"gmin: %f, gmax: %f\"%(gmin,gmax)\n\ndef clipXSliderHandler(obj, event):\n global clipX\n clipX = obj.GetRepresentation().GetValue()\n updateCT()\n\ndef clipYSliderHandler(obj, event):\n global clipY\n clipY = obj.GetRepresentation().GetValue()\n updateCT()\n\ndef clipZSliderHandler(obj, event):\n global clipZ\n clipZ = obj.GetRepresentation().GetValue()\n updateCT()\n\ndef updateCT():\n global clipX, clipY, clipZ, plane1, plane2, plane3, planeSource1, planeSource2, planeSource3, clipper1, clipper2, clipper3\n planeSource1.SetOrigin(clipX,0,0)\n planeSource2.SetOrigin(0,clipY,0)\n planeSource3.SetOrigin(0,0,clipZ)\n plane1.SetOrigin(planeSource1.GetOrigin())\n plane2.SetOrigin(planeSource2.GetOrigin())\n plane3.SetOrigin(planeSource3.GetOrigin())\n clipper1.Update()\n clipper2.Update()\n clipper3.Update()\n print \"clip (%f,%f,%f)\" %(clipX,clipY,clipZ)\n\ndef Main():\n global isovalue, contours, planeSource1, planeSource2, planeSource3, plane1, plane2, plane3, clipper1, clipper2, clipper3, clipX, clipY, clipZ, lut, gmin, gmax, min, max, gminSlider, gmaxSlider, gmclipper1, gmclipper2\n\n print \"data: %s\" % sys.argv[1]\n reader = vtk.vtkStructuredPointsReader()\n reader.SetFileName(sys.argv[1])\n reader.Update()\n \n print \"gradientmag: %s\" % sys.argv[2]\n gmreader = vtk.vtkStructuredPointsReader()\n gmreader.SetFileName(sys.argv[2])\n gmreader.Update()\n \n clipX = 0\n clipY = 0\n clipZ = 0\n \n r = reader.GetOutput().GetScalarRange()\n datamin = r[0]\n datamax = r[1]\n isovalue = (datamax+datamin)/2.0\n\n for i in range(3, len(sys.argv)):\n if sys.argv[i] == \"--val\":\n print \"isovalue %s\" % sys.argv[i+1]\n isovalue = float(sys.argv[i+1])\n if sys.argv[i] == \"--clip\":\n print \"clip (%s,%s,%s)\" % (sys.argv[i+1],sys.argv[i+2],sys.argv[i+3])\n clipX = float(sys.argv[i+1])\n clipY = float(sys.argv[i+2])\n clipZ = float(sys.argv[i+3])\n \n contours = vtk.vtkContourFilter()\n contours.SetInputConnection(reader.GetOutputPort());\n contours.ComputeNormalsOn()\n contours.SetValue(0, isovalue)\n\n planeSource1 = vtk.vtkPlaneSource()\n planeSource1.SetNormal(1,0,0)\n planeSource1.SetOrigin(clipX,0,0)\n planeSource2 = vtk.vtkPlaneSource()\n planeSource2.SetNormal(0,1,0)\n planeSource2.SetOrigin(0,clipY,0)\n planeSource3 = vtk.vtkPlaneSource()\n planeSource3.SetNormal(0,0,1)\n planeSource3.SetOrigin(0,0,clipZ)\n\n plane1 = vtk.vtkPlane()\n plane1.SetNormal(planeSource1.GetNormal())\n plane1.SetOrigin(planeSource1.GetOrigin())\n clipper1 = vtk.vtkClipPolyData()\n clipper1.SetClipFunction(plane1)\n clipper1.SetInputConnection(contours.GetOutputPort())\n clipper1.Update()\n\n plane2 = vtk.vtkPlane()\n plane2.SetNormal(planeSource2.GetNormal())\n plane2.SetOrigin(planeSource2.GetOrigin())\n clipper2 = vtk.vtkClipPolyData()\n clipper2.SetClipFunction(plane2)\n clipper2.SetInputConnection(clipper1.GetOutputPort())\n clipper2.Update()\n\n plane3 = vtk.vtkPlane()\n 
plane3.SetNormal(planeSource3.GetNormal())\n plane3.SetOrigin(planeSource3.GetOrigin())\n clipper3 = vtk.vtkClipPolyData()\n clipper3.SetClipFunction(plane3)\n clipper3.SetInputConnection(clipper2.GetOutputPort())\n clipper3.Update()\n\n probeFilter = vtk.vtkProbeFilter()\n probeFilter.SetInputConnection(0, clipper3.GetOutputPort())\n probeFilter.SetInputConnection(1, gmreader.GetOutputPort())\n probeFilter.Update()\n\n gmrange = probeFilter.GetOutput().GetScalarRange()\n gmin = gmrange[0]\n gmax = gmrange[1]\n \n gmclipper1 = vtk.vtkClipPolyData()\n gmclipper1.SetInputConnection(probeFilter.GetOutputPort())\n gmclipper1.InsideOutOff()\n gmclipper1.SetValue(int(gmin))\n gmclipper1.Update()\n\n gmclipper2 = vtk.vtkClipPolyData()\n gmclipper2.SetInputConnection(gmclipper1.GetOutputPort())\n gmclipper2.InsideOutOn()\n gmclipper2.SetValue(int(gmax))\n gmclipper2.Update()\n\n # display the data in rainbow color scale\n lut = vtk.vtkColorTransferFunction()\n lut.SetColorSpaceToHSV()\n lut.RemoveAllPoints()\n dis = (gmax-gmin)/7\n for i in range(0,8):\n lut.AddHSVPoint(gmin+dis*i,0.1*i,1,1)\n\n clipperMapper = vtk.vtkPolyDataMapper()\n clipperMapper.SetLookupTable(lut)\n clipperMapper.SetInputConnection(gmclipper2.GetOutputPort())\n\n colorBar = vtkScalarBarActor()\n colorBar.SetLookupTable(clipperMapper.GetLookupTable())\n colorBar.SetTitle(\"gradient magnitude \")\n colorBar.SetNumberOfLabels(6)\n colorBar.SetLabelFormat(\"%4.0f\")\n colorBar.SetPosition(0.9, 0.1)\n colorBar.SetWidth(0.1)\n colorBar.SetHeight(0.7)\n\n clipperActor=vtk.vtkActor()\n clipperActor.GetProperty().SetRepresentationToWireframe()\n clipperActor.SetMapper(clipperMapper)\n\n backFaces = vtk.vtkProperty()\n backFaces.SetSpecular(0)\n backFaces.SetDiffuse(0)\n backFaces.SetAmbient(0)\n backFaces.SetAmbientColor(1,0,0)\n clipperActor.SetBackfaceProperty(backFaces)\n\n ren = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(ren)\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n\n ren.AddActor(clipperActor)\n ren.AddActor(colorBar)\n ren.ResetCamera()\n ren.SetBackground(0.2,0.3,0.4)\n ren.ResetCameraClippingRange()\n renWin.SetSize(1200, 600)\n\n gminSlider = vtk.vtkSliderRepresentation2D()\n gminSlider.SetMinimumValue(gmrange[0])\n gminSlider.SetMaximumValue(gmrange[1])\n gminSlider.SetValue(gmin)\n gminSlider.SetTitleText(\"gradmin\")\n gminSlider.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\n gminSlider.GetPoint1Coordinate().SetValue(0.0, 0.6)\n gminSlider.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\n gminSlider.GetPoint2Coordinate().SetValue(0.2, 0.6)\n gminSlider.SetSliderLength(0.02)\n gminSlider.SetSliderWidth(0.03)\n gminSlider.SetEndCapLength(0.01)\n gminSlider.SetEndCapWidth(0.03)\n gminSlider.SetTubeWidth(0.005)\n gminSlider.SetLabelFormat(\"%3.0lf\")\n gminSlider.SetTitleHeight(0.02)\n gminSlider.SetLabelHeight(0.02)\n gminSliderWidget = vtk.vtkSliderWidget()\n gminSliderWidget.SetInteractor(iren)\n gminSliderWidget.SetRepresentation(gminSlider)\n gminSliderWidget.KeyPressActivationOff()\n gminSliderWidget.SetAnimationModeToAnimate()\n gminSliderWidget.SetEnabled(True)\n gminSliderWidget.AddObserver(\"InteractionEvent\", gminSliderHandler)\n\n gmaxSlider = vtk.vtkSliderRepresentation2D()\n gmaxSlider.SetMinimumValue(gmrange[0])\n gmaxSlider.SetMaximumValue(gmrange[1])\n gmaxSlider.SetValue(gmax)\n gmaxSlider.SetTitleText(\"gradmax\")\n gmaxSlider.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\n 
gmaxSlider.GetPoint1Coordinate().SetValue(0.0, 0.5)\n    gmaxSlider.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\n    gmaxSlider.GetPoint2Coordinate().SetValue(0.2, 0.5)\n    gmaxSlider.SetSliderLength(0.02)\n    gmaxSlider.SetSliderWidth(0.03)\n    gmaxSlider.SetEndCapLength(0.01)\n    gmaxSlider.SetEndCapWidth(0.03)\n    gmaxSlider.SetTubeWidth(0.005)\n    gmaxSlider.SetLabelFormat(\"%3.0lf\")\n    gmaxSlider.SetTitleHeight(0.02)\n    gmaxSlider.SetLabelHeight(0.02)\n    gmaxSliderWidget = vtk.vtkSliderWidget()\n    gmaxSliderWidget.SetInteractor(iren)\n    gmaxSliderWidget.SetRepresentation(gmaxSlider)\n    gmaxSliderWidget.KeyPressActivationOff()\n    gmaxSliderWidget.SetAnimationModeToAnimate()\n    gmaxSliderWidget.SetEnabled(True)\n    gmaxSliderWidget.AddObserver(\"InteractionEvent\", gmaxSliderHandler)\n    \n    isovalueSlider = vtk.vtkSliderRepresentation2D()\n    isovalueSlider.SetMinimumValue(datamin)\n    isovalueSlider.SetMaximumValue(datamax)\n    isovalueSlider.SetValue(isovalue)\n    isovalueSlider.SetTitleText(\"isovalue\")\n    isovalueSlider.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\n    isovalueSlider.GetPoint1Coordinate().SetValue(0.0, 0.4)\n    isovalueSlider.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\n    isovalueSlider.GetPoint2Coordinate().SetValue(0.2, 0.4)\n    isovalueSlider.SetSliderLength(0.02)\n    isovalueSlider.SetSliderWidth(0.03)\n    isovalueSlider.SetEndCapLength(0.01)\n    isovalueSlider.SetEndCapWidth(0.03)\n    isovalueSlider.SetTubeWidth(0.005)\n    isovalueSlider.SetLabelFormat(\"%3.0lf\")\n    isovalueSlider.SetTitleHeight(0.02)\n    isovalueSlider.SetLabelHeight(0.02)\n    SliderWidget1 = vtk.vtkSliderWidget()\n    SliderWidget1.SetInteractor(iren)\n    SliderWidget1.SetRepresentation(isovalueSlider)\n    SliderWidget1.KeyPressActivationOff()\n    SliderWidget1.SetAnimationModeToAnimate()\n    SliderWidget1.SetEnabled(True)\n    SliderWidget1.AddObserver(\"InteractionEvent\", isovalueSliderHandler)\n\n    clipXSlider = vtk.vtkSliderRepresentation2D()\n    clipXSlider.SetMinimumValue(0)\n    clipXSlider.SetMaximumValue(300)\n    clipXSlider.SetValue(clipX)\n    clipXSlider.SetTitleText(\"X\")\n    clipXSlider.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\n    clipXSlider.GetPoint1Coordinate().SetValue(0.0, 0.3)\n    clipXSlider.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\n    clipXSlider.GetPoint2Coordinate().SetValue(0.2, 0.3)\n    clipXSlider.SetSliderLength(0.02)\n    clipXSlider.SetSliderWidth(0.03)\n    
clipXSlider.SetEndCapLength(0.01)\n clipXSlider.SetEndCapWidth(0.03)\n clipXSlider.SetTubeWidth(0.005)\n clipXSlider.SetLabelFormat(\"%1.2lf\")\n clipXSlider.SetTitleHeight(0.02)\n clipXSlider.SetLabelHeight(0.02)\n SliderWidget2 = vtk.vtkSliderWidget()\n SliderWidget2.SetInteractor(iren)\n SliderWidget2.SetRepresentation(clipXSlider)\n SliderWidget2.KeyPressActivationOff()\n SliderWidget2.SetAnimationModeToAnimate()\n SliderWidget2.SetEnabled(True)\n SliderWidget2.AddObserver(\"InteractionEvent\", clipXSliderHandler)\n\n clipYSlider = vtk.vtkSliderRepresentation2D()\n clipYSlider.SetMinimumValue(0)\n clipYSlider.SetMaximumValue(300)\n clipYSlider.SetValue(clipY)\n clipYSlider.SetTitleText(\"Y\")\n clipYSlider.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\n clipYSlider.GetPoint1Coordinate().SetValue(0.0, 0.2)\n clipYSlider.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\n clipYSlider.GetPoint2Coordinate().SetValue(0.2, 0.2)\n clipYSlider.SetSliderLength(0.02)\n clipYSlider.SetSliderWidth(0.03)\n clipYSlider.SetEndCapLength(0.01)\n clipYSlider.SetEndCapWidth(0.03)\n clipYSlider.SetTubeWidth(0.005)\n clipYSlider.SetLabelFormat(\"%1.2lf\")\n clipYSlider.SetTitleHeight(0.02)\n clipYSlider.SetLabelHeight(0.02)\n SliderWidget3 = vtk.vtkSliderWidget()\n SliderWidget3.SetInteractor(iren)\n SliderWidget3.SetRepresentation(clipYSlider)\n SliderWidget3.KeyPressActivationOff()\n SliderWidget3.SetAnimationModeToAnimate()\n SliderWidget3.SetEnabled(True)\n SliderWidget3.AddObserver(\"InteractionEvent\", clipYSliderHandler)\n\n clipZSlider = vtk.vtkSliderRepresentation2D()\n clipZSlider.SetMinimumValue(0)\n clipZSlider.SetMaximumValue(300)\n clipZSlider.SetValue(clipZ)\n clipZSlider.SetTitleText(\"Z\")\n clipZSlider.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\n clipZSlider.GetPoint1Coordinate().SetValue(0.0, 0.1)\n clipZSlider.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\n clipZSlider.GetPoint2Coordinate().SetValue(0.2, 0.1)\n clipZSlider.SetSliderLength(0.02)\n clipZSlider.SetSliderWidth(0.03)\n clipZSlider.SetEndCapLength(0.01)\n clipZSlider.SetEndCapWidth(0.03)\n clipZSlider.SetTubeWidth(0.005)\n clipZSlider.SetLabelFormat(\"%1.2lf\")\n clipZSlider.SetTitleHeight(0.02)\n clipZSlider.SetLabelHeight(0.02)\n SliderWidget4 = vtk.vtkSliderWidget()\n SliderWidget4.SetInteractor(iren)\n SliderWidget4.SetRepresentation(clipZSlider)\n SliderWidget4.KeyPressActivationOff()\n SliderWidget4.SetAnimationModeToAnimate()\n SliderWidget4.SetEnabled(True)\n SliderWidget4.AddObserver(\"InteractionEvent\", clipZSliderHandler)\n\n # Render\n iren.Initialize()\n renWin.Render()\n iren.Start()\n\nif __name__==\"__main__\":\n Main()" }, { "alpha_fraction": 0.7934451103210449, "alphanum_fraction": 0.8193597793579102, "avg_line_length": 29.870588302612305, "blob_id": "f0f0009f5e29d967d69ab493608ff7230aa5f9f3", "content_id": "4337575ffc96b219ffdb0f2ba0e0f7826f1f44b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2624, "license_type": "no_license", "max_line_length": 75, "num_lines": 85, "path": "/project1/src/heightfield.py", "repo_name": "tomelf/CS530-Projects-Scientific-Visualization", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport sys\nimport vtk\n\n# Callback function for vtkSliderWidget\ndef vtkSliderCallback(obj, event):\n sliderRepres = obj.GetRepresentation()\n pos = sliderRepres.GetValue()\n warp.SetScaleFactor(pos/1000)\n\n# Read in the image and bathymetry 
dataset.\nimageReader = vtk.vtkJPEGReader()\nimageReader.SetFileName(sys.argv[2])\nbathymetryReader = vtk.vtkStructuredPointsReader()\nbathymetryReader.SetFileName(sys.argv[1])\n\n# Load bathymetry data into Geometry Filter\ngeometry = vtk.vtkImageDataGeometryFilter()\ngeometry.SetInputConnection(bathymetryReader.GetOutputPort())\nwarp = vtk.vtkWarpScalar()\nwarp.SetInputConnection(geometry.GetOutputPort())\nwarp.SetScaleFactor(0)\n\n# Create texture object from satellite picture\ntexture = vtk.vtkTexture()\ntexture.SetInputConnection(imageReader.GetOutputPort())\n\n# Create mapper\nmapper = vtk.vtkDataSetMapper()\nmapper.SetInputConnection(warp.GetOutputPort())\nmapper.SetScalarRange(0, 255)\nmapper.ScalarVisibilityOff()\nmapper.ImmediateModeRenderingOff()\n\n# Create actor and set the mapper and texture\nactor = vtk.vtkActor()\nactor.SetMapper(mapper)\nactor.SetTexture(texture)\n\n# Create renderer stuff\nren = vtk.vtkRenderer()\nrenWin = vtk.vtkRenderWindow()\nrenWin.AddRenderer(ren)\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\n\n# Add the actors to the renderer, set the background and size\nren.AddActor(actor)\nren.ResetCamera()\nren.SetBackground(0.1, 0.2, 0.4)\n\nrenWin.SetSize(800, 600)\n\n# Add vtkSliderWidget\nSliderRepres = vtk.vtkSliderRepresentation2D()\nmin = 0 #ImageViewer.GetSliceMin()\nmax = 100 #ImageViewer.GetSliceMax()\nSliderRepres.SetMinimumValue(min)\nSliderRepres.SetMaximumValue(max)\nSliderRepres.SetValue(min)\nSliderRepres.SetTitleText(\"scale factor\")\nSliderRepres.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\nSliderRepres.GetPoint1Coordinate().SetValue(0.1, 0.1)\nSliderRepres.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\nSliderRepres.GetPoint2Coordinate().SetValue(0.4, 0.1)\nSliderRepres.SetSliderLength(0.02)\nSliderRepres.SetSliderWidth(0.03)\nSliderRepres.SetEndCapLength(0.01)\nSliderRepres.SetEndCapWidth(0.03)\nSliderRepres.SetTubeWidth(0.005)\nSliderRepres.SetLabelFormat(\"%3.0lf / 1000\")\nSliderRepres.SetTitleHeight(0.02)\nSliderRepres.SetLabelHeight(0.02)\nSliderWidget = vtk.vtkSliderWidget()\nSliderWidget.SetInteractor(iren)\nSliderWidget.SetRepresentation(SliderRepres)\nSliderWidget.KeyPressActivationOff()\nSliderWidget.SetAnimationModeToAnimate()\nSliderWidget.SetEnabled(True)\nSliderWidget.AddObserver(\"InteractionEvent\", vtkSliderCallback)\n\niren.Initialize()\nrenWin.Render()\niren.Start()\n" }, { "alpha_fraction": 0.574499785900116, "alphanum_fraction": 0.6409110426902771, "avg_line_length": 33.043479919433594, "blob_id": "3a7507531b6044e557bcf959bd493d4a7557ed54", "content_id": "799f6702235b69cc4044a7d883a211d27c49b016", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4698, "license_type": "no_license", "max_line_length": 100, "num_lines": 138, "path": "/project5/src/streamtubes.py", "repo_name": "tomelf/CS530-Projects-Scientific-Visualization", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport sys\nimport vtk\n\ndef print_camera_settings(obj, event):\n global ren\n # ---------------------------------------------------------------\n # Print out the current settings of the camera\n # ---------------------------------------------------------------\n camera = ren.GetActiveCamera()\n print \"Camera settings:\"\n print \" * position: %s\" % (camera.GetPosition(),)\n print \" * focal point: %s\" % (camera.GetFocalPoint(),)\n print \" * up vector: %s\" % (camera.GetViewUp(),)\n print \" * clipping range: %s\" % 
(camera.GetClippingRange(),)\n \ndef after_print_camera_settings(obj, event):\n print \"\"\n\ndef Main():\n global ren\n # Create the RenderWindow, Renderer and both Actors\n ren = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.SetMultiSamples(0)\n renWin.AddRenderer(ren)\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n iren.RemoveObservers('RightButtonPressEvent')\n iren.AddObserver('RightButtonPressEvent', print_camera_settings, 1.0)\n iren.AddObserver('RightButtonPressEvent', after_print_camera_settings, -1.0)\n \n print \"data: %s %s\" % (sys.argv[1], sys.argv[2])\n \n cfdreader = vtk.vtkStructuredPointsReader()\n cfdreader.SetFileName(sys.argv[1])\n \n # setup wing data\n wingReader = vtk.vtkUnstructuredGridReader()\n wingReader.SetFileName(sys.argv[2])\n wingReader.Update()\n wingMapper = vtk.vtkDataSetMapper()\n wingMapper.SetInputConnection(wingReader.GetOutputPort())\n wingActor = vtk.vtkActor()\n wingActor.SetMapper(wingMapper)\n wingActor.GetProperty().SetColor(.4, .4, .4)\n \n bRakesToActor = [True, True, True]\n bWingToActor = True\n \n datamin = 0\n datamax = 230\n \n lut = vtk.vtkColorTransferFunction()\n lut.SetColorSpaceToHSV()\n lut.AddHSVPoint(datamin, 0, 1, 1)\n dis = float(datamax - datamin) / 7\n for i in range(0, 8):\n lut.AddHSVPoint(float(datamin + dis * i), 0.1 * i, 1, 1)\n \n colorBar = vtk.vtkScalarBarActor()\n colorBar.SetLookupTable(lut)\n colorBar.SetTitle(\"\")\n colorBar.SetNumberOfLabels(6)\n colorBar.SetLabelFormat(\"%4.0f\")\n colorBar.SetPosition(0.9, 0.1)\n colorBar.SetWidth(0.05)\n colorBar.SetHeight(0.4)\n ren.AddActor(colorBar)\n \n rakes = [\n vtk.vtkLineSource(),\n vtk.vtkLineSource(),\n vtk.vtkLineSource()\n ]\n rakes[0].SetPoint1(-230, -230, 0)\n rakes[0].SetPoint2(230, 230, 0)\n rakes[0].SetResolution(50)\n \n rakes[1].SetPoint1(230, -230, 0)\n rakes[1].SetPoint2(-230, 230, 0)\n rakes[1].SetResolution(50)\n \n# rakes[2].SetPoint1(0, -200, 10)\n# rakes[2].SetPoint2(0, 200, 10)\n# rakes[2].SetResolution(50)\n \n for i in range(0, len(rakes)):\n integ = vtk.vtkRungeKutta4()\n streamLine = vtk.vtkStreamLine()\n streamLine.SetInputConnection(cfdreader.GetOutputPort())\n streamLine.SetSourceConnection(rakes[i].GetOutputPort())\n streamLine.SetMaximumPropagationTime(50);\n streamLine.SetIntegrationStepLength(.1);\n streamLine.SetStepLength(0.001);\n streamLine.SetIntegrationDirectionToForward();\n streamLine.SetIntegrator(integ)\n streamLine.SpeedScalarsOn()\n \n streamTube = vtk.vtkTubeFilter()\n streamTube.SetInputConnection(streamLine.GetOutputPort())\n streamTube.SetRadius(3)\n streamTube.SetNumberOfSides(12)\n streamTube.SetVaryRadiusToVaryRadiusByVector()\n \n streamTubeMapper = vtk.vtkPolyDataMapper()\n streamTubeMapper.SetInputConnection(streamTube.GetOutputPort())\n streamTubeMapper.SetLookupTable(lut)\n \n streamTubeActor = vtk.vtkActor()\n streamTubeActor.SetMapper(streamTubeMapper)\n streamTubeActor.GetProperty().BackfaceCullingOn()\n streamTubeActor.GetProperty().SetColor(0, 1, 0)\n streamTubeActor.GetProperty().SetOpacity(0.5);\n \n if bRakesToActor[i]:\n ren.AddActor(streamTubeActor)\n \n if bWingToActor:\n ren.AddActor(wingActor)\n \n ren.SetBackground(0, 0, 0)\n renWin.SetSize(1600, 900)\n \n ren.ResetCamera()\n ren.GetActiveCamera().SetClippingRange(417.55784439078775, 1491.5763714138557)\n ren.GetActiveCamera().SetFocalPoint(118.72183980792761, 0.00012969970703125, 36.469017028808594)\n ren.GetActiveCamera().SetPosition(680.0192576650034, 16.65944318371372, 790.5781258299678)\n 
ren.GetActiveCamera().SetViewUp(-0.802117714199773, -0.005112780752923929, 0.5971440630533839)\n\n # Render\n iren.Initialize()\n renWin.Render()\n iren.Start()\n\nif __name__ == \"__main__\":\n Main()\n" }, { "alpha_fraction": 0.5379812717437744, "alphanum_fraction": 0.6142906546592712, "avg_line_length": 30.010753631591797, "blob_id": "fd06590e5be575f6a7e839174cc970c767ba2aa7", "content_id": "627341a99e51961641c59a9ce4dce12fb3f93d02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2883, "license_type": "no_license", "max_line_length": 97, "num_lines": 93, "path": "/project4/src/salient_cfd.py", "repo_name": "tomelf/CS530-Projects-Scientific-Visualization", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport sys\nimport vtk\nfrom vtk import vtkScalarBarActor\n\ndef print_camera_settings(obj, event):\n global ren\n # ---------------------------------------------------------------\n # Print out the current settings of the camera\n # ---------------------------------------------------------------\n camera = ren.GetActiveCamera()\n print \"Camera settings:\"\n print \" * position: %s\" % (camera.GetPosition(),)\n print \" * focal point: %s\" % (camera.GetFocalPoint(),)\n print \" * up vector: %s\" % (camera.GetViewUp(),)\n print \" * clipping range: %s\" % (camera.GetClippingRange(),)\n\ndef after_print_camera_settings(obj, event):\n print \"\"\n\ndef Main():\n global ren\n print \"data: %s\" % sys.argv[1]\n reader = vtk.vtkStructuredPointsReader()\n reader.SetFileName(sys.argv[1])\n \n isosurfaces = [\n [3413,0.1,0.2,1,0.4],\n [18090,0.1,0.6,1,0.6],\n [41983,0.1,0.9,1,0.9],\n ]\n \n ren = vtk.vtkRenderer()\n \n for surface in isosurfaces:\n \n lut = vtk.vtkColorTransferFunction()\n lut.SetColorSpaceToHSV()\n lut.AddHSVPoint(surface[0],surface[1],surface[2],surface[3])\n \n contours = vtk.vtkContourFilter()\n contours.SetInputConnection(reader.GetOutputPort());\n contours.ComputeNormalsOn()\n\n contours.SetValue(0, surface[0])\n \n mapper = vtk.vtkDataSetMapper()\n mapper.SetInputConnection(contours.GetOutputPort())\n mapper.SetLookupTable(lut)\n mapper.ImmediateModeRenderingOff()\n\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetOpacity(surface[4])\n\n ren.AddActor(actor)\n\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(ren)\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n \n iren.RemoveObservers('RightButtonPressEvent')\n iren.AddObserver('RightButtonPressEvent', print_camera_settings, 1.0)\n iren.AddObserver('RightButtonPressEvent', after_print_camera_settings, -1.0)\n\n # for depth peeling\n ren.SetUseDepthPeeling(1)\n ren.SetMaximumNumberOfPeels(4) # default 4\n ren.SetOcclusionRatio(0) # default 0\n \n ren.ResetCamera()\n ren.SetBackground(0,0,0)\n\n ren.GetActiveCamera().SetViewUp(0.2320640509325283, 0.6216278154231228, 0.748147803149258)\n ren.GetActiveCamera().SetPosition(-86.30842917477719, -55.80182530081589, 297.63908735650085)\n ren.GetActiveCamera().SetFocalPoint(199.18608617782593, 149.5, 38.5)\n ren.GetActiveCamera().SetClippingRange(4.197001562269691, 982.4499004768599)\n \n # for depth peeling\n renWin.SetAlphaBitPlanes(1)\n renWin.SetMultiSamples(0)\n\n renWin.SetSize(1600, 900)\n \n # Render\n iren.Initialize()\n renWin.Render()\n iren.Start()\n\nif __name__==\"__main__\":\n Main()" }, { "alpha_fraction": 0.6929787397384644, "alphanum_fraction": 0.7308287620544434, "avg_line_length": 32.2149543762207, "blob_id": 
"fd9bb0e9807a5ea0bd96e251b998dc2713d6214d", "content_id": "c23edb9b914c382b811adb8c6a5ee8cbd1d9924d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7107, "license_type": "no_license", "max_line_length": 98, "num_lines": 214, "path": "/project2/src/browser.py", "repo_name": "tomelf/CS530-Projects-Scientific-Visualization", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport sys\nimport vtk\nfrom vtk import vtkScalarBarActor, vtkTextProperty\n\n# Callback function for vtkSliderWidgets\ndef vminSliderHandler(obj, event):\n sliderRepres1 = obj.GetRepresentation()\n pos = sliderRepres1.GetValue()\n global vmin\n vmin = pos\n if vmin>=vmax-1:\n vmin = vmax-1\n updateUI()\n\ndef vmaxSliderHandler(obj, event):\n sliderRepres2 = obj.GetRepresentation()\n pos = sliderRepres2.GetValue()\n global vmax\n vmax = pos\n if vmax<=vmin+1:\n vmax = vmin+1\n updateUI()\n\ndef wSliderHandler(obj, event):\n sliderRepres3 = obj.GetRepresentation()\n pos = sliderRepres3.GetValue()\n global w_value\n w_value = pos\n\n if w_value + vmax >= datamax-1:\n w_value = datamax-1-vmax\n if vmin - w_value <= datamin+1:\n w_value = vmin-datamin-1\n\n updateUI()\n\ndef updateUI():\n global vmin, vmax, w_value, lut, SliderRepres1, SliderRepres2, SliderRepres3\n\n if vmax+w_value>=datamax-1:\n vmax = datamax-w_value-1\n if vmin+w_value>=datamax-2:\n vmin = datamax-w_value-2\n\n if vmax-w_value<=datamin+2:\n vmax = w_value+datamin+2\n if vmin-w_value<=datamin+1:\n vmin = w_value+datamin+1\n\n SliderRepres1.SetValue(vmin)\n SliderRepres2.SetValue(vmax)\n SliderRepres3.SetValue(w_value)\n\n lut.RemoveAllPoints()\n lut.AddHSVPoint(datamin,0,0,0)\n lut.AddHSVPoint(vmin,0,1,1)\n lut.AddHSVPoint(vmax,0,1,1)\n lut.AddHSVPoint(datamax,0,0,1)\n lut.AddHSVPoint(vmin-w_value,0,0,(vmin-w_value-datamin)/(datamax-datamin))\n lut.AddHSVPoint(vmax+w_value,0,0,(vmax+w_value-datamin)/(datamax-datamin))\n \ndef Main():\n global vmin, vmax, w_value, datamin, datamax, lut, SliderRepres1, SliderRepres2, SliderRepres3\n # Load bathymetry dataset\n bathymetryReader = vtk.vtkStructuredPointsReader()\n bathymetryReader.SetFileName(sys.argv[1])\n bathymetryReader.Update()\n r = bathymetryReader.GetOutput().GetPointData().GetScalars().GetRange()\n\n datamin = r[0]\n datamax = r[1]\n\n vmin = datamin + (datamax-datamin)*0.4\n vmax = datamin + (datamax-datamin)*0.6\n w_value = (datamax-datamin)/20\n\n # Setup color mapping\n lut = vtk.vtkColorTransferFunction()\n lut.SetColorSpaceToHSV()\n\n # Load bathymetry data into Geometry Filter\n geometry = vtk.vtkImageDataGeometryFilter()\n geometry.SetInputConnection(bathymetryReader.GetOutputPort())\n\n mapper = vtk.vtkDataSetMapper()\n mapper.SetInputConnection(geometry.GetOutputPort())\n mapper.SetLookupTable(lut)\n mapper.ImmediateModeRenderingOff()\n\n # Setup color mapping bar\n colorBar = vtkScalarBarActor()\n colorBar.SetLookupTable(mapper.GetLookupTable())\n colorBar.SetTitle(\"color map\")\n colorBar.SetNumberOfLabels(6)\n colorBar.SetLabelFormat(\"%4.0f\")\n colorBar.SetPosition(0.9, 0.1)\n colorBar.SetWidth(0.1)\n colorBar.SetHeight(0.7)\n\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n\n # Create renderer stuff\n ren = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(ren)\n iren = vtk.vtkRenderWindowInteractor()\n\n # Specify interaction with 2D image\n style = vtk.vtkInteractorStyleImage()\n style.SetInteractionModeToImage2D()\n iren.SetInteractorStyle(style)\n\n iren.SetRenderWindow(renWin)\n\n # Add 
the actors to the renderer, set the background and size\n ren.AddActor(actor)\n ren.AddActor(colorBar)\n ren.ResetCamera()\n ren.SetBackground(0, 0, 0)\n ren.ResetCameraClippingRange()\n renWin.SetSize(1280, 800)\n\n # Add vtkSliderWidget\n SliderRepres1 = vtk.vtkSliderRepresentation2D()\n min = datamin+1\n max = datamax-2\n SliderRepres1.SetMinimumValue(min)\n SliderRepres1.SetMaximumValue(max)\n SliderRepres1.SetTitleText(\"vmin\")\n SliderRepres1.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\n SliderRepres1.GetPoint1Coordinate().SetValue(0.1, 0.1)\n SliderRepres1.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\n SliderRepres1.GetPoint2Coordinate().SetValue(0.3, 0.1)\n SliderRepres1.SetSliderLength(0.02)\n SliderRepres1.SetSliderWidth(0.03)\n SliderRepres1.SetEndCapLength(0.01)\n SliderRepres1.SetEndCapWidth(0.03)\n SliderRepres1.SetTubeWidth(0.005)\n SliderRepres1.SetLabelFormat(\"%3.0lf\")\n SliderRepres1.SetTitleHeight(0.02)\n SliderRepres1.SetLabelHeight(0.02)\n SliderWidget1 = vtk.vtkSliderWidget()\n SliderWidget1.SetInteractor(iren)\n SliderWidget1.SetRepresentation(SliderRepres1)\n SliderWidget1.KeyPressActivationOff()\n SliderWidget1.SetAnimationModeToAnimate()\n SliderWidget1.SetEnabled(True)\n SliderWidget1.AddObserver(\"InteractionEvent\", vminSliderHandler)\n\n # Add vtkSliderWidget\n SliderRepres2 = vtk.vtkSliderRepresentation2D()\n min = datamin+2\n max = datamax-1\n SliderRepres2.SetMinimumValue(min)\n SliderRepres2.SetMaximumValue(max)\n SliderRepres2.SetTitleText(\"vmax\")\n SliderRepres2.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\n SliderRepres2.GetPoint1Coordinate().SetValue(0.4, 0.1)\n SliderRepres2.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\n SliderRepres2.GetPoint2Coordinate().SetValue(0.6, 0.1)\n SliderRepres2.SetSliderLength(0.02)\n SliderRepres2.SetSliderWidth(0.03)\n SliderRepres2.SetEndCapLength(0.01)\n SliderRepres2.SetEndCapWidth(0.03)\n SliderRepres2.SetTubeWidth(0.005)\n SliderRepres2.SetLabelFormat(\"%3.0lf\")\n SliderRepres2.SetTitleHeight(0.02)\n SliderRepres2.SetLabelHeight(0.02)\n SliderWidget2 = vtk.vtkSliderWidget()\n SliderWidget2.SetInteractor(iren)\n SliderWidget2.SetRepresentation(SliderRepres2)\n SliderWidget2.KeyPressActivationOff()\n SliderWidget2.SetAnimationModeToAnimate()\n SliderWidget2.SetEnabled(True)\n SliderWidget2.AddObserver(\"InteractionEvent\", vmaxSliderHandler)\n\n # Add vtkSliderWidget\n SliderRepres3 = vtk.vtkSliderRepresentation2D()\n min = 1\n max = (datamax-datamin)/10\n SliderRepres3.SetMinimumValue(min)\n SliderRepres3.SetMaximumValue(max)\n SliderRepres3.SetTitleText(\"w\")\n SliderRepres3.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\n SliderRepres3.GetPoint1Coordinate().SetValue(0.7, 0.1)\n SliderRepres3.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\n SliderRepres3.GetPoint2Coordinate().SetValue(0.9, 0.1)\n SliderRepres3.SetSliderLength(0.02)\n SliderRepres3.SetSliderWidth(0.03)\n SliderRepres3.SetEndCapLength(0.01)\n SliderRepres3.SetEndCapWidth(0.03)\n SliderRepres3.SetTubeWidth(0.005)\n SliderRepres3.SetLabelFormat(\"%3.0lf\")\n SliderRepres3.SetTitleHeight(0.02)\n SliderRepres3.SetLabelHeight(0.02)\n SliderWidget3 = vtk.vtkSliderWidget()\n SliderWidget3.SetInteractor(iren)\n SliderWidget3.SetRepresentation(SliderRepres3)\n SliderWidget3.KeyPressActivationOff()\n SliderWidget3.SetAnimationModeToAnimate()\n SliderWidget3.SetEnabled(True)\n SliderWidget3.AddObserver(\"InteractionEvent\", wSliderHandler)\n\n 
updateUI()\n\n iren.Initialize()\n renWin.Render()\n iren.Start()\n\nif __name__==\"__main__\":\n Main()" }, { "alpha_fraction": 0.6327683329582214, "alphanum_fraction": 0.6631569862365723, "avg_line_length": 35.28260803222656, "blob_id": "038543890a51bf9729701ede8c569cb39de2490e", "content_id": "4d328b49dbd58c8ba9f65aa083a89e94482ec6c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11682, "license_type": "no_license", "max_line_length": 198, "num_lines": 322, "path": "/project3/src/complete.py", "repo_name": "tomelf/CS530-Projects-Scientific-Visualization", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport sys\nimport re\nimport vtk\nfrom vtk import vtkScalarBarActor\n\ndef clipXSliderHandler(obj, event):\n global clipX\n clipX = obj.GetRepresentation().GetValue()\n updateCT()\n\ndef clipYSliderHandler(obj, event):\n global clipY\n clipY = obj.GetRepresentation().GetValue()\n updateCT()\n\ndef clipZSliderHandler(obj, event):\n global clipZ\n clipZ = obj.GetRepresentation().GetValue()\n updateCT()\n\ndef updateCT():\n global clipX, clipY, clipZ, plane1s, plane2s, plane3s, planeSource1s, planeSource2s, planeSource3s, clipper1s, clipper2s, clipper3s\n for i in range(0,len(clipper1s)):\n planeSource1s[i].SetOrigin(clipX,0,0)\n planeSource2s[i].SetOrigin(0,clipY,0)\n planeSource3s[i].SetOrigin(0,0,clipZ)\n plane1s[i].SetOrigin(planeSource1s[i].GetOrigin())\n plane2s[i].SetOrigin(planeSource2s[i].GetOrigin())\n plane3s[i].SetOrigin(planeSource3s[i].GetOrigin())\n clipper1s[i].Update()\n clipper2s[i].Update()\n clipper3s[i].Update()\n print \"clip (%f,%f,%f)\" %(clipX,clipY,clipZ)\n\ndef loadParamsFile(filename):\n global continuousType, continuousSize, continuousData\n f = open(filename, \"r\")\n first_flag = True\n for line in f:\n line = line.lstrip()\n if line[0] != '#' and first_flag:\n first_flag = False\n \n m = re.match('(\\d+)[ \\t]*([A-Za-z]+)', line)\n if m:\n continuousSize = m.group(1)\n continuousType = m.group(2)\n \n elif line[0] != '#' and not first_flag:\n m = re.match('([-\\./\\d]+)[ \\t]*([-\\./\\d]+)[ \\t]*([-\\./\\d]+)[ \\t]*([-\\./\\d]+)[ \\t]*([-\\./\\d]+)[ \\t]*([-\\./\\d]+)[ \\t]*([-\\./\\d]+)', line)\n if m:\n #<isovalue> <grad_min> <grad_max> <x> <y> <z> <alpha>\n continuousData.append(\n [float(m.group(1)),float(m.group(2)),float(m.group(3)),float(m.group(4)),\n float(m.group(5)),float(m.group(6)),float(m.group(7))]\n )\n f.close()\n\ndef Main():\n global continuousSize, continuousType, continuousData, contours, planeSource1s, planeSource2s, planeSource3s, plane1s, plane2s, plane3s, clipper1s, clipper2s, clipper3s, clipX, clipY, clipZ, lut\n\n print \"data: %s\" % sys.argv[1]\n reader = vtk.vtkStructuredPointsReader()\n reader.SetFileName(sys.argv[1])\n reader.Update()\n \n print \"gradientmag: %s\" % sys.argv[2]\n gmreader = vtk.vtkStructuredPointsReader()\n gmreader.SetFileName(sys.argv[2])\n gmreader.Update()\n \n continuousSize = 0\n continuousType = \"HSV\"\n continuousData = []\n \n print \"params: %s\" % sys.argv[3]\n loadParamsFile(sys.argv[3])\n \n clipX = 0\n clipY = 0\n clipZ = 0\n \n r = reader.GetOutput().GetScalarRange()\n datamin = r[0]\n datamax = r[1]\n\n for i in range(4, len(sys.argv)):\n if sys.argv[i] == \"--clip\":\n print \"clip (%s,%s,%s)\" % (sys.argv[i+1],sys.argv[i+2],sys.argv[i+3])\n clipX = float(sys.argv[i+1])\n clipY = float(sys.argv[i+2])\n clipZ = float(sys.argv[i+3])\n \n clipperActors = []\n planeSource1s = []\n planeSource2s = []\n planeSource3s = []\n 
plane1s = []\n plane2s = []\n plane3s = []\n clipper1s = []\n clipper2s = []\n clipper3s = []\n\n for i in range(0,len(continuousData)):\n contours = vtk.vtkContourFilter()\n contours.SetInputConnection(reader.GetOutputPort());\n contours.ComputeNormalsOn()\n contours.SetValue(0, float(continuousData[i][0]))\n\n planeSource1s.append(vtk.vtkPlaneSource())\n planeSource1s[i].SetNormal(1,0,0)\n planeSource1s[i].SetOrigin(clipX,0,0)\n plane1s.append(vtk.vtkPlane())\n plane1s[i].SetNormal(planeSource1s[i].GetNormal())\n plane1s[i].SetOrigin(planeSource1s[i].GetOrigin())\n clipper1s.append(vtk.vtkClipPolyData())\n clipper1s[i].SetClipFunction(plane1s[i])\n clipper1s[i].SetInputConnection(contours.GetOutputPort())\n clipper1s[i].Update()\n\n planeSource2s.append(vtk.vtkPlaneSource())\n planeSource2s[i].SetNormal(0,1,0)\n planeSource2s[i].SetOrigin(0,clipY,0)\n plane2s.append(vtk.vtkPlane())\n plane2s[i].SetNormal(planeSource2s[i].GetNormal())\n plane2s[i].SetOrigin(planeSource2s[i].GetOrigin())\n clipper2s.append(vtk.vtkClipPolyData())\n clipper2s[i].SetClipFunction(plane2s[i])\n clipper2s[i].SetInputConnection(clipper1s[i].GetOutputPort())\n clipper2s[i].Update()\n\n planeSource3s.append(vtk.vtkPlaneSource())\n planeSource3s[i].SetNormal(0,0,1)\n planeSource3s[i].SetOrigin(0,0,clipZ)\n plane3s.append(vtk.vtkPlane())\n plane3s[i].SetNormal(planeSource3s[i].GetNormal())\n plane3s[i].SetOrigin(planeSource3s[i].GetOrigin())\n clipper3s.append(vtk.vtkClipPolyData())\n clipper3s[i].SetClipFunction(plane3s[i])\n clipper3s[i].SetInputConnection(clipper2s[i].GetOutputPort())\n clipper3s[i].Update()\n\n probeFilter = vtk.vtkProbeFilter()\n probeFilter.SetInputConnection(0, clipper3s[i].GetOutputPort())\n probeFilter.SetInputConnection(1, gmreader.GetOutputPort())\n probeFilter.Update()\n\n gmrange = probeFilter.GetOutput().GetScalarRange()\n gmin = gmrange[0]\n gmax = gmrange[1]\n\n gmclipper1 = vtk.vtkClipPolyData()\n gmclipper1.SetInputConnection(probeFilter.GetOutputPort())\n gmclipper1.InsideOutOff()\n gmclipper1.SetValue(int(continuousData[i][1]))\n gmclipper1.Update()\n\n gmclipper2 = vtk.vtkClipPolyData()\n gmclipper2.SetInputConnection(gmclipper1.GetOutputPort())\n gmclipper2.InsideOutOn()\n gmclipper2.SetValue(int(continuousData[i][2]))\n gmclipper2.Update()\n\n lut = vtk.vtkColorTransferFunction()\n if continuousType == \"HSV\":\n lut.SetColorSpaceToHSV()\n p = continuousData[i]\n print \"Color: %s\"%p\n lut.AddHSVPoint(p[0],p[3],p[4],p[5])\n elif continuousType == \"RGB\":\n lut.SetColorSpaceToRGB()\n p = continuousData[i]\n print \"Color: %s\"%p\n lut.AddRGBPoint(p[0],p[3],p[4],p[5])\n\n clipperMapper = vtk.vtkPolyDataMapper()\n clipperMapper.SetLookupTable(lut)\n clipperMapper.SetInputConnection(gmclipper2.GetOutputPort())\n\n clipperActors.append(vtk.vtkActor())\n clipperActors[i].GetProperty().SetRepresentationToWireframe()\n clipperActors[i].SetMapper(clipperMapper)\n \n backFaces = vtk.vtkProperty()\n backFaces.SetSpecular(0)\n backFaces.SetDiffuse(0)\n backFaces.SetAmbient(0)\n backFaces.SetAmbientColor(1,0,0)\n clipperActors[i].SetBackfaceProperty(backFaces)\n\n lut_color = vtk.vtkColorTransferFunction()\n for d in continuousData:\n if continuousType == \"HSV\":\n lut_color.SetColorSpaceToHSV()\n lut_color.AddHSVPoint(d[0],d[3],d[4],d[5])\n elif continuousType == \"RGB\":\n lut_color.SetColorSpaceToRGB()\n lut_color.AddRGBPoint(d[0],d[3],d[4],d[5])\n\n colorMapper = vtk.vtkPolyDataMapper()\n colorMapper.SetLookupTable(lut_color)\n\n colorBar = vtkScalarBarActor()\n 
colorBar.SetLookupTable(colorMapper.GetLookupTable())\n colorBar.SetTitle(\"isovalue\")\n colorBar.SetNumberOfLabels(6)\n colorBar.SetLabelFormat(\"%4.0f\")\n colorBar.SetPosition(0.9, 0.1)\n colorBar.SetWidth(0.1)\n colorBar.SetHeight(0.7)\n\n ren = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(ren)\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n\n for i in range(0,len(clipperActors)):\n clipperActors[i].GetProperty().SetOpacity(continuousData[i][6]) # load opacity for actor\n ren.AddActor(clipperActors[i])\n ren.AddActor(colorBar)\n\n # for depth peeling\n ren.SetUseDepthPeeling(1)\n ren.SetMaximumNumberOfPeels(2) # default 4\n ren.SetOcclusionRatio(0.1) # default 0\n\n ren.ResetCamera()\n ren.SetBackground(0.2,0.3,0.4)\n ren.ResetCameraClippingRange()\n\n # for depth peeling\n renWin.SetAlphaBitPlanes(1)\n renWin.SetMultiSamples(0)\n\n renWin.SetSize(1200, 600)\n\n clipXSlider = vtk.vtkSliderRepresentation2D()\n clipXSlider.SetMinimumValue(0)\n clipXSlider.SetMaximumValue(300)\n clipXSlider.SetValue(clipX)\n clipXSlider.SetTitleText(\"X\")\n clipXSlider.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\n clipXSlider.GetPoint1Coordinate().SetValue(0.0, 0.3)\n clipXSlider.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\n clipXSlider.GetPoint2Coordinate().SetValue(0.2, 0.3)\n clipXSlider.SetSliderLength(0.02)\n clipXSlider.SetSliderWidth(0.03)\n clipXSlider.SetEndCapLength(0.01)\n clipXSlider.SetEndCapWidth(0.03)\n clipXSlider.SetTubeWidth(0.005)\n clipXSlider.SetLabelFormat(\"%1.2lf\")\n clipXSlider.SetTitleHeight(0.02)\n clipXSlider.SetLabelHeight(0.02)\n SliderWidget2 = vtk.vtkSliderWidget()\n SliderWidget2.SetInteractor(iren)\n SliderWidget2.SetRepresentation(clipXSlider)\n SliderWidget2.KeyPressActivationOff()\n SliderWidget2.SetAnimationModeToAnimate()\n SliderWidget2.SetEnabled(True)\n SliderWidget2.AddObserver(\"InteractionEvent\", clipXSliderHandler)\n\n clipYSlider = vtk.vtkSliderRepresentation2D()\n clipYSlider.SetMinimumValue(0)\n clipYSlider.SetMaximumValue(300)\n clipYSlider.SetValue(clipY)\n clipYSlider.SetTitleText(\"Y\")\n clipYSlider.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\n clipYSlider.GetPoint1Coordinate().SetValue(0.0, 0.2)\n clipYSlider.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\n clipYSlider.GetPoint2Coordinate().SetValue(0.2, 0.2)\n clipYSlider.SetSliderLength(0.02)\n clipYSlider.SetSliderWidth(0.03)\n clipYSlider.SetEndCapLength(0.01)\n clipYSlider.SetEndCapWidth(0.03)\n clipYSlider.SetTubeWidth(0.005)\n clipYSlider.SetLabelFormat(\"%1.2lf\")\n clipYSlider.SetTitleHeight(0.02)\n clipYSlider.SetLabelHeight(0.02)\n SliderWidget3 = vtk.vtkSliderWidget()\n SliderWidget3.SetInteractor(iren)\n SliderWidget3.SetRepresentation(clipYSlider)\n SliderWidget3.KeyPressActivationOff()\n SliderWidget3.SetAnimationModeToAnimate()\n SliderWidget3.SetEnabled(True)\n SliderWidget3.AddObserver(\"InteractionEvent\", clipYSliderHandler)\n\n clipZSlider = vtk.vtkSliderRepresentation2D()\n clipZSlider.SetMinimumValue(0)\n clipZSlider.SetMaximumValue(300)\n clipZSlider.SetValue(clipZ)\n clipZSlider.SetTitleText(\"Z\")\n clipZSlider.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()\n clipZSlider.GetPoint1Coordinate().SetValue(0.0, 0.1)\n clipZSlider.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()\n clipZSlider.GetPoint2Coordinate().SetValue(0.2, 0.1)\n clipZSlider.SetSliderLength(0.02)\n clipZSlider.SetSliderWidth(0.03)\n 
clipZSlider.SetEndCapLength(0.01)\n clipZSlider.SetEndCapWidth(0.03)\n clipZSlider.SetTubeWidth(0.005)\n clipZSlider.SetLabelFormat(\"%1.2lf\")\n clipZSlider.SetTitleHeight(0.02)\n clipZSlider.SetLabelHeight(0.02)\n SliderWidget4 = vtk.vtkSliderWidget()\n SliderWidget4.SetInteractor(iren)\n SliderWidget4.SetRepresentation(clipZSlider)\n SliderWidget4.KeyPressActivationOff()\n SliderWidget4.SetAnimationModeToAnimate()\n SliderWidget4.SetEnabled(True)\n SliderWidget4.AddObserver(\"InteractionEvent\", clipZSliderHandler)\n\n # Render\n iren.Initialize()\n renWin.Render()\n iren.Start()\n\nif __name__==\"__main__\":\n Main()" }, { "alpha_fraction": 0.7879503965377808, "alphanum_fraction": 0.8163024187088013, "avg_line_length": 69.54166412353516, "blob_id": "c459d92930e3166c43f7aed370f613a2d4c015d4", "content_id": "450da6ec40b4911f247a0463946b72dd372c270f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1693, "license_type": "no_license", "max_line_length": 281, "num_lines": 24, "path": "/README.md", "repo_name": "tomelf/CS530-Projects-Scientific-Visualization", "src_encoding": "UTF-8", "text": "# CS530-Projects-Scientific-Visualization\nPython projects for scientific visualization with [VTK](https://www.vtk.org/).\n\n\n## Project 1, 2 - Interpolation and Color Visualization\nThe two projects involve exploring interpolation schemes and the use of color mapping to visualize two-dimensional scalar datasets. \n\n[Project 1 Report](https://github.com/tomelf/CS530-Projects-Scientific-Visualization/blob/master/project1/yang798.pdf) \n[Project 2 Report](https://github.com/tomelf/CS530-Projects-Scientific-Visualization/blob/master/project2/yang798.pdf)\n\n## Project 3 - Isosurfaces\nThe project considers isosurfaces to visualize three-dimensional scalar datasets. Other topics covered in this context are transparency, clipping, and multi-dimensional transfer functions. \n\n[Project 3 Report](https://github.com/tomelf/CS530-Projects-Scientific-Visualization/blob/master/project3/yang798.pdf)\n\n## Project 4 - Volume Rendering and Transfer Functions\nThe topic of the project is direct volume rendering. A transfer function design approach is presented that exploits the close connection between isosurfacing and volume rendering. \n\n[Project 4 Report](https://github.com/tomelf/CS530-Projects-Scientific-Visualization/blob/master/project4/yang798.pdf)\n\n## Project 5 - Vector Field Visualization\nThe project is concerned with flow visualization. Your task consists in applying the main vector field visualization techniques (glyphs, streamlines, and stream surfaces) to a CFD velocity dataset. An optional task that counts toward extra credit is on tensor field visualization. \n\n[Project 5 Report](https://github.com/tomelf/CS530-Projects-Scientific-Visualization/blob/master/project5/yang798.pdf)\n" } ]
13
somchaisomph/pymraa
https://github.com/somchaisomph/pymraa
bf6b2c36be863319cd6a74ba440a807d40b93878
34e7ef69a4b065e75a487c29bbb7bd7147343fd8
8717d004660f3680b063021a7cceeeb2a2b515ff
refs/heads/master
2021-05-03T16:56:01.488371
2016-10-27T06:10:52
2016-10-27T06:10:52
72,001,234
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.682692289352417, "alphanum_fraction": 0.692307710647583, "avg_line_length": 16.33333396911621, "blob_id": "26bcfe9995ded537061d4e2964c5664a35cd23d0", "content_id": "f514ba1290d3384b3268a9c42049a28b18bd2768", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 104, "license_type": "no_license", "max_line_length": 36, "num_lines": 6, "path": "/README.md", "repo_name": "somchaisomph/pymraa", "src_encoding": "UTF-8", "text": "# pymraa\ncollection of python3 mraa\n<ul>\n<li>LED blink.</li>\n<li>Simple switch with asyncio.</li>\n</ul>\n" }, { "alpha_fraction": 0.6728624701499939, "alphanum_fraction": 0.6926889419555664, "avg_line_length": 32.58333206176758, "blob_id": "554e705cc9880be929afeddd0a0e07c4674e75b4", "content_id": "83d5a890f38402df7f63654a364c9774999dc967", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 807, "license_type": "no_license", "max_line_length": 93, "num_lines": 24, "path": "/asynchronous/switch_with_asyncio.py", "repo_name": "somchaisomph/pymraa", "src_encoding": "UTF-8", "text": "import mraa\nimport asyncio\n#Tested with only python 3.4 installed on Raspbian\[email protected]\ndef operate():\n global sw, led\n state = sw.read() # sw waits for signal flowing from pin 1 every time switch is pressed\n led.write(state)\n yield from asyncio.sleep(0.1)\n asyncio.async(operate())\n \nif __name__ == \"__main__\":\n sw = mraa.Gpio(12) # attach pin 12 to one side of switch and pin 1 (3.3v) to another side .\n led = mraa.Gpio(40) # attach pin 40 to LED\n sw.dir(mraa.DIR_IN) \n led.dir(mraa.DIR_OUT) \n loop = asynio.get_event_loop()\n try:\n asyncio.async(operate()) #prepare calling operate function\n loop.run_for_ever() # enter eternal loop \n except KeyboardInterrupt : # loop is broken when user press Ctrl-C\n pass\n finally:\n loop.close() # finally loop is completely closed.\n\n" }, { "alpha_fraction": 0.446373850107193, "alphanum_fraction": 0.45965269207954407, "avg_line_length": 24.763158798217773, "blob_id": "0438d98f19fc7b8f8b7952dc4d33a73e2f0c7eb0", "content_id": "d7aaf271c37ea2d1911edaee0620df23f207c3c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 979, "license_type": "no_license", "max_line_length": 48, "num_lines": 38, "path": "/light/led.py", "repo_name": "somchaisomph/pymraa", "src_encoding": "UTF-8", "text": "import mraa\nimport time\nimport threading\n\nclass LED(threading.Thread):\n def __init__(self,pin_number):\n threading.Thread.__init__(self)\n try:\n self._led = mraa.Gpio(pin_number)\n self._led.dir(mraa.DIR_OUT)\n self.stop_flag = False\n except :\n raise\n \n def run(self):\n self.stop_flag = False\n while not self.stop_flag :\n pass\n \n def blink(self,interval=0.1,times=2):\n for i in range(times):\n self._led.write(1)\n time.sleep(interval)\n self._led.write(0)\n time.sleep(interval)\n \n def end(self):\n self.stop_flag = True\n self.join()\n \n \n \nif __name__ == \"__main__\":\n led = LED(37) # connect pin 37 to LED\n led.start()\n for i in range(5):\n led.blink(interval=0.5,times=5) \n led.end()\n" } ]
3
Archideus/django-storages
https://github.com/Archideus/django-storages
2e93fed90f6774174b18369a1c905e8e8a5b2366
043b91b10ebfebdf7e752d743ae630f0daa2c4f4
fb465e12775e7906526c4eca05a9f45d3ac8abcb
refs/heads/master
2023-02-23T09:24:40.060461
2017-03-13T19:55:29
2017-03-13T19:55:29
86,492,127
0
0
BSD-3-Clause
2017-03-28T18:12:10
2017-03-28T18:12:11
2023-02-14T17:10:18
Python
[ { "alpha_fraction": 0.7099099159240723, "alphanum_fraction": 0.7099099159240723, "avg_line_length": 33.6875, "blob_id": "1340894e95777aed0e6894a77eabeac3270e1e06", "content_id": "2e804b25e313e01f58c8e6ea388ce52612c2282f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 555, "license_type": "permissive", "max_line_length": 60, "num_lines": 16, "path": "/tests/test_utils.py", "repo_name": "Archideus/django-storages", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom storages import utils\n\n\nclass SettingTest(TestCase):\n def test_get_setting(self):\n value = utils.setting('SECRET_KEY')\n self.assertEqual(settings.SECRET_KEY, value)\n\n def test_setting_unfound(self):\n self.assertIsNone(utils.setting('FOO'))\n self.assertEqual(utils.setting('FOO', 'bar'), 'bar')\n with self.assertRaises(ImproperlyConfigured):\n utils.setting('FOO', strict=True)\n" } ]
1
hitmoon/ML
https://github.com/hitmoon/ML
548ba4b730ecb2572d839c0aba4746e83f755e24
c3f406f486af8e9392f8a2cbdda9b8b1fa55a722
b9fae84a2345810c3210c830222d8b754c69c351
refs/heads/master
2020-03-28T19:59:03.678943
2017-10-29T02:35:56
2017-10-29T02:35:56
94,605,866
0
2
null
null
null
null
null
[ { "alpha_fraction": 0.606049120426178, "alphanum_fraction": 0.6332703232765198, "avg_line_length": 30.867469787597656, "blob_id": "1f82aa9c7773e86f166b5110deaefe672fa94959", "content_id": "1bd412d6ecc62fa14fea0f37994c5ea76784f7c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8671, "license_type": "no_license", "max_line_length": 135, "num_lines": 249, "path": "/tensorflow/dogs/rec-dog-2.py", "repo_name": "hitmoon/ML", "src_encoding": "UTF-8", "text": "#/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport glob\nfrom itertools import groupby\nfrom collections import defaultdict\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom tensorflow.contrib.layers.python.layers import fully_connected, convolution2d\n\nimport tensorflow as tf\nimport sys\n\nimage_dir='/home/hitmoon/DOGS-images/Images'\nimage_filenames = glob.glob('{dir}/{glob}'.format(dir=image_dir, glob='n02*/*.jpg'))\n#print(image_filenames[0:2])\n\ntrain_glob = './output/training-images/*.tfrecords'\ntest_glob = './output/testing-images/*.tfrecords'\n\n# 依据品种对图像进行分组\ndef group_image(filenames):\n\n training_dataset = defaultdict(list)\n testing_dataset = defaultdict(list)\n\n # 将文件名分解为品种和相应的文件名,品种对应于文件夹名称\n image_filename_with_breed = map(lambda filename:\n (filename.split('/')[5], filename), filenames)\n #print(image_filename_with_breed)\n\n for dog_breed, breed_images in groupby(image_filename_with_breed, lambda x: x[0]):\n # 将每个品种的20% 划入到测试集中\n for i, breed_images in enumerate(breed_images):\n if i % 5 == 0:\n testing_dataset[dog_breed].append(breed_images[1])\n else:\n training_dataset[dog_breed].append(breed_images[1])\n\n\n # 检查每个品种的测试集图像是否至少有全部图像的18%\n breed_training_count = len(training_dataset[dog_breed])\n breed_testing_count = len(testing_dataset[dog_breed])\n assert round(breed_testing_count / (breed_training_count + breed_testing_count), 2) > 0.18, \"Not enough testing images.\"\n return training_dataset, testing_dataset\n\n#print(testing_dataset['n02085620-Chihuahua'])\n\n\ndef write_records_file(dataset, record_location):\n '''\n 用dataset中的图像填充一个TFRecord文件,并将类别包含进来\n\n 参数:\n dataset : dict(list)\n 这个字典的键对应于其值中文件名列表对应的标签\n \n record_location: str\n 存储TFRecord的输出路径\n '''\n\n current_index = 0\n writer = None\n # 枚举dataset, 因为当前索引用于对文件的划分,每隔100幅图像,训练样本的信息就被写入一个新的TFRecord文件中,以加快操作进程\n for breed, images_filenames in dataset.items():\n for image_filename in images_filenames:\n if current_index % 100 == 0:\n if writer:\n writer.close()\n record_filename = '{record_location}.{index}.tfrecords'.format(record_location = record_location, index= current_index)\n print(\"record_filename = \", record_filename)\n writer = tf.python_io.TFRecordWriter(record_filename)\n\n # 利用PIL 打开文件\n image = Image.open(image_filename)\n\n # 转换为灰度图会减少计算量和内存占用,但这并不是必须的\n image = image.convert('L')\n\n image_bytes = image.resize((250,151)).tobytes()\n # 将标签按照字符串存储比较高效\n image_label = breed.encode('utf-8')\n\n example = tf.train.Example(features = tf.train.Features(feature = {\n 'label':tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_label])),\n 'image':tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_bytes]))}))\n writer.write(example.SerializeToString())\n\n current_index += 1\n if writer:\n writer.close()\n\n\n\ndef encode_to_tf_records():\n\n training_dataset, testing_dataset = group_image(image_filenames)\n write_records_file(testing_dataset, './output/testing-images/testing-image')\n write_records_file(training_dataset, 
'./output/training-images/training-image')\n\n\ndef decode_tf_records(glob_pat):\n    # Read back the saved TFRecord files\n    print(\"reading TFRecords files ...\")\n    filenames = glob.glob(glob_pat)\n    filename_queue = tf.train.string_input_producer(filenames)\n    reader = tf.TFRecordReader()\n    _, serialized = reader.read(filename_queue)\n\n    features = tf.parse_single_example(\n        serialized,\n        features={\n            'label': tf.FixedLenFeature([], tf.string),\n            'image': tf.FixedLenFeature([], tf.string),\n        })\n\n    record_image = tf.decode_raw(features['image'], tf.uint8)\n    # Reshaping the image helps with training and visualizing the output\n    image = tf.reshape(record_image, [250, 151, 1])\n    label = tf.cast(features['label'], tf.string)\n    print(\"get image and label\")\n    print(\"label = \", label)\n    return image,label\n\ndef get_batch(batch_size, glob_pat):\n\n    image, label = decode_tf_records(glob_pat)\n\n    # Fetch one batch\n    print(\"get a batch\")\n    image_batch, label_batch = tf.train.shuffle_batch([image, label], batch_size = batch_size,\n                                                      capacity = 50000, min_after_dequeue = 10000, num_threads = 3)\n\n    # Convert the images to floats with grayscale values in [0,1), matching the input that convolution2d expects\n    float_image_batch = tf.image.convert_image_dtype(image_batch, tf.float32)\n\n    return float_image_batch, label_batch\n\ndef weight_variable(shape):\n    value = tf.truncated_normal(shape, stddev=0.1)\n    return tf.Variable(value)\n\ndef bias_variable(shape):\n    value = tf.constant(0.1, shape=shape)\n    return tf.Variable(value)\n\ndef conv2d(x, W, strides):\n    return tf.nn.conv2d(x, W, strides=strides, padding='SAME')\n\ndef max_pool_2x2(x):\n    return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\n\ndef rec_cnn(input_image):\n    print(\"start to define the graph...\")\n\n    # Convolution layer 1\n    W_c1 = weight_variable([5, 5, 1, 32])\n    b_c1 = bias_variable([32])\n\n    h_c1 = tf.nn.relu(conv2d(input_image, W_c1, [1, 2, 2, 1]) + b_c1)\n    h_pool1 = max_pool_2x2(h_c1)\n\n    # Convolution layer 2\n    W_c2 = weight_variable([5, 5, 32, 64])\n    b_c2 = bias_variable([64])\n\n    h_c2 = tf.nn.relu(conv2d(h_pool1, W_c2, [1, 1, 1, 1]) + b_c2)\n    h_pool2 = max_pool_2x2(h_c2)\n\n    # densely connected layer\n    W_fc1 = weight_variable([32 * 19 * 64, 512])\n    b_fc1 = bias_variable([512])\n\n    h_pool2_flat = tf.reshape(h_pool2, [-1, 32 * 19 * 64])\n    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n    # Apply dropout to some neurons to reduce their importance in the model\n    h_fc1_drop = tf.nn.dropout(h_fc1, 0.1)\n\n    # The output fully connects the previous layer to the 120 dog breeds available in training\n    W_fc2 = weight_variable([512, 120])\n    b_fc2 = bias_variable([120])\n    \n    final = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n    print(\"graph is ready\")\n    return final\n\n\ndef train_rec_dog():\n\n    # encode_to_tf_records()\n\n    # Training data\n    image_batch, label_batch = get_batch(10, train_glob)\n\n    # Find all the dog breeds\n    labels = list(map(lambda c: c.split('/')[-1], glob.glob('{dir}/*'.format(dir=image_dir))))\n    # Match each label coming from label_batch and return its index in the list of categories\n    train_labels = tf.map_fn(lambda l: tf.where(tf.equal(labels, l))[0, 0:1][0], label_batch, dtype=tf.int64)\n\n    out = rec_cnn(image_batch)\n\n    # Define the loss function\n    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n        labels=train_labels, logits = out))\n\n    train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)\n\n    print('train_labels shape =', train_labels.get_shape())\n    print('fully_connected shape =', out.get_shape())\n    predict = tf.argmax(out, 1)\n    corr = tf.equal(train_labels, predict)\n    acc = tf.reduce_mean(tf.cast(corr, tf.float32))\n\n    # Testing data\n    test_image_batch, test_label_batch = get_batch(10, test_glob)\n    test_labels = tf.map_fn(lambda l: tf.where(tf.equal(labels, l))[0, 0:1][0], test_label_batch, dtype=tf.int64)\n    test_out = 
rec_cnn(test_image_batch)\n\n    test_predict = tf.argmax(test_out, 1)\n    test_corr = tf.equal(test_labels, test_predict)\n    test_acc = tf.reduce_mean(tf.cast(test_corr, tf.float32))\n\n    steps = 100000\n\n    print(\"start training ...\")\n\n    init = tf.global_variables_initializer()\n    with tf.Session() as sess:\n        sess.run(init)\n        coord = tf.train.Coordinator()\n        threads = tf.train.start_queue_runners(sess = sess, coord = coord)\n\n        for step in range(steps):\n            #print('training ...')\n            sess.run(train_op)\n            if step % 10 == 0:\n                _acc, _test_acc, _loss = sess.run([acc, test_acc, loss])\n                print(\"step: %d, loss: %9f, train accuracy: %9f, test accuracy: %9f\" % (step, _loss, _acc, _test_acc))\n\n        coord.request_stop()\n        coord.join(threads)\n        print(\"train done!\")\n        sess.close()\n\n\nif __name__ == '__main__':\n    print(\"start .....\")\n    train_rec_dog()\n" }, { "alpha_fraction": 0.5626477599143982, "alphanum_fraction": 0.6406619548797607, "avg_line_length": 21.263158798217773, "blob_id": "91b99665e326a7a312a9c16aec36e73cbf274851", "content_id": "7ee58f2534c1a341becd304202ec3a448501ad9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 423, "license_type": "no_license", "max_line_length": 53, "num_lines": 19, "path": "/tensorflow/line-plot.py", "repo_name": "hitmoon/ML", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\n# 1000 random points around y= 0.1x + 0.3\n\nnum_points = 1000\nvectors_set = []\nfor i in range(num_points):\n    x1 = np.random.normal(0.0, 0.55)\n    y1 = x1 * 0.1 + 0.3 + np.random.normal(0.0, 0.03)\n    vectors_set.append([x1,y1])\n\n# generate samples\n\nx_data = [v[0] for v in vectors_set]\ny_data = [v[1] for v in vectors_set]\n\nplt.scatter(x_data, y_data, c='r')\nplt.show()\n" }, { "alpha_fraction": 0.5820770263671875, "alphanum_fraction": 0.623115599155426, "avg_line_length": 23.39215660095215, "blob_id": "051468b7b384e06c5964ff49f196e03b728c1172", "content_id": "3810775e1a36d960359303cb6c52daf35829f14e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1388, "license_type": "no_license", "max_line_length": 79, "num_lines": 51, "path": "/tensorflow/line-reg.py", "repo_name": "hitmoon/ML", "src_encoding": "UTF-8", "text": "\n# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nimport numpy as np\n\n# 1000 random points around y= 0.1x + 0.3\n\nnum_points = 1000\nvectors_set = []\nfor i in range(num_points):\n    x1 = np.random.normal(0.0, 0.55)\n    y1 = x1 * 0.1 + 0.3 + np.random.normal(0.0, 0.03)\n    vectors_set.append([x1,y1])\n\n# generate samples\n\nx_data = [v[0] for v in vectors_set]\ny_data = [v[1] for v in vectors_set]\n\n\n# Create the 1-D W matrix, initialized with random numbers in [-1, 1]\nW = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name = 'W')\n# Create the 1-D b matrix, initialized to 0\nb = tf.Variable(tf.zeros(1), name = 'b')\n# Compute the estimated value y\ny = W * x_data + b\n\n# Use the mean squared error between the estimate y and y_data as the loss\nloss = tf.reduce_mean(tf.square(y - y_data), name = 'loss')\n\n# Optimize the parameters with gradient descent\noptimizer = tf.train.GradientDescentOptimizer(0.5)\n\n# Training simply minimizes this error\ntrain = optimizer.minimize(loss, name = 'train')\n\nsess = tf.Session()\ninit = tf.global_variables_initializer()\nsess.run(init)\n\n# Print the initial W, b and loss values\nprint ('W = ', sess.run(W), 'b = ', sess.run(b), 'loss = ', sess.run(loss))\n\n\n# Run 20 training steps\nfor step in range(20):\n    sess.run(train)\n    # Print W, b, loss\n    print ('W = ', sess.run(W), 'b = ', sess.run(b), 'loss = ', sess.run(loss))\n\n# writer = tf.train.SummaryWriter(\"./\", sess.graph)\n" }, { 
"alpha_fraction": 0.6176470518112183, "alphanum_fraction": 0.6411764621734619, "avg_line_length": 21.66666603088379, "blob_id": "65179b6805a705b762d56d3fd91a9eca3e78365b", "content_id": "7208edb5dc4c4c578fcb35dfe23cb2685287fb05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 340, "license_type": "no_license", "max_line_length": 64, "num_lines": 15, "path": "/tensorflow/train-saver.py", "repo_name": "hitmoon/ML", "src_encoding": "UTF-8", "text": "import tensorflow as tf\n\n\nw = tf.Variable([[0.5, 1.0]])\nx = tf.Variable([[2.0], [1.0]])\ny = tf.matmul(w, x)\n\ninit_op = tf.global_variables_initializer()\nsaver = tf.train.Saver()\n\nwith tf.Session() as sess:\n sess.run(init_op)\n\n save_path = saver.save(sess, \"/Users/hitmoon/ML/model_save\")\n print (\"modele saved in file\", save_path)\n" }, { "alpha_fraction": 0.5522294640541077, "alphanum_fraction": 0.586172342300415, "avg_line_length": 29.465648651123047, "blob_id": "3242c26818bc9ce9d465b8b73baa11bd32f009c2", "content_id": "1d94c24b576dbcc9429e8da23c64b67b334d048b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8264, "license_type": "no_license", "max_line_length": 111, "num_lines": 262, "path": "/tensorflow/captcha-CNN.py", "repo_name": "hitmoon/ML", "src_encoding": "UTF-8", "text": "\n# -*- coding: utf-8 -*-\nimport numpy as np\nfrom captcha.image import ImageCaptcha\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport random\n\nimport tensorflow as tf\nimport sys\n\n# 验证码字符\nnumber = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n\ncharset = number\n\nCHAR_SET_LEN = len(charset)\nIMAGE_HEIGHT = 60\nIMAGE_WITDH = 160\nMAX_CAPTCHA = 4\n\n# 生成长度为4的随机字符序列\ndef random_captcha_text(charset = number, captcha_size = 4):\n captcha_text = []\n for i in range(captcha_size):\n c = random.choice(charset)\n captcha_text.append(c)\n\n return captcha_text\n\n# 生成对应的验证码\ndef gen_captcha_text_and_image():\n image = ImageCaptcha()\n\n captcha_text = random_captcha_text()\n captcha_text = ''.join(captcha_text)\n\n captcha = image.generate(captcha_text)\n\n captcha_image = Image.open(captcha)\n captcha_image = np.array(captcha_image)\n return captcha_text, captcha_image\n\n\n# 把彩色图转换为灰度图\ndef convert2gray(img):\n if len(img.shape) > 2:\n gray = np.mean(img, -1)\n '''\n 正规做法如下, 上面做法较快\n r, g, b = img[:,:,0], img[:,:,1], img[:,:,2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n '''\n return gray\n else:\n return img\n\n# 文本转向量\ndef text2vec(text):\n text_len = len(text)\n if (text_len > MAX_CAPTCHA):\n raise ValueError('验证码最长4个字符')\n\n vector = np.zeros(MAX_CAPTCHA * CHAR_SET_LEN)\n\n for i, c in enumerate(text):\n idx = i * CHAR_SET_LEN + int(c)\n vector[idx] = 1\n\n return vector\n\n\n# 向量转文本\ndef vec2text(vec):\n char_pos = vec.nonzero()[0]\n text = []\n for i, c in enumerate(char_pos):\n char_idx = c % CHAR_SET_LEN\n text.append(str(char_idx))\n\n return \"\".join(text)\n\n# 生成一个训练的batch\ndef get_next_batch(batch_size = 128):\n batch_x = np.zeros([batch_size, IMAGE_HEIGHT * IMAGE_WITDH])\n batch_y = np.zeros([batch_size, MAX_CAPTCHA * CHAR_SET_LEN])\n\n # 有时生成的图像大小不是(60 * 160 * 3)\n def wrap_gen_captcha_text_and_image():\n while True:\n text, image = gen_captcha_text_and_image()\n if image.shape == (60, 160, 3):\n return text, image\n\n for i in range(batch_size):\n text, image = wrap_gen_captcha_text_and_image()\n image = convert2gray(image)\n batch_x[i,:] = image.flatten() / 255\n batch_y[i,:] = text2vec(text)\n\n 
return batch_x, batch_y\n\n\n\nX = tf.placeholder(tf.float32, [None, IMAGE_HEIGHT * IMAGE_WITDH], name = 'x_input')\nY = tf.placeholder(tf.float32, [None, MAX_CAPTCHA * CHAR_SET_LEN], name = 'y_input')\nkeepratio = tf.placeholder(tf.float32)\n\n# CNN definition\ndef crack_captcha_cnn(w_alpha = 0.01, b_alpha = 0.1):\n    x = tf.reshape(X, shape = [-1, IMAGE_HEIGHT, IMAGE_WITDH, 1])\n\n    # layer 1\n    w_c1 = tf.Variable(w_alpha * tf.random_normal([3, 3, 1, 32]))\n    #tf.summary.histogram('h1/weigght', w_c1)\n    b_c1 = tf.Variable(b_alpha * tf.random_normal([32]))\n    #tf.summary.histogram('h1/bias', b_c1)\n    conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, w_c1, strides = [1, 1, 1, 1], padding = 'SAME'), b_c1))\n    pool1 = tf.nn.max_pool(conv1, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')\n    dr1 = tf.nn.dropout(pool1, keepratio)\n\n\n    # layer 2\n    w_c2 = tf.Variable(w_alpha * tf.random_normal([3, 3, 32, 64]))\n    #tf.summary.histogram('h2/weigght', w_c2)\n    b_c2 = tf.Variable(b_alpha * tf.random_normal([64]))\n    #tf.summary.histogram('h2/bias', b_c2)\n    conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(dr1, w_c2, strides = [1, 1, 1, 1], padding = 'SAME'), b_c2))\n    pool2 = tf.nn.max_pool(conv2, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')\n    dr2 = tf.nn.dropout(pool2, keepratio)\n\n    # layer 3\n    w_c3 = tf.Variable(w_alpha * tf.random_normal([3, 3, 64, 64]))\n    #tf.summary.histogram('h3/weigght', w_c3)\n    b_c3 = tf.Variable(b_alpha * tf.random_normal([64]))\n    #tf.summary.histogram('h3/bias', b_c3)\n    conv3= tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(dr2, w_c3, strides = [1, 1, 1, 1], padding = 'SAME'), b_c3))\n    pool3 = tf.nn.max_pool(conv3, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')\n    dr3 = tf.nn.dropout(pool3, keepratio)\n\n    # fully connected layer\n    w_d = tf.Variable(w_alpha * tf.random_normal([8 * 32 * 40, 1024]))\n    #tf.summary.histogram('fc/weigght', w_d)\n    b_d = tf.Variable(b_alpha * tf.random_normal([1024]))\n    #tf.summary.histogram('fc/bias', b_d)\n    dense = tf.reshape(dr3, [-1, w_d.get_shape().as_list()[0]])\n    dense = tf.nn.relu(tf.add(tf.matmul(dense, w_d), b_d))\n    dense = tf.nn.dropout(dense, keepratio)\n\n    w_out = tf.Variable(w_alpha * tf.random_normal([1024, MAX_CAPTCHA * CHAR_SET_LEN]))\n    #tf.summary.histogram('output/weigght', w_out)\n    b_out = tf.Variable(b_alpha * tf.random_normal([MAX_CAPTCHA * CHAR_SET_LEN]))\n    #tf.summary.histogram('output/bias', b_out)\n    out = tf.add(tf.matmul(dense, w_out), b_out)\n    return out\n\n# Training\ndef train_crack_captcha_cnn():\n\n    output = crack_captcha_cnn()\n\n    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = output, labels = Y))\n    tf.summary.scalar('loss', loss)\n\n    optimizer = tf.train.AdamOptimizer(learning_rate = 0.001).minimize(loss)\n\n    predict = tf.reshape(output, [-1, MAX_CAPTCHA, CHAR_SET_LEN])\n    max_idx_p = tf.argmax(predict, 2)\n    max_idx_l = tf.argmax(tf.reshape(Y, [-1, MAX_CAPTCHA, CHAR_SET_LEN]), 2)\n    correct_pred = tf.equal(max_idx_p, max_idx_l)\n    acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n    tf.summary.scalar('accuracy', acc)\n\n    saver = tf.train.Saver()\n\n    init = tf.global_variables_initializer()\n    sess = tf.Session()\n    merged = tf.summary.merge_all()\n    writer = tf.summary.FileWriter(\"tb\", sess.graph)\n\n    # Start running\n    sess.run(init)\n\n    step = 0\n    while True:\n        print(\"step: %d \" % step)\n        batch_x, batch_y = get_next_batch(64)\n        _, result = sess.run([optimizer, merged], feed_dict = {X: batch_x, Y: batch_y, keepratio: 0.75})\n\n        writer.add_summary(result, step)\n        # Print the loss every 10 iterations\n        
'''\n        if step % 10 == 0:\n            print(\"step: %d, loss: %03f\" % (step, loss_))\n        '''\n\n        # Compute the accuracy every 1000 steps\n        if step % 1000 == 0:\n            batch_x_test, batch_y_test = get_next_batch(100)\n            acc_ = sess.run(acc, feed_dict = {X: batch_x_test, Y: batch_y_test, keepratio: 1.})\n            print(\"accuracy: %09f\" % acc_)\n\n            # If accuracy exceeds 0.96, save the model and finish training\n            if acc_ > 0.96:\n                print(\"accuracy: %09f\" % acc_)\n                saver.save(sess, \"crack_captcha.model\", global_step = step)\n                break\n\n        step += 1\n\n# Use the model to recognize a captcha\ndef crack_captcha(captcha_image):\n    output = crack_captcha_cnn()\n    saver = tf.train.Saver()\n    with tf.Session() as sess:\n        saver.restore(sess, './crack_captcha.model-2500')\n\n        predict = tf.argmax(tf.reshape(output, [-1, MAX_CAPTCHA, CHAR_SET_LEN]), 2)\n        text_list = sess.run(predict, feed_dict = {X: [captcha_image], keepratio:1})\n        text = text_list[0].tolist()\n        return text\n\nif __name__ == '__main__':\n\n    '''\n    text, image = gen_captcha_text_and_image()\n\n    f = plt.figure()\n    ax = f.add_subplot(111)\n    ax.text(0.1, 0.9, text, ha = 'center', va = 'center', transform = ax.transAxes)\n    plt.imshow(image)\n    plt.show()\n\n    print(\"captcha image shape:\", image.shape)\n\n    vector = text2vec(text)\n    print (vector)\n    t = vec2text(vector)\n    print (t)\n\n\n    print(\"start training ...\")\n    train_crack_captcha_cnn();\n    print(\"training finished!\")\n    '''\n    if len(sys.argv) == 2:\n        command = 'c'\n    else:\n        command = 't'\n\n    if command == 'c':\n        print(\"continue training...\")\n        train_crack_captcha_cnn();\n\n    else:\n        print(\"Let's test the model ...\")\n        # predict\n        text, image = gen_captcha_text_and_image()\n        image = convert2gray(image)\n        image = image.flatten() / 255\n        predict_text = crack_captcha(image)\n\n        print(\"actual: {}, predicted: {}\".format(text, predict_text))\n\n" }, { "alpha_fraction": 0.6389575600624084, "alphanum_fraction": 0.6542350053787231, "avg_line_length": 29.217687606811523, "blob_id": "b49de69e26d4dd574579a053e166658982056db3", "content_id": "25491ed2d4c650b8202d1acd294c1fade492f1aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4451, "license_type": "no_license", "max_line_length": 89, "num_lines": 147, "path": "/tensorflow/mnist.py", "repo_name": "hitmoon/ML", "src_encoding": "UTF-8", "text": "import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nprint (\"packs loaded\")\n\nprint (\"Download and Extract MNIST dataset\")\nmnist = input_data.read_data_sets('data/', one_hot = True)\n\nprint (\"type of 'mnist' is %s \" % (type(mnist)))\nprint (\"number of train data is %d\" % (mnist.train.num_examples))\nprint (\"number of test data is %d\" % (mnist.test.num_examples))\n\n# what does the data of MNIST look like?\n\nprint (\"what does the data of MNIST look like?\")\ntrainimg = mnist.train.images\ntrainlabel = mnist.train.labels\ntestimg = mnist.test.images\ntestlabel = mnist.test.labels\n\nprint (\"type of trainimg is %s\" % (type(trainimg)))\nprint (\"type of trainlabel is %s\" % (type(trainlabel)))\nprint (\"type of testimg is %s\" % (type(testimg)))\nprint (\"type of testlabel is %s\" % (type(testlabel)))\n\n\nprint (\"shape of trainimg is %s\" % (trainimg.shape,))\nprint (\"shape of trainlabel is %s\" % (trainlabel.shape,))\nprint (\"shape of testimg is %s\" % (testimg.shape,))\nprint (\"shape of testlabel is %s\" % (testlabel.shape,))\n\n\n# How the training data look like ?\n\nprint (\"How the training data look like ?\")\nnsample = 5\nrandidx = 
np.random.randint(trainimg.shape[0], size=nsample)\n\nfor i in randidx:\n curr_img = np.reshape(trainimg[i, :], (28, 28)) # 28 * 28 matrix\n curr_label = np.argmax(trainlabel[i, :]) # label\n plt.matshow(curr_img, cmap=plt.get_cmap('gray'))\n plt.title(\"\" + str(i) + \"th Training data Label is \" + str(curr_label))\n \n print (\"\" + str(i) + \"th Training data Label is \" + str(curr_label)) \n plt.show()\n\n# Batch Learning ?\nprint (\"Batch learning ?\")\n\nbatch_size = 100\nbatch_xs, batch_ys = mnist.train.next_batch(batch_size)\nprint (\"type of 'batch_xs' is %s\" % (type(batch_xs)))\nprint (\"type of 'batch_ys' is %s\" % (type(batch_ys)))\nprint (\"shape of 'batch_xs' is %s\" % (batch_xs.shape,))\nprint (\"shape of 'batch_ys' is %s\" % (batch_ys.shape,))\n\n\n# Network topologies\n\nn_hidden_1 = 256\nn_hidden_2 = 128\nn_input = 784\nn_classes = 10\n\n\n# input and output\nx = tf.placeholder(\"float\", [None, n_input])\ny = tf.placeholder(\"float\", [None, n_classes])\n\n# Network parameters\nstddev = 0.1\nweights = {\n 'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1], stddev=stddev)),\n 'w2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], stddev=stddev)),\n 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes], stddev=stddev))\n}\n\nbiases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([n_classes]))\n}\n\nprint (\"Network ready!\")\n\ndef multilayer_perceptron(_x, _weights, _biases):\n layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(_x, _weights['w1']), _biases['b1']))\n layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, _weights['w2']), _biases['b2']))\n result = tf.matmul(layer_2, _weights['out']) + _biases['out']\n return result\n\n\n# prediction\npred = multilayer_perceptron(x, weights, biases)\n\n# loss and optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = pred, labels = y))\noptm = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(cost)\ncorr = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\naccr = tf.reduce_mean(tf.cast(corr, \"float\"))\n\n# initializer\n\ninit = tf.global_variables_initializer()\nprint (\"functions ready!\")\n\ntraining_epochs = 20\nbatch_size = 100\ndisplay_step = 4\n\n# launch the graph\nsess = tf.Session()\nsess.run(init)\n\n# optimize\n\nprint (\"start optimization ...\")\nfor epoch in range(training_epochs):\n avg_cost = 0\n total_batch = int(mnist.train.num_examples / batch_size)\n\n # iteration\n for i in range(total_batch):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n feeds = {x: batch_xs, y: batch_ys}\n sess.run(optm, feed_dict = feeds)\n avg_cost += sess.run(cost, feed_dict = feeds)\n\n avg_cost = avg_cost / total_batch\n\n # display\n\n if (epoch + 1) % display_step == 0:\n print (\"Epoch: %03d/%03d cost: %.9f\" % (epoch, training_epochs, avg_cost))\n feeds = {x: batch_xs, y: batch_ys}\n train_acc = sess.run(accr, feed_dict = feeds)\n print (\"training accuracy: %.3f\" % train_acc)\n feeds = {x: mnist.test.images, y: mnist.test.labels}\n test_acc = sess.run(accr, feed_dict = feeds)\n print (\"test accuracy: %.3f\" % test_acc)\n\n\nprint (\"optimization finished!\")\n \n" }, { "alpha_fraction": 0.5832498669624329, "alphanum_fraction": 0.6223201751708984, "avg_line_length": 33.65972137451172, "blob_id": "87c2adcfa8ea1fec7d18e9031a3f0a5a1f19b419", "content_id": "b7b86fba45d385a235a47696d803c52565e328ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 4991, "license_type": "no_license", "max_line_length": 102, "num_lines": 144, "path": "/tensorflow/mnist-CNN.py", "repo_name": "hitmoon/ML", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nprint (\"packs loaded\")\n\nprint (\"Download and Extract MNIST dataset\")\nmnist = input_data.read_data_sets('data/', one_hot = True)\n\n# what does the data of MNIST look like?\n\nprint (\"what does the data of MNIST look like?\")\ntrainimg = mnist.train.images\ntrainlabel = mnist.train.labels\ntestimg = mnist.test.images\ntestlabel = mnist.test.labels\n\nprint (\"type of trainimg is %s\" % (type(trainimg)))\nprint (\"type of trainlabel is %s\" % (type(trainlabel)))\nprint (\"type of testimg is %s\" % (type(testimg)))\nprint (\"type of testlabel is %s\" % (type(testlabel)))\n\n\nprint (\"shape of trainimg is %s\" % (trainimg.shape,))\nprint (\"shape of trainlabel is %s\" % (trainlabel.shape,))\nprint (\"shape of testimg is %s\" % (testimg.shape,))\nprint (\"shape of testlabel is %s\" % (testlabel.shape,))\n\n\n# How the training data look like ?\n\nprint (\"How the training data look like ?\")\nnsample = 5\nrandidx = np.random.randint(trainimg.shape[0], size=nsample)\n\nfor i in randidx:\n curr_img = np.reshape(trainimg[i, :], (28, 28)) # 28 * 28 matrix\n curr_label = np.argmax(trainlabel[i, :]) # label\n plt.matshow(curr_img, cmap=plt.get_cmap('gray'))\n plt.title(\"\" + str(i) + \"th Training data Label is \" + str(curr_label))\n \n print (\"\" + str(i) + \"th Training data Label is \" + str(curr_label)) \n plt.show()\n\nn_input = 784\nn_output = 10\n\nweights = {\n 'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64], stddev = 0.1)),\n 'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128], stddev = 0.1)),\n 'wd1': tf.Variable(tf.random_normal([7 * 7 * 128, 1024], stddev = 0.1)),\n 'wd2': tf.Variable(tf.random_normal([1024, n_output], stddev = 0.1))\n}\n\nbiases = {\n 'bc1': tf.Variable(tf.random_normal([64], stddev = 0.1)),\n 'bc2': tf.Variable(tf.random_normal([128], stddev = 0.1)),\n 'bd1': tf.Variable(tf.random_normal([1024], stddev = 0.1)),\n 'bd2': tf.Variable(tf.random_normal([n_output], stddev = 0.1))\n}\n\n\ndef conv_basic(_input, _w, _b, _keepartio):\n # INPUT\n _input_r = tf.reshape(_input, shape = [-1, 28, 28, 1])\n\n # CONV layer 1\n _conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides = [1, 1, 1, 1], padding = 'SAME')\n _conv1 = tf.nn.relu(tf.nn.bias_add(_conv1, _b['bc1']))\n _pool1 = tf.nn.max_pool(_conv1, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')\n _pool_dr1 = tf.nn.dropout(_pool1, _keepartio)\n\n # CONV layer 2\n _conv2 = tf.nn.conv2d(_pool_dr1, _w['wc2'], strides = [1, 1, 1, 1], padding = 'SAME')\n _conv2 = tf.nn.relu(tf.nn.bias_add(_conv2, _b['bc2']))\n _pool2 = tf.nn.max_pool(_conv2, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')\n _pool_dr2 = tf.nn.dropout(_pool2, _keepartio)\n\n # vectorize\n _densel = tf.reshape(_pool_dr2, [-1, _w['wd1'].get_shape().as_list()[0]])\n\n # fully connected layer 1\n _fc1 = tf.nn.relu(tf.add(tf.matmul(_densel, _w['wd1']), _b['bd1']))\n _fc1_dr1 = tf.nn.dropout(_fc1, _keepartio)\n\n # fully connected layer 2\n _out = tf.add(tf.matmul(_fc1_dr1, _w['wd2']), _b['bd2'])\n\n # return\n out = {'input_r': _input_r, 'conv1': _conv1, 'pool1': _pool1, 'pool1_dr1': _pool_dr1,\n 'conv2': _conv2, 'pool2': _pool2, 'pool_dr2': _pool_dr2, 'densel': 
_densel,\n           'fc1': _fc1, 'fc_dr1': _fc1_dr1, 'out': _out\n          }\n    return out\n\nprint(\"CNN Network ready!\")\n\nx = tf.placeholder(tf.float32, [None, n_input])\ny = tf.placeholder(tf.float32, [None, n_output])\nkeepratio = tf.placeholder(tf.float32)\n\n# functions\n_pred = conv_basic(x, weights, biases, keepratio)['out']\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = _pred, labels = y))\noptm = tf.train.AdamOptimizer(learning_rate = 0.001).minimize(cost)\n_corr = tf.equal(tf.argmax(_pred, 1), tf.argmax(y, 1))\naccr = tf.reduce_mean(tf.cast(_corr, tf.float32))\ninit = tf.global_variables_initializer()\n\n# SAVER\nprint(\"GRAPH READY!\")\n\nsess = tf.Session()\nsess.run(init)\n\ntraining_epochs = 15\nbatch_size = 16\ndisplay_step = 1\n\nfor epoch in range(training_epochs):\n    avg_cost = 0\n    #total_batch = int(mnist.train.num_examples / batch_size)\n    total_batch = 10\n\n    # loop over all batches\n    for i in range(total_batch):\n        batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n        sess.run(optm, feed_dict = {x: batch_xs, y: batch_ys, keepratio: 0.7})\n        avg_cost += sess.run(cost, feed_dict = {x: batch_xs, y: batch_ys, keepratio: 1}) / total_batch\n\n    # display logs\n    if epoch % display_step == 0:\n        print(\"Epoch: %03d / %03d, cost: %.9f\" % (epoch, training_epochs, avg_cost))\n        train_acc = sess.run(accr, feed_dict = {x: batch_xs, y: batch_ys, keepratio: 1})\n        print(\"Training accuracy: %.3f\" % (train_acc))\n\n        test_acc = sess.run(accr, feed_dict = {x: testimg, y: testlabel, keepratio: 1})\n        print(\"Test accuracy: %.3f\" % (test_acc))\n\nprint(\"OPTIMIZATION FINISHED (CNN)!\")\n" } ]
7
its-acarn/functions_lab_2_homework
https://github.com/its-acarn/functions_lab_2_homework
e56eaa79a4bae0737b9f7f292c6296596034af51
c5c665a75d3789131d47e9762fbea664cb211c2b
e39ea6fade59d9d2f585a5b6fd2a509a2efac14e
refs/heads/master
2022-12-08T18:03:39.281021
2020-09-10T13:15:54
2020-09-10T13:15:54
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6343825459480286, "alphanum_fraction": 0.6444713473320007, "avg_line_length": 28.5, "blob_id": "34217663475cfff832f9f1dd8284ed7742fb5dd5", "content_id": "3ac2906368ea4644b2fbd4368715a1803cb28cd6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2478, "license_type": "no_license", "max_line_length": 89, "num_lines": 84, "path": "/start_code/task_list.py", "repo_name": "its-acarn/functions_lab_2_homework", "src_encoding": "UTF-8", "text": "tasks = [\n { \"description\": \"Wash Dishes\", \"completed\": False, \"time_taken\": 10 },\n { \"description\": \"Clean Windows\", \"completed\": False, \"time_taken\": 15 },\n { \"description\": \"Make Dinner\", \"completed\": True, \"time_taken\": 30 },\n { \"description\": \"Feed Cat\", \"completed\": False, \"time_taken\": 5 },\n { \"description\": \"Walk Dog\", \"completed\": True, \"time_taken\": 60 },\n]\n\n#As a user, to manage my task list I would like a program that allows me to:\n\n#1. Print a list of uncompleted tasks\ndef print_uncompleted_tasks(tasklist):\n uncompleted_tasks = []\n for task in tasklist:\n if task[\"completed\"] == False:\n uncompleted_tasks.append(task)\n print(uncompleted_tasks)\n return uncompleted_tasks\n\n\nprint_uncompleted_tasks(tasks)\nprint(\"-----------------------\")\n\n#2. Print a list of completed tasks\ndef print_completed_tasks(tasklist):\n completed_tasks = []\n for task in tasklist:\n if task[\"completed\"] == True:\n completed_tasks.append(task)\n print(completed_tasks)\n return completed_tasks\n\n\nprint_completed_tasks(tasks)\nprint(\"-----------------------\")\n\n#3. Print a list of all task descriptions\ndef print_all_task_descriptions(tasklist):\n task_descriptions = []\n for task in tasklist:\n task_descriptions.append(task[\"description\"])\n return task_descriptions\n\n\nprint(print_all_task_descriptions(tasks))\nprint(\"-----------------------\")\n\n#4. Print a list of tasks where time_taken is at least a given time\n\n#5. Print any task with a given description\n\n### Extension\n\n# 6. Given a description update that task to mark it as complete.\ndef get_task_with_description(list, description):\n for task in list:\n if task[\"description\"] == description:\n return task\n return \"Task not found\"\n \n\nprint(get_task_with_description(tasks, \"Make Dinner\"))\nprint(\"-----------------------\")\n\n# 7. Add a task to the list\n\n# ### Further Extensions\n\n# 8. Use a while loop to display the following menu and allow the use to enter an option.\n\n# ```python\n# print(\"Menu:\")\n# print(\"1: Display All Tasks\")\n# print(\"2: Display Uncompleted Tasks\")\n# print(\"3: Display Completed Tasks\")\n# print(\"4: Mark Task as Complete\")\n# print(\"5: Get Tasks Which Take Longer Than a Given Time\")\n# print(\"6: Find Task by Description\")\n# print(\"7: Add a new Task to list\")\n# print(\"M or m: Display this menu\")\n# print(\"Q or q: Quit\")\n# ```\n\n# 9. Call the appropriate function depending on the users choice.\n" } ]
1
BaconTheRabbit/Cool-idle-display
https://github.com/BaconTheRabbit/Cool-idle-display
3b0d9a620428e01c0b59df59d9713d4cc4de9afc
5ec7dd0f41fc4ab0839b0c918472259f67ca9b80
6e981533f2766868920ab59512429ba411398394
refs/heads/master
2021-01-21T10:34:43.113431
2017-05-18T13:45:39
2017-05-18T13:45:39
91,698,196
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.47615063190460205, "alphanum_fraction": 0.4920502007007599, "avg_line_length": 40.67856979370117, "blob_id": "e51cf27b621de9ab9cb9e795d32bfe8c73a0bee3", "content_id": "5ac82fbc35784e670f40248ebe32bae70a4ba8ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1195, "license_type": "no_license", "max_line_length": 143, "num_lines": 28, "path": "/fun character display.py", "repo_name": "BaconTheRabbit/Cool-idle-display", "src_encoding": "UTF-8", "text": "import string\r\ndef show(chicken,linepos=0):\r\n while 1:\r\n print(chicken)\r\n if linepos < chicken.count('')-1:\r\n chicken = chicken[:linepos] + string.ascii_lowercase[string.ascii_lowercase.find(chicken[linepos])+1] + chicken[linepos+1:]\r\n linepos += 1\r\n elif linepos == chicken.count('')-1 and chicken[linepos-1] == 'z':\r\n linepos = 0\r\n while linepos < chicken.count('')-1:\r\n chicken = chicken[:linepos] + 'a' + chicken[linepos+1:]\r\n linepos += 1\r\n if linepos != chicken.count('')-1:\r\n print(chicken)\r\n ##\t\tchicken = aline(linepos)\r\n ##\t\tlinepos = 0\r\n else:\r\n linepos = 0\r\n chicken = chicken[:linepos] + string.ascii_lowercase[string.ascii_lowercase.find(chicken[linepos])+1] + chicken[linepos+1:]\r\n linepos += 1\r\n\r\ndef aline(num):\r\n\taline = ''\r\n\tfor a in range(0,num):\r\n\t\taline += 'a'\r\n\treturn aline\r\nnum = int(input(\"How many characters should the super-cool show be in? \"))\r\nshow(aline(num))\r\n" }, { "alpha_fraction": 0.8091602921485901, "alphanum_fraction": 0.8091602921485901, "avg_line_length": 64.5, "blob_id": "aa134d4d89531965886d05aa0ceb65a4fb373091", "content_id": "881acf9aa5c86c0da716111b26f3ca8cfdfb85d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 131, "license_type": "no_license", "max_line_length": 110, "num_lines": 2, "path": "/README.md", "repo_name": "BaconTheRabbit/Cool-idle-display", "src_encoding": "UTF-8", "text": "# Cool-idle-display\nMakes a cool display in the Python idle by looping through lowercase characters in a line of specified length.\n" } ]
2
soldevenus/final
https://github.com/soldevenus/final
b5c2b7c98e93c561896dd7ba096cd844b918e11d
ced937b83475b8ecf8afaa06648ece07146de7f3
fdf4c83074e2bf19a392898cd4b6607e6c2c84bd
refs/heads/master
2016-09-13T15:00:42.987945
2016-04-22T00:12:36
2016-04-22T00:12:36
56,813,246
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5940409898757935, "alphanum_fraction": 0.6983240246772766, "avg_line_length": 28.88888931274414, "blob_id": "7c121f38651f0dc99c6a830db9c33b7f4a6d2dc9", "content_id": "07b1f8ad912e3475df8691040faedd534d0bf5bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 537, "license_type": "no_license", "max_line_length": 48, "num_lines": 18, "path": "/p1.py", "repo_name": "soldevenus/final", "src_encoding": "UTF-8", "text": "def credit_card_penalty(balance_ days late):\n result=0\n if 15card_card penalty:\n result balance 15*0.05\n elif 30credit_card_penalty:\n result= credit_ card_ penalty,30*0.10\n elif 60 credit_card_penalty:\n result=credit_card_penalty,60*0.15\n elif 60 credit_card_penalty:\n result=credit_card_penalty,60*0.20\n\nprint \"penalty 1:\",credit_card_penalty(15000,18)\n\nprint \"penalty2:\",credit_card_penalty(7000,31)\n\nprint \"penalty3:\",credit_card_penalty(300,70)\n\nprint\"penalty4:\",credit_card_penalty(1000,3)" }, { "alpha_fraction": 0.4159663915634155, "alphanum_fraction": 0.5126050710678101, "avg_line_length": 7.1724138259887695, "blob_id": "0028980f2fa91ad863e92acbad0ce859a8ceb12a", "content_id": "4cf32de46f84990cafd76ce66f07990bdd75f69d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 238, "license_type": "no_license", "max_line_length": 31, "num_lines": 29, "path": "/p2.py", "repo_name": "soldevenus/final", "src_encoding": "UTF-8", "text": "L=[9,-1,10,0,-2]\nx=19\n\n\n\ndef positive_sum(L):\n for n in L:\n if n==x:\n\n\nprint\n\n\n\nL=[-1,2,14,0,0,1]\n\n\nprint\"sum L:\", positive_sum(L)\n\n\nM=[1,1,2,8,1]\n\n\nprint \"sum M:\", positive_sum(M)\n\nN=[-1,-2,-3]\n\n\nprint\"sum N:\",positive_sum(N)\n\n" } ]
2
jessedeveloperinvestor/IOT
https://github.com/jessedeveloperinvestor/IOT
92a314acb332702f43eb9b88f32f2582f00f4c71
13c04377dc5e612a86ef9c6b30982d446c5e6f1e
c620f2ba6e7df325183c73f90741927e2dd8d11e
refs/heads/main
2023-06-11T12:15:37.629664
2021-06-25T16:27:30
2021-06-25T16:27:30
372,690,753
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6564299464225769, "alphanum_fraction": 0.6737043857574463, "avg_line_length": 30.5625, "blob_id": "419bc398e246102ed47ae3a40e60b5248cb08061", "content_id": "873d20fb51a3309b090fc89d645e3e578949e1ce", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 521, "license_type": "permissive", "max_line_length": 71, "num_lines": 16, "path": "/arduino.py", "repo_name": "jessedeveloperinvestor/IOT", "src_encoding": "UTF-8", "text": "#pip install pyserial\r\nimport serial\r\nfrom serial.tools import list_ports\r\nfor port in list_ports.comports():\r\n\tprint('Device: '+port.description+' - port: '+port.device)\r\n\t\r\nconnection=serial.Serial('COM3',115200)\r\naction=input(\"Type in:\\n'L' to turn on or\\n'D' to turn off: \").upper()\r\nwhile action=='L' or action=='D':\r\n\tif action=='L':\r\n\t\tconnection.write(b'1')\r\n\telse:\r\n\t\tconnection.write(b'0')\r\n\taction=input(\"Type in:\\n'L' to turn on or\\n'D' to turn off: \").upper()\r\nconnection.close()\r\nprint('Connection ended')\r\n" } ]
1
lcapdecomme/raspberry
https://github.com/lcapdecomme/raspberry
403b9139c0795060a7ab097c1c0280756a40b666
d16c7b20e11885b4db99cabcbee12c9e92a09472
a2a287e5a61af965db99535ce7ed16f762776fbe
refs/heads/master
2021-01-10T09:17:15.103723
2020-04-04T07:14:09
2020-04-04T07:14:09
50,565,701
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7678265571594238, "alphanum_fraction": 0.7729606628417969, "avg_line_length": 49, "blob_id": "32694627c63de7c032f0a1921ff53b78c7cf7d93", "content_id": "74d39e62aa8b4f2452d01c5ed46bc923148adcaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1786, "license_type": "no_license", "max_line_length": 182, "num_lines": 35, "path": "/webapps/temperatures/README.md", "repo_name": "lcapdecomme/raspberry", "src_encoding": "ISO-8859-1", "text": "# WebApp temperatures\n\nCette webapp permet d'afficher les températures captées par le #raspberryPi et stockées dans une base mLab (anciennement mongoLab).\n\n![Temperature1](https://github.com/lcapdecomme/raspberry/blob/master/img/temperature1.png)\n![Temperature2](https://github.com/lcapdecomme/raspberry/blob/master/img/temperature2.png)\n\n\n## Fonctionnement \n\nL'application est écrite en python et déployée sur Google AppEngine. Il est donc nécessaire de créer un compte sur cette plateforme.\n\nLes données sont récupérées de la bd mongoDb sur mLab. Un compte est là aussi nécessaire pour le stockage et la restitution des données.\n\nLa base mongoLab contient trois collections :\n\n1. **temperature** : relevés toutes les heures des températures pour l'ensemble des sondes\n\n2. **temperature_bilan** : données de la page d'accueil. Ces données sont calculées par le batch sauveTemperature.py. On trouve donc uniquement un enrgistrement par sonde.\n\n3. **temperature_stat** : données des pages statistiques. Ces données sont calculées également par le batch sauveTemperature.py. On trouve donc uniquement un enrgistrement par sonde.\n\n\n## Paramétrage\n\nPour les deux pages WEB, il faut juste renseigner les informations pour se connecter à l'api Rest de mlab.\nLes informations sont identiques sur les deux pages à l'exception du nom de la collection (cf ci-dessus)\n\n```python\nhttp_start = \"https://api.mongolab.com/api/1/\"\n# Ces deux propriétés <nomDataBase> et <nomCollection> sont à personnaliser (fournies par mongolab) !!!!\nhttp_type = \"databases/<nomDataBase>/collections/<nomCollection>\"\n# Cette propriété <clef> est à personnaliser (fournie par mongolab) !!!!\napi = {\"apiKey\": \"<clef>\",\"f\":\"{'_id': 0}\"}\nheaders = {\"Content-Type\": \"application/json\"}\n\n\n\n" }, { "alpha_fraction": 0.6790950894355774, "alphanum_fraction": 0.6849601864814758, "avg_line_length": 31.6849308013916, "blob_id": "5ed7c178d505e01d9ca44a3700e8070cf428684a", "content_id": "4f642d9f9b83622597a0d1eaab5502b7055aaef0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2390, "license_type": "no_license", "max_line_length": 121, "num_lines": 73, "path": "/scripts/temperatures/listeTemperature.py", "repo_name": "lcapdecomme/raspberry", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# NAME: listeTemperature.py\n# AUTHOR: Lionel Capdecomme\n# DATE : 04/02/2016\n# COMMENT: Lecture des releves depuis la BD MongoLab et affiche les valeurs mini/maxi/courante d'une sonde en particulier\n\nimport serial, json, sys, pymongo, datetime, time\nfrom pymongo import MongoClient\nfrom datetime import datetime\n\n# 0. Initialisation variable Mongo\n# Ces trois propriétés sont à personnaliser !!!!\nMONGODB_URI = 'mongodb://user:password@serveur:port/basededonnee' \nMONGODB_COLLECTION='collection'\n\n#3. 
Connect to the mongolab account\nclient = pymongo.MongoClient(MONGODB_URI)\ndb = client.get_default_database()\n# Select the collection\ntemperatures = db[MONGODB_COLLECTION]\n\n#4. Build the summary\nbilan = {}\ncursor = temperatures.find()\nfor document in cursor:\n    #print(document['_id'])\n    #print(document['sondes'])\n    sonde=document['sondes']\n    date=document['date']\n    dateTraitement= datetime.strftime(date, \"%d-%m-%Y\")\n    heureTraitement = datetime.strftime(date, \"%H:%M:%S\")\n    for s in sonde:\n        libelle = s[0]\n        temp = float(s[2])\n        \n        if libelle in bilan :\n            temperatureSonde = bilan[libelle]\n            temperatureSonde['courant']=temp\n            if float(temperatureSonde['mini']) > temp:\n                temperatureSonde['mini'] = temp\n                temperatureSonde['miniDate'] = dateTraitement\n                temperatureSonde['miniHeure'] = heureTraitement\n            if float(temperatureSonde['maxi']) < temp:\n                temperatureSonde['maxi'] = temp\n                temperatureSonde['maxiDate'] = dateTraitement\n                temperatureSonde['maxiHeure'] = heureTraitement\n\n        else:\n            # This probe is not yet in the table; initialize all its fields\n            # (the original only set maxiDate twice, which later raised KeyError on miniDate)\n            temperatureSonde = {}\n            temperatureSonde['courant']=temp\n            temperatureSonde['mini']=temp\n            temperatureSonde['maxi']=temp\n            temperatureSonde['miniDate'] = dateTraitement\n            temperatureSonde['miniHeure'] = heureTraitement\n            temperatureSonde['maxiDate'] = dateTraitement\n            temperatureSonde['maxiHeure'] = heureTraitement\n\n        # Add this probe to the table\n        bilan[libelle]=temperatureSonde\n\nmaintenant=time.strftime('%d/%m/%Y')\nprint maintenant\nmaintenant=time.strftime('%H:%M:%S')\nprint maintenant\n\nlibelle='salon'\nprint '==============='\ntemperatureSonde = bilan[libelle]\nprint \"Current temperature: \", temperatureSonde['courant']\nprint temperatureSonde['mini'], \" on \", temperatureSonde['miniDate']\nprint temperatureSonde['maxi'], \" on \", temperatureSonde['maxiDate']\nprint '==============='\nprint bilan\n\n" }, { "alpha_fraction": 0.8095238208770752, "alphanum_fraction": 0.8095238208770752, "avg_line_length": 36.22222137451172, "blob_id": "1f639c82fecbcbb2c19aa4dd8a345eb942ef0f86", "content_id": "45253018b89ad780b0f5a290710f70731aa1a537", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 340, "license_type": "no_license", "max_line_length": 85, "num_lines": 9, "path": "/README.md", "repo_name": "lcapdecomme/raspberry", "src_encoding": "ISO-8859-1", "text": "# raspberry\n\nThis repository stores my scripts and notes from my experiments with the RaspberryPi\n\n## Managing one or more temperature probes\nhttps://lcapdecomme.github.io/raspberry/temperature\n\n## Reading and saving the Teleinformation frames of EDF meters\nhttps://lcapdecomme.github.io/raspberry/teleinformation\n\n" }, { "alpha_fraction": 0.6552111506462097, "alphanum_fraction": 0.67512446641922, "avg_line_length": 46.720306396484375, "blob_id": "92a321a39192fe23ab0618fdacc000ad73083ff1", "content_id": "9d72a76727db15b4f2d25d761ef9c96f701f1c4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12454, "license_type": "no_license", "max_line_length": 129, "num_lines": 261, "path": "/webapps/electricite/pagePrincipale.py", "repo_name": "lcapdecomme/raspberry", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# [START imports]\nimport sys\nsys.path.insert(0, 'libs')\nimport os, jinja2, webapp2, requests, json\nfrom datetime import datetime\nfrom util import diffNombre, getJour, getSommesJour, getSommesMois, format_int,dateEnClair, convertir_euro, getMois, getMoisAn, \\\n\t\t\tgetJourHier, getjourMois, getSommesJourHier, Elec_bilan, 
getSommesMoisAn, getSommesAn, getJuillet, getValueMax, \\\n\t\t\trandomString, randomInt, getSommesAnPrec, getSommesAnPrec2, getLabelMoisUniquement, getEstimatioAnnuelle\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n    extensions=['jinja2.ext.autoescape'],\n    autoescape=True)\n# [END imports]\n\n# API key from the mlab user profile or account\nCLEF=\"<clef>\"\n\nhttp_start = \"https://api.mongolab.com/api/1/\"\n\nhttp_type = \"databases/edf/collections/edf_bilan\"\nhttp_type_mensuel = \"databases/edf/collections/edf_bilan_mensuel\"\nhttp_type_annuel = \"databases/edf/collections/edf_bilan_annuel\"\nhttp_type_annuel_juillet = \"databases/edf/collections/edf_bilan_annuel_juillet\"\n\napi = {\"apiKey\": CLEF,\"f\":'{\"_id\": 0}'}\napi_order_mensuel = {\"apiKey\": CLEF,\"f\":'{\"_id\": 0}', \"s\":'{\"numMois\": 1}'}\napi_order_annuel = {\"apiKey\": CLEF,\"f\":'{\"_id\": 0}', \"s\":'{\"an\": 1}'}\n\nheaders = {\"Content-Type\": \"application/json\"}\n\n\n# [START main_page]\nclass pagePrincipale(webapp2.RequestHandler):\n\n\n\tdef get(self):\n\t\t# Request the yearly values\n\t\ta = requests.get(http_start+http_type_annuel, params=api_order_annuel, headers=headers)\n\t\tdata_annuel = json.loads(a.text)\n\n\t\t# Request the yearly values for the month of July\n\t\taj = requests.get(http_start+http_type_annuel_juillet, params=api_order_annuel, headers=headers)\n\t\tdata_annuel_juillet = json.loads(aj.text)\n\n\t\t# Request the monthly values\n\t\tm = requests.get(http_start+http_type_mensuel, params=api_order_mensuel, headers=headers)\n\t\tdata_mensuel = json.loads(m.text)\n\t\t#self.response.write(data_mensuel)\n\n\t\t# Request the summary values\n\t\tr = requests.get(http_start+http_type, params=api, headers=headers)\n\t\tdata = json.loads(r.text)\n\n\t\t#self.response.write(data)\n\t\tdateTraitement = datetime.strptime(data[0]['date']['$date'],'%Y-%m-%dT%H:%M:%S.%fZ') \n\t\theureTraitement = datetime.strftime(dateTraitement, \"%H\")\n\n\t\t# Convert the JSON array into an object\n\t\tbilan_edf = [Elec_bilan(**k) for k in data]\n\t\tbilan=bilan_edf[0]\n\n\t\t#self.response.write(getminSemaine(listeSondes[0]).count(','))\n\t\tconsoJourHc = diffNombre(bilan.heuresCreuses, bilan.heuresCreusesHier) \n\t\tconsoJourHp = diffNombre(bilan.heuresPleines, bilan.heuresPleinesHier) \n\t\tconsoJourHcVeille = diffNombre(bilan.heuresCreusesHier, bilan.heuresCreusesAvantHier) \n\t\tconsoJourHpVeille = diffNombre(bilan.heuresPleinesHier, bilan.heuresPleinesAvantHier) \n\t\tconsoJourHcVeille = diffNombre(consoJourHc, consoJourHcVeille) \n\t\tconsoJourHpVeille = diffNombre(consoJourHp, consoJourHpVeille) \n\t\ttotalJour, minJour, maxJour, totalJourEuro = getSommesJour(bilan)\n\t\ttotalJourHier, minJourHier, maxJourHier, totalJourEuroHier = getSommesJourHier(bilan)\n\t\ttotalMois, minMois, maxMois, totalMoisEuro = getSommesMois(bilan)\n\t\ttotalMoisAn, minMoisAn, maxMoisAn, totalMoisAnEuro = getSommesMoisAn(bilan)\n\t\t\n\t\tlabelMoisUniquement = getLabelMoisUniquement(data_mensuel)\n\t\testimation = getEstimatioAnnuelle(data_mensuel)\n\t\t\n\t\tserieAn,labelMois, totalAn, totalAnEuros, minAn, maxAn = getSommesAn(data_mensuel)\n\t\tserieAnPrec, labelMoisPrec, totalAnPrec, totalAnPrecEuros, minAnPrec, maxAnPrec = getSommesAnPrec(data_mensuel)\n\t\tserieAnPrec2, labelMoisPrec2, totalAnPrec2, totalAnPrec2Euros, minAnPrec2, maxAnPrec2 = getSommesAnPrec2(data_mensuel)\n\t\t\t\n\t\tlabelJuillet, serieJuillet = getJuillet(data_annuel_juillet)\n\n\t\tmaxConsoMensuel, 
minConsoMensuel = getValueMax(data_mensuel)\n\t\t\n\t\t# Template data\n\t\ttemplate_values = {\n\t\t\t'date' : dateEnClair(dateTraitement),\n\t\t\t'heure' : datetime.strftime(dateTraitement, \"%H:%M\"),\n\t\t\t#'puissanceWatt': format_nombre(int(bilan.intensiteInstant)*TENSION_VOLT), \n\t\t\t'puissanceWatt': format_int(int(bilan.puissanceApparente)), \n\t\t\t'puissanceEuro': convertir_euro(bilan.puissanceApparente,heureTraitement), \n\t\t\t\n\t\t\t'intensiteInstant': int(bilan.intensiteInstant), \n\t\t\t'intensiteSouscrit': int(bilan.intensiteSouscrit), \n            'intensiteMaximum': int(bilan.intensiteMaximum), \n\t\t\t'optionTarif': bilan.optionTarif.replace('.',''), \n\t\t\t'periodeTarifaire': bilan.periodeTarifaire.replace('.',''), \n\t\t\t'heuresCreuses': format_int(bilan.heuresCreuses),\n            'augmentationHc': format_int(consoJourHc), \n            'augmentationHcVeille': consoJourHcVeille, \n            'heuresPleines': format_int(bilan.heuresPleines),\n            'augmentationHp': format_int(consoJourHp),\n            'augmentationHpVeille': consoJourHpVeille, \n            'adresseConcentrateur': bilan.adresseConcentrateur, \n            'puissanceApparente': bilan.puissanceApparente, \n\n            'totalJour' : format_int(totalJour),\n            'totalJourEuro' : totalJourEuro,\n            'minJour' : format_int(minJour),\n            'maxJour' : format_int(maxJour),\n\t\t\t'serieJour' : getJour(bilan),\n\n\n            'totalJourHier' : format_int(totalJourHier),\n            'totalJourEuroHier' : totalJourEuroHier,\n            'minJourHier' : format_int(minJourHier),\n            'maxJourHier' : format_int(maxJourHier),\n\t\t\t'serieJourHier' : getJourHier(bilan),\n\n\t\t\t'totalMois' : format_int(totalMois),\n\t\t\t'totalMoisEuro' : totalMoisEuro,\n\t\t\t'minMois' : format_int(minMois),\n\t\t\t'maxMois' : format_int(maxMois),\n\t\t\t'totalMoisAn' : format_int(totalMoisAn),\n\t\t\t'totalMoisAnEuro' : totalMoisAnEuro,\n\t\t\t'minMoisAn' : format_int(minMoisAn),\n\t\t\t'maxMoisAn' : format_int(maxMoisAn),\n            'serieMois' : getMois(bilan),\n            'serieMoisAn' : getMoisAn(bilan),\n\t\t\t'jourMois' : getjourMois(dateTraitement, getMois(bilan)),\n\t\t\t\n\t\t\t'labelMoisUniquement' : labelMoisUniquement,\n\t\t\t\n\t\t\t'serieAn' : serieAn,\n\t\t\t'labelMois' : labelMois,\n\t\t\t'totalAn' : format_int(totalAn),\n\t\t\t'totalAnEuros' : totalAnEuros,\n\t\t\t'minAn' : format_int(minAn),\n\t\t\t'maxAn' : format_int(maxAn),\n\n\t\t\t'serieAnPrec' : serieAnPrec,\n\t\t\t'labelMoisPrec' : labelMoisPrec,\n\t\t\t'totalAnPrec' : format_int(totalAnPrec),\n\t\t\t'totalAnPrecEuros' : totalAnPrecEuros,\n\t\t\t'minAnPrec' : format_int(minAnPrec),\n\t\t\t'maxAnPrec' : format_int(maxAnPrec),\n\n\t\t\t'serieAnPrec2' : serieAnPrec2,\n\t\t\t'labelMoisPrec2' : labelMoisPrec2,\n\t\t\t'totalAnPrec2' : format_int(totalAnPrec2),\n\t\t\t'totalAnPrec2Euros' : totalAnPrec2Euros,\n\t\t\t'minAnPrec2' : format_int(minAnPrec2),\n\t\t\t'maxAnPrec2' : format_int(maxAnPrec2),\n\n\t\t\t'labelJuillet' : labelJuillet,\n\t\t\t'serieJuillet' : serieJuillet,\n\t\t\t\n\t\t\t'statMaxDate0' : dateEnClair(datetime.strptime(bilan.statMaxDate0['$date'],'%Y-%m-%dT%H:%M:%S.%fZ')),\n\t\t\t'statMaxDate1' : dateEnClair(datetime.strptime(bilan.statMaxDate1['$date'],'%Y-%m-%dT%H:%M:%S.%fZ')),\n\t\t\t'statMaxDate2' : dateEnClair(datetime.strptime(bilan.statMaxDate2['$date'],'%Y-%m-%dT%H:%M:%S.%fZ')),\n\t\t\t'statMaxDate3' : dateEnClair(datetime.strptime(bilan.statMaxDate3['$date'],'%Y-%m-%dT%H:%M:%S.%fZ')),\n\t\t\t'statMaxDate4' : dateEnClair(datetime.strptime(bilan.statMaxDate4['$date'],'%Y-%m-%dT%H:%M:%S.%fZ')),\n\t\t\t'statMaxDate5' : 
dateEnClair(datetime.strptime(bilan.statMaxDate5['$date'],'%Y-%m-%dT%H:%M:%S.%fZ')),\n\t\t\t'statMaxDate6' : dateEnClair(datetime.strptime(bilan.statMaxDate6['$date'],'%Y-%m-%dT%H:%M:%S.%fZ')),\n\t\t\t'statMaxDate7' : dateEnClair(datetime.strptime(bilan.statMaxDate7['$date'],'%Y-%m-%dT%H:%M:%S.%fZ')),\n 'statMaxTotal0' : bilan.statMaxTotal0,\n 'statMaxTotal1' : bilan.statMaxTotal1,\n 'statMaxTotal2' : bilan.statMaxTotal2,\n 'statMaxTotal3' : bilan.statMaxTotal3,\n 'statMaxTotal4' : bilan.statMaxTotal4,\n 'statMaxTotal5' : bilan.statMaxTotal5,\n 'statMaxTotal6' : bilan.statMaxTotal6,\n 'statMaxTotal7' : bilan.statMaxTotal7,\n 'statMaxhp0' : int(bilan.statMaxhp0)/1000,\n 'statMaxhp1' : int(bilan.statMaxhp1)/1000,\n 'statMaxhp2' : int(bilan.statMaxhp2)/1000,\n 'statMaxhp3' : int(bilan.statMaxhp3)/1000,\n 'statMaxhp4' : int(bilan.statMaxhp4)/1000,\n 'statMaxhp5' : int(bilan.statMaxhp5)/1000,\n 'statMaxhp6' : int(bilan.statMaxhp6)/1000,\n 'statMaxhp7' : int(bilan.statMaxhp7)/1000,\n 'statMaxhc0' : int(bilan.statMaxhc0)/1000,\n 'statMaxhc1' : int(bilan.statMaxhc1)/1000,\n 'statMaxhc2' : int(bilan.statMaxhc2)/1000,\n 'statMaxhc3' : int(bilan.statMaxhc3)/1000,\n 'statMaxhc4' : int(bilan.statMaxhc4)/1000,\n 'statMaxhc5' : int(bilan.statMaxhc5)/1000,\n 'statMaxhc6' : int(bilan.statMaxhc6)/1000,\n 'statMaxhc7' : int(bilan.statMaxhc7)/1000,\n 'statMinDate0' : dateEnClair(datetime.strptime(bilan.statMinDate0['$date'],'%Y-%m-%dT%H:%M:%S.%fZ')),\n 'statMinDate1' : dateEnClair(datetime.strptime(bilan.statMinDate1['$date'],'%Y-%m-%dT%H:%M:%S.%fZ')),\n 'statMinDate2' : dateEnClair(datetime.strptime(bilan.statMinDate2['$date'],'%Y-%m-%dT%H:%M:%S.%fZ')),\n 'statMinDate3' : dateEnClair(datetime.strptime(bilan.statMinDate3['$date'],'%Y-%m-%dT%H:%M:%S.%fZ')),\n 'statMinDate4' : dateEnClair(datetime.strptime(bilan.statMinDate4['$date'],'%Y-%m-%dT%H:%M:%S.%fZ')),\n 'statMinDate5' : dateEnClair(datetime.strptime(bilan.statMinDate5['$date'],'%Y-%m-%dT%H:%M:%S.%fZ')),\n 'statMinDate6' : dateEnClair(datetime.strptime(bilan.statMinDate6['$date'],'%Y-%m-%dT%H:%M:%S.%fZ')),\n 'statMinDate7' : dateEnClair(datetime.strptime(bilan.statMinDate7['$date'],'%Y-%m-%dT%H:%M:%S.%fZ')),\n 'statMinTotal0' : bilan.statMinTotal0,\n 'statMinTotal1' : bilan.statMinTotal1,\n 'statMinTotal2' : bilan.statMinTotal2,\n 'statMinTotal3' : bilan.statMinTotal3,\n 'statMinTotal4' : bilan.statMinTotal4,\n 'statMinTotal5' : bilan.statMinTotal5,\n 'statMinTotal6' : bilan.statMinTotal6,\n 'statMinTotal7' : bilan.statMinTotal7,\n 'statMinhp0' : bilan.statMinhp0,\n 'statMinhp1' : bilan.statMinhp1,\n 'statMinhp2' : bilan.statMinhp2,\n 'statMinhp3' : bilan.statMinhp3,\n 'statMinhp4' : bilan.statMinhp4, \n 'statMinhp5' : bilan.statMinhp5, \n 'statMinhp6' : bilan.statMinhp6, \n 'statMinhp7' : bilan.statMinhp7, \n 'statMinhc0' : bilan.statMinhc0,\n 'statMinhc1' : bilan.statMinhc1,\n 'statMinhc2' : bilan.statMinhc2,\n 'statMinhc3' : bilan.statMinhc3,\n 'statMinhc4' : bilan.statMinhc4,\n 'statMinhc5' : bilan.statMinhc5,\n 'statMinhc6' : bilan.statMinhc6,\n 'statMinhc7' : bilan.statMinhc7,\n 'randomString' : randomString(),\n 'randomInt' : randomInt(),\n 'mensuel' : data_mensuel,\n 'maxConsoMensuel' : maxConsoMensuel,\n 'minConsoMensuel' : minConsoMensuel,\n 'annuel' : data_annuel,\n 'annuel_juillet' : data_annuel_juillet,\n \n 'estimation' : estimation\n\t\t\t#'libelle' : libelle,\n\t\t\t#'date' : dateEnClair(dateTraitement),\n\t\t\t#'heure' : datetime.strftime(dateTraitement, \"%H:%M\"),\n\t\t\t#'seriesJour' : getJour(listeSondes[0]),\n\t\t\t#'minJour' : 
getminSerie(getJour(listeSondes[0])),\n\t\t\t#'maxJour' : getmaxSerie(getJour(listeSondes[0])),\n\t\t\t#'minSeriesSemaine' : getminSemaine(listeSondes[0]),\n\t\t\t#'maxSeriesSemaine' : getmaxSemaine(listeSondes[0]),\n\t\t\t#'minSemaine' : getminSerie(getminSemaine(listeSondes[0])),\n\t\t\t#'maxSemaine' : getmaxSerie(getmaxSemaine(listeSondes[0])),\n\t\t\t#'jourSemaine' : getjourSemaine(dateTraitement, getminSemaine(listeSondes[0])),\n\t\t\t#'minSeriesMois' : getminMois(listeSondes[0]),\n\t\t\t#'maxSeriesMois' : getmaxMois(listeSondes[0]),\n\t\t\t#'minMois' : getminSerie(getminMois(listeSondes[0])),\n\t\t\t#'maxMois' : getmaxSerie(getmaxMois(listeSondes[0])),\n\t\t\t#'jourMois' : getjourMois(dateTraitement, getminMois(listeSondes[0])),\n\t\t\t#'minSeriesAnnee' : getminAnnee(listeSondes[0]),\n\t\t\t#'maxSeriesAnnee' : getmaxAnnee(listeSondes[0]),\n\t\t\t#'minAnnee' : getminSerie(getminAnnee(listeSondes[0])),\n\t\t\t#'maxAnnee' : getmaxSerie(getmaxAnnee(listeSondes[0])),\n\t\t\t#'nomMois' : getnomMois(dateTraitement, getminAnnee(listeSondes[0])),\n\t\t}\n\t\ttemplate = JINJA_ENVIRONMENT.get_template('index.html')\n\t\tself.response.write(template.render(template_values))\n# [END main_page]\n\napp = webapp2.WSGIApplication([\n    ('/', pagePrincipale),\n], debug=True)" }, { "alpha_fraction": 0.6321326494216919, "alphanum_fraction": 0.623115599155426, "avg_line_length": 33.96799850463867, "blob_id": "680212e0984857ee2de25bfe9eea7a0e7028bac1", "content_id": "13f03fed951923cfd5dcf648b81da6a08b4fa35a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8764, "license_type": "no_license", "max_line_length": 124, "num_lines": 250, "path": "/scripts/temperatures/sauveTemperature.py", "repo_name": "lcapdecomme/raspberry", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# NAME: sauveTemperature.py\n# AUTHOR: Lionel Capdecomme\n# DATE : 24/01/2016\n# COMMENT: Reads the temperature probes and saves the readings to the MongoLab DB\n\nimport serial, json, sys, pymongo, datetime, time\nfrom pymongo import MongoClient\nfrom datetime import datetime, timedelta\n\n# 0. 
Initialize the Mongo variables\nMONGODB_URI = 'mongodb://user:password@serveur:port/basededonnee' \nMONGODB_COLLECTION='collection'\nMONGODB_COLLECTION_BILAN='collection_bilan'\nMONGODB_COLLECTION_STAT='collection_stat'\n\n# Time zone offset\ndecalage=2\nnbSondes=3\n\n# Temperature initialization \nbase_dir = '/sys/bus/w1/devices/'\nnbSondes=3\nnbProps=3 # 1: location, 2: file name, 3: value\nsondes=[[0 for row in range(0,nbProps)] for col in range(0,nbSondes)]\nsondes[0][0]=\"salon\"\nsondes[0][1]=\"28-031574449aff\"\nsondes[1][0]=\"exterieur\"\nsondes[1][1]=\"28-0315747700ff\"\nsondes[2][0]=\"garage\"\nsondes[2][1]=\"28-03157474cdff\"\n\n# Open and read a file\ndef lireFichier(fichier):\n    f = open(fichier, 'r')\n    lignes = f.readlines()\n    f.close()\n    return lignes\n\ndata = {}\n# Read the temperatures\nfor i in range(nbSondes):\n    sonde = base_dir + sondes[i][1] + \"/w1_slave\"\n    lignes = lireFichier(sonde)\n    while lignes[0].strip()[-3:] != 'YES': # read the last 3 chars of line 0 and retry if not YES\n        time.sleep(0.2)\n        lignes = lireFichier(sonde)\n\n    # File ok, read the second line \n    temp_raw = int (lignes[1].split(\"=\")[1])\n    value = round(temp_raw / 1000.0, 2)\n    sondes[i][2] += value    # the 2 rounds to 2 digits after the decimal point\n    print sondes[i][0],\"=\",sondes[i][2] \n\n# The table saved to the DB\ndata['sondes']=sondes\ndata['date']=datetime.now()+timedelta(hours=decalage)\n\n#3. Save this record to the mongolab account\nclient = pymongo.MongoClient(MONGODB_URI)\ndb = client.get_default_database()\n# Select the collection \ntemperatures = db[MONGODB_COLLECTION]\ntemperatures.insert(data)\n\n\n\ndef days_diff(a,b):\n    A = a.replace(hour = 0, minute = 0, second = 0, microsecond = 0)\n    B = b.replace(hour = 0, minute = 0, second = 0, microsecond = 0)\n    return (A - B).days\n\n\ndef days_diff_an(a,b):\n    A = a.replace(hour = 0, minute = 0, second = 0, microsecond = 0)\n    B = b.replace(hour = 0, minute = 0, second = 0, microsecond = 0)\n    if A.month > B.month:\n        return A.month - B.month\n    else:\n        return B.month - A.month\n\n\ndef initStat():\n    tableauStat = {}\n    # Day: min and max values over 24 hours\n    for num in range(0,24): \n\ttableauStat[\"heure\" + str(num)]=\"\"\n    \n    # Week & month: min and max values over the last 30 days\n    for num in range(1,31): \n\ttableauStat[\"minJour\" + str(num)]=\"\"\n\ttableauStat[\"maxJour\" + str(num)]=\"\"\n\n    # Year: min and max values over the last 12 months\n    for num in range(1,13): \n\ttableauStat[\"minMois\" + str(num)]=\"\"\n\ttableauStat[\"maxMois\" + str(num)]=\"\"\n\n    return tableauStat\n\n\ndef testSuperieur(s, t):\n    if s == \"\":\n        return True\n    if float(s) > t:\n        return True\n    return False\n\n\ndef testInferieur(s, t):\n    if s == \"\":\n        return True\n    if float(s) < t:\n        return True\n    return False\n\n    \n\n\n#4. 
Build the summary\nstat = {}\nbilan = {}\ncursor = temperatures.find()\ndateDuJour=datetime.now()+timedelta(hours=decalage)\nheure = datetime.strftime(dateDuJour, \"%H\")\njour = datetime.strftime(dateDuJour, \"%d\")\nmois = datetime.strftime(dateDuJour, \"%m\")\nannee = datetime.strftime(dateDuJour, \"%Y\")\nstart_date_mois = dateDuJour + timedelta(-29)\nstart_date_an = dateDuJour + timedelta(-360)\n#print \"h:\",heure,\" d:\", jour, \" m:\", mois, \" year:\", annee, \" => past:\", start_date_mois, \" ==> past year:\", start_date_an\nfor document in cursor:\n    #print(document['_id'])\n    #print(document['sondes'])\n    sonde=document['sondes']\n    date=document['date']\n    dateTraitement= datetime.strftime(date, \"%d-%m-%Y\")\n    heureTraitement = datetime.strftime(date, \"%H:%M\")\n    for s in sonde:\n        libelle = s[0]\n        temp = float(s[2])\n\n        #1. Compute the summary \n        if libelle in bilan :\n            temperatureSonde = bilan[libelle]\n            temperatureSonde['libelle']=libelle\n            temperatureSonde['courant']=temp\n            temperatureSonde['dateTraitement']=dateDuJour\n            if float(temperatureSonde['mini']) > temp:\n                temperatureSonde['mini'] = temp\n                temperatureSonde['miniDate'] = dateTraitement\n                temperatureSonde['miniHeure'] = heureTraitement\n            if float(temperatureSonde['maxi']) < temp:\n                temperatureSonde['maxi'] = temp\n                temperatureSonde['maxiDate'] = dateTraitement\n                temperatureSonde['maxiHeure'] = heureTraitement\n\n        else:\n            # This probe is not yet in the summary table \n            temperatureSonde = {}\n            temperatureSonde['libelle']=libelle\n            temperatureSonde['courant']=temp\n            temperatureSonde['mini']=temp\n            temperatureSonde['maxi']=temp\n            temperatureSonde['maxiDate'] = dateTraitement\n            temperatureSonde['maxiHeure'] = heureTraitement\n            temperatureSonde['miniDate'] = dateTraitement\n            temperatureSonde['miniHeure'] = heureTraitement\n            temperatureSonde['dateTraitement']=dateDuJour\n\n        # Add this probe to the summary table\n        bilan[libelle]=temperatureSonde\n\n\n\n        #2. Compute the statistics \n        if libelle in stat :\n            statSonde = stat[libelle]\n        else:\n            # This probe is not yet in the stats table\n            statSonde =initStat()\n\n        statSonde['libelle']=libelle\n        statSonde['dateTraitement']=dateDuJour\n        # 1. Today's statistics\n        heureTrait = str(int(datetime.strftime(date, \"%H\")))\n        jourTrait = datetime.strftime(date, \"%d\")\n        moisTrait = datetime.strftime(date, \"%m\")\n        anneeTrait = datetime.strftime(date, \"%Y\")\n        if jourTrait == jour and moisTrait == mois and anneeTrait == annee:\n            statSonde['heure'+str(heureTrait)] = temp\n\n        # 2. Statistics for the previous 30 days \n        if start_date_mois <= date :\n            # Return the index in the 30-element array based on today's date\n            # E.g. today is the 6th. If jourTrait=6 this returns 30 (30+6-6),\n            # if jourTrait=3 it returns 27 (30-6+3),\n            # if jourTrait=15 it returns 9 (30-6-15)\n            jourTableau=30-days_diff(dateDuJour, date)\n            #print dateDuJour,\",\",date,\",\",jourTableau\n            # Is the min temperature for this probe higher than the temperature read from the DB?\n            if testSuperieur(statSonde['minJour'+str(jourTableau)], temp):\n                statSonde['minJour'+str(jourTableau)]= temp\n            # Is the max temperature for this probe lower than the temperature read from the DB?\n            if testInferieur(statSonde['maxJour'+str(jourTableau)], temp):\n                statSonde['maxJour'+str(jourTableau)]= temp\n            #if libelle==\"salon\" and jourTableau==30:\n            #    print \"ap.\",jourTableau, \":\", temp,\":\",statSonde['minJour'+str(jourTableau)]\n            \n        # 3. 
Statistics for the previous 360 days in 12-slot arrays (min and max per month) \n        if start_date_an <= date :\n            # Return the index in the 12-element array based on today's date\n            # E.g. today is February: this returns 12,\n            # if moisTrait=3 it returns 11 (12-(3-2)),\n            # if moisTrait=7 it returns 7 (12-(7-2))\n            jourTableau=12-days_diff_an(dateDuJour, date)\n            # Is the min temperature for this probe higher than the temperature read from the DB?\n            if testSuperieur(statSonde['minMois'+str(jourTableau)], temp):\n                statSonde['minMois'+str(jourTableau)]= temp\n            # Is the max temperature for this probe lower than the temperature read from the DB?\n            if testInferieur(statSonde['maxMois'+str(jourTableau)], temp):\n                statSonde['maxMois'+str(jourTableau)]= temp\n            \n        # Add this probe to the stats table\n        stat[libelle]=statSonde\n\n\n\n#5. Save the new statistics to the mongolab account\ntemperature_stat = db[MONGODB_COLLECTION_STAT]\n# Delete all documents\ntemperature_stat.remove({});\n# Insert one document per probe in the original declaration order\nfor i in range(nbSondes):\n\tfor key, value in stat.iteritems():\n\t\tif sondes[i][0] in key:\n\t\t\ttemperature_stat.insert(value)\n\n\n\n#5b. Save the new summary to the mongolab account\ntemperature_bilan = db[MONGODB_COLLECTION_BILAN]\n# Delete all documents\ntemperature_bilan.remove({});\n# Insert one document per probe in the original declaration order\nfor i in range(nbSondes):\n\tfor key, value in bilan.iteritems():\n\t\tif sondes[i][0] in key:\n\t\t\ttemperature_bilan.insert(value)\n\n\n\n" }, { "alpha_fraction": 0.5899172425270081, "alphanum_fraction": 0.6561324596405029, "avg_line_length": 28.53333282470703, "blob_id": "723d5afe53595c3493b531db8ba2031d6272a201", "content_id": "3a6a94e7cc8a0003642346d5d616fe169617e148", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1331, "license_type": "no_license", "max_line_length": 107, "num_lines": 45, "path": "/scripts/temperatures/voirTemperature.py", "repo_name": "lcapdecomme/raspberry", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# NAME: voirTemperature.py\n# AUTHOR: Lionel Capdecomme\n# DATE : 24/01/2016\n# COMMENT: Reads the temperature probes and prints their values \n\nfrom time import sleep\n\n# Initialization \nbase_dir = '/sys/bus/w1/devices/'\nnbSondes=3\nnbProps=3 # 1: location, 2: file name, 3: value\nsondes=[[0 for row in range(0,nbProps)] for col in range(0,nbSondes)]\nsondes[0][0]=\"salon\"\nsondes[0][1]=\"28-031574449aff\"\nsondes[1][0]=\"exterieur\"\nsondes[1][1]=\"28-0315747700ff\"\nsondes[2][0]=\"garage\"\nsondes[2][1]=\"28-03157474cdff\"\n\n# Open and read a file\ndef lireFichier(fichier):\n    f = open(fichier, 'r')\n    lignes = f.readlines()\n    f.close()\n    return lignes\n\nprint \"The house temperatures:\"\n\n# Read the temperatures\nfor i in range(nbSondes):\n    sonde = base_dir + sondes[i][1] + \"/w1_slave\"\n    lignes = lireFichier(sonde)\n    while lignes[0].strip()[-3:] != 'YES': # read the last 3 chars of line 0 and retry if not YES\n        sleep(0.2)\n        lignes = lireFichier(sonde)\n\n    # File ok, read the second line \n    temp_raw = int (lignes[1].split(\"=\")[1])\n    value = round(temp_raw / 1000.0, 2)\n    sondes[i][2] += value    # the 2 rounds to 2 digits after the decimal point\n    result=sondes[i][0]+\" : \"+str(sondes[i][2])+\"°\"\n    print result\n\nprint \"http://temperatures-1214.appspot.com\"\nprint \"#raspberryPi\"\n" }, { "alpha_fraction": 
0.7037205100059509, "alphanum_fraction": 0.7091651558876038, "avg_line_length": 35.75, "blob_id": "5fa1a44d77f16024ae0673cbb9f58ed7537145f", "content_id": "8b3402f98fdab7b9d7cc5fc6922e8bb38f226576", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2210, "license_type": "no_license", "max_line_length": 117, "num_lines": 60, "path": "/webapps/temperatures/pagePrincipale.py", "repo_name": "lcapdecomme/raspberry", "src_encoding": "UTF-8", "text": "# [START imports]\nimport sys\nsys.path.insert(0, 'libs')\nimport os, jinja2, webapp2, requests, json, datetime\nfrom datetime import datetime\nfrom collections import namedtuple\nJINJA_ENVIRONMENT = jinja2.Environment(\n    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n    extensions=['jinja2.ext.autoescape'],\n    autoescape=True)\n# [END imports]\n\nhttp_start = \"https://api.mongolab.com/api/1/\"\n# Ces deux propriétés <nomDataBase> et <nomCollection> sont à personnaliser (fournies par mongolab) !!!!\nhttp_type = \"databases/<nomDataBase>/collections/<nomCollection>\"\n# Cette propriété <clef> est à personnaliser (fournie par mongolab) !!!!\napi = {\"apiKey\": \"<clef>\",\"f\":\"{'_id': 0}\"}\nheaders = {\"Content-Type\": \"application/json\"}\n\nSonde = namedtuple('Sonde', 'libelle, courant, maxi, maxiDate, maxiHeure, mini, miniDate, miniHeure, dateTraitement')\n\ndef convertDate(dateTraitement):\n\tday=datetime.strftime(dateTraitement, \"%d\")\n\tmonth = int(datetime.strftime(dateTraitement, \"%m\"))\n\tyear=datetime.strftime(dateTraitement, \"%Y\")\n\tmois=['Janvier','Fevrier','Mars','Avril','Mai','Juin','Juillet','Aout','Septembre','Octobre','Novembre','Decembre']\n\treturn day + \" \" + mois[month-1] + \" \" + year\n\n\ndef dateEnClair(dateTraitement):\n\tdateTemp=datetime.strftime(dateTraitement, \"%d-%m-%Y\")\n\tdateDuJour=datetime.strftime(datetime.now(), \"%d-%m-%Y\")\n\tif dateTemp == dateDuJour :\n\t\treturn \"Aujourd'hui\"\n\telse:\n\t\treturn convertDate(dateTraitement)\n\n# [START main_page]\nclass MainPage(webapp2.RequestHandler):\n\tdef get(self):\n\t\tr = requests.get(http_start+http_type, params=api, headers=headers)\n\t\tdata = json.loads(r.text)\n\t\tdateTraitement = datetime.strptime(data[0]['dateTraitement']['$date'],'%Y-%m-%dT%H:%M:%S.%fZ') \n\t\t#self.response.write(data)\n\t\t#Array Json en une liste d'objets\n\t\tlisteSondes = [Sonde(**k) for k in data]\n\n\t\t#Data du template\n\t\ttemplate_values = {\n\t\t\t'temperatures': listeSondes,\n\t\t\t'date' : dateEnClair(dateTraitement),\n\t\t\t'heure' : datetime.strftime(dateTraitement, \"%H:%M\"),\n\t\t}\n\t\ttemplate = JINJA_ENVIRONMENT.get_template('index.html')\n\t\tself.response.write(template.render(template_values))\n# [END main_page]\n\napp = webapp2.WSGIApplication([\n    ('/', MainPage),\n], debug=True)" }, { "alpha_fraction": 0.6424657702445984, "alphanum_fraction": 0.6739726066589355, "avg_line_length": 24.266666412353516, "blob_id": "366c3957361bcbafef36239d7df5629fe38181a4", "content_id": "181ca9c0e0b11144a6344412dbb27e27e2be6595", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 730, "license_type": "no_license", "max_line_length": 52, "num_lines": 30, "path": "/scripts/teleinformation/voirTeleInformation.py", "repo_name": "lcapdecomme/raspberry", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# NAME: voirTeleInformation.py\n# AUTHOR: Lionel Capdecomme\n# DATE : 24/01/2016\n# COMMENT: Lecture des trames Teleinformation \nimport serial, sys\n# 
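Exemple (hypothétique) de groupe reçu sur le port série : \"IINST 008 _\" ;\n# line.split(' ') donne alors ['IINST', '008', '_'] : étiquette, valeur, caractère de contrôle.\n# 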
1. Ouverture du port serie\nSERIAL = '/dev/ttyAMA0'\ntry:\n ser = serial.Serial(\n port=SERIAL,\n baudrate = 1200,\n parity=serial.PARITY_EVEN,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.SEVENBITS,\n timeout=1)\nexcept:\n print \"Impossible d'ouvrir le port serie\" + SERIAL\n print sys.exc_info()\n sys.exit(1)\n\n# 2. Lecture d'une trame complete\ncompteur=0 \nwhile compteur<=1 :\n line=ser.readline().strip()\n array = line.split(' ')\n if len(array)>1 :\n header, value = array[0], array[1]\n print header + \":\" + value\n if header == \"ADCO\" : compteur=compteur+1 \n" }, { "alpha_fraction": 0.6057384014129639, "alphanum_fraction": 0.6256144642829895, "avg_line_length": 33.655555725097656, "blob_id": "3f70cc7b344c835f84bcf500c44c28d1cea60a0c", "content_id": "d104756c3499a361e014ee849bb65d0ee011ec5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18722, "license_type": "no_license", "max_line_length": 148, "num_lines": 540, "path": "/scripts/teleinformation/sauveTeleInformation.py", "repo_name": "lcapdecomme/raspberry", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# NAME: teleinfoERDF.py\n# AUTHOR: Lionel Capdecomme\n# DATE : 24/01/2016\n# COMMENT: Lecture des trames Teleinformation et sauvegarde dans BD MongoLab\n\nimport serial, json, sys, pymongo, datetime, time\nfrom pymongo import MongoClient\nfrom datetime import datetime, timedelta\n\n\n\nmonths = [\"Inconnu\",\n \"Jan.\",\n \"Fev.\",\n \"Mars\",\n \"Avril\",\n \"Mai\",\n \"Juin\",\n \"Juil.\",\n \"Aout\",\n \"Sep.\",\n \"Oct.\",\n \"Nov.\",\n \"Dec.\"]\n\n\n\n\n# 0. Init variable \nSERIAL = '/dev/ttyAMA0'\nMONGODB_URI = 'mongodb://<user>:<password>@<base>.mongolab.com:37415/<domaine>' \nMONGODB_COLLECTION='edf'\nMONGODB_COLLECTION_BILAN='edf_bilan'\nMONGODB_COLLECTION_MENSUEL='edf_bilan_mensuel'\nMONGODB_COLLECTION_ANNUEL='edf_bilan_annuel'\nMONGODB_COLLECTION_ANNUEL_JUILLET='edf_bilan_annuel_juillet'\n\n# 1. Ouverture du port serie\ntry:\n ser = serial.Serial(\n port=SERIAL,\n baudrate = 1200,\n parity=serial.PARITY_EVEN,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.SEVENBITS,\n timeout=1)\nexcept:\n print \"Impossible d'ouvrir le port serie\" + SERIAL\n print sys.exc_info()\n sys.exit(1)\n\n# 2. Lecture d'une trame complete\ndata = {}\nwhile True :\n line=ser.readline().strip()\n array = line.split(' ')\n if len(array)>1 :\n header, value = array[0], array[1]\n # Si ADCO 2 fois alors tour complet\n if header == \"ADCO\" : \n if 'adresseConcentrateur' in data : break \n data['adresseConcentrateur']=value\n elif header == \"OPTARIF\" : data['optionTarif']=value\n elif header == \"PTEC\" : data['periodeTarifaire']=value\n elif header == \"IINST\" : data['intensiteInstant']=value\n elif header == \"ADPS\" : data['avertissementDepassement']=value\n elif header == \"PAPP\" : data['puissanceApparente']=value\n elif header == \"IMAX\" : data['intensiteMaximum']=value\n elif header == \"ISOUSC\" : data['intensiteSouscrit']=value\n elif header == \"HCHC\" : data['heuresCreuses']=value\n elif header == \"HCHP\" : data['heuresPleines']=value\n\n#Decalage horaire\ndecalage=1\n\ndateDuJour=datetime.now()+timedelta(hours=decalage)\ndata['date']=dateDuJour\n\n#3. 
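Aperçu, avec des valeurs hypothétiques, du dictionnaire data construit ci-dessus :\n# data = {'adresseConcentrateur': '012345678901', 'optionTarif': 'HC..',\n#         'heuresCreuses': '040123456', 'heuresPleines': '052345678',\n#         'puissanceApparente': '00750', 'date': dateDuJour, ...}\n#3. 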
Sauvegarde dans le compte mongolab\nclient = pymongo.MongoClient(MONGODB_URI)\ndb = client.get_default_database()\n# Choix de la collection \nteleinfo = db[MONGODB_COLLECTION]\nteleinfo.insert(data)\n\n\ndef consommation(d):\n try:\n hc=int(d['heuresCreuses'])\n hp=int(d['heuresPleines'])\n except ValueError:\n hc=0\n hp=0\n return hp + hc\n\n\ndef days_diff(a,b):\n A = a.replace(hour = 0, minute = 0, second = 0, microsecond = 0)\n B = b.replace(hour = 0, minute = 0, second = 0, microsecond = 0)\n return (A - B).days\n\n\ndef isVeille(a,b):\n A = a.replace(minute = 0, second = 0, microsecond = 0)\n B = b.replace(minute = 0, second = 0, microsecond = 0)\n diff=(A-B).total_seconds()\n if diff == 86400:\n return True\n else:\n return False\n\n\ndef isAvantVeille(a,b):\n A = a.replace(minute = 0, second = 0, microsecond = 0)\n B = b.replace(minute = 0, second = 0, microsecond = 0)\n diff=(A-B).total_seconds()\n if diff == (86400*2):\n return True\n else:\n return False\n\n\ndef days_diff_an(a,b):\n A = a.replace(hour = 0, minute = 0, second = 0, microsecond = 0)\n B = b.replace(hour = 0, minute = 0, second = 0, microsecond = 0)\n if A.month > B.month:\n return A.month - B.month\n else:\n return B.month - A.month\n\n\ndef initStat():\n #Jour : Valeur min et max sur 24 heures\n for num in range(0,24): \n\tdata[\"heure\" + str(num)]=\"\"\n \n #Jour -1 : Valeur min et max sur 24 heures\n for num in range(0,24): \n\tdata[\"heureHier\" + str(num)]=\"\"\n \n #Semaine & mois : valeur sur 30 derniers jours\n for num in range(1,31): \n\tdata[\"jour\" + str(num)]=\"\"\n\n #Semaine & mois : valeur sur 30 derniers jours il y a un an\n for num in range(1,31): \n\tdata[\"jourUnAn\" + str(num)]=\"\"\n\n #Annee : valeur min et max sur 12 derniers mois\n for num in range(1,13): \n\tdata[\"mois\" + str(num)]=\"\"\n\n\ndef completeStat():\n #Semaine & mois : valeur min et max sur 30 derniers jours\n total=0\n nbElements=0\n depart=0\n valDepart=0\n valFin=0\n\n for num in range(1,31): \n # On cherche des jours qui n'ont pas de valeurs\n if num>1 and data[\"jour\" + str(num)] == \"\" and depart==0:\n depart=num\n\n if data[\"jour\" + str(num)] <> \"\" and depart<>0:\n total=0\n nbElements=0\n print \"bouche les trous du jour \", depart, \" au jour \", num - 1\n valDepart= data[\"jour\" + str(depart-1)]\n valFin= data[\"jour\" + str(num)]\n total=valFin-valDepart\n nbElements=num-depart+1\n moyennePeriode=total/nbElements\n print \"Ajoute en moyenne \", moyennePeriode, \" - elements : \", nbElements\n\n for num2 in range(depart,num): \n \t data[\"jour\" + str(num2)]= data[\"jour\" + str(num2-1)] + moyennePeriode;\n\n depart=0\n valDepart=0\n valFin=0\n\n for num in range(1,31): \n # On cherche des jours qui n'ont pas de valeurs\n if num>1 and data[\"jourUnAn\" + str(num)] == \"\" and depart==0:\n depart=num\n\n if data[\"jourUnAn\" + str(num)] <> \"\" and depart<>0:\n total=0\n nbElements=0\n print \"bouche les trous du jour UnAn\", depart, \" au jour \", num - 1\n valDepart= data[\"jourUnAn\" + str(depart-1)]\n valFin= data[\"jourUnAn\" + str(num)]\n if valDepart != \"\" :\n print \"======== [\", valFin, \"] - [\", valDepart, \"]\"\n total=valFin-valDepart\n nbElements=num-depart+1\n moyennePeriode=total/nbElements\n print \"Ajoute en moyenne UnAn\", moyennePeriode, \" - elements : \", nbElements\n for num2 in range(depart,num): \n \t data[\"jourUnAn\" + str(num2)]= data[\"jourUnAn\" + str(num2-1)] + moyennePeriode;\n\n depart=0\n valDepart=0\n valFin=0\n\n\ndef completeStatMensuel():\n depart=0\n index=0\n 
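# Exemple chiffré (hypothétique) : si janvier=1000, avril=4000 et si février et mars\n    # manquent, alors nbElements=3, moyennePeriode=1000, d'où février=2000 et mars=3000.\n    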
obj_mensuel_prec=0\n print \"Bouche les trous dans les mois\"\n # Premier passage pour boucher les trous\n for obj_mensuel in list_data_mensuel:\n if obj_mensuel_prec <> 0: \n # Si trou ? on les bouche\n if (int(obj_mensuel_prec[\"numMois\"])+1) <> int(obj_mensuel[\"numMois\"]):\n nbElements=int(obj_mensuel[\"numMois\"])-int(obj_mensuel_prec[\"numMois\"])\n total=int(obj_mensuel[\"value\"])-int(obj_mensuel_prec[\"value\"])\n moyennePeriode=total/nbElements\n for num2 in range(1,nbElements): \n data_mensuel = {}\n anneeTrait=int(obj_mensuel_prec[\"an\"])\n moisTrait=int(obj_mensuel_prec[\"mo\"])+num2\n if (moisTrait>12):\n moisTrait=moisTrait-12; \n anneeTrait=anneeTrait+1;\n data_mensuel['an']= str(anneeTrait)\n data_mensuel['mo']= str(moisTrait)\n data_mensuel['numMois']= (int(obj_mensuel_prec[\"numMois\"]+num2))\n data_mensuel['value']= str((moyennePeriode*num2)+int(obj_mensuel_prec[\"value\"])) \n list_data_mensuel2.append(data_mensuel)\n # print index,\":Ajout de \", data_mensuel\n\n list_data_mensuel2.append(obj_mensuel)\n # print index,\":Analyse de :\", obj_mensuel\n obj_mensuel_prec = obj_mensuel\n index=index+1\n\n obj_mensuel_prec=0\n # Deuxieme passage pour boucher les trous et mettre le libelle du mois\n for obj_mensuel in list_data_mensuel2:\n if obj_mensuel_prec <> 0: \n difference=int(obj_mensuel[\"value\"])-int(obj_mensuel_prec[\"value\"])\n obj_mensuel['diff']= int(difference)/1000\n else:\n obj_mensuel['diff']= 0 \n\n obj_mensuel['mois']= (months[int(obj_mensuel['mo'])]) \n #print \"Mensuel :\", obj_mensuel\n obj_mensuel_prec = obj_mensuel\n\n\ndef completeStatAnnuel():\n obj_annuel_prec=0\n # Passage pour calculer les différences des stats annuel\n for obj_annuel in list_data_annuel:\n if obj_annuel_prec <> 0: \n difference=int(obj_annuel[\"value\"])-int(obj_annuel_prec[\"value\"])\n obj_annuel['diff']= int(difference)/1000\n else:\n obj_annuel['diff']= 0\n\n #print \"Annuel :\", obj_annuel\n obj_annuel_prec = obj_annuel\n list_data_annuel2.append(obj_annuel)\n\n\n obj_annuel_juillet_prec=0\n # Passage pour calculer les différences des stats annuel au mois de juillet\n for obj_annuel_juillet in list_data_annuel_juillet:\n if obj_annuel_juillet_prec <> 0: \n difference=int(obj_annuel_juillet[\"value\"])-int(obj_annuel_juillet_prec[\"value\"])\n obj_annuel_juillet['diff']= int(difference)/1000\n else:\n obj_annuel_juillet['diff']= 0\n\n #print \"Annuel :\", obj_annuel_juillet\n obj_annuel_juillet_prec = obj_annuel_juillet\n list_data_annuel_juillet2.append(obj_annuel_juillet)\n\n\n\ndef testSuperieur(s, t):\n if s == \"\":\n return True\n if float(s) > t:\n return True\n return False\n\n\ndef testInferieur(s, t):\n if s == \"\":\n return True\n if float(s) < t:\n return True\n return False\n\n \n\n\n#4. 
Construction du bilan\nstat = []\ncursor = teleinfo.find()\nheure = datetime.strftime(dateDuJour, \"%H\")\njour = datetime.strftime(dateDuJour, \"%d\")\nmois = datetime.strftime(dateDuJour, \"%m\")\nannee = datetime.strftime(dateDuJour, \"%Y\")\nstart_date_mois = dateDuJour + timedelta(-30)\nstart_date_mois_min = dateDuJour + timedelta(-395)\nstart_date_mois_max = dateDuJour + timedelta(-365)\nstart_date_an = dateDuJour + timedelta(-360)\nprint \"h:\",heure,\" j:\", jour, \" m:\", mois, \" année:\", annee, \" => Passé:\", start_date_mois, \" ==> Passé an:\", start_date_an\ndateHier=dateDuJour + timedelta(-1)\njourHier = datetime.strftime(dateHier, \"%d\")\nmoisHier = datetime.strftime(dateHier, \"%m\")\nanneeHier = datetime.strftime(dateHier, \"%Y\")\nhcHier=0\nhpHier=0\n\nlist_data_mensuel=[]\nlist_data_mensuel2=[]\n\nlist_data_annuel=[]\nlist_data_annuel2=[]\n\nlist_data_annuel_juillet=[]\nlist_data_annuel_juillet2=[]\n\ninitStat()\n\nfirstRecord=True\n\nfor document in cursor:\n    #print(document['_id'])\n    date=document['date']\n    dateTraitement= datetime.strftime(date, \"%d-%m-%Y\")\n    heureTraitement = datetime.strftime(date, \"%H:%M\")\n    if isVeille(dateDuJour, date):\n        data['heuresCreusesHier']=document['heuresCreuses']\n        data['heuresPleinesHier']=document['heuresPleines']\n    if isAvantVeille(dateDuJour, date):\n        data['heuresCreusesAvantHier']=document['heuresCreuses']\n        data['heuresPleinesAvantHier']=document['heuresPleines']\n    \n    minuteTrait = int(datetime.strftime(date, \"%M\"))\n    heureTrait = str(int(datetime.strftime(date, \"%H\")))\n    jourTrait = datetime.strftime(date, \"%d\")\n    moisTrait = datetime.strftime(date, \"%m\")\n    anneeTrait = datetime.strftime(date, \"%Y\")\n    \n    # 1. Statistique aujourd'hui\n    if jourTrait == jour and moisTrait == mois and anneeTrait == annee and minuteTrait == 0:\n        data['heure'+str(heureTrait)] = consommation(document)\n        print \"Heure \",str(heureTrait), \":\", consommation(document)\n\n    # 1.bis Statistique hier\n    if jourTrait == jourHier and moisTrait == moisHier and anneeTrait == anneeHier and minuteTrait == 0:\n        data['heureHier'+str(heureTrait)] = consommation(document)\n        print \"Heure Hier\",str(heureTrait), \":\", consommation(document)\n\n    # 2. Statistique des 30 jours precedents\n    if start_date_mois <= date :\n        #Retourne l'index dans le tableau de 30 elements en fonction de l'ancienneté du relevé\n        #Ex. un relevé du jour donne l'index 30 (30-0), un relevé vieux de 3 jours\n        #donne l'index 27 (30-3), un relevé vieux de 21 jours l'index 9 (30-21)\n        jourTableau=30-days_diff(dateDuJour, date)\n        #Si minuit, on sauve la consommation ?\n        if int(heureTrait) == 0 and int(minuteTrait) == 0:\n            data['jour'+str(jourTableau)]= consommation(document)\n            # print \"Jour \",str(jourTableau), \":\", consommation(document)\n\n    # 2.bis Statistique des 30 jours precedents il y a un an\n    if start_date_mois_min <= date and start_date_mois_max >= date :\n        #Retourne l'index dans le meme tableau de 30 elements pour la periode equivalente il y a un an\n        #(les relevés vieux de 365 à 395 jours donnent les index 30 à 0)\n        jourTableau=395-days_diff(dateDuJour, date)\n        #Si minuit, on sauve la consommation ?\n        if int(heureTrait) == 0 and int(minuteTrait) == 0:\n            data['jourUnAn'+str(jourTableau)]= consommation(document)\n            print date, \" -- Jour il y a un an : \",str(jourTableau), \":\", consommation(document)\n\n    
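# Exemple chiffré (hypothétique) : un relevé vieux de 370 jours donne\n    # days_diff=370, donc jourTableau=395-370=25.\n\n    # 3. 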
Statistique 360 jours precedents dans un tableau de 12 valeurs\n    if start_date_an <= date :\n        #Retourne l'index dans le tableau de 12 elements en fonction du mois du relevé\n        #Ex. on est en fevrier : un relevé de fevrier donne l'index 12 (12-0),\n        #un relevé de mars donne l'index 11 (12-1), un relevé de septembre l'index 5 (12-7)\n        jourTableau=12-days_diff_an(dateDuJour, date)\n        #Si minuit le jour 1, on sauve la consommation\n        if int(heureTrait) == 0 and int(jourTrait)==1 and minuteTrait == 0:\n            data['mois'+str(jourTableau)]= consommation(document)\n            #print \"Mois \",str(jourTableau), \":\", consommation(document)\n\n    # 4. Recherche des consommations max. \n    if int(heureTrait) == 0 and int(minuteTrait) == 0 :\n        try:\n            hc=int(document['heuresCreuses'])\n            hp=int(document['heuresPleines'])\n        except ValueError:\n            print date, \"Erreur : hc:\", hc, \" : hp:\", hp\n        if hcHier != 0 and hpHier != 0 and isVeille(date,dateHier):\n            diffHc = hc-hcHier\n            diffHp = hp-hpHier\n            # print date, \"-\", dateHier,\":hc auj:\", hc, \", hc hier:\", hcHier,\"-hp auj:\", hp,\", hp hier:\", hpHier\n            # On élimine les erreurs de captation\n            if hc<>hcHier and hp<>hpHier and hc>1000 and hp>1000 and hpHier>1000 and hcHier>1000: \n                stat.append([diffHc+diffHp, date, diffHc, diffHp])\n        \n        dateHier=date\n        hcHier=hc\n        hpHier=hp\n\n\n    # 5. Recherche des consommations en debut de chaque mois \n    if int(jourTrait) == 1 and int(heureTrait) == 0 and int(minuteTrait) == 0:\n        yy=int(anneeTrait)\n        mm=int(moisTrait)-1\n        if mm == 0:\n            mm=12\n            yy=yy-1\n        data_mensuel = {}\n        data_mensuel['an']= str(yy)\n        data_mensuel['mo']= str(mm)\n        data_mensuel['numMois']= (yy*12)+mm\n        data_mensuel['value']= consommation(document)\n        list_data_mensuel.append(data_mensuel)\n        print \"Stat Mensuel trouve\", str(yy)+str(mm),\":\", document['heuresCreuses'],\"-\", document['heuresPleines'],\" =Conso :\",consommation(document)\n        #print data_mensuel\n\n\n    # 6. Recherche des consommations en debut de chaque annee \n    if firstRecord or (int(moisTrait) == 1 and int(jourTrait) == 1 and int(heureTrait) == 0):\n        data_annuel = {}\n        data_annuel['an']= str(int(anneeTrait)-1)\n        data_annuel['value']= consommation(document)\n        list_data_annuel.append(data_annuel)\n        #print \"Stat Annuel trouve\", str(anneeTrait),\":\", document['heuresCreuses'],\"-\", document['heuresPleines']\n\n\n    # 7. Recherche des consommations en debut de chaque annee glissante (novembre) \n    # Vu que le premier enregistrement de la BD est en mars, on prendra comme premier enreg celui de novembre 2019\n    # NB : les identifiants *_juillet sont conservés ci-dessous bien que la période débute en novembre\n    if int(moisTrait) == 11 and int(jourTrait) == 1 and int(heureTrait) == 0:\n        data_annuel_juillet = {}\n        data_annuel_juillet['an']= str(int(anneeTrait))\n        data_annuel_juillet['liban']= \"Nov. \"+str(int(anneeTrait)-1)+\"-Oct. \"+str(int(anneeTrait))\n        data_annuel_juillet['value']= consommation(document)\n        list_data_annuel_juillet.append(data_annuel_juillet)\n        #print \"Stat Annuel trouve\", str(anneeTrait),\":\", document['heuresCreuses'],\"-\", document['heuresPleines']\n\n    firstRecord=False\n\n\n
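# Exemple (valeurs hypothétiques) du classement effectué plus bas :\n# stat = [[12000, d1, 5000, 7000], [9000, d2, 4000, 5000]]\n# sorted(stat, key=lambda x: x[0], reverse=True)[0][0] -> 12000\n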
\"+str(int(anneeTrait)-1)+\"-Auj.\"\nlist_data_annuel_juillet.append(data_annuel) \n# print \"Stat Annuel trouve\", str(anneeTrait),\":\", document['heuresCreuses'],\"-\", document['heuresPleines']\n\n\n# Recherche des consommations max.\nliste = sorted(stat, key=lambda x: x[0], reverse=True)\ni=0\nfor max in liste :\n data['statMaxTotal'+str(i)]=max[0]\n data['statMaxDate'+str(i)]=max[1]\n data['statMaxhc'+str(i)]=max[2]\n data['statMaxhp'+str(i)]=max[3]\n print \"Max \", str(i), \" donne \", max[0]\n i+=1\n if i==8 : \n break\n\n\n# Recherche des consommations min.\nliste = sorted(stat, key=lambda x: x[0])\ni=0\nfor min in liste :\n data['statMinTotal'+str(i)]=min[0]\n data['statMinDate'+str(i)]=min[1]\n data['statMinhc'+str(i)]=min[2]\n data['statMinhp'+str(i)]=min[3]\n print \"Min \", str(i), \" donne \", min[0]\n i+=1\n if i==8 : \n break\n\n\n# Bouche les eventuels trous dans les tableaux\ncompleteStat()\ncompleteStatMensuel()\ncompleteStatAnnuel()\n\n\n#5.bis Sauvegarde dans le compte mongolab du nouveau bilan\nedf_bilan = db[MONGODB_COLLECTION_BILAN]\n#Suppression de tous les documents\nedf_bilan.remove({});\n#Ajout d'un document par sonde dans l'ordre de declaration initiale\nedf_bilan.insert(data)\n\n#5.ter Sauvegarde dans le compte mongolab du nouveau bilan mensuel\nedf_bilan_mensuel = db[MONGODB_COLLECTION_MENSUEL]\n#Suppression de tous les documents\nedf_bilan_mensuel.remove({});\nfor obj_mensuel in list_data_mensuel2:\n edf_bilan_mensuel.insert(obj_mensuel)\n\n#5.qua Sauvegarde dans le compte mongolab du nouveau bilan annuel\nedf_bilan_annuel = db[MONGODB_COLLECTION_ANNUEL]\n#Suppression de tous les documents\nedf_bilan_annuel.remove({});\nfor obj_annuel in list_data_annuel2:\n print obj_annuel\n edf_bilan_annuel.insert(obj_annuel)\n\n#5.cinq Sauvegarde dans le compte mongolab du nouveau bilan annuel au mois de juillet\nedf_bilan_annuel_juillet = db[MONGODB_COLLECTION_ANNUEL_JUILLET]\n#Suppression de tous les documents\nedf_bilan_annuel_juillet.remove({});\nfor obj_annuel_juillet in list_data_annuel_juillet2:\n print obj_annuel_juillet\n edf_bilan_annuel_juillet.insert(obj_annuel_juillet)\n\n\n" }, { "alpha_fraction": 0.7152777910232544, "alphanum_fraction": 0.7152777910232544, "avg_line_length": 21, "blob_id": "ea2c3a08a9f08f438705edf11a7ec3884d829a29", "content_id": "454a80fde983ea56a2d19966b4fad2b60a733233", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 292, "license_type": "no_license", "max_line_length": 193, "num_lines": 13, "path": "/scripts/twitter/README.md", "repo_name": "lcapdecomme/raspberry", "src_encoding": "ISO-8859-1", "text": "# Scripts twitter\n\n\n## piTweet.py \n\nCe script lit ce qu'il reçoit en entrée (pipe) et le tweet. 
Pour la configuration twitter, un document clair et détaillé (http://www.makeuseof.com/tag/how-to-build-a-raspberry-pi-twitter-bot/)\n\n\n### utilisation \n\nPar exemple : \n\n*date | piTweet.py*\n\n\n" }, { "alpha_fraction": 0.5699061751365662, "alphanum_fraction": 0.6110756993293762, "avg_line_length": 39.50922393798828, "blob_id": "8c68d918215f99b60d541f5c2f2a122073dc8ffe", "content_id": "4b238a42e343b034924acf9a6e0bac9777187757", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10987, "license_type": "no_license", "max_line_length": 151, "num_lines": 271, "path": "/webapps/temperatures/pageStatistique.py", "repo_name": "lcapdecomme/raspberry", "src_encoding": "UTF-8", "text": "# [START imports]\nimport sys\nsys.path.insert(0, 'libs')\nimport os, jinja2, webapp2, requests, json, datetime, re\nfrom datetime import datetime, timedelta\nfrom collections import namedtuple\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n# [END imports]\n\nhttp_start = \"https://api.mongolab.com/api/1/\"\n# Ces deux propriétés <nomDataBase> et <nomCollection> sont à personnaliser (fournies par mongolab) !!!!\nhttp_type = \"databases/<nomDataBase>/collections/<nomCollection>\"\n# Cette propriété <clef> est à personnaliser (fournie par mongolab) !!!!\napi = {\"apiKey\": \"<clef>\",\"f\":\"{'_id': 0}\"}\nheaders = {\"Content-Type\": \"application/json\"}\n\nSonde = namedtuple('Sonde', 'heure0,heure1,heure2,heure3,heure4,heure5,heure6,heure7,heure8,heure9,heure10,heure11,heure12,heure13, \\\n heure14,heure15,heure16,heure17,heure18,heure19,heure20,heure21,heure22,heure23, \\\n minJour1,maxJour1,minJour2,maxJour2,minJour3,maxJour3,minJour4,maxJour4, \\\n minJour5,maxJour5,minJour6,maxJour6,minJour7,maxJour7,minJour8,maxJour8,minJour9,maxJour9,minJour10,maxJour10, \\\n minJour11,maxJour11,minJour12,maxJour12,minJour13,maxJour13,minJour14,maxJour14, \\\n minJour15,maxJour15,minJour16,maxJour16,minJour17,maxJour17,minJour18,maxJour18, \\\n minJour19,maxJour19,minJour20,maxJour20,minJour21,maxJour21,minJour22,maxJour22, \\\n minJour23,maxJour23,minJour24,maxJour24,minJour25,maxJour25,minJour26,maxJour26, \\\n minJour27,maxJour27,minJour28,maxJour28,minJour29,maxJour29,minJour30,maxJour30, \\\n minMois1,maxMois1,minMois2,maxMois2,minMois3,maxMois3,minMois4,maxMois4,minMois5,maxMois5,minMois6,maxMois6, \\\n minMois7,maxMois7,minMois8,maxMois8,minMois9,maxMois9,minMois10,maxMois10,minMois11,maxMois11,minMois12,maxMois12, \\\n libelle,dateTraitement')\n\ndef convertDate(dateTraitement):\n\tday=datetime.strftime(dateTraitement, \"%d\")\n\tmonth = int(datetime.strftime(dateTraitement, \"%m\"))\n\tyear=datetime.strftime(dateTraitement, \"%Y\")\n\tmois=['Janvier','Fevrier','Mars','Avril','Mai','Juin','Juillet','Aout','Septembre','Octobre','Novembre','Decembre']\n\treturn day + \" \" + mois[month-1] + \" \" + year\n\n\ndef dateEnClair(dateTraitement):\n\tdateTemp=datetime.strftime(dateTraitement, \"%d-%m-%Y\")\n\tdateDuJour=datetime.strftime(datetime.now(), \"%d-%m-%Y\")\n\tif dateTemp == dateDuJour :\n\t\treturn \"Aujourd'hui\"\n\telse:\n\t\treturn convertDate(dateTraitement)\n\ndef getJour(s):\n return removeFirstComma((str(s.heure0)+\",\"+str(s.heure1)+\",\"+str(s.heure2)+\",\"+str(s.heure3)+\",\"+str(s.heure4)+\",\"+str(s.heure5)+\",\"+ \\\n\t\t 
str(s.heure6)+\",\"+str(s.heure7)+\",\"+str(s.heure8)+\",\"+str(s.heure9)+\",\"+str(s.heure10)+\",\"+str(s.heure11)+\",\"+ \\\n\t\t str(s.heure12)+\",\"+str(s.heure13)+\",\"+str(s.heure14)+\",\"+str(s.heure15)+\",\"+str(s.heure16)+\",\"+str(s.heure17)+\",\"+ \\\n\t\t str(s.heure18)+\",\"+str(s.heure19)+\",\"+str(s.heure20)+\",\"+str(s.heure21)+\",\"+str(s.heure22)+\",\"+str(s.heure23)).replace(\",,\", \"\"))\n\n\t\ndef getminSemaine(s):\n\treturn removeFirstComma((str(s.minJour24)+\",\"+str(s.minJour25)+\",\"+str(s.minJour26)+\",\"+str(s.minJour27)+\",\"+str(s.minJour28)+\",\"+ \\\n\t str(s.minJour29)+\",\"+str(s.minJour30)).replace(\",,\", \"\"))\n\n\ndef getmaxSemaine(s):\n\treturn removeFirstComma((str(s.maxJour24)+\",\"+str(s.maxJour25)+\",\"+str(s.maxJour26)+\",\"+str(s.maxJour27)+\",\"+str(s.maxJour28)+\",\"+ \\\n\t str(s.maxJour29)+\",\"+str(s.maxJour30)).replace(\",,\", \"\"))\n\n\ndef getminMois(s):\n\treturn removeFirstComma((str(s.minJour1)+\",\"+str(s.minJour2)+\",\"+str(s.minJour3)+\",\"+str(s.minJour4)+\",\"+str(s.minJour5)+\",\"+str(s.minJour6) \\\n\t\t +\",\"+str(s.minJour7)+\",\"+str(s.minJour8)+\",\"+str(s.minJour9)+\",\"+str(s.minJour10)+\",\"+str(s.minJour11)+\",\"+str(s.minJour12) \\\n\t\t +\",\"+str(s.minJour13)+\",\"+str(s.minJour14)+\",\"+str(s.minJour15)+\",\"+str(s.minJour16)+\",\"+str(s.minJour17)+\",\"+str(s.minJour8) \\\n\t\t +\",\"+str(s.minJour19)+\",\"+str(s.minJour20)+\",\"+str(s.minJour21)+\",\"+str(s.minJour22)+\",\"+str(s.minJour23)+\",\"+str(s.minJour24) \\\n\t\t +\",\"+str(s.minJour25)+\",\"+str(s.minJour26)+\",\"+str(s.minJour27)+\",\"+str(s.minJour28)+\",\"+str(s.minJour29)+\",\"+str(s.minJour30)).replace(\",,\", \"\"))\n\n\ndef getmaxMois(s):\n\treturn removeFirstComma((str(s.maxJour1)+\",\"+str(s.maxJour2)+\",\"+str(s.maxJour3)+\",\"+str(s.maxJour4)+\",\"+str(s.maxJour5)+\",\"+str(s.maxJour6) \\\n\t\t +\",\"+str(s.maxJour7)+\",\"+str(s.maxJour8)+\",\"+str(s.maxJour9)+\",\"+str(s.maxJour10)+\",\"+str(s.maxJour11)+\",\"+str(s.maxJour12) \\\n\t\t +\",\"+str(s.maxJour13)+\",\"+str(s.maxJour14)+\",\"+str(s.maxJour15)+\",\"+str(s.maxJour16)+\",\"+str(s.maxJour17)+\",\"+str(s.maxJour8) \\\n\t\t +\",\"+str(s.maxJour19)+\",\"+str(s.maxJour20)+\",\"+str(s.maxJour21)+\",\"+str(s.maxJour22)+\",\"+str(s.maxJour23)+\",\"+str(s.maxJour24) \\\n\t\t +\",\"+str(s.maxJour25)+\",\"+str(s.maxJour26)+\",\"+str(s.maxJour27)+\",\"+str(s.maxJour28)+\",\"+str(s.maxJour29)+\",\"+str(s.maxJour30)).replace(\",,\", \"\"))\n\n\ndef getminAnnee(s):\n\treturn removeFirstComma((str(s.minMois1)+\",\"+str(s.minMois2)+\",\"+str(s.minMois3)+\",\"+str(s.minMois4)+\",\"+str(s.minMois5)+\",\"+str(s.minMois6) \\\n\t\t +\",\"+str(s.minMois7)+\",\"+str(s.minMois8)+\",\"+str(s.minMois9)+\",\"+str(s.minMois10)+\",\"+str(s.minMois11)+\",\"+str(s.minMois12)).replace(\",,\", \"\"))\n\n\ndef getmaxAnnee(s):\n\treturn removeFirstComma((str(s.maxMois1)+\",\"+str(s.maxMois2)+\",\"+str(s.maxMois3)+\",\"+str(s.maxMois4)+\",\"+str(s.maxMois5)+\",\"+str(s.maxMois6) \\\n\t\t +\",\"+str(s.maxMois7)+\",\"+str(s.maxMois8)+\",\"+str(s.maxMois9)+\",\"+str(s.maxMois10)+\",\"+str(s.maxMois11)+\",\"+str(s.maxMois12)).replace(\",,\", \"\"))\n\n\ndef removeFirstComma(temp):\n\tif temp[0] == \",\":\n\t\treturn temp[1:]\n\treturn temp\n\n\n#Retourne l'echelle pour le graphique semaine\n#Cela devrait ressembler a qq chose comme \"\\\"Lu.\\\", \\\"Ma.\\\", \\\"Me.\\\", ...\ndef getjourSemaine(d, s):\n occur = s.count(',')\n chaine=\"\"\n if occur>0:\n #Tableau des jours de 0 a 6\n 
semaine=['Lu','Ma','Me','Je','Ve','Sa','Di']\n        maDate=d\n        pos = maDate.weekday()\n        posd = maDate.day\n        chaine=\"\\\"\"+semaine[pos]+\" \"+str(posd)+\"\\\"\"\n        for i in range(occur,0,-1):\n            maDate = maDate + timedelta(-1)\n            pos = maDate.weekday()\n            posd = maDate.day\n            if pos < 0:\n                pos=6\n            chaine=\"\\\"\"+semaine[pos]+\" \"+str(posd)+\"\\\",\"+chaine\n    \n    return chaine\n    \n    \n#Retourne l'echelle pour le graphique mois\n#Pour plus de lisibilité, un jour sur trois est affiché\n#Cela devrait ressembler a qq chose comme \"4 fev.\", \"\", \"\", \"7 fev.\", ...\ndef getjourMois(d, s):\n    occur = s.count(',')\n    chaine=\"\"\n    #Tableau des mois de 0 a 11\n    mois=['Ja.','Fe.','Ma.','Av.','Ma.','Ju.','Ji.','Ao.','Se.','Oc.','No.','De.']\n    #1. libelles des jours precedents\n    maDate=d\n    pos = maDate.day\n    posm = maDate.month-1\n    chaine=\"\\\"\"+str(pos)+\" \"+mois[posm]+\"\\\"\"\n    inter=1\n    for i in range(occur,0,-1):\n        inter+=1\n        maDate = maDate + timedelta(-1)\n        pos = maDate.day\n        posm = maDate.month-1\n        if inter==3:\n            chaine=\"\\\"\"+str(pos)+\" \"+mois[posm]+\"\\\",\"+chaine\n            inter=1\n        else:\n            chaine=\"\\\"\\\",\"+chaine\n    #Si moins d'un mois de donnees, on complete avec les jours suivants\n    occur = s.count(',')\n    maDate=d\n    inter=1\n    for i in range(30-occur):\n        inter+=1\n        maDate = maDate + timedelta(1)\n        pos = maDate.day\n        posm = maDate.month-1\n        if inter==3:\n            chaine=chaine+\",\\\"\"+str(pos)+\" \"+mois[posm]+\"\\\"\"\n            inter=1\n        else:\n            chaine=chaine+\",\\\"\\\"\"\n    \n    return chaine\n    \n    \n    \n#Retourne l'echelle pour le graphique annee\n#Cela devrait ressembler a qq chose comme \"\\\"J.\\\", \\\"F.\\\", \\\"M.\\\", ...\ndef getnomMois(d, s):\n    occur = s.count(',')\n    chaine=\"\"\n    #Tableau des mois de 0 a 11\n    mois=['Ja.','Fe.','Ma.','Av.','Ma.','Ju.','Ji.','Ao.','Se.','Oc.','No.','De.']\n    \n    #1. 
nom des mois precedents\n pos = d.month-1\n chaine=\"\\\"\"+mois[pos]+\"\\\"\"\n for i in range(occur,0,-1):\n pos = pos - 1\n if pos < 0:\n pos=11\n chaine=\"\\\"\"+mois[pos]+\"\\\",\"+chaine\n #Si moins d'une annee, on complete avec les mois suivants\n occur = s.count(',')\n pos = d.month-1\n for i in range(12-occur):\n pos = pos + 1\n if pos >11:\n pos=0\n chaine=chaine+\",\\\"\"+mois[pos]+\"\\\"\"\n \n return chaine\n \n\n\ndef isfloat(value):\n try:\n float(value)\n return True\n except ValueError:\n return False\n\n\ndef getminSerie(s):\n\tmin=100\n\tvalues = s.split(\",\") \n\tfor i in values:\n\t\tif isfloat(i):\n\t\t\tnum = float(i)\n\t\t\tif num<min:\n\t\t\t\tmin=num\n\treturn min\n\n\ndef getmaxSerie(s):\n\tmax=0\n\tvalues = s.split(\",\") \n\tfor i in values:\n\t\tif isfloat(i):\n\t\t\tnum = float(i)\n\t\t\tif num>max:\n\t\t\t\tmax=num\n\n\treturn max\n\n\n \n# [START main_page]\nclass StatPage(webapp2.RequestHandler):\n def get(self):\n libelle = self.request.get('libelle')\n #self.response.write(libelle)\n api[\"q\"] = \"{'libelle': '\"+libelle+\"'}\"\n r = requests.get(http_start+http_type, params=api, headers=headers)\n data = json.loads(r.text)\n #self.response.write(data)\n dateTraitement = datetime.strptime(data[0]['dateTraitement']['$date'],'%Y-%m-%dT%H:%M:%S.%fZ') \n \n #Array Json en une liste d'objets\n listeSondes = [Sonde(**k) for k in data]\n \n #self.response.write(getminSemaine(listeSondes[0]).count(','))\n \n #Data du template\n template_values = {\n 'libelle' : libelle,\n 'date' : dateEnClair(dateTraitement),\n 'heure' : datetime.strftime(dateTraitement, \"%H:%M\"),\n 'seriesJour' : getJour(listeSondes[0]),\n 'minJour' : getminSerie(getJour(listeSondes[0])),\n 'maxJour' : getmaxSerie(getJour(listeSondes[0])),\n 'minSeriesSemaine' : getminSemaine(listeSondes[0]),\n 'maxSeriesSemaine' : getmaxSemaine(listeSondes[0]),\n 'minSemaine' : getminSerie(getminSemaine(listeSondes[0])),\n 'maxSemaine' : getmaxSerie(getmaxSemaine(listeSondes[0])),\n 'jourSemaine' : getjourSemaine(dateTraitement, getminSemaine(listeSondes[0])),\n 'minSeriesMois' : getminMois(listeSondes[0]),\n 'maxSeriesMois' : getmaxMois(listeSondes[0]),\n 'minMois' : getminSerie(getminMois(listeSondes[0])),\n 'maxMois' : getmaxSerie(getmaxMois(listeSondes[0])),\n 'jourMois' : getjourMois(dateTraitement, getminMois(listeSondes[0])),\n 'minSeriesAnnee' : getminAnnee(listeSondes[0]),\n 'maxSeriesAnnee' : getmaxAnnee(listeSondes[0]),\n 'minAnnee' : getminSerie(getminAnnee(listeSondes[0])),\n 'maxAnnee' : getmaxSerie(getmaxAnnee(listeSondes[0])),\n 'nomMois' : getnomMois(dateTraitement, getminAnnee(listeSondes[0])),\n }\n template = JINJA_ENVIRONMENT.get_template('stat.html')\n self.response.write(template.render(template_values))\n# [END main_page]\n \napp = webapp2.WSGIApplication([\n ('/stat', StatPage),\n], debug=True)\n\n" }, { "alpha_fraction": 0.7050270438194275, "alphanum_fraction": 0.7456768751144409, "avg_line_length": 50.515682220458984, "blob_id": "8c4e7a434573cd89885473f74471fc6277998804", "content_id": "c6a8a7c912b12c1ac5d930982b3cc5118956d514", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41073, "license_type": "no_license", "max_line_length": 148, "num_lines": 797, "path": "/webapps/electricite/util.py", "repo_name": "lcapdecomme/raspberry", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# [START imports]\nfrom datetime import datetime, timedelta\nfrom collections import namedtuple\nimport random\nimport 
string\n\nTENSION_VOLT = 230\ntarifHP = 0.16360\ntarifHC = 0.11500\ntarifHB = 0.14\n\nmois=['Jan.','Fev.','Mar.','Avr.','Mai','Juin','Jui.','Aout','Sep.','Oct.','Nov.','Dec.']\n\nElec_bilan = namedtuple('Elec_bilan', 'periodeTarifaire, intensiteInstant, intensiteSouscrit, optionTarif, heuresCreuses, \\\n                intensiteMaximum, adresseConcentrateur, heuresPleines,puissanceApparente, date, heuresPleinesHier, \\\n                heuresCreusesHier, heuresPleinesAvantHier,heuresCreusesAvantHier, \\\n                jour1,jour2,jour3,jour4,jour5,jour6,jour7,jour8,jour9,jour10, \\\n                jour11,jour12,jour13,jour14,jour15,jour16,jour17,jour18, \\\n                jour19,jour20,jour21,jour22,jour23,jour24,jour25,jour26, \\\n                jour27,jour28,jour29,jour30, \\\n\t\tjourUnAn1,jourUnAn2,jourUnAn3,jourUnAn4,jourUnAn5,jourUnAn6,jourUnAn7,jourUnAn8,jourUnAn9,jourUnAn10, \\\n\t\tjourUnAn11,jourUnAn12,jourUnAn13,jourUnAn14,jourUnAn15,jourUnAn16,jourUnAn17,jourUnAn18, \\\n\t\tjourUnAn19,jourUnAn20,jourUnAn21,jourUnAn22,jourUnAn23,jourUnAn24,jourUnAn25,jourUnAn26, \\\n\t\tjourUnAn27,jourUnAn28,jourUnAn29,jourUnAn30, \\\n                heure0,heure1,heure2,heure3,heure4,heure5,heure6,heure7,heure8,heure9,heure10, \\\n                heure11,heure12,heure13,heure14,heure15,heure16,heure17,heure18, \\\n                heure19,heure20,heure21,heure22,heure23, \\\n                heureHier0,heureHier1,heureHier2,heureHier3,heureHier4,heureHier5,heureHier6,heureHier7,heureHier8,heureHier9,heureHier10, \\\n                heureHier11,heureHier12,heureHier13,heureHier14,heureHier15,heureHier16,heureHier17,heureHier18, \\\n                heureHier19,heureHier20,heureHier21,heureHier22,heureHier23, \\\n                mois1,mois2,mois3,mois4,mois5,mois6,mois7,mois8,mois9,mois10, \\\n                mois11,mois12, \\\n                statMaxDate0,statMaxDate1,statMaxDate2,statMaxDate3,statMaxDate4,statMaxDate5,statMaxDate6,statMaxDate7, \\\n                statMaxTotal0,statMaxTotal1,statMaxTotal2,statMaxTotal3,statMaxTotal4,statMaxTotal5,statMaxTotal6,statMaxTotal7, \\\n                statMaxhp0,statMaxhp1,statMaxhp2,statMaxhp3,statMaxhp4,statMaxhp5,statMaxhp6,statMaxhp7, \\\n                statMaxhc0,statMaxhc1,statMaxhc2,statMaxhc3,statMaxhc4,statMaxhc5,statMaxhc6,statMaxhc7, \\\n                statMinDate0,statMinDate1,statMinDate2,statMinDate3,statMinDate4,statMinDate5,statMinDate6,statMinDate7, \\\n                statMinTotal0,statMinTotal1,statMinTotal2,statMinTotal3,statMinTotal4,statMinTotal5,statMinTotal6,statMinTotal7, \\\n                statMinhp0,statMinhp1,statMinhp2,statMinhp3,statMinhp4,statMinhp5,statMinhp6,statMinhp7, \\\n                statMinhc0,statMinhc1,statMinhc2,statMinhc3,statMinhc4,statMinhc5,statMinhc6,statMinhc7')\n\n\ndef randomString(stringLength=5):\n\t#Generate a random string of fixed length\n\tletters = string.ascii_lowercase\n\treturn ''.join(random.choice(letters) for i in range(stringLength))\n\ndef randomInt(intLength=100):\n\t#Generate a random int in a fixed range\n\treturn random.randint(1,intLength)\n\n\ndef convertDate(dateTraitement):\n\tday=datetime.strftime(dateTraitement, \"%d\")\n\tmonth = int(datetime.strftime(dateTraitement, \"%m\"))\n\tyear=datetime.strftime(dateTraitement, \"%Y\")\n\treturn day + \" \" + mois[month-1] + \" \" + year\n\n\ndef dateEnClair(dateTraitement):\n\tdateHier=dateTraitement + timedelta(-1)\n\treturn convertDate(dateHier)\n\n\ndef concatDifference(chaine,val, derniereVal, ecart):\n\tif val != \"\":\n\t\tif derniereVal != 0:\n\t\t\tecart = derniereVal - val\n\t\t\tif chaine == \"\":\n\t\t\t\tchaine=str(ecart)\n\t\t\telse:\n\t\t\t\tchaine=str(ecart)+\",\"+chaine\n\t\telse:\n\t\t\tecart = 0\n\t\tderniereVal = val\n\telse:\n\t\tderniereVal = derniereVal-(ecart*1.2)\n\t\tif chaine == \"\":\n\t\t\tchaine=str(ecart)\n\t\telse:\n\t\t\tchaine=str(ecart)+\",\"+chaine\n\n\treturn derniereVal, chaine, ecart\n\n\n#Parcourt les champs heure23..heure0 (du plus recent au plus ancien) via getattr\ndef getJour(s):\n\tecart = 0\n\tderniereVal = 0\n\tchaine=\"\"\n\tfor h in range(23, -1, -1):\n\t\tderniereVal, chaine, ecart = concatDifference(chaine, getattr(s, 'heure'+str(h)), derniereVal, ecart)\n\treturn chaine\n\n\n#Parcourt les champs heureHier23..heureHier0 via getattr\ndef getJourHier(s):\n\tecart = 0\n\tderniereVal = 0\n\tchaine=\"\"\n\tfor h in range(23, -1, -1):\n\t\tderniereVal, chaine, ecart = concatDifference(chaine, getattr(s, 'heureHier'+str(h)), derniereVal, ecart)\n\treturn chaine\n\n\n#Parcourt les champs jour30..jour1 via getattr\ndef getMois(s):\n\tecart = 0\n\tderniereVal = 0\n\tchaine=\"\"\n\tfor j in range(30, 0, -1):\n\t\tderniereVal, chaine, ecart = concatDifference(chaine, getattr(s, 'jour'+str(j)), derniereVal, ecart)\n\treturn chaine\n\n\n#Parcourt les champs jourUnAn30..jourUnAn1 via getattr\ndef getMoisAn(s):\n\tecart = 0\n\tderniereVal = 0\n\tchaine=\"\"\n\tfor j in range(30, 0, -1):\n\t\tderniereVal, chaine, ecart = concatDifference(chaine, getattr(s, 'jourUnAn'+str(j)), derniereVal, ecart)\n\treturn chaine\n\n\n# Retourne les valeurs max et min de la liste mensuelle\ndef getValueMax(liste):\n\tmaxValue=0\n\tminValue=99999999999\n\tfor element in liste:\n\t\tif int(element['diff'])>maxValue:\n\t\t\tmaxValue=int(element['diff'])\n\t\tif int(element['diff'])<minValue and int(element['diff'])!= 0:\n\t\t\tminValue=int(element['diff'])\n\treturn maxValue, minValue\n\n\n# Retourne un tableau avec juste le nom des douze derniers mois : fevrier, janvier, decembre, etc..\ndef getLabelMoisUniquement(liste):\n\tcpt=0\n\tnomMois=\"\"\n\t\n\tlastElements = liste[-12:]\n\tfor element in lastElements:\n\t\tnmois=mois[int(element['mo'])-1]\n\t\tnomMois=nomMois+\"'\"+nmois+\"',\"\n\t\tcpt=cpt+1\n\t\tif cpt >= 12:\n\t\t\tbreak\n\treturn 
nomMois\n\n\nclass SimpleClass(object):\n\tpass\n\n\n#Retourne les consommations annuelles d'octobre au mois courant\ndef getEstimatioAnnuelle(liste):\n\tmy_objects = []\n\t# On cherche jusqu'au mois courant \n\t# Donc si on est le 3 avril, on cherche jusqu'en mars\n\tcurrentMonth = (datetime.now().month)-1\n\tdiff=0\n\tan=0\n\t\n\tfor element in liste:\n\t\tnmois=int(element['mo'])\n\t\t# Octobre ? raz valeur\n\t\tif nmois==10:\n\t\t\tdiff = 0\n\t\t\tan = \"Oct. \"+element['an']\n\t\t#Ajoute la valeur\n\t\tdiff = diff+element['diff']\n\t\t# Mois courant ? on ajoute l'objet à la liste \n\t\tif nmois==currentMonth and an != 0:\n\t\t\tan = an + \"-\"+mois[nmois]+\" \"+element['an']\n\t\t\tx = SimpleClass()\n\t\t\tx.diff = diff\n\t\t\tx.an = an\n\t\t\tmy_objects.append(x)\n\t\t\tdiff=0\n\t\t\tan=0\n\t\n\treturn my_objects\n\n\n\n\n# Retourne les douze dernières valeures de l'année en cours \ndef getSommesAn(liste):\n\tcpt=0\n\tserieValeurs=\"\"\n\tnomMois=\"\"\n\ttotalAn = 0\n\tmini=9999999999\n\tmaxi=-1\n\t\n\tlastElements = liste[-12:]\n\tfor element in lastElements:\n\t\tserieValeurs=serieValeurs+str(element['diff'])+\",\"\n\t\tnmois=mois[int(element['mo'])-1]\n\t\tnomMois=nomMois+\"'\"+nmois+\" \"+element['an']+\"',\"\n\t\tconso=int(element['diff'])\n\t\ttotalAn=totalAn+conso\n\t\tif conso<mini:\n\t\t\tmini=conso\n\t\tif conso>maxi:\n\t\t\tmaxi=conso\n\n\t\tcpt=cpt+1\n\t\tif cpt >= 12:\n\t\t\tbreak\n\ttotalAnEuros=totalAn*tarifHB\n\treturn serieValeurs, nomMois, totalAn, totalAnEuros, mini, maxi\n\n\n\n# Retourne les douze dernières valeures de l'année precédente \ndef getSommesAnPrec(liste):\n\tcpt=0\n\tserieValeurs=\"\"\n\tnomMois=\"\"\n\ttotalAn = 0\n\tmini=9999999999\n\tmaxi=-1\n\t\n\tlastElements = liste[-24:]\n\tfor element in lastElements:\n\t\tserieValeurs=serieValeurs+str(element['diff'])+\",\"\n\t\tnmois=mois[int(element['mo'])-1]\n\t\tnomMois=nomMois+\"'\"+nmois+\" \"+element['an']+\"',\"\n\t\tconso=int(element['diff'])\n\t\ttotalAn=totalAn+conso\n\t\tif conso<mini:\n\t\t\tmini=conso\n\t\tif conso>maxi:\n\t\t\tmaxi=conso\n\n\t\tcpt=cpt+1\n\t\tif cpt >= 12:\n\t\t\tbreak\n\ttotalAnEuros=totalAn*tarifHB\n\treturn serieValeurs, nomMois, totalAn, totalAnEuros, mini, maxi\n\n\n\n# Retourne les douze dernières valeures de l'année precédente \ndef getSommesAnPrec2(liste):\n\tcpt=0\n\tserieValeurs=\"\"\n\tnomMois=\"\"\n\ttotalAn = 0\n\tmini=9999999999\n\tmaxi=-1\n\t\n\tlastElements = liste[-36:]\n\tfor element in lastElements:\n\t\tserieValeurs=serieValeurs+str(element['diff'])+\",\"\n\t\tnmois=mois[int(element['mo'])-1]\n\t\tnomMois=nomMois+\"'\"+nmois+\" \"+element['an']+\"',\"\n\t\tconso=int(element['diff'])\n\t\ttotalAn=totalAn+conso\n\t\tif conso<mini:\n\t\t\tmini=conso\n\t\tif conso>maxi:\n\t\t\tmaxi=conso\n\n\t\tcpt=cpt+1\n\t\tif cpt >= 12:\n\t\t\tbreak\n\ttotalAnEuros=totalAn*tarifHB\n\treturn serieValeurs, nomMois, totalAn, totalAnEuros, mini, maxi\n\n\n\ndef getminAnnee(s):\n\treturn removeFirstComma((str(s.minMois1)+\",\"+str(s.minMois2)+\",\"+str(s.minMois3)+\",\"+str(s.minMois4)+\",\"+str(s.minMois5)+\",\"+str(s.minMois6) \\\n\t\t +\",\"+str(s.minMois7)+\",\"+str(s.minMois8)+\",\"+str(s.minMois9)+\",\"+str(s.minMois10)+\",\"+str(s.minMois11)+\",\"+str(s.minMois12)).replace(\",,\", \"\"))\n\n\ndef getmaxAnnee(s):\n\treturn removeFirstComma((str(s.maxMois1)+\",\"+str(s.maxMois2)+\",\"+str(s.maxMois3)+\",\"+str(s.maxMois4)+\",\"+str(s.maxMois5)+\",\"+str(s.maxMois6) \\\n\t\t 
+\",\"+str(s.maxMois7)+\",\"+str(s.maxMois8)+\",\"+str(s.maxMois9)+\",\"+str(s.maxMois10)+\",\"+str(s.maxMois11)+\",\"+str(s.maxMois12)).replace(\",,\", \"\"))\n\n\ndef removeFirstComma(temp):\n\tif temp[0] == \",\":\n\t\treturn temp[1:]\n\treturn temp\n\n\ndef diffNombre(a, b):\n\ttry: \n\t\tx=int(a)\n\t\ty=int(b)\n\t\treturn x-y\n\texcept ValueError:\n\t\treturn 0\n\n\ndef calculIndicateurs(v, to, mi, ma, der):\n\ttot=to\n\tmini=mi\n\tmaxi=ma\n\tif v != \"\":\n\t\tif der != 0:\n\t\t\tecart = der - v\n\t\telse:\n\t\t\tecart = 0\n\t\tder = v\n\t\tif ecart>0:\n\t\t\ttot=to+ecart\n\t\t\tif ecart<mi:\n\t\t\t\tmini=ecart\n\t\t\tif ecart>ma:\n\t\t\t\tmaxi=ecart\n\treturn tot, mini, maxi, der\n\n\n\ndef getSommesMois(s):\n\ttotalMois = 0\n\tminMois = 9999999999999\n\tmaxMois = 0\n\tder=0\n\ttotalEuro=0\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour30, totalMois, minMois, maxMois,der)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour29, totalMois, minMois, maxMois,der)\n\tif s.jour30!= \"\" : totalEuro = totalEuro + ((s.jour30-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour28, totalMois, minMois, maxMois,der)\n\tif s.jour29!= \"\" : totalEuro = totalEuro + ((s.jour29-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour27, totalMois, minMois, maxMois,der)\n\tif s.jour28!= \"\" : totalEuro = totalEuro + ((s.jour28-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour26, totalMois, minMois, maxMois,der)\n\tif s.jour27!= \"\" : totalEuro = totalEuro + ((s.jour27-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour25, totalMois, minMois, maxMois,der)\n\tif s.jour26!= \"\" : totalEuro = totalEuro + ((s.jour26-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour24, totalMois, minMois, maxMois,der)\n\tif s.jour25!= \"\" : totalEuro = totalEuro + ((s.jour25-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour23, totalMois, minMois, maxMois,der)\n\tif s.jour24!= \"\" : totalEuro = totalEuro + ((s.jour24-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour22, totalMois, minMois, maxMois,der)\n\tif s.jour23!= \"\" : totalEuro = totalEuro + ((s.jour23-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour21, totalMois, minMois, maxMois,der)\n\tif s.jour22!= \"\" : totalEuro = totalEuro + ((s.jour22-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour20, totalMois, minMois, maxMois,der)\n\tif s.jour21!= \"\" : totalEuro = totalEuro + ((s.jour21-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour19, totalMois, minMois, maxMois,der)\n\tif s.jour20!= \"\" : totalEuro = totalEuro + ((s.jour20-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour18, totalMois, minMois, maxMois,der)\n\tif s.jour19!= \"\" : totalEuro = totalEuro + ((s.jour19-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour17, totalMois, minMois, maxMois,der)\n\tif s.jour18!= \"\" : totalEuro = totalEuro + ((s.jour18-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour16, totalMois, minMois, maxMois,der)\n\tif s.jour17!= \"\" : totalEuro = totalEuro + ((s.jour17-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour15, totalMois, minMois, maxMois,der)\n\tif s.jour16!= \"\" : totalEuro = totalEuro + 
((s.jour16-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour14, totalMois, minMois, maxMois,der)\n\tif s.jour15!= \"\" : totalEuro = totalEuro + ((s.jour15-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour13, totalMois, minMois, maxMois,der)\n\tif s.jour14!= \"\" : totalEuro = totalEuro + ((s.jour14-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour12, totalMois, minMois, maxMois,der)\n\tif s.jour13!= \"\" : totalEuro = totalEuro + ((s.jour13-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour11, totalMois, minMois, maxMois,der)\n\tif s.jour12!= \"\" : totalEuro = totalEuro + ((s.jour12-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs(s.jour10, totalMois, minMois, maxMois,der)\n\tif s.jour11!= \"\" : totalEuro = totalEuro + ((s.jour11-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs( s.jour9, totalMois, minMois, maxMois,der)\n\tif s.jour10!= \"\" : totalEuro = totalEuro + ((s.jour10-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs( s.jour8, totalMois, minMois, maxMois,der)\n\tif s.jour9!= \"\" : totalEuro = totalEuro + ((s.jour9-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs( s.jour7, totalMois, minMois, maxMois,der)\n\tif s.jour8!= \"\" : totalEuro = totalEuro + ((s.jour8-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs( s.jour6, totalMois, minMois, maxMois,der)\n\tif s.jour7!= \"\" : totalEuro = totalEuro + ((s.jour7-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs( s.jour5, totalMois, minMois, maxMois,der)\n\tif s.jour6!= \"\" : totalEuro = totalEuro + ((s.jour6-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs( s.jour4, totalMois, minMois, maxMois,der)\n\tif s.jour5!= \"\" : totalEuro = totalEuro + ((s.jour5-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs( s.jour3, totalMois, minMois, maxMois,der)\n\tif s.jour4!= \"\" : totalEuro = totalEuro + ((s.jour4-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs( s.jour2, totalMois, minMois, maxMois,der)\n\tif s.jour3!= \"\" : totalEuro = totalEuro + ((s.jour3-der)/1000*tarifHB)\n\ttotalMois, minMois, maxMois, der =calculIndicateurs( s.jour1, totalMois, minMois, maxMois,der)\t\n\tif s.jour2!= \"\" : totalEuro = totalEuro + ((s.jour2-der)/1000*tarifHB)\n\treturn totalMois, minMois, maxMois, totalEuro\n\n\ndef getSommesMoisAn(s):\n\ttotalMoisAn = 0\n\tminMoisAn = 9999999999999\n\tmaxMoisAn = 0\n\tder=0\n\ttotalEuro=0\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn30, totalMoisAn, minMoisAn, maxMoisAn,der)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn29, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn30!= \"\" : totalEuro = totalEuro + ((s.jourUnAn30-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn28, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn29!= \"\" : totalEuro = totalEuro + ((s.jourUnAn29-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn27, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn28!= \"\" : totalEuro = totalEuro + ((s.jourUnAn28-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn26, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn27!= \"\" : totalEuro = totalEuro + 
((s.jourUnAn27-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn25, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn26!= \"\" : totalEuro = totalEuro + ((s.jourUnAn26-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn24, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn25!= \"\" : totalEuro = totalEuro + ((s.jourUnAn25-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn23, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn24!= \"\" : totalEuro = totalEuro + ((s.jourUnAn24-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn22, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn23!= \"\" : totalEuro = totalEuro + ((s.jourUnAn23-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn21, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn22!= \"\" : totalEuro = totalEuro + ((s.jourUnAn22-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn20, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn21!= \"\" : totalEuro = totalEuro + ((s.jourUnAn21-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn19, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn20!= \"\" : totalEuro = totalEuro + ((s.jourUnAn20-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn18, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn19!= \"\" : totalEuro = totalEuro + ((s.jourUnAn19-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn17, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn18!= \"\" : totalEuro = totalEuro + ((s.jourUnAn18-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn16, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn17!= \"\" : totalEuro = totalEuro + ((s.jourUnAn17-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn15, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn16!= \"\" : totalEuro = totalEuro + ((s.jourUnAn16-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn14, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn15!= \"\" : totalEuro = totalEuro + ((s.jourUnAn15-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn13, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn14!= \"\" : totalEuro = totalEuro + ((s.jourUnAn14-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn12, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn13!= \"\" : totalEuro = totalEuro + ((s.jourUnAn13-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn11, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn12!= \"\" : totalEuro = totalEuro + ((s.jourUnAn12-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs(s.jourUnAn10, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn11!= \"\" : totalEuro = totalEuro + ((s.jourUnAn11-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs( s.jourUnAn9, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn10!= \"\" : totalEuro = totalEuro + ((s.jourUnAn10-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs( s.jourUnAn8, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif 
s.jourUnAn9!= \"\" : totalEuro = totalEuro + ((s.jourUnAn9-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs( s.jourUnAn7, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn8!= \"\" : totalEuro = totalEuro + ((s.jourUnAn8-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs( s.jourUnAn6, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn7!= \"\" : totalEuro = totalEuro + ((s.jourUnAn7-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs( s.jourUnAn5, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn6!= \"\" : totalEuro = totalEuro + ((s.jourUnAn6-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs( s.jourUnAn4, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn5!= \"\" : totalEuro = totalEuro + ((s.jourUnAn5-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs( s.jourUnAn3, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn4!= \"\" : totalEuro = totalEuro + ((s.jourUnAn4-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs( s.jourUnAn2, totalMoisAn, minMoisAn, maxMoisAn,der)\n\tif s.jourUnAn3!= \"\" : totalEuro = totalEuro + ((s.jourUnAn3-der)/1000*tarifHB)\n\ttotalMoisAn, minMoisAn, maxMoisAn, der =calculIndicateurs( s.jourUnAn1, totalMoisAn, minMoisAn, maxMoisAn,der)\t\n\tif s.jourUnAn2!= \"\" : totalEuro = totalEuro + ((s.jourUnAn2-der)/1000*tarifHB)\n\treturn totalMoisAn, minMoisAn, maxMoisAn, totalEuro\n\n\n\ndef getSommesJour(s):\n\ttotalJour = 0\n\tminJour = 9999999999999\n\tmaxJour = 0\n\tder=0\n\ttotalEuro=0\n\ttotalJour, minJour, maxJour, der =calculIndicateurs(s.heure23, totalJour, minJour, maxJour,der)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs(s.heure22, totalJour, minJour, maxJour,der)\n\tif s.heure23!= \"\" : totalEuro = totalEuro + ((s.heure23-der)/1000*tarifHC)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs(s.heure21, totalJour, minJour, maxJour,der)\n\tif s.heure22!= \"\" : totalEuro = totalEuro + ((s.heure22-der)/1000*tarifHC)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs(s.heure20, totalJour, minJour, maxJour,der)\n\tif s.heure21!= \"\" : totalEuro = totalEuro + ((s.heure21-der)/1000*tarifHP)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs(s.heure19, totalJour, minJour, maxJour,der)\n\tif s.heure20!= \"\" : totalEuro = totalEuro + ((s.heure20-der)/1000*tarifHP)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs(s.heure18, totalJour, minJour, maxJour,der)\n\tif s.heure19!= \"\" : totalEuro = totalEuro + ((s.heure19-der)/1000*tarifHP)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs(s.heure17, totalJour, minJour, maxJour,der)\n\tif s.heure18!= \"\" : totalEuro = totalEuro + ((s.heure18-der)/1000*tarifHP)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs(s.heure16, totalJour, minJour, maxJour,der)\n\tif s.heure17!= \"\" : totalEuro = totalEuro + ((s.heure17-der)/1000*tarifHP)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs(s.heure15, totalJour, minJour, maxJour,der)\n\tif s.heure16!= \"\" : totalEuro = totalEuro + ((s.heure16-der)/1000*tarifHP)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs(s.heure14, totalJour, minJour, maxJour,der)\n\tif s.heure15!= \"\" : totalEuro = totalEuro + ((s.heure15-der)/1000*tarifHP)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs(s.heure13, totalJour, minJour, maxJour,der)\n\tif s.heure14!= \"\" : totalEuro = totalEuro + ((s.heure14-der)/1000*tarifHP)\n\ttotalJour, minJour, maxJour, 
der =calculIndicateurs(s.heure12, totalJour, minJour, maxJour,der)\n\tif s.heure13!= \"\" : totalEuro = totalEuro + ((s.heure13-der)/1000*tarifHP)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs(s.heure11, totalJour, minJour, maxJour,der)\n\tif s.heure12!= \"\" : totalEuro = totalEuro + ((s.heure12-der)/1000*tarifHP)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs(s.heure10, totalJour, minJour, maxJour,der)\n\tif s.heure11!= \"\" : totalEuro = totalEuro + ((s.heure11-der)/1000*tarifHP)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs( s.heure9, totalJour, minJour, maxJour,der)\n\tif s.heure10!= \"\" : totalEuro = totalEuro + ((s.heure10-der)/1000*tarifHP)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs( s.heure8, totalJour, minJour, maxJour,der)\n\tif s.heure9!= \"\" : totalEuro = totalEuro + ((s.heure9-der)/1000*tarifHP)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs( s.heure7, totalJour, minJour, maxJour,der)\n\tif s.heure8!= \"\" : totalEuro = totalEuro + ((s.heure8-der)/1000*tarifHP)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs( s.heure6, totalJour, minJour, maxJour,der)\n\tif s.heure7!= \"\" : totalEuro = totalEuro + ((s.heure7-der)/1000*tarifHC)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs( s.heure5, totalJour, minJour, maxJour,der)\n\tif s.heure6!= \"\" : totalEuro = totalEuro + ((s.heure6-der)/1000*tarifHC)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs( s.heure4, totalJour, minJour, maxJour,der)\n\tif s.heure5!= \"\" : totalEuro = totalEuro + ((s.heure5-der)/1000*tarifHC)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs( s.heure3, totalJour, minJour, maxJour,der)\n\tif s.heure4!= \"\" : totalEuro = totalEuro + ((s.heure4-der)/1000*tarifHC)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs( s.heure2, totalJour, minJour, maxJour,der)\n\tif s.heure3!= \"\" : totalEuro = totalEuro + ((s.heure3-der)/1000*tarifHC)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs( s.heure1, totalJour, minJour, maxJour,der)\t\n\tif s.heure2!= \"\" : totalEuro = totalEuro + ((s.heure2-der)/1000*tarifHC)\n\ttotalJour, minJour, maxJour, der =calculIndicateurs( s.heure0, totalJour, minJour, maxJour,der)\n\tif s.heure1!= \"\" : totalEuro = totalEuro + ((s.heure1-der)/1000*tarifHC)\n\treturn totalJour, minJour, maxJour, totalEuro\n\n\n\ndef getSommesJourHier(s):\n\ttotalJourHier = 0\n\tminJourHier = 9999999999999\n\tmaxJourHier = 0\n\tder=0\n\ttotalEuroHier=0\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs(s.heureHier23, totalJourHier, minJourHier, maxJourHier,der)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs(s.heureHier22, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier23!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier23-der)/1000*tarifHC)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs(s.heureHier21, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier22!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier22-der)/1000*tarifHC)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs(s.heureHier20, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier21!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier21-der)/1000*tarifHP)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs(s.heureHier19, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier20!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier20-der)/1000*tarifHP)\n\ttotalJourHier, minJourHier, maxJourHier, der 
=calculIndicateurs(s.heureHier18, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier19!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier19-der)/1000*tarifHP)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs(s.heureHier17, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier18!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier18-der)/1000*tarifHP)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs(s.heureHier16, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier17!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier17-der)/1000*tarifHP)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs(s.heureHier15, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier16!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier16-der)/1000*tarifHP)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs(s.heureHier14, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier15!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier15-der)/1000*tarifHP)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs(s.heureHier13, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier14!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier14-der)/1000*tarifHP)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs(s.heureHier12, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier13!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier13-der)/1000*tarifHP)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs(s.heureHier11, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier12!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier12-der)/1000*tarifHP)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs(s.heureHier10, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier11!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier11-der)/1000*tarifHP)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs( s.heureHier9, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier10!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier10-der)/1000*tarifHP)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs( s.heureHier8, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier9!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier9-der)/1000*tarifHP)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs( s.heureHier7, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier8!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier8-der)/1000*tarifHP)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs( s.heureHier6, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier7!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier7-der)/1000*tarifHC)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs( s.heureHier5, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier6!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier6-der)/1000*tarifHC)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs( s.heureHier4, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier5!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier5-der)/1000*tarifHC)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs( s.heureHier3, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier4!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier4-der)/1000*tarifHC)\n\ttotalJourHier, minJourHier, maxJourHier, 
der =calculIndicateurs( s.heureHier2, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier3!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier3-der)/1000*tarifHC)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs( s.heureHier1, totalJourHier, minJourHier, maxJourHier,der)\t\n\tif s.heureHier2!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier2-der)/1000*tarifHC)\n\ttotalJourHier, minJourHier, maxJourHier, der =calculIndicateurs( s.heureHier0, totalJourHier, minJourHier, maxJourHier,der)\n\tif s.heureHier1!= \"\" : totalEuroHier = totalEuroHier + ((s.heureHier1-der)/1000*tarifHC)\n\treturn totalJourHier, minJourHier, maxJourHier, totalEuroHier\n\n\n\n#Retourne l'echelle pour le graphique semaine\n#Cela devrait ressembler a qq chose comme \"\\\"Lu.\\\", \\\"Ma.\\\", \\\"Me.\\\", ...\ndef getjourSemaine(d, s):\n\toccur = s.count(',')\n\tchaine=\"\"\n\tif occur>0:\n\t\t#Tableau des jours de 0 a 6\n\t\tsemaine=['Lu','Ma','Me','Je','Ve','Sa','Di']\n\t\tmaDate=d\n\t\tpos = maDate.weekday()\n\t\tposd = maDate.day\n\t\tchaine=\"\\\"\"+semaine[pos]+\" \"+str(posd)+\"\\\"\"\n\t\tfor _ in range(occur,0,-1):\n\t\t\tmaDate = maDate + timedelta(-1)\n\t\t\tpos = maDate.weekday()\n\t\t\tposd = maDate.day\n\t\t\tif pos < 0:\n\t\t\t\tpos=6\n\t\t\tchaine=\"\\\"\"+semaine[pos]+\" \"+str(posd)+\"\\\",\"+chaine\n\n\treturn chaine\n\n\n# Retourne deux séries avec les années et les valeurs \ndef getJuillet(serie):\n\tserieValeurs=\"\"\n\tnomAnnees=\"\"\n\tfor element in serie:\n\t\tif int(element['diff']) != 0:\n\t\t\tserieValeurs=serieValeurs+str(int(element['diff']))+\",\"\n\t\t\tnomAnnees=nomAnnees+\"'\"+element['an']+\"',\"\n\n\treturn nomAnnees, serieValeurs\n\n\n\n#Retourne l'echelle pour le graphique mois\n#Pour plus de lisibilité, un jour sur quatre est affiché avec inter\n#Cela devrait ressembler a qq chose comme \"4 fev.\", \"\", \"\", \"\", \"8 fev\"\ndef getjourMois(d, s):\n\toccur = s.count(',')\n\t#Tableau des mois de 0 a 11\n\tmois=['Ja.','Fe.','Ma.','Av.','Ma.','Ju.','Ji.','Ao.','Se.','Oc.','No.','De.']\n\t#1. nom des mois precedents\n\tmaDate=d+timedelta(-1)\n\tpos = maDate.day\n\tposm = maDate.month-1\n\tchaine=\"\\\"\"+str(pos)+\" \"+mois[posm]+\"\\\"\"\n\tinter=1\n\tfor _ in range(occur,0,-1):\n\t\tinter+=1\n\t\tmaDate = maDate + timedelta(-1)\n\t\tpos = maDate.day\n\t\tposm = maDate.month-1\n\t\tif inter==4:\n\t\t\tchaine=\"\\\"\"+str(pos)+\" \"+mois[posm]+\"\\\",\"+chaine\n\t\t\tinter=1\n\t\telse:\n\t\t\tchaine=\"\\\"\\\",\"+chaine\n\t\n\t#Si moins d'une annee, on complete avec les mois suivants\n\toccur = s.count(',')\n\tmaDate=d\n\tinter=1\n\tfor _ in range(30-occur):\n\t\tinter+=1\n\t\tmaDate = maDate + timedelta(1)\n\t\tpos = maDate.day\n\t\tposm = maDate.month-1\n\t\tif inter==4:\n\t\t\tchaine=chaine+\",\\\"\"+str(pos)+\" \"+mois[posm]+\"\\\"\"\n\t\t\tinter=1\n\t\telse:\n\t\t\tchaine=chaine+\",\\\"\\\"\"\n\n\treturn chaine\n\n\n\n#Retourne l'echelle pour le graphique annee\n#Cela devrait ressembler a qq chose comme \"\\\"J.\\\", \\\"F.\\\", \\\"M.\\\", ...\ndef getnomMois(d, s):\n\toccur = s.count(',')\n\t#Tableau des mois de 0 a 11\n\tmois=['Ja.','Fe.','Ma.','Av.','Ma.','Ju.','Ji.','Ao.','Se.','Oc.','No.','De.']\n\n\t#1. 
nom des mois precedents\n\tpos = d.month-1\n\tchaine=\"\\\"\"+mois[pos]+\"\\\"\"\n\tfor _ in range(occur,0,-1):\n\t\tpos = pos - 1\n\t\tif pos < 0:\n\t\t\tpos=11\n\t\tchaine=\"\\\"\"+mois[pos]+\"\\\",\"+chaine\n\n\t#Si moins d'une annee, on complete avec les mois suivants\n\toccur = s.count(',')\n\tpos = d.month-1\n\tfor _ in range(12-occur):\n\t\tpos = pos + 1\n\t\tif pos >11:\n\t\t\tpos=0\n\t\tchaine=chaine+\",\\\"\"+mois[pos]+\"\\\"\"\n\n\treturn chaine\n\n\ndef isfloat(value):\n\ttry:\n\t\tfloat(value)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False\n\n\n\ndef format_int (s):\n\tb = int(s);\n\treturn '{:,}'.format(b).replace(',', ' ')\n\n\ndef convertir_euro (s, heure):\n\tb = int(s);\n\tif heure<7 or heure>21:\n\t\treturn b*tarifHC/1000\n\telse:\n\t\treturn b*tarifHP/1000\n" }, { "alpha_fraction": 0.7797356843948364, "alphanum_fraction": 0.7819383144378662, "avg_line_length": 27.375, "blob_id": "a69137e80254ee5df787a2b7f5df40f0a5d81ca4", "content_id": "31422f0b75319287fe5fa5a0dd513181e636b186", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 454, "license_type": "no_license", "max_line_length": 70, "num_lines": 16, "path": "/scripts/twitter/piTweet.py", "repo_name": "lcapdecomme/raspberry", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys\nfrom twython import Twython\nCONSUMER_KEY = 'xxxxxxxxxxxxxxxxxxxxxxxxx'\nCONSUMER_SECRET = 'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy'\nACCESS_KEY = 'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz'\nACCESS_SECRET = 'wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww'\n\napi = Twython(CONSUMER_KEY,CONSUMER_SECRET,ACCESS_KEY,ACCESS_SECRET) \n\n#api.update_status(status=sys.argv[1])\nmessage=\"\"\nfor line in sys.stdin:\n message = message + line\n\napi.update_status(status=message)\n" }, { "alpha_fraction": 0.7939394116401672, "alphanum_fraction": 0.7975757718086243, "avg_line_length": 36.45454406738281, "blob_id": "99927d97b8903492cc678fbb3d0614d6bfb4c7da", "content_id": "505b71164fc94d14988b45154e4403cee164eacc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 836, "license_type": "no_license", "max_line_length": 155, "num_lines": 22, "path": "/scripts/temperatures/README.md", "repo_name": "lcapdecomme/raspberry", "src_encoding": "ISO-8859-1", "text": "# Scripts temperatures\n\n\n## listTemperature.py \n\nCe script se connecte à la bd mongo sur mLab et parcours tous les enregistrements pour retrouver les températures courantes, mini. et maxi. pour une sonde.\n\n\n## voirTemperature.py\n\nCe script python interroge les sondes connectées au raspeberryPi et affiche les températures instantannées.\n\n\n## sauveTemperature.py\n\nCe script ajoute les relevés des sondes en base puis calcule le bilan et les statistiques par sonde. Ce script va donc : \n\n1. Ajouter un enregistrement contenant les relevés des sondes dans la collection **MONGODB_COLLECTION**\n\n2. Insérer un enregistrement bilan par sonde dans la collection **MONGODB_COLLECTION_BILAN** après l'avoir vider\n\n3. Insérer un enregistrement statistique par sonde dans la collection **MONGODB_COLLECTION_STAT** après l'avoir vider\n\n" } ]
14
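The teleinfo script embedded in the record above builds its daily and hourly indicators by hand-unrolling the same `calculIndicateurs` / `totalEuro` pair once per slot (`jour30` down to `jour1`, `heure23` down to `heure0`). A minimal loop-based sketch of the same month aggregation follows; it reuses the file's own `calculIndicateurs`, while the function name `sommes_mois_loop`, the `tarif_hb` parameter (a stand-in for the script's global `tarifHB`), and the `getattr`-driven attribute lookup are illustrative, not part of the repo:

```python
# Sketch: equivalent of getSommesMois driven by a loop instead of 30
# hand-unrolled statements. Assumes the record object still exposes
# s.jour1 .. s.jour30 and that calculIndicateurs comes from the script above.
def sommes_mois_loop(s, tarif_hb):
    total, mini, maxi, der = 0, 9999999999999, 0, 0
    total_euro = 0
    # jour30 is consumed first, exactly as in the unrolled version
    total, mini, maxi, der = calculIndicateurs(s.jour30, total, mini, maxi, der)
    for n in range(29, 0, -1):
        total, mini, maxi, der = calculIndicateurs(
            getattr(s, 'jour%d' % n), total, mini, maxi, der)
        suivant = getattr(s, 'jour%d' % (n + 1))
        if suivant != "":
            # same billing rule as the unrolled code: the next day's index
            # minus the last retained reading, Wh -> kWh, at the HB tariff
            total_euro += (suivant - der) / 1000 * tarif_hb
    return total, mini, maxi, total_euro
```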
Swati5140/concrete_compressive_strength
https://github.com/Swati5140/concrete_compressive_strength
6898cb21b0170c3746b5e259f0d8da38475b77b0
f8c3a965e6dfffcc32435f6e7bbe695309231db0
030588a348500da51d9d2cb699ec3197aae0cb2c
refs/heads/main
2023-08-15T17:52:58.860258
2021-10-05T07:36:28
2021-10-05T07:36:28
412,376,256
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6042137145996094, "alphanum_fraction": 0.6072234511375427, "avg_line_length": 24.037734985351562, "blob_id": "623dc947466b4846ab95f05b8c90590ca2877062", "content_id": "e837ac55a40a1a178c537e757b168c7340e2a595", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1329, "license_type": "no_license", "max_line_length": 111, "num_lines": 53, "path": "/home.py", "repo_name": "Swati5140/concrete_compressive_strength", "src_encoding": "UTF-8", "text": "import streamlit as st\nfrom PIL import Image\nimport base64\nimport pandas as pd\nimport numpy as np\n\n\n\ndef main():\n img = Image.open(\"web_logo1.png\")\n st.set_page_config(page_title='Concrete Compressive Strength Prediction App', page_icon=img, layout='wide')\n\n hide_menu_style = \"\"\"\n <style>\n #MainMenu {visibility:hidden; }\n footer {visibility:hidden;}\n </style>\n \"\"\"\n st.markdown(hide_menu_style, unsafe_allow_html=True)\n\n st.error(\"Application in progress....coming soon...!!!\")\n\n st.title(\"Concrete Compressive Strength Prediction App\")\n image=Image.open(\"concrete_png.png\")\n st.image(image,use_column_width=True)\n\n st.write(\"\"\"\n This App predicts the Compressive Strength of Concrete, reducing the wait time, labour and human error.\n\n **Python Libraries Used : ** streamlit, pandas, numpy, sklearn\n \n \n \"\"\")\n st.write(\"**Dataset : ** Data is obtained from Kaggle.\")\n\n df = pd.read_csv('concrete.csv',sep=';')\n\n def convert_df(df):\n return df.to_csv().encode('utf-8')\n \n csv = convert_df(df)\n\n st.download_button(\n \"Download Data as CSV\",\n csv,\n \"cement_data.csv\",\n \"text/csv\",\n key='concrete-cement-compressive-strength'\n )\n\n\nif __name__ == '__main__':\n main() \n\n" } ]
1
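`home.py` above re-encodes the dataframe on every Streamlit rerun before handing the bytes to `st.download_button`. A small sketch of the commonly documented variant, assuming a Streamlit release of the app's 2021 vintage that still ships the `st.cache` decorator:

```python
import pandas as pd
import streamlit as st

@st.cache  # memoize: the CSV bytes are rebuilt only when df changes
def convert_df(df: pd.DataFrame) -> bytes:
    return df.to_csv().encode('utf-8')
```

`st.download_button` then receives the cached bytes unchanged, so repeated widget interactions no longer pay the serialization cost.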
ron1n101/hw_5
https://github.com/ron1n101/hw_5
79a079a16f71df441248fa1e76a0edee1627f370
2bbc04907405e046f8e01f3c4c2c83d71137aecc
8f9bd55d894a2e5bbc12f855f498510321ff271d
refs/heads/master
2023-03-02T07:21:45.575386
2021-02-08T09:58:05
2021-02-08T09:58:05
331,645,709
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.5922118425369263, "alphanum_fraction": 0.6084111928939819, "avg_line_length": 27.660715103149414, "blob_id": "d491ae10db7e111fb4580dce8575faf6e6cc846e", "content_id": "5ad61e52356d089a86d8ad487ee599d1e860fb89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3675, "license_type": "no_license", "max_line_length": 106, "num_lines": 112, "path": "/app.py", "repo_name": "ron1n101/hw_5", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask import render_template\nfrom faker import Faker\nimport requests\nimport csv\nfrom base58 import b58decode, b58encode\nfrom tabulate import tabulate\napp = Flask (__name__)\n\n\[email protected]('/')\[email protected]('/index/', methods=['GET', 'POST'])\ndef index():\n return render_template('index.html')\n\n\"\"\"\n1. Возвращать содержимое файла с Python пакетами (requirements.txt)\n\"\"\"\[email protected]('/requirements/', methods=['GET', 'POST'])\ndef requirements():\n with open('requirements.txt') as File:\n data = File.read()\n requirements=[]\n for word in data.split():\n requirements.append(word)\n return render_template('requirements.html', requirements=requirements)\n\n\"\"\"\n4. Вывести количество космонавтов, находящихся в настоящий момент на орбите\n\"\"\"\n\[email protected]('/cosmo/', methods = ['GET', 'POST'])\ndef cosmo():\n r = requests.get('http://api.open-notify.org/astros.json')\n count = (r.json()[\"number\"])\n return render_template('cosmo.html', count=count)\n\n\n\n\n\"\"\"\n3.Вернуть значения среднего роста (в сантиметрах) и среднего веса (в килограммах)\nНеобходимые данные расположены в файле hw05.csv\nАнализировать файл hw.csv нужно при каждом вызове\n\"\"\"\[email protected]('/file_csv/', methods = ['GET', 'POST'])\ndef file_csv():\n with open('hw05.csv', newline='') as csvfile:\n reader = csv.reader(csvfile, skipinitialspace=True, delimiter= ',')\n middle_height = 0\n middle_weight = 0\n count_str = 0\n i = 0\n\n for row in reader:\n if count_str == 0:\n count_str += 1\n \n else:\n try:\n middle_height += round(float(row[1]))\n middle_weight += round(float(row[2]))\n i += 1\n except IndexError:\n break\n \n middle_height = round(middle_height/i, 2)\n middle_weight = round(middle_weight/i ,2)\n return render_template('file_csv.html', middle_height=middle_height, middle_weight=middle_weight, i=i)\n\n\"\"\"\n5. Закодировать входную строку `STRING` в формате base58\n\"\"\"\[email protected]('/base58/<string:s>/', methods= ['GET', 'POST'])\ndef base58(s):\n \n if ' ' in s:\n return '<h1>В этом запросе не должно быть пробелов</h1>'\n else:\n return b58encode(s)\n \n\n\n\"\"\"\n6. Преобразовать строку `STRING_IN_BASE58` в формате *base58* в исходную строку\n\"\"\"\[email protected]('/base58decode/<string:s>/', methods= ['GET', 'POST'])\ndef base58decode(s):\n if ' ' in s:\n return '<h1>В этом запросе не должно быть пробелов</h1>'\n else:\n return b58decode(s)\n \n\n\"\"\"\n2. 
Вывести `XX` случайно сгенерированных пользователей (имя и почту)\n\"\"\"\n\[email protected]('/generate-users/<int:users>', methods = [\"GET\", \"POST\"])\ndef generate_users(users):\n if users == 0:\n return '<h1>Вы ввели неправильный запрос!</h1> Попробуйте ещё раз.'\n users_data = []\n users_info = ['name', 'email']\n fake = Faker('en_US')\n for i in range(users):\n users_data.append([fake.name(),fake.email()])\n table = tabulate(users_data, users_info, tablefmt='grid')\n return f'<pre>{table}</pre>'\n\nif __name__ == '__main__':\n app.run(debug=True)\n" }, { "alpha_fraction": 0.46107783913612366, "alphanum_fraction": 0.6856287717819214, "avg_line_length": 14.904762268066406, "blob_id": "8c2201f03180983e4a2174eae397a57d8c839669", "content_id": "2082a58de4a4175e09b32b2ee9b5abfc782055e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 334, "license_type": "no_license", "max_line_length": 22, "num_lines": 21, "path": "/requirements.txt", "repo_name": "ron1n101/hw_5", "src_encoding": "UTF-8", "text": "base58==2.1.0\ncertifi==2020.12.5\nchardet==4.0.0\nclick==7.1.2\nFaker==5.6.5\nflake8==3.8.4\nFlask==1.1.2\nidna==2.10\nitsdangerous==1.1.0\nJinja2==2.11.2\nMarkupSafe==1.1.1\nmccabe==0.6.1\npycodestyle==2.6.0\npyflakes==2.2.0\npython-dateutil==2.8.1\nrequests==2.25.1\nsix==1.15.0\ntabulate==0.8.7\ntext-unidecode==1.3\nurllib3==1.26.2\nWerkzeug==1.0.1\n" }, { "alpha_fraction": 0.7698924541473389, "alphanum_fraction": 0.7806451320648193, "avg_line_length": 34.75384521484375, "blob_id": "b677b981259cc78ba2da186067cbf1f6fc75a537", "content_id": "742b12a5e3c123c4962254481c5fced8a9e69f43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3674, "license_type": "no_license", "max_line_length": 119, "num_lines": 65, "path": "/README.md", "repo_name": "ron1n101/hw_5", "src_encoding": "UTF-8", "text": "# Мануал по работе с проектом\nДля того что бы начать работать с данным проектом, вам необходимо:\n\n - Проверить установлен ли у вас Гит (если нет, то нужно установить)\n - Установить Python\n - Войти в виртуальное окружение\n - Установить requirements.txt (файл со всеми библиотеками, без которых проект не будет работать)\n - \n\n## Установка Гит (Ubuntu/Windows)\n\n*Ubuntu*\n**Проверяем, установлен ли Гит**: \nВводим в терминал `git --version`\nЕсли установлен, то терминал нам об этом сообщит `git version 2.25.1`\n\n**Установка Гита**\n1. В терминале пишем `sudo apt install git` и гит будет установлен.\n\n\n## Windows\n\nДля установки Гита на Windows, вам необходимо перейти https://git-scm.com/\nСкачать файл, и выполнить то, что от вас просит установщик\n\n## Работа с репозиторием\nДля того, что бы начать работать с репозиторием, вам нужно получить к нему доступ.\nЛОГИЧНО.\nДля того что бы получить доступ к нему, вам нужно в Терминале прописать:\n\n $ git clone https://github.com/ron1n101/hw_5.git\n\nРепозиторий установлен\n\n\n## Python\n## Ubuntu\nЧто бы скачать Python на *Ubuntu* вам необходимо прописать в терминале:\n\n sudo apt install python3.8\n\n## Windows\nЧто бы скачать Python на Windows вам необходимо перейти на официальный сайт Python:\n\n [Официальный сайт Python](https://www.python.org/downloads/)\nНажать Download, дождаться загрузки и начать установку.\nВезде ставьте галочки, и не забудьте путь куда устанавливаете Python\n# VirtualVenv\n## Ubuntu\nДля установки установки Виртуального окружения, вам необходимо прописать в Bash(Терминал):\n1. 
Устанавливаем PIP: `sudo apt install python3-pip`\n2. Устанавливаем Виртуальное окружение: `python3 -m venv \"Название вашего Виртуального Окружение, только без ковычек\"`\n3. Активация: `source название вашего виртуального окружения/bin/activate`\n4. Деактивация `deactivate`\n## Windows\nДля установки установки Виртуального окружения, вам необходимо прописать в Bash(Терминал):\n1. Установка `pip install virtualenv`\n2. Устанавливаем Виртуальное окружение: `python3 -m venv \"Название вашего Виртуального Окружение, только без ковычек\"`\n3. Активация: `source название вашего виртуального окружения/bin/activate\n4. Деактивация `deactivate`\n## Установка необходимых библиотек\n\n pip3 install -r requirements.txt\n## Для запуска работы репозитория \nПропишите в Терминале `python3 app.py`\n\n" }, { "alpha_fraction": 0.4576271176338196, "alphanum_fraction": 0.6779661178588867, "avg_line_length": 18, "blob_id": "bceb5d3e71b503e55b118e5b5c55b4b260ea74ba", "content_id": "67ee58cd8888c8f96a663304f176f91ab6247783", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 59, "license_type": "no_license", "max_line_length": 25, "num_lines": 3, "path": "/.flake8", "repo_name": "ron1n101/hw_5", "src_encoding": "UTF-8", "text": "[flake8]\nmax-line-length = 120\nignore = E501, E226, E251 " } ]
4
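In `app.py` above, `/base58decode/` passes the path segment straight to `base58.b58decode`, which raises `ValueError` on characters outside the base58 alphabet (such as `0`, `O`, `I`, `l`), turning such requests into a 500. A minimal self-contained sketch of a guarded variant; the error message and the 400 status choice are illustrative only:

```python
from flask import Flask
from base58 import b58decode

app = Flask(__name__)

@app.route('/base58decode/<string:s>/', methods=['GET', 'POST'])
def base58decode(s):
    try:
        return b58decode(s)
    except ValueError:
        # invalid base58 input -> explicit 400 instead of a server error
        return '<h1>Not a valid base58 string</h1>', 400
```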
tyang123/DAT9-Homework
https://github.com/tyang123/DAT9-Homework
6613fcd90dbeb4b50753bc0165cff489953651e0
451590a3e66e5882d0d0db674f3996a780566806
a215ae40337895985f2dbece774c10074cd5c895
refs/heads/master
2021-01-10T02:26:53.440389
2015-11-20T21:55:22
2015-11-20T21:55:22
43,380,712
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7179049253463745, "alphanum_fraction": 0.7526330947875977, "avg_line_length": 51.238807678222656, "blob_id": "199763a86cadacba6e3fcad5276b9bbd5752d419", "content_id": "5aa78de60a3ce3547276ad09270890fe9015a4a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3513, "license_type": "no_license", "max_line_length": 635, "num_lines": 67, "path": "/HW1 9-29-2015.md", "repo_name": "tyang123/DAT9-Homework", "src_encoding": "UTF-8", "text": "## Mr. Tim Yang's Data Science Homework\n## Due 9/29/2015\n\nThis document outlines responses for homework 1. \n\n###1a.\nTo look at table rows and columns, I typed:\n\n Fork https://github.com/vybstat/dat9 into the tyang123 GitHub directory\n cd gitFolder\n git clone https://github.com/tyang123/dat9\n cd dat9\n cd data\n head chipotle.tsv\n\nThe Chipotle File is a table that collects data on customer order history. Column 1 represents the customer's order number, and there may be two rows with the same order_id representing two items on one receipt. Quantity is just the # of items for that row, item_name is the Chipotle menu item, and choice_description lists all of the condiments for the menu item. Each row is a single food item category. For example, If I was the 30th customer and I ordered 2 burritos and 1 drink, the dataset would list two rows. Row 1 would have order_id 30, item_name burrito, quantity 2. Row 2 would have order_id30, item_name drink, quantity 1.\n\n###1b. There are 1834 orders, since the largest number in column \"order_id\" is 1834. I retrieved the last 10 rows of the file by typing:\n in /c/users/Yang/gitFolder/dat9/data directory:\n sort chipotle.tsv\n tail chipotle.tsv\n \n###1c. There are 4623 lines in the file. To retrieve the document word count and row count, I typed:\n in /c/users/Yang/gitFolder/dat9/data directory:\n \n sort chipotle.tsv\n wc chipotle.tsv\n 4623 55838 369600 chipotle.tsv\n 4623 is the number of rows, which is the row count.\n \n \n###1d. There are 591 chicken burritos, and only 386 steak burritos. Therefore, chicken is more popular. I used command line in Git to find the sum of the quantity with the phrase \"chicken burrito\" and \"steak burrito\", I typed:\n\ngrep \"Steak Burrito\" chipotle.tsv>SteakBurritoOnly.tsv\nsort SteakBurritoOnly.tsv\nwc SteakBurritoOnly\n\ngrep \"Chicken Burrito\" chipotle.tsv>chickenBurritoOnly.tsv\nsort chickenBurritoOnly.tsv\nwc chickenBurritoOnly\n\nThese word counts helped me sum up the total quantity and report my answer. I do not know how to sum up a column, but if I did, I would use a sum(column2) function to get me my correct results.\n\n###1e. To bifurcate the chicken burrito population into \"black beans\", and \"pinto beans\", I grepped the chickenBurritoOnly file by \"Black Bean\" and \"Pinto Bean\". There are 282 rows with \"Black Beans\" and only 105 with \"Pinto Beans\", so I believe black beans are more popular. \n\ngrep \"Black Beans\" chickenBurritoOnly.tsv>chickenBurritoBlackBean.tsv\nwc chickenBurritoBlackBean.tsv\n282 4430 30828 chickenBurritoBlack Bean.tsv\n\ngrep \"Pinto Beans\" chickenBurritoOnly.tsv>chickenBurritoPintoBean.tsv\nwc chickenBurritoPintoBean.tsv\n105 1761 12411 chickenBurritoPinto.tsv\n\n###2. 
To count the number of occurences of the word \"Dictionary\" in the DAT9 repo, I first Git cloned the REPO to my local directory:\n\nFork https://github.com/vybstat/dat9 into the tyang123 GitHub directory\n cd gitFolder\n git clone https://github.com/tyang123/dat9\n cd .\n \nThen, I wrote a recurive grep command to recursively search through the entire dat9 directory for the word \"dictionary\". I included the -r and -i command to ignore case:\n\ngrep -ri dictionary dat9\n\nThe results were there are two instances:\ndat9/project/README.md: * **Data Dictionary (aka code book): description of each variable, including units.\ndat9/README.md: \"Count the number of occurences of the word dictionary across all files in the DAT9 repo. \n \n\n\n\n" }, { "alpha_fraction": 0.7528089880943298, "alphanum_fraction": 0.7865168452262878, "avg_line_length": 43.5, "blob_id": "a4ea94ba39426c37cc4e888e7c2f46d8e0214d7d", "content_id": "4fb35519326389ac60ce8e69fe805a9976a81492", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 89, "license_type": "no_license", "max_line_length": 72, "num_lines": 2, "path": "/README.md", "repo_name": "tyang123/DAT9-Homework", "src_encoding": "UTF-8", "text": "# DAT9-Homework\nTim Yang's DAT9 Repository for storing Homework - General Assembly DAT9 DC\n" }, { "alpha_fraction": 0.6824856400489807, "alphanum_fraction": 0.6928069591522217, "avg_line_length": 26.243478775024414, "blob_id": "9759eed7bee64ff1640e586e1cc9c2cbf75f0c45", "content_id": "0949e1075d7bdb809b39fe472261bf649c706ad5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9398, "license_type": "no_license", "max_line_length": 111, "num_lines": 345, "path": "/09_web_scraping TY 10-20-2015.py", "repo_name": "tyang123/DAT9-Homework", "src_encoding": "UTF-8", "text": "'''\nHomework: Class 9 Web Scraping Optional\nName: Tim Yang\nClass: DC DAT9\nDate: Due 10-22-2015\nLASS: Web Scraping with Beautiful Soup\n\nWhat is web scraping?\n- Extracting information from websites (simulates a human copying and pasting)\n- Based on finding patterns in website code (usually HTML)\n\nWhat are best practices for web scraping?\n- Scraping too many pages too fast can get your IP address blocked\n- Pay attention to the robots exclusion standard (robots.txt)\n- Let's look at http://www.imdb.com/robots.txt\n\nWhat is HTML?\n- Code interpreted by a web browser to produce (\"render\") a web page\n- Let's look at example.html\n- Tags are opened and closed\n- Tags have optional attributes\n\nHow to view HTML code:\n- To view the entire page: \"View Source\" or \"View Page Source\" or \"Show Page Source\"\n- To view a specific part: \"Inspect Element\"\n- Safari users: Safari menu, Preferences, Advanced, Show Develop menu in menu bar\n- Let's inspect example.html\n'''\n\n# read the HTML code for a web page and save as a string\nimport requests\nurl = r'https://raw.githubusercontent.com/ga-students/DAT-DC-9/master/data/example.html?token=AG8aPhA-GT-gTGX3iXysiHnk2BLKJmttks5WLS2rwA%3D%3D'\nr = requests.get(url)\n\nprint r\n\n# convert HTML into a structured Soup object\nfrom bs4 import BeautifulSoup\nb = BeautifulSoup(r.text)\n\n\n# print out the object\nprint b\nprint b.prettify()\n\n# 'find' method returns the first matching Tag (and everything inside of it)\nb.find(name=\"title\")\n# Tags allow you to access the 'inside text'\nb.find(name=\"title\").text \n#u stored as unicode. 
ignore this.\n# Tags also allow you to access their attributes\nb.find(name='h1')['id']\n# 'find_all' method is useful for finding all matching Tags\nresults=b.find_all(name='p')\ntype(results)\n\n# ResultSets can be sliced like lists\n\nfor result in results:\n print result, '\\n'\n\n#len(b.find_all(name='p'))\n#b.find_all(name='p')[0]\n#b.find_all(name='p')[0].text\n#b.find_all(name='p')[0]['id']\n\n# iterate over a ResultSet\n#specify the one that I care about.\n b.find(name='p', attrs={'id':'scraping'}).text\n \n# limit search by Tag attribute\n\n# limit search to specific sections\n\n'''\nEXERCISE ONE\n'''\n\n# find the 'h2' tag and then print its text\nb.find(name='h2')\n\n# find the 'p' tag with an 'id' value of 'reproducibility' and then print its text\nb.find(name='p', attrs = {'id':\"reproducibility\"})\n\n# find the first 'p' tag and then print the value of the 'id' attribute\nb.find(name='p')['id']\n\n# print the text of all four li tags\nliTags=b.find_all(\"li\") #unclear what you want text on, loop through .text\n\nfor result in liTags:\n print result.text\n\n\n# print the text of only the API resources\n\napiResults=b.find_all(attrs = {'id':\"api\"}) #unordered list.\n\nfor result in apiResults:\n print result.text\n\n#########Alternate Answer###################\n\n#results= b.find(name='ul', attrs={'id':'api'}).find_all\n\n#for result in results:\n# print result.text\n\n#results stores all the ul's that have ID=API\nresults = b.find(name=\"ul\", attrs = {'id':'api'})\n\n#Results Next finds all the LI in the ID=API Unordered List\nresults_next = results.find_all('li')\n\nfor result in results_next:\n print result.text\n\n'''\nScraping the IMDb website\n'''\n\n# get the HTML from the Shawshank Redemption page\nr = requests.get('http://www.imdb.com/title/tt0111161/')\n\n# convert HTML into Soup\nb = BeautifulSoup(r.text)\nprint b\n\n# run this code if you have encoding errors\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\n\n# get the title\nb.find(name='span', attrs={'class': 'itemprop'}).text\n\n# get the star rating (as a float)\n#b.find(name='span', attrs={'itemprop': 'ratingValue'})\n\nrating= b.find(name='div', attrs={'class': 'titlePageSprite'}).text\nfloat(rating)\n\n'''\nEXERCISE TWO\n'''\n\n# get the description\n#two imprisoned men\n\nb.find(name='p', attrs={'itemprop': 'description'})\n\n# get the content rating\n#rated R\n\ntitleTable=b.find(name='td', attrs={'id': 'overview-top'})\nmenuBar=titleTable.find(name=\"div\", attrs={\"class\":\"infobar\"})\nmenuBar.find(name=\"meta\", attrs={\"itemprop\":\"contentRating\"})[\"content\"]\n\n\n\n# get the duration in minutes (as an integer)\n#duration 142\n\nb.find(name=\"time\", attrs={\"itemprop\":\"duration\"})\n\n\n\n#in(b.find(name='time', attrs={'itemprop':'duration'}) ['datetime'][2:-1])\n\n\n\n'''\nOPTIONAL WEB SCRAPING HOMEWORK\n\nFirst, define a function that accepts an IMDb ID and returns a dictionary of\nmovie information: title, star_rating, description, content_rating, duration.\nThe function should gather this information by scraping the IMDb website, not\nby calling the OMDb API. 
(This is really just a wrapper of the web scraping\ncode we wrote above.)\n\nFor example, get_movie_info('tt0111161') should return:\n\n{'content_rating': 'R',\n 'description': u'Two imprisoned men bond over a number of years...',\n 'duration': 142,\n 'star_rating': 9.3,\n 'title': u'The Shawshank Redemption'}\n\nThen, open the file imdb_ids.txt using Python, and write a for loop that builds\na list in which each element is a dictionary of movie information.\n\nFinally, convert that list into a DataFrame.\n'''\n\n# define a function that accepts an IMDb ID and returns a dictionary of movie information\ndef get_movie_info(imdb_id):\n r = requests.get('http://www.imdb.com/title/' + imdb_id + '/')\n b = BeautifulSoup(r.text)\n info = {}\n info['title'] = b.find(name='span', attrs={'class':'itemprop', 'itemprop':'name'}).text\n info['star_rating'] = float(b.find(name='span', attrs={'itemprop':'ratingValue'}).text)\n info['description'] = b.find(name='p', attrs={'itemprop':'description'}).text.strip()\n info['content_rating'] = b.find(name='meta', attrs={'itemprop':'contentRating'})['content']\n info['duration'] = int(b.find(name='time', attrs={'itemprop':'duration'}).text.strip()[:-4])\n return info\n\n# test the function\n get_movie_info('tt0372784') #batman Begins\n get_movie_info('tt0120815') #Saving Private Ryan\n\n# open the file of IDs (one ID per row), and store the IDs in a list\nimdbIDList = []\n\nfrom time import sleep\n\nimport csv\nwith open('C:\\Users\\Brittany\\DAT9-class\\WEEK5\\DAT-DC-9\\data\\imdb_ids.txt', 'rU') as inputIMDBFile:\n imdbReader=csv.reader(inputIMDBFile,delimiter='\\n')\n \n for row in imdbReader:\n imdbIDList.append(row)\n print(row)\n sleep(2)\n\n# get the information for each movie, and store the results in a list\nmovieInfoList= []\n\nfor row in imdbIDList:\n #print row[0] + str(get_movie_info(row[0]))\n movieInfoList.append(get_movie_info(row[0]))\n #print get_movie_info(row[0]) \n \n# check that the list of IDs and list of movies are the same length\nprint len(imdbIDList)\nprint len(movieInfoList)\n\n# convert the list of movies into a DataFrame\nimport pandas as pd\nmovie_cols = ['title', 'star_rating', 'description', 'content_rating', 'duration']\n#movieDF=pd.Series(movieInfoList)\n\nmoviesDF=pd.DataFrame(movieInfoList,columns=movie_cols)\n\nmoviesDF[\"title\"]\nmoviesDF[\"description\"]\nmoviesDF[\"duration\"]\n\n\n'''\nAnother IMDb example: Getting the genres\n'''\n\n# read the Shawshank Redemption page again\nget_movie_info('tt0111161')\n\n# only gets the first genre\nimdb_id = \"tt0111161\"\nr = requests.get('http://www.imdb.com/title/' + imdb_id + '/')\nb = BeautifulSoup(r.text)\n \ngenreAll=b.find_all(name='div', attrs={'itemprop':'genre'})\ngenre1 = genreAll[0].find(name='a').text\n\nprint genre1\n \n# gets all of the genres\nfor item in genreAll:\n print item.text\n\n# stores the genres in a list\ngenreList=[]\nfor item in genreAll:\n #print item.text\n genreList.append(item.text)\n\nprint genreList\n\n'''\nAnother IMDb example: Getting the writers\n'''\n\n# attempt to get the list of writers (too many results)\n\n\nwriterList=b.find_all(name='div', attrs={'itemprop':'creator'})\n\nprint writerList\n\n# limit search to a smaller section to only get the writers\n\n\n\n#Not sure where this is found\n\n\n'''\nAnother IMDb example: Getting the URLs of cast images\n'''\n\n# find the images by size\n\nimageList=b.find_all(name='img', attrs={'height':'44', 'width':'32'})\n\nfor row in imageList:\n print row[\"alt\"]\n \n# check that the number of 
results matches the number of cast images on the page\ncastList=b.find_all(name='img', attrs={'height':'44', 'width':'32'})\n\n\n# iterate over the results to get all URLs\n\n'''\n\n\n\nUseful to know: Alternative Beautiful Soup syntax\n'''\n\n# read the example web page again\nurl = r'https://raw.githubusercontent.com/ga-students/DAT-DC-9/master/data/example.html?token=AG8aPhA-GT-gTGX3iXysiHnk2BLKJmttks5WLS2rwA%3D%3D'\nr = requests.get(url)\n\n# convert to Soup\nb = BeautifulSoup(r.text)\n\n# these are equivalent\nb.find(name='p') # normal way\nb.find('p') # 'name' is the first argument\nb.p # can also be accessed as an attribute of the object\n\n# these are equivalent\nb.find(name='p', attrs={'id':'scraping'}) # normal way\nb.find('p', {'id':'scraping'}) # 'name' and 'attrs' are the first two arguments\nb.find('p', id='scraping') # can write the attributes as arguments\n\n# these are equivalent\nb.find(name='p', attrs={'class':'topic'}) # normal way\nb.find('p', class_='topic') # 'class' is special, so it needs a trailing underscore\nb.find('p', 'topic') # if you don't name it, it's assumed to be the class\n\n# these are equivalent\nb.find_all(name='p') # normal way\nb.findAll(name='p') # old function name from Beautiful Soup 3\nb('p') # if you don't name the method, it's assumed to be find_all" }, { "alpha_fraction": 0.6118001341819763, "alphanum_fraction": 0.6346354484558105, "avg_line_length": 36.32900619506836, "blob_id": "df6014ee3ebf1c4508a403b2ef6fadd45eec6091", "content_id": "47fdf850fd0e430f21235ae44de562eac1382498", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8629, "license_type": "no_license", "max_line_length": 122, "num_lines": 231, "path": "/homework3.py", "repo_name": "tyang123/DAT9-Homework", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 29 20:42:33 2015\nClass: DC Data Science DAT9\nAssignment: Chipotle Python Data Class #3\nDate: due October 1, 2015\nAuthor: Tim Yang\nGithub: tyang123\nemail: [email protected]\nAddress: 8th Floor, 1133 15th St NW, Washington, DC 20005\n\"\"\"\n\n\"\"\"************************************************************\nBASIC LEVEL PART 1: Read in the data with csv.reader() and \nstore it in a list of lists called 'data'. \nHint: This is a TSV file, and csv.reader() needs to be told \nhow to handle it. https://docs.python.org/2/library/csv.html\nOutcome: List of lists store in 'data'\n************************************************************\"\"\"\n\n#import csv File from relative /data directory, then process tsv file by delineating on '\\t'\nimport csv\nwith open('DAT-DC-9/data/chipotle.tsv', 'rb') as inputFile:\n tsvReader=csv.reader(inputFile,delimiter='\\t')\n\n #data is the full list. 
itemRow is one row stored in a list.\n #the counter will be used to store the first row in a separate header list\n data=[]\n itemRow=[]\n counter=0\n \n #iterate by row in TSVreader, and store each row into a list, then append to the chipotleData List\n for row in tsvReader:\n itemRow=row\n data.append(itemRow)\n print itemRow\n \n\"\"\"************************************************************\n\"BASIC LEVEL PART 2: Separate the header and data into two different lists.\nAnswer: Outcome: header stored in \"headerRow', data stored in \"chipotleData\"\n************************************************************\"\"\"\n\n#import csv File from relative /data directory, then process tsv file by delineating on '\\t'\nimport csv\nwith open('DAT-DC-9/data/chipotle.tsv', 'rb') as inputFile:\n tsvReader=csv.reader(inputFile,delimiter='\\t')\n\n #chipotleData is the full dictionary. itemRow is one row stored in a list.\n #the counter will be used to store the first row in a separate header list\n chipotleData=[]\n headerRow=[]\n itemRow=[]\n counter=0\n \n #iterate by row in TSVreader, and store each row into a list, then append to the chipotleData List\n for row in tsvReader:\n if counter == 0:\n headerRow=row\n counter=counter+1\n else:\n itemRow=row\n chipotleData.append(itemRow)\n #print itemRow\n\n#confirms the Header is a separate list\nprint headerRow\n#confirms the Chipotle Data is 4623-1=4622 items long\nprint len(chipotleData)\nprint chipotleData\n \n\"\"\"************************************************************\n\"INTERMEDIATE LEVEL PART 3: Calculate the average price of an order. \nHint: Examine the data to see if the 'quantity' column is relevant \nto this calculation. Hint: Think carefully about the simplest way \nto do this!\nOutcome: $39,237 Total OrderDollars / 1,834 orders = $21.39 dollars/order\n************************************************************\"\"\"\n\n#Calculate Largest Order ID number \nfor rowItem in chipotleData:\n if int(rowItem[0])>maxOrderID:\n maxOrderID=int(rowItem[0])\n print \"Row Item\" + rowItem[0] + \"Max ID\" + str(maxOrderID)\n\nprint maxOrderID\n \n#Iterate from 1 to maxOrderID=1833, total up order price for each orderID\norderTotal = 10000\norderTotal = 0\norderPriceList=[]\n\n# for each order K in the datafile\nfor k in range(maxOrderID+1):\n #we total up the price*quantity for each orderID in the dataset\n for row in chipotleData:\n if int(row[0])==k:\n #Order Total is the sum of the price*quantity for all matching orderIDs\n orderTotal= orderTotal + float(row[1]) * float(row[4][1:50])\n print \"for order#\"+str(k) + \"quantity: \" + row[1] + \"price: \"+ row[4] + \"Total order price:\" + str(orderTotal)\n \n #for a given k, write the orderID and final Order Total to a new list\n orderPriceList.append([k,orderTotal]) \n #reset Order Total to 0 for the next orderID\n orderTotal=0\n \n# Print out sum of order Totals, then divide by # of Orders\n TotalOrderRevenue=10000\n TotalOrderRevenue=0\n \n for orderRow in orderPriceList:\n TotalOrderRevenue =TotalOrderRevenue + orderRow[1]\n \n AverageOrderCost=TotalOrderRevenue/len(orderPriceList)\n print AverageOrderCost\n #Answer is $39,237.02/1,834 orders = $21.39 dollars/order\n \n \n\"\"\"************************************************************\nINTERMEDIATE LEVEL PART 4: Create a list (or set) of all unique sodas and soft \ndrinks that they sell. 
Note: Just look for 'Canned Soda' and 'Canned Soft Drink',\nand ignore other drinks like 'Izze’.\nOutcomes: Set with list of sodas, 9 in total: \n\nLemonade, Dr. Pepper, Diet Coke, Nestea, Mountain Dew, Diet Dr. Pepper, \nCoke, Coca Cola, Sprite\n************************************************************\"\"\"\n\n#Create Blank list of sodas for the soft drink\n sodaList=[]\n\n#iterate across entire dataset\nfor row in chipotleData:\n #if item name has has Canned Soda or Soft Drink\n if row[2]==\"Canned Soda\" or row[2] == \"Canned Soft Drink\":\n #Append the Choice Description to the soda List\n sodaList.append(row[3])\n\n#Run a set, which takes the unique non-duplicate values into a sodaList\nprint set(sodaList)\n\n#Answer: Lemonade, Dr. Pepper, Diet Coke, Nestea, Mountain Dew, Diet Dr. Pepper, \n#Coke, Coca Cola, Sprite\n\n\n\"\"\"************************************************************\nADVANCED LEVEL PART 5: Calculate the average number of toppings per burrito. \nNote: Let's ignore the 'quantity' column to simplify this task. \nHint: Think carefully about the easiest way to count the number of toppings!\nOutcome: ToppingFrequency List contains list of orderID and Count of Toppings.\n6,323 Toppings Total\n1,172 Burritos\n-------------------------\n5.395 Toppings/Burrito\n************************************************************\"\"\"\n#Create Blank list of sodas for the soft drink\ntoppingFrequencyList=[]\n\n#iterate across entire dataset\nfor row in chipotleData:\n #if item name contains Burrito\n if \"Burrito\" in row[2]:\n #Append OrderID, Topping String, and # of Commas for Topping Count\n toppingFrequencyList.append([row[0], row[3],row[3].count(\",\")])\n #print \"orderID: \"+ row[0] + \" toppings: \" + row[3] + \" # toppings: \" + str(row[3].count(\",\")+1)\n\n#Parse through the Topping List to accurately extract the count of toppings\ntotalToppings=0.00\naverageToppingsPerOrder=0.00\n\nfor row in toppingFrequencyList:\n #Count of toppings equals # of commas + 1\n totalToppings=totalToppings+row[2]+1\n \n\naverageToppingsPerOrder=totalToppings/len(toppingFrequencyList)\nprint totalToppings #There are 6,323 toppings used\nprint str(len(toppingFrequencyList)) #There are 1172 burritos\nprint averageToppingsPerOrder #Average of 5.395 topping/burrito\n\n\"\"\"************************************************************\nADVANCED LEVEL PART 6: Create a dictionary in which the keys represent \nchip orders and the values represent the total number of orders. \nExpected output: {'Chips and Roasted Chili-Corn Salsa': 18, ... } \nNote: Please take the 'quantity' column into account! 
\nOptional: Learn how to use 'defaultdict' to simplify your code.\nResult: Dictionary chipDict with chip category as key, order total as value.\nAnswers:\n('Chips and Roasted Chili-Corn Salsa', 18)\n('Chips and Mild Fresh Tomato Salsa', 1)\n('Chips and Tomatillo-Red Chili Salsa', 25)\n('Chips and Guacamole', 506)\n('Chips and Fresh Tomato Salsa', 130)\n('Side of Chips', 110)\n('Chips and Tomatillo-Green Chili Salsa', 33)\n('Chips and Tomatillo Red Chili Salsa', 50)\n('Chips and Roasted Chili Corn Salsa', 23)\n('Chips', 230)\n('Chips and Tomatillo Green Chili Salsa', 45)\n************************************************************\"\"\"\n\n\n#Create Blank list of chips and order types\n chipNameList=[]\n\n#iterate across entire dataset\nfor row in chipotleData:\n #if item name contains Chip, add to dataset\n if \"Chips\" in row[2]:\n #Append the Choice Description to the chip list\n chipNameList.append(row[2])\n\n#Run a set, which takes the unique non-duplicate values into a ChipList\n#print chipNameList\nprint set(chipNameList)\n\nchipTypeCount=0\nchipList = []\n\nfor item in set(chipNameList):\n for row in chipotleData:\n if row[2] == item:\n chipTypeCount=chipTypeCount+int(row[1])\n #print \"item: \"+str(row[2])+\" quantity: \"+str(row[1])+\" running chipTotal: \"+str(chipTypeCount)\n\n chipList.append([item,chipTypeCount])\n chipTypeCount=0 \n\nchipDict=dict(chipList)\n \nfor p in chipDict.items():\n print p\n " } ]
4
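Part 6 of the homework file above mentions `defaultdict` as an optional simplification but builds the chip totals with a nested loop over `set(chipNameList)`. A minimal single-pass sketch, assuming `chipotleData` keeps the row layout used throughout the file (quantity in column 1, item_name in column 2):

```python
from collections import defaultdict

# One pass: missing keys start at 0, so no pre-built set of names is needed
chip_counts = defaultdict(int)
for row in chipotleData:
    if 'Chips' in row[2]:
        chip_counts[row[2]] += int(row[1])

for item, total in chip_counts.items():
    print(item, total)
```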
1000ship/powerSugang
https://github.com/1000ship/powerSugang
b96435cd594c87698fdb8a7212a61af6bbd7d58a
a76ef3c59f3dc7e2df31280520073c008b819dd7
a9feaa9ce89a68e6ab7b3a1305247c31c07cff8e
refs/heads/master
2023-03-12T22:25:49.290439
2021-02-21T05:06:15
2021-02-21T05:06:15
200,389,738
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6216505765914917, "alphanum_fraction": 0.6591640114784241, "avg_line_length": 29.064516067504883, "blob_id": "03c6d88724f914965791523075cf66fd8bf87324", "content_id": "574951e0ba703bc715c63efe3326197cdfce9253", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1703, "license_type": "no_license", "max_line_length": 82, "num_lines": 31, "path": "/README.md", "repo_name": "1000ship/powerSugang", "src_encoding": "UTF-8", "text": "# Power Sugang\n> 크롬 버전에 따라서 셀레니움 chromedriver 버전을 바꿔야할 수 있으니 미리 대비\n\n- superpower.py : 수강신청용, 학교 수강신청 웹서버 시간이 00분 00초가 되는 순간, 겁나 빠르게 수강신청 버튼을 눌러버림\n- superpower_old.py : 네이비즘 활용하는 구버전 (성능딸림,, 백업용으로 남겨둠)\n- jupjup.py : 설정한 수업에서 빈 자리가 생기면 겁나 빠르게 주워 담음\n- getsuup.py : 수업 종류 설정하고 들어가면 jupjup.py에서 쓸 수 있게 수업 리스트를 뽑아줌\n- checkComputerSpeed.py : 누가 만들어놓은 수강신청 클릭 연습 사이트인데, 이걸로 클릭속도가 얼마나 빠른지 체크해볼 수 있었다.\n\n## 사용방법\n- 루트(/)에 me.key 텍스트 파일을 만들고 첫 줄에 아이디 둘째 줄에 패스워드 적어둔다. 그리고 원하는거 실행\n\n ```\n B611202\n [비밀번호]\n ```\n\n- selenium, requests, bs4 라이브러리 설치 필요\n\n## 이력\n- 2019-1 수강신청 똥망해버리고 멘탈이 나가서 개발 시작, jupjup.py로 초전박살난 시간표 심폐소생 성공 (거의 이국종 명의님의 기적)\n- 2019-2 All Clear\n- 2020-1 All Clear\n - All Clear 3명, 추가신청 2명 성공\n - (버그발견) 실패 1명 (ㅈㅅ;;), 서버 시간이 밀려 수강신청 기간 아니라는 에러메시지 발생 -> 완벽히 수정\n- 2020-2 All Clear\n - All Clear 총 4명 (재수강 알아서;)\n - 뭔지 모르겠는데 학교엣 주최하는 수강신청 선착순 이벤트에 당첨돼서 문화상품권 받음\n \n\n<img alt=\"야 너두 올클할 수 있어\" src=\"./_readme/youcandoit.png\"/>\n\n" }, { "alpha_fraction": 0.5513326525688171, "alphanum_fraction": 0.6221619248390198, "avg_line_length": 31.677419662475586, "blob_id": "cd3c6e877062d4dbbe096e02e049dcb6ae1be137", "content_id": "75cbd0b2a4842b72f22f64f5236bb9a9886052e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5076, "license_type": "no_license", "max_line_length": 168, "num_lines": 124, "path": "/getsuup.py", "repo_name": "1000ship/powerSugang", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom bs4 import BeautifulSoup\n\ndriver = webdriver.Chrome(\"driver/chromedriver\")\ndriver.get(\"https://sugang.hongik.ac.kr/\")\nkey = open('me.key','r').read().split('\\n')\nmyid=key[0]\nmypw=key[1]\n\ndriver.find_element_by_name(\"p_userid\").send_keys(key[0])\ndriver.find_element_by_name(\"p_passwd\").send_keys(key[1])\ndriver.find_element_by_xpath(\"/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[1]/table[1]/tbody/tr[2]/td/table/tbody/tr[4]/td/input\").click()\n\n#####################\n# 과목별 수강신청버튼\ndriver.find_element_by_xpath(\"/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[1]/table[2]/tbody/tr[1]/td/table/tbody/tr[4]/td/li/a\").click()\n\n#ComputerEngineering\n# 3학년\ndriver.find_element_by_xpath('/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[3]/div[2]/table/tbody/tr/td/table[2]/tbody/tr[7]/td[2]/form/select/option[4]').click()\n# 4학년\n# driver.find_element_by_xpath('/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[3]/div[2]/table/tbody/tr/td/table[2]/tbody/tr[7]/td[2]/form/select/option[5]').click()\nlst = [\n # 4, # HCI윈도우즈프로그래밍 이혜영1 화3,목23\n # 6, # HCI윈도우즈프로그래밍 이혜영1 화5,수89\n # 8, # 알고리즘분석 정균락 월6,화6,수6\n # 10, # 알고리즘분석 정균락 월7,화9,수7\n # 12, # 알고리즘분석 정균락 월9,화7,수9\n # 14, # 알고리즘분석 하란 월2,화2,수2\n # 16, # 알고리즘분석 하란 월3,화3,수3\n # 18, # 컴퓨터구조 권건우 화2,수2,목2\n # 20, # 컴퓨터구조 권건우 화3,수3,목3\n # 22, # 컴퓨터구조 권건우 화5,수5,목5\n # 24, # 컴퓨터구조 최윤화 수9,목8,금8\n # 26, # 컴퓨터구조 최윤화 수10,목9,금9\n # 28, # 프로그래밍언어론 표창우 월6,화6,수4\n # 30, # 프로그래밍언어론 표창우 월7,화5,수3\n # 32, # 프로그래밍언어론 송하윤 월89,금6\n # 34, # 프로그래밍언어론 송하윤 화89,금8\n # 36, # 
프로그래밍언어론 송하윤 수89,금9\n    # 38, # 컴퓨터네트워크 심영철 월2,수2,목2\n    # 40, # 컴퓨터네트워크 심영철 월3,수3,목3\n    # 42, # 컴퓨터네트워크 심영철 월6,수6,목6\n]\nlst = [\n    #4, # 시스템프로그래밍 김선일 월7,화67\n    #6, # 시스템프로그래밍 김선일 월6,목67\n    #8, # 인공지능 박준 월8,목89\n    #10, # 인공지능 박준 월9,수78\n    #12, # 소프트웨어공학 김경창 월7,화67\n    #14, # 소프트웨어공학 김경창 월6,목67\n    #16, # 소프트웨어공학 김한규 월6,수56\n    #18, # 소프트웨어공학 김한규 월7,수78\n    #20, # 응용데이터베이스 김경창 월3,화3,목3\n    #22, # 네트워크보안 박준철 월8,화8,금8\n    #24, # 네트워크보안 박준철 월9,화9,금9\n]\n\n#MSC Math\n#driver.find_element_by_xpath('/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[3]/div[2]/table/tbody/tr/td/table[2]/tbody/tr[8]/td[1]/a').click()\nlst = [\n    #84, # 이산수학 하란 월23,수2\n    #86, # 이산수학 하란 화23,수3\n    #88, # 이산수학 하란 월78,수5\n    90, # 이산수학 이준용 월89,수2\n    #92, # 이산수학 이준용 화89,수3\n]\n\n#MSC Computer\n# driver.find_element_by_xpath('/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[3]/div[2]/table/tbody/tr/td/table[2]/tbody/tr[10]/td[1]/a').click()\nlst = [\n    32, # MATLAB프로그래밍및실습 조수현 수789\n]\n\n#GY Society\n# driver.find_element_by_xpath('/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[3]/div[2]/table/tbody/tr/td/table[2]/tbody/tr[4]/td[2]/a').click()\nlst = [\n    #42, # 직업과취업 유건재 화67\n    #44, # 직업과취업 유건재 금67\n]\n\n#GY Basic\n#driver.find_element_by_xpath('/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[3]/div[2]/table/tbody/tr/td/table[2]/tbody/tr[3]/td[1]/a').click()\n\n#GY Inmun\n# driver.find_element_by_xpath('/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[3]/div[2]/table/tbody/tr/td/table[2]/tbody/tr[3]/td[2]/a').click()\nlst = [\n    #112, # 인문학초청강연 권경민 금234\n]\n\n#GY Hack\n# driver.find_element_by_xpath('/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[3]/div[2]/table/tbody/tr/td/table[2]/tbody/tr[5]/td[1]/a').click()\nlst = [\n    #112, # 인문학초청강연 권경민 금234\n]\n\n#Optional\n#input('Press Enter after selecting')\n\n\n#####################\n\ntableHtml = driver.find_element_by_xpath('/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[3]/div[2]/form/table/tbody/tr/td/table').get_attribute('innerHTML')\nbs = BeautifulSoup(tableHtml, 'html.parser')\nlst = bs.find_all('tr')\nfor idx, i in enumerate(lst):\n    tds = i.find_all('td')\n    if len(tds) < 4:\n        continue\n\n    bias = 0 # the basic liberal-arts page shifts every column by one, so this corrects the offset\n    tmp = tds[4+bias].find('a')\n    if tmp is not None:\n        tds[4+bias] = tds[4+bias].find('a')\n\n    print('#', end='')\n    print(idx+1, end=', # ')\n    print(tds[4+bias].contents[0],end=' ')\n    print(tds[9+bias].contents[0],end=' ')\n    print(tds[10+bias].contents[0],end=' ')\n    print()\n\n\ndriver.close()\n" }, { "alpha_fraction": 0.5057423114776611, "alphanum_fraction": 0.565253734588623, "avg_line_length": 36.13178253173828, "blob_id": "7c8aafe38dc869b7ef4632a0060e28e09f003c4c", "content_id": "6e638b90f578c2ea78ef506cbd391e6fdb5041da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5689, "license_type": "no_license", "max_line_length": 143, "num_lines": 129, "path": "/jupjup.py", "repo_name": "1000ship/powerSugang", "src_encoding": "UTF-8", "text": "from selenium import webdriver\n\ndriver = webdriver.Chrome( "driver/chromedriver" )\ndriver.get(\"https://sugang.hongik.ac.kr/\")\nkey = 
open('me.key','r').read().split('\\n')\nmyid=key[0]\nmypw=key[1]\n\ndriver.find_element_by_name(\"p_userid\").send_keys(key[0])\ndriver.find_element_by_name(\"p_passwd\").send_keys(key[1])\ndriver.find_element_by_xpath(\"/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[1]/table[1]/tbody/tr[2]/td/table/tbody/tr[4]/td/input\").click()\ndriver.find_element_by_xpath(\"/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[1]/table[2]/tbody/tr[1]/td/table/tbody/tr[4]/td/li/a\").click()\n\n\ndef powerPick(xpath_list):\n    isGotcha = False\n    for num in xpath_list:\n        xpath = '/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[3]/div[2]/form/table/tbody/tr/td/table/tbody/tr[{}]'.format(num)\n        subject = driver.find_element_by_xpath(xpath).find_elements_by_tag_name(\"input\")\n        for e in subject:\n            if e.get_attribute(\"type\") == \"checkbox\":\n                e.click()\n                isGotcha = True\n    if isGotcha:\n        driver.find_element_by_xpath(\"/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[3]/div[2]/form/input\").click()\n        count = 0\n        while True:\n            try:\n                driver.switch_to.alert.accept()\n                count += 1\n            except:\n                if count == len(xpath_list):\n                    return True\n                return False\n\ndef powerSearch( xpath, lst ):\n    try:\n        driver.find_element_by_xpath(\n            \"/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[1]/table[2]/tbody/tr[1]/td/table/tbody/tr[4]/td/li/a\").click()\n        driver.find_element_by_xpath( xpath ).click()\n        _result = powerPick(lst)\n    except:\n        print(\"xpath error, retry\")\n        return powerSearch( xpath, lst )\n    return _result\n\n############## DATA ################\n\n#ComputerEngineering\nce = '/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[3]/div[2]/table/tbody/tr/td/table[2]/tbody/tr[7]/td[2]/form/select/option[5]'\nce_list = [\n    # 4, # 자료구조및프로그래밍 이기철, 월23,수2,금2\n    # 6, # 자료구조및프로그래밍 이기철, 화23,수3,금3\n    # 8, # 자료구조및프로그래밍 송하윤, 월89,금56\n    # 10, # 자료구조및프로그래밍 송하윤, 목89,금89\n    # 12, # 자료구조및프로그래밍 배성일, 화56,목56\n    14, # 자료구조및프로그래밍 배성일, 화89,목89\n    # 16, # 논리회로설계및실험 이준용, 월23,화23\n    # 18, # 어셈블리언어및실습 박도순 월3,화3,목3\n    # 20, # 어셈블리언어및실습 박도순 월6,화6,목6\n    22, # 어셈블리언어및실습 박도순 월7,화7,목7\n    # 24, # 어셈블리언어및실습 표창우 월6,화6,금6\n    # 26, # 어셈블리언어및실습 표창우 월7,화7,금7\n    # 28, # 데이터통신 심영철 월2,수2,목2\n    # 30, # 데이터통신 심영철 월6,수6,목6\n    ]\n\nmath = '/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[3]/div[2]/table/tbody/tr/td/table[2]/tbody/tr[8]/td[1]/a'\nmath_list = [\n    #84, # 이산수학 하란 월23,수2\n    #86, # 이산수학 하란 화23,수3\n    #88, # 이산수학 하란 월78,수5\n    90, # 이산수학 이준용 월89,수2\n    #92, # 이산수학 이준용 화89,수3\n    ]\n\ncomputer = '/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[3]/div[2]/table/tbody/tr/td/table[2]/tbody/tr[10]/td[1]/a'\ncomputer_list = [\n    #32, # MATLAB프로그래밍및실습 조수현 수789\n    ]\n\nsociety = '/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[3]/div[2]/table/tbody/tr/td/table[2]/tbody/tr[4]/td[2]/a'\nsociety_list = [\n    42, # 자기이해와진로탐색 송인숙 목23\n    ]\n\n#GY Basic\nbasic = '/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[3]/div[2]/table/tbody/tr/td/table[2]/tbody/tr[3]/td[1]/a'\nbasic_list = [\n    244,238,240,236\n]\n\ninmun = '/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[3]/div[2]/table/tbody/tr/td/table[2]/tbody/tr[3]/td[2]/a'\ninmun_list = [\n    112\n]\n\n#route = [(society, society_list)]\n# route = [ (inmun, inmun_list)]\n# route = [ ('/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[3]/div[2]/table/tbody/tr/td/table[2]/tbody/tr[5]/td[1]/a',[20,22]),\n# (ce, [12,14])]\nroute = [('/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[3]/div[2]/table/tbody/tr/td/table[2]/tbody/tr[7]/td[2]/form/select/option[4]',\n          [#8, # 알고리즘분석 정균락 월6,화6,수6\n           #10, # 알고리즘분석 정균락 월7,화9,수7\n           #12, # 알고리즘분석 정균락 
월9,화7,수9\n           #14, # 알고리즘분석 하란 월2,화2,수2\n           #16, # 알고리즘분석 하란 월3,화3,수3\n           #18, # 컴퓨터구조 권건우 화2,수2,목2\n           #20, # 컴퓨터구조 권건우 화3,수3,목3\n           #22, # 컴퓨터구조 권건우 화5,수5,목5\n           #24, # 컴퓨터구조 최윤화 수9,목8,금8\n           #26, # 컴퓨터구조 최윤화 수10,목9,금9\n           #28, # 프로그래밍언어론 표창우 월6,화6,수4\n           #30, # 프로그래밍언어론 표창우 월7,화5,수3\n           #32, # 프로그래밍언어론 송하윤 월89,금6\n           #34, # 프로그래밍언어론 송하윤 화89,금8\n           #36, # 프로그래밍언어론 송하윤 수89,금9\n           ])]\n\n########### EXECUTE ###############\nwhile len(route) > 0:\n    for idx, (xpath, lst) in enumerate(route):\n        isDone = powerSearch(xpath, lst)\n        if isDone:\n            print(\"Something is added\")\n            route.pop(idx)\nprint('all clear')\ndriver.close()" }, { "alpha_fraction": 0.6224837303161621, "alphanum_fraction": 0.6547256112098694, "avg_line_length": 32.64102554321289, "blob_id": "5ecefb1c471176cf9d861a8a64f32b7505f816c3", "content_id": "24ccbd0907504a6ae1b5afd1c1290bbcfcfdafdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1450, "license_type": "no_license", "max_line_length": 148, "num_lines": 39, "path": "/superpower_old.py", "repo_name": "1000ship/powerSugang", "src_encoding": "UTF-8", "text": "from selenium import webdriver\n\n#myid = input(\"ID: \")\n#mypw = input(\"PW: \")\n#mode = int(input(\"Select mode (enter 0 or 1) (0 = test: fires on '00 sec' alone) (1 = live: fires on '00 min 00 sec') : \"))\nkey = open('me.key','r').read().split('\\n')\nmyid=key[0]\nmypw=key[1]\nmode = 0\n\nif mode:\n    print(\"Live mode\")\nelse:\n    print(\"Test mode\")\n\nnavDriver = webdriver.Chrome('driver/chromedriver')\nnavDriver.set_window_size(500,500)\ndriver = webdriver.Chrome('driver/chromedriver')\ndriver.set_window_size(700,700)\n\nprint('Opening the registration page..')\ndriver.get('http://sugang.hongik.ac.kr')\ndriver.find_element_by_name('p_userid').send_keys(myid)\ndriver.find_element_by_name('p_passwd').send_keys(mypw)\ndriver.find_element_by_xpath('/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[1]/table[1]/tbody/tr[2]/td/table/tbody/tr[4]/td/input').click()\nprint('Opening Navyism..')\nnavDriver.get('https://time.navyism.com/?host=sugang.hongik.ac.kr')\n\nprint('Setup done, standing by')\n\nwhile True:\n    arr = navDriver.find_element_by_id('time_area').text.split()\n    # '00초' / '00분' are the seconds/minutes labels scraped from the Navyism page\n    if arr[5] == \"00초\" and (arr[4] == \"00분\" or not mode):\n        print(\"Clicked\")\n        driver.find_element_by_xpath('/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[1]/table[2]/tbody/tr[1]/td/table/tbody/tr[19]/td/a').click()\n        driver.execute_script(\"sel_check()\")\n        print('Done')\n        navDriver.close()\n        break;\n" }, { "alpha_fraction": 0.7245178818702698, "alphanum_fraction": 0.7575757503509521, "avg_line_length": 44.5, "blob_id": "6534562f70eb3fb2c8487ec5cc477a9830e78d04", "content_id": "0ad8ec8e2b8f59a354a8acae86a2561fdfdd2a8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 363, "license_type": "no_license", "max_line_length": 140, "num_lines": 8, "path": "/checkComputerSpeed.py", "repo_name": "1000ship/powerSugang", "src_encoding": "UTF-8", "text": "from selenium import webdriver\n\n\ndriver = webdriver.Chrome('driver/chromedriver')\ndriver.set_window_size(700,700)\ndriver.get(\"https://hongiksugang.github.io/sugang/sugang\")\ndriver.find_element_by_xpath(\"/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[1]/table[2]/tbody/tr[1]/td/table/tbody/tr[19]/td/a\").click()\ndriver.find_element_by_id(\"sugangButton\").click()" }, { "alpha_fraction": 0.6227133870124817, "alphanum_fraction": 0.6385877728462219, "avg_line_length": 34.10869598388672, "blob_id": "17422479eaa4260af7a143c73a8c77d2d5ee2c4f", "content_id": "db5efb8b726440157167cd7cadf3172170ee03af", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3617, "license_type": "no_license", "max_line_length": 143, "num_lines": 92, "path": "/superpower.py", "repo_name": "1000ship/powerSugang", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nimport threading\nimport requests\nimport datetime\nimport time\n\nkey = open('me.key','r').read().split('\\n')\nmyid=key[0]\nmypw=key[1]\nmode = 0\n\ndriver = webdriver.Chrome('driver/chromedriver')\ndriver.set_window_size(800,800)\n\nprint('수강신청 페이지 키는 중..')\ndriver.get('http://sugang.hongik.ac.kr')\ndriver.find_element_by_name('p_userid').send_keys(myid)\ndriver.find_element_by_name('p_passwd').send_keys(mypw)\ndriver.find_element_by_xpath('/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[1]/table[1]/tbody/tr[2]/td/table/tbody/tr[4]/td/input').click()\nprint('세팅 끝, 대기')\n\nversion = 0\nsleepTime = 0.3\nresetServerCounter = 5 # 몇번 접근하여 서버 시간 불러올지 횟수 정함\nserverCounter = resetServerCounter\ncurrentTime = None\ndef getServerTime ():\n global currentTime, serverCounter, timerFunction, version\n timeURL = 'https://sugang.hongik.ac.kr/crossdomain.xml'\n timeResponse = requests.get(timeURL)\n timeString = timeResponse.headers['Date'].split()[4]\n timeArray = timeString.split(':')\n serverTime = datetime.datetime(year=100, month=1, day=1,\n hour=int(timeArray[0]), minute=int(timeArray[1]), second=int(timeArray[2]))\n if currentTime == None:\n currentTime = serverTime\n timerFunction(version)\n print(\">> 기존시간 :\", currentTime.time().second, \"서버시간 :\", serverTime.time().second)\n if serverTime < currentTime:\n # 서버 시간이 더 빨라서 새로고친다.\n # 타이머도 0 밀리초부터 시작하도록 버전 업 시키고 새로 돌린다.\n print(\">> 로컬이 더 빨라서 선두쳐서 튕길 수도 있음..;😨\")\n currentTime = serverTime\n version += 1\n threading.Timer(interval=1, function=timerFunction, args=(version,)).start()\n serverCounter = resetServerCounter\n elif serverTime > currentTime:\n # 수강신청 기간 아닙니다. 뜰 수도 있다. 위험\n # 시간 늦추고 다시 5번 동기화 시켜본다\n print(\">> 서버 시간으로 대체😃\")\n currentTime = serverTime\n version += 1\n threading.Timer(interval=1, function=timerFunction, args=(version,)).start()\n serverCounter = resetServerCounter\n if serverCounter > 0:\n # 동기화 카운터가 남았으면 0.2초 간격으로 계속 시간 파악한다.\n serverCounter -= 1\n time.sleep(sleepTime)\n threading.Thread(target=getServerTime).start()\n else:\n # 아무 문제 없을 시\n # 그래도 10초마다 체크해본다\n # time.sleep(10)\n # t = threading.Thread(target=getServerTime)\n # t.daemon = True\n # t.start()\n pass\nthreading.Thread(target=getServerTime).start()\n\ndef timerFunction ( _version ):\n global currentTime, version\n if version != _version:\n return\n currentTime = currentTime + datetime.timedelta(seconds=1)\n print(currentTime.time(), \"_version :\", _version)\n if currentTime.second == 0:\n sugangTrigger()\n else:\n threading.Timer(interval=1, function=timerFunction, args=(_version,)).start()\n\ndef sugangTrigger ():\n try:\n print(\"Triggered\")\n driver.find_element_by_xpath(\n '/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[1]/table[2]/tbody/tr[1]/td/table/tbody/tr[19]/td/a').click()\n print(\"Next Step\")\n driver.execute_script(\"sel_check()\")\n print(\"Ended\")\n except:\n print(\"서버 닫혀있음, 다시 시도해봄\")\n time.sleep(0.1)\n sugangTrigger()" } ]
6
jleflang/CS325_Portfolio_Project
https://github.com/jleflang/CS325_Portfolio_Project
456900c2a690b3494f8f159f469ac965e2fba899
23239f284c7e688f890176da13d1991e9182cc45
a0f0583f73cfbfc06533931539e48c31a683802e
refs/heads/main
2023-01-21T19:48:58.150435
2020-12-05T20:51:52
2020-12-05T20:51:52
314,070,383
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7418699264526367, "alphanum_fraction": 0.7621951103210449, "avg_line_length": 29.75, "blob_id": "582006b622ca95c102e63ac6baa8d26b753fd2f2", "content_id": "50b12dbb3f02acf82a3c84175b381e70357cd5d9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 492, "license_type": "permissive", "max_line_length": 90, "num_lines": 16, "path": "/README.md", "repo_name": "jleflang/CS325_Portfolio_Project", "src_encoding": "UTF-8", "text": "# CS325_Portfolio_Project\nAn implementation of a Sudoku Puzzle game for Oregon State University's CS325 class\n\n# Setup\nThis game requires these steps to run:\n\n1. Input `python3 venv -m game` into console\n2. Input `source game/bin/activate`\n3. Input `pip -r requirements.txt`\n\n# To Run\nA puzzle file from QQWing **with the Compact setting** is required (a sample is included).\nThe game is run by inputting `python sudoku.py` into console.\n\n# To deactivate Venv\nRun `deactivate` in the console\n" }, { "alpha_fraction": 0.44773221015930176, "alphanum_fraction": 0.4748254716396332, "avg_line_length": 30.995561599731445, "blob_id": "0569441832dc889cbf484392e4b7424fc378278b", "content_id": "8a2cabbca1c8d8e5cb6ec8f8cb2766113b9d3e27", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21629, "license_type": "permissive", "max_line_length": 87, "num_lines": 676, "path": "/sudoku.py", "repo_name": "jleflang/CS325_Portfolio_Project", "src_encoding": "UTF-8", "text": "# Name: James Leflang\n# CS 325: Analysis of Algorithms\n# Portfolio Project\n# (C) 2020 James Leflang - MIT License\nimport pygame\nimport copy\n\n# Used the following link to do the GUI since I am 100% unfamiliar to PyGame\n# https://geeksforgeeks.org/building-and-visualizing-sudoku-game-using-pygame/\n# Puzzle generated here: https://qqwing.com/generate.html\n# Some of the code mirrors/replicates this \n# (modifications are needed to match application):\n# https://github.com/wyfok/Solve_Sudoku_with_Crook_algorithm\n# \n# The verify_board() method is the primary graded objective for this assignment.\n#\n\n# Initialise the pygame font \npygame.font.init() \n# Configure the window\nscreen = pygame.display.set_mode((500, 600)) \npygame.display.set_caption(\"SUDOKU GAME\") \n \nx = 0\ny = 0\ndif = 500 / 9\nval = 0\n# Default Sudoku Board. 
\ngrid = [\n [0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0] \n ] \n\n# Load test fonts for future use \nfont1 = pygame.font.SysFont(\"comicsans\", 40) \nfont2 = pygame.font.SysFont(\"comicsans\", 20)\n\n\ndef get_cord(idx: list):\n \"\"\"Gets the cell location.\n Args:\n idx (list): cell coordinates\n \"\"\"\n global x \n x = idx[0]//dif\n global y \n y = idx[1]//dif\n\n\n# Loads a puzzle from file\n# If you want to replace the file provided, generate one on QQWing with\n# The \"Compact\" setting to be in the correct format\ndef load_puzzle():\n \"\"\"Loads a puzzle from file.\"\"\"\n try:\n with open('puzzle.txt', 'r') as puzzle:\n rows = puzzle.readlines()\n for i, row in enumerate(rows):\n for j, cell in enumerate(row.strip()):\n if cell != '.':\n grid[j][i] = int(cell)\n\n except IOError:\n text1 = font1.render(\"NO FILE FOUND\", True, (0, 0, 0))\n screen.blit(text1, (20, 570))\n\n \n# Highlight the cell selected \ndef draw_box():\n \"\"\"Highlights a cell.\"\"\" \n for i in range(2): \n pygame.draw.line(screen, (255, 0, 0), (x * dif-3, (y + i) * dif),\n (x * dif + dif + 3, (y + i)*dif), 7)\n pygame.draw.line(screen, (255, 0, 0), ((x + i) * dif, y * dif),\n ((x + i) * dif, y * dif + dif), 7)\n\n\n# Function to draw required lines for making Sudoku grid \ndef draw():\n \"\"\"Draws the Sudoku board.\"\"\" \n for i in range(9):\n for j in range(9):\n # If the cell is not empty\n if grid[i][j] != 0:\n \n # Fill blue color in already numbered grid \n pygame.draw.rect(screen, (0, 153, 153),\n pygame.Rect(i * dif, \n j * dif, dif + 1, dif + 1))\n \n # Fill grid with default numbers specified\n text1 = font1.render(str(grid[i][j]), True, (0, 0, 0))\n screen.blit(text1, (i * dif + 15, j * dif + 15))\n\n # Draw lines horizontally and vertically to form grid\n for i in range(10): \n if i % 3 == 0:\n thick = 7\n else: \n thick = 1\n pygame.draw.line(screen, (0, 0, 0), (0, i * dif), (500, i * dif), thick) \n pygame.draw.line(screen, (0, 0, 0), (i * dif, 0), (i * dif, 500), thick) \n\n\n# Fill value entered in cell \ndef draw_val(cell: int):\n \"\"\"Draws the cell's value.\n Args:\n cell (int): Value to draw\n \"\"\"\n text1 = font1.render(str(cell), True, (0, 0, 0))\n screen.blit(text1, (x * dif + 15, y * dif + 15)) \n\n\n# Raise error when wrong value entered \ndef error_puzzle_incomplete():\n \"\"\"Error: Puzzle state is Incomplete.\"\"\" \n text1 = font1.render(\"WRONG !!!\", True, (0, 0, 0))\n screen.blit(text1, (20, 570))\n\n\n# Raise error when wrong key is pressed\ndef error_unk_key():\n \"\"\"Error: Key not recognized.\"\"\" \n text1 = font1.render(\"Wrong !!! Not a valid Key\", True, (0, 0, 0))\n screen.blit(text1, (20, 570)) \n\n\n# Check if the value entered in board is valid \ndef valid(m: list, i: int, j: int, cell: int) -> bool:\n \"\"\"Determine if a value is valid for a cell.\n Args:\n m (list): The board\n i (int): Column number\n j (int): Row number\n cell: Value to verify\n Returns:\n bool: True if valid, else False\n \"\"\"\n for it in range(9): \n if m[i][it] == cell:\n return False\n if m[it][j] == cell:\n return False\n\n it = i//3\n jt = j//3\n for i in range(it * 3, it * 3 + 3): \n for j in range(jt * 3, jt * 3 + 3):\n if m[i][j] == cell:\n return False\n\n return True\n\n\n# This is the core verification method. 
This method takes a solved board and \n# determines if the solution is valid.\ndef verify_board(board: list) -> bool:\n    \"\"\"Verifies the board in O(n^2) time complexity.\n    Args:\n        board (list): Current puzzle\n    Returns:\n        bool: True if valid, else false\n    \"\"\"\n    \n    # Row/Column verification\n    for i in range(9):\n        row = [board[col][i] for col in range(9)]\n        col = board[i]\n        # Unique values must be there!\n        for val in range(1, 10):\n            if row.count(val) != 1:\n                return False\n            if col.count(val) != 1:\n                return False\n\n    # Box verification\n    for i in range(3):\n        for j in range(3):\n            box = list([board[col][row] for col in range(j * 3, j * 3 + 3)\n                        for row in range(i * 3, i * 3 + 3)])\n\n            for val in range(1, 10):\n                if box.count(val) != 1:\n                    return False\n\n    return True\n\n\n\n# This method is adapted (with a variable-name fix) from lines 218-242 in\n# https://github.com/wyfok/Solve_Sudoku_with_Crook_algorithm/blob/master/function.py\ndef do_box(sol: dict):\n    \"\"\"Checks a box for the possible values.\n    Args:\n        sol (dict): The solutions scratchpad\n    \"\"\"\n    # For the range of boxes\n    for i in range(3):\n        for j in range(3):\n            # Get a set of the possible values for each cell relative to \n            # each box they are in\n            possible = set([k for b in [value for key, value in sol.items() if\n                                        key[1] in range(i * 3, i * 3 + 3) and\n                                        key[0] in range(j * 3, j * 3 + 3) and\n                                        len(value) > 0]\n                            for k in b])\n\n            # For each of those cells\n            for cell in possible:\n                # Get a list of the cells that can still take this value\n                # (the upstream code used a loop variable named x here, which\n                # shadowed the global cursor coordinate; use cell instead)\n                avail = [key for key, value in sol.items() if cell in value if\n                         key[1] in range(i * 3, i * 3 + 3) and\n                         key[0] in range(j * 3, j * 3 + 3)]\n\n                # Set those values when they are actually available \n                # relative to the box\n                if len(set([k[0] for k in avail])) == 1:\n                    for key in [key for key, value in sol.items()\n                                if key[0] == avail[0][0] and key not in avail]:\n                        sol[key] = [possi for possi in \n                                    sol[key] if possi != cell]\n                if len(set([k[1] for k in avail])) == 1:\n                    for key in [key for key, value in sol.items()\n                                if key[1] == avail[0][1] and key not in avail]:\n                        sol[key] = [possi for possi in \n                                    sol[key] if possi != cell]\n\n\n# Remove all the impossible values from the solution\ndef remove_impossibles(sol: dict, cur_possibles: dict, board: list):\n    \"\"\"Core part of Crook's Algorithm.\n    Args:\n        sol (dict): The solution scratchpad\n        cur_possibles (dict): Values to attempt to fill\n        board (list): Current board\n    \"\"\"\n    # Get the range of possible values\n    try:\n        min_pos = min((len(v)) for _, v in cur_possibles.items())\n        max_pos = max((len(v)) for _, v in cur_possibles.items())\n    except ValueError:\n        return\n\n    # For each value in that range\n    for i in reversed(range(min_pos, max_pos + 1)):\n        # For each cell that matches that range\n        for k, v in {k: v for k, v in cur_possibles.items() if \n                     len(v) == i}.items():\n\n            subset_size = 0\n            matched = set()\n\n            # Create the necessary subsets\n            for k_1, v_1 in cur_possibles.items():\n                if len(v) < len(v_1):\n                    continue\n                else:\n                    if set(v_1).issubset(set(v)):\n                        matched.add(k_1)\n                        subset_size += 1\n\n            # When the subset is the same size of the possible values\n            if subset_size == len(v):\n                for k_2, v_2 in {k: v for k, v in cur_possibles.items()\n                                 if k not in matched}.items():\n                    cur_possibles[k_2] = [t for t in v_2 if t not in v]\n                do_check_render(sol, board)\n\n\n# Goes through a column\ndef col_fill(sol: dict, board: list):\n    \"\"\"Perform Crook's on the columns.\n    Args:\n        sol (dict): The solution scratchpad\n        board (list): Current board\n    \"\"\"\n    for i in range(9):\n        possibles 
= {k: v for k, v in sol.items() if k[0] == i and len(v) > 0}\n remove_impossibles(sol, possibles, board)\n\n\n# Goes through a row\ndef row_fill(sol: dict, board: list):\n \"\"\"Perform Crook's on the rows.\n Args:\n sol (dict): The solution scratchpad\n board (list): Current board\n \"\"\"\n for i in range(9):\n possibles = {k: v for k, v in sol.items() if k[1] == i and len(v) > 0}\n remove_impossibles(sol, possibles, board)\n\n\n# Goes through a box\ndef box_fill(sol: dict, board: list):\n \"\"\"Perform Crook's on the boxes.\n Args:\n sol (dict): The solution scratchpad\n board (list): Current board\n \"\"\"\n for i in range(3):\n possible = {k: v for k, v in sol.items() if k[0] in\n [g for g in range(i * 3, i * 3 + 3)] and k[1] in\n [z for z in range(i * 3, i * 3 + 3)] and len(v) > 0}\n remove_impossibles(sol, possible, board)\n\n\n# Crook's algorithm\ndef crooks(sol: dict, board: list):\n \"\"\"Top level Crook's.\n Args:\n sol (dict): The solution scratchpad\n board (list): Current board\n \"\"\"\n while True:\n old = copy.deepcopy(board)\n row_fill(sol, board)\n col_fill(sol, board)\n box_fill(sol, board)\n if old == board:\n break\n\n\n# Examine a column and fill uniques\ndef column_examine(sol: dict, board: list):\n \"\"\"Perform basic check & fill on the column.\n Args:\n sol (dict): The solution scratchpad\n board (list): Current board\n \"\"\"\n for i in range(9):\n # Get the column\n existent = board[i]\n\n # Update the possibilities\n for j in range(9):\n sol[j, i] = [k for k in sol[j, i] if k not in existent]\n\n pos_cell = [k for q in [value for key, value in sol.items() if \n key[1] == i and len(value) > 0] for k in q]\n uniques = [k for k in pos_cell if pos_cell.count(k) == 1]\n if len(uniques) > 0:\n for k in uniques:\n for key, value in {key: value for key, value in sol.items() if\n key[1] == i and len(value) > 0}.items():\n if k in value:\n sol[key].clear()\n board[key[1]][key[0]] = k\n global x, y\n x = key[1]\n y = key[0]\n\n # white color background\\\n screen.fill((255, 255, 255))\n draw()\n draw_box()\n pygame.display.update()\n pygame.time.delay(20)\n\n\n# Examine a row and fill uniques\ndef row_examine(sol: dict, board: list):\n \"\"\"Perform basic check & fill on the row.\n Args:\n sol (dict): The solution scratchpad\n board (list): Current board\n \"\"\"\n for i in range(9):\n existent = [cell for cell in [board[col][i] for col in range(9)]]\n\n for j in range(9):\n sol[i, j] = [k for k in sol[i, j] if k not in existent]\n\n pos_cell = [k for q in [value for key, value in sol.items() if \n key[0] == i and len(value) > 0] for k in q]\n uniques = [k for k in pos_cell if pos_cell.count(k) == 1]\n if len(uniques) > 0:\n for k in uniques:\n for key, value in {key: value for key, value in sol.items() if\n key[0] == i and len(value) > 0}.items():\n if k in value:\n sol[key].clear()\n board[key[1]][key[0]] = k\n global x, y\n x = key[1]\n y = key[0]\n\n # white color background\\\n screen.fill((255, 255, 255))\n draw()\n draw_box()\n pygame.display.update()\n pygame.time.delay(20)\n\n\n# Examine a box and fill uniques\ndef box_examine(sol: dict, board: list):\n \"\"\"Perform basic check & fill on the box.\n Args:\n sol (dict): The solution scratchpad\n board (list): Current board\n \"\"\"\n for i in range(3):\n for j in range(3):\n existent = set([board[col][row] for col in range(j * 3, j * 3 + 3)\n for row in range(i * 3, i * 3 + 3)])\n\n for q in range(j * 3, j * 3 + 3):\n for r in range(i * 3, i * 3 + 3):\n sol[r, q] = [k for k in sol[r, q] if k not in existent]\n\n 
pos_cell = [k for q in [value for key, value in sol.items() if\n key[1] in range(j * 3, j * 3 + 3) and\n key[0] in range(i * 3, i * 3 + 3) and\n len(value) > 0] for k in q]\n uniques = [k for k in pos_cell if pos_cell.count(k) == 1]\n if len(uniques) > 0:\n for k in uniques:\n box = {key: value for key, value in sol.items() if\n key[1] in range(j * 3, j * 3 + 3) and\n key[0] in range(i * 3, i * 3 + 3) and\n len(value) > 0}\n for key, value in box.items():\n if k in value:\n sol[key].clear()\n board[key[1]][key[0]] = k\n global x, y\n x = key[1]\n y = key[0]\n\n # white color background\\\n screen.fill((255, 255, 255))\n draw()\n draw_box()\n pygame.display.update()\n pygame.time.delay(20)\n\n\n# Examine any uniques and fill them\ndef unique_examine(sol: dict, board: list):\n \"\"\"Perform basic check & fill for unique values.\n Args:\n sol (dict): The solution scratchpad\n board (list): Current board\n \"\"\"\n for k, v in sol.items():\n if len(v) == 1:\n value = v[0]\n v.clear()\n board[k[1]][k[0]] = value\n global x, y\n x = k[1]\n y = k[0]\n\n # white color background\\\n screen.fill((255, 255, 255))\n draw()\n draw_box()\n pygame.display.update()\n pygame.time.delay(20)\n\n\ndef do_check_render(sol: dict, board: list):\n \"\"\"Top level basic check & fill.\n Args:\n sol (dict): The solution scratchpad\n board (list): Current board\n \"\"\"\n while True:\n old = copy.deepcopy(board)\n column_examine(sol, board)\n row_examine(sol, board)\n box_examine(sol, board)\n unique_examine(sol, board)\n if old == board:\n break\n\n\n# Solves the sudoku board using J. F. Crook's Pencil-and-Paper Algorithm\n# http://www.ams.org/notices/200904/tx090400460p.pdf\ndef solve(board: list) -> bool:\n \"\"\"Solve a puzzle with Crook's Algorithm.\n Args:\n board (list): Current Board\n Returns:\n bool: True if solved, else False\n \"\"\"\n sol = {}\n\n pygame.event.pump()\n\n # Make the solution scratchpad\n for i in range(9):\n for j in range(9):\n sol[i, j] = list(range(1, 10))\n if board[i][j] != 0:\n sol[i, j] = []\n\n # Run the algorithm until we can't (no change)\n while True:\n old = copy.deepcopy(board)\n # Basic check and render\n do_check_render(sol, board)\n # Perform Crook's\n crooks(sol, board)\n # Deal with boxes\n do_box(sol)\n if old == board:\n break\n\n del old\n\n # Check for completeness\n # If the puzzle was not solved, return false\n return verify_board(board)\n\n\n# Display instruction for the game \ndef instruction():\n \"\"\"Draw instructions.\"\"\" \n text1 = font2.render(\n \"PRESS D TO RESET / R TO EMPTY / E TO VERIFY\", \n True, (0, 0, 0))\n text2 = font2.render(\"ENTER VALUES AND PRESS ENTER TO SOLVE\", \n True, (0, 0, 0))\n screen.blit(text1, (20, 520)) \n screen.blit(text2, (20, 540)) \n\n\n# Display options when solved \ndef result():\n \"\"\"Draw ending message.\"\"\" \n text1 = font1.render(\"FINISHED PRESS R or D\", True, (0, 0, 0))\n screen.blit(text1, (20, 570))\n\n\n# FLAGS\n# Run\nrun = True\n# Render event\nrender_event = False\n# Solve Event\nsolve_puz = False\n# Result\nis_solved = False\n# Error\nerror = False\n# Verification\nverific = False\n\n# GAME ACTIVE LOOP\nwhile run: \n \n # White color background \n screen.fill((255, 255, 255))\n\n # Load the puzzle file\n load_puzzle()\n\n # Loop through the events stored in event.get() \n for event in pygame.event.get(): \n # Quit the game window \n if event.type == pygame.QUIT: \n run = False \n # Get the mouse position to insert number\n if event.type == pygame.MOUSEBUTTONDOWN: \n render_event = True\n pos = 
pygame.mouse.get_pos() \n get_cord(pos) \n # Get the number to be inserted if key pressed \n if event.type == pygame.KEYDOWN: \n if event.key == pygame.K_LEFT: \n x -= 1\n render_event = True\n if event.key == pygame.K_RIGHT: \n x += 1\n render_event = True\n if event.key == pygame.K_UP: \n y -= 1\n render_event = True\n if event.key == pygame.K_DOWN: \n y += 1\n render_event = True \n if event.key == pygame.K_1: \n val = 1\n if event.key == pygame.K_2: \n val = 2 \n if event.key == pygame.K_3: \n val = 3\n if event.key == pygame.K_4: \n val = 4\n if event.key == pygame.K_5: \n val = 5\n if event.key == pygame.K_6: \n val = 6 \n if event.key == pygame.K_7: \n val = 7\n if event.key == pygame.K_8: \n val = 8\n if event.key == pygame.K_9: \n val = 9 \n if event.key == pygame.K_RETURN: \n solve_puz = True \n # If R pressed, clear the sudoku board \n if event.key == pygame.K_r: \n is_solved = False\n error = False\n solve_puz = False\n grid = [\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0]\n ] \n # If D is pressed, reset the board \n if event.key == pygame.K_d: \n is_solved = False\n error = False\n solve_puz = False\n load_puzzle()\n # If E is pressed, verify the user input\n if event.key == pygame.K_e:\n verific = True \n\n if solve_puz: \n if not solve(grid):\n error = True\n else: \n is_solved = True\n solve_puz = False \n if val != 0: \n draw_val(val) \n # print(x) \n # print(y) \n if valid(grid, int(x), int(y), val):\n grid[int(x)][int(y)] = val\n render_event = False\n else: \n grid[int(x)][int(y)] = 0\n error_unk_key() \n val = 0 \n\n if verific:\n is_solved = verify_board(grid)\n\n if error: \n error_puzzle_incomplete() \n\n if is_solved: \n result() \n\n draw()\n\n if render_event: \n draw_box()\n\n instruction() \n \n # Update window \n pygame.display.update() \n \n# Quit pygame window \npygame.quit()\n" } ]
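verify_board above proves validity by counting every digit once per row, column, and box. The same invariant can be stated more compactly with sets; a minimal equivalent sketch, assuming the game's 9x9 board[column][row] list-of-lists layout (verify_board_sets is a hypothetical helper, not part of the game):

```python
# Set-based restatement of the verification idea in sudoku.py's verify_board.
# Assumes the same board[column][row] layout with values 1..9.
DIGITS = set(range(1, 10))

def verify_board_sets(board):
    rows = [{board[c][r] for c in range(9)} for r in range(9)]
    cols = [set(board[c]) for c in range(9)]
    boxes = [
        {board[c][r] for c in range(bc, bc + 3) for r in range(br, br + 3)}
        for bc in (0, 3, 6)
        for br in (0, 3, 6)
    ]
    # Every unit must be exactly the digit set; duplicates or zeros fail.
    return all(unit == DIGITS for unit in rows + cols + boxes)
```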
2
alexandremendoncaalvaro/draw-size
https://github.com/alexandremendoncaalvaro/draw-size
ccf2cc8f123ed4be19560a7ddca4a9023536d80a
2cfa820589ab34503e6967979e428112a409b16c
d4f4fade161dfacd84226ba9f6d035bf30b022e3
refs/heads/master
2022-08-26T20:38:54.452129
2020-03-05T12:35:02
2020-03-05T12:35:02
194,307,971
20
4
null
2019-06-28T17:04:43
2019-06-28T17:45:34
2019-06-28T17:51:10
Python
[ { "alpha_fraction": 0.5463508367538452, "alphanum_fraction": 0.5699103474617004, "avg_line_length": 34.022422790527344, "blob_id": "bfbc2736af0951672f1a586511328a48b54fb627", "content_id": "2f4aa42cd52a64b05bb2fad66535272711839c2e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7810, "license_type": "permissive", "max_line_length": 100, "num_lines": 223, "path": "/draw_size.py", "repo_name": "alexandremendoncaalvaro/draw-size", "src_encoding": "UTF-8", "text": "from scipy.spatial import distance as dist\nfrom imutils import perspective\nfrom imutils import contours\nimport numpy as np\nimport argparse\nimport imutils\nimport cv2\n\n\nclass AppControl():\n def __init__(self):\n self.MINIMUN_SIZE_TOLERANCE = 100.0\n self.argument_parser = argparse.ArgumentParser()\n self.stop_video = False\n \n\n def get_arguments(self):\n self.argument_parser.add_argument('-c', '--camera', type=int, default=1,\n help='webcam source id')\n self.argument_parser.add_argument('-w', '--width', type=float, default=2.0,\n help='width of the left-most object in the image (in cm)')\n self.argument_parser.add_argument('-f', '--float', type=int, default=1,\n help='floating point precision')\n arguments = vars(self.argument_parser.parse_args())\n return arguments\n\n\nclass Color():\n RED = (255, 0, 0)\n GREEN = (0, 255, 0)\n BLUE = (0, 0, 255)\n YELLOW = (255, 255, 0)\n MAGENTA = (255, 0, 255)\n CYAN = (0, 255, 255)\n BLACK = (0, 0, 0)\n WHITE = (255, 255, 255)\n\n\nclass Geometry():\n @staticmethod\n def get_midpoint(point_a, point_b):\n return ((point_a[0] + point_b[0]) * 0.5, (point_a[1] + point_b[1]) * 0.5)\n\n\nclass Video(object):\n def __init__(self, camera_id):\n self._video_capture = cv2.VideoCapture(camera_id)\n # self._video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, )\n # self._video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, )\n self._window_name = 'Video'\n cv2.namedWindow(self._window_name, cv2.WINDOW_AUTOSIZE)\n cv2.moveWindow(self._window_name, 0, 0)\n\n def get_frame(self):\n ret, frame = self._video_capture.read()\n return frame\n\n def update_window(self, frame):\n cv2.imshow(self._window_name, frame)\n\n def stop_when_key_press(self, key):\n stop = False\n if cv2.waitKey(1) & 0xFF == ord(key):\n stop = True\n return stop\n\n def finish(self):\n self._video_capture.release()\n cv2.destroyAllWindows()\n\n\nclass ObjectDetector(object):\n def __init__(self, *args, **kwargs):\n return super().__init__(*args, **kwargs)\n\n def get_edges(self, frame):\n gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray_frame = cv2.GaussianBlur(gray_frame, (7, 7), 0)\n edged_frame = cv2.Canny(gray_frame, 50, 100)\n edged_frame = cv2.dilate(edged_frame, None, iterations=1)\n edged_frame = cv2.erode(edged_frame, None, iterations=1)\n return edged_frame\n\n def get_contours(self, edged_frame):\n shapes_contours = None\n all_contours = cv2.findContours(\n edged_frame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n if len(all_contours) == 2:\n grabed_contours = imutils.grab_contours(all_contours)\n if len(grabed_contours) > 0:\n (sorted_contours, _) = contours.sort_contours(grabed_contours)\n shapes_contours = sorted_contours\n return shapes_contours\n\n def get_shapes_contours(self, frame):\n edged_frame = self.get_edges(frame)\n shapes_contours = self.get_contours(edged_frame)\n return shapes_contours\n\n def detect(self, c):\n shape = \"desconhecido\"\n peri = cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, 0.04 * peri, True)\n\n if 
len(approx) <= 2:\n shape = \"linha\"\n\n elif len(approx) == 3:\n shape = \"triangulo\"\n\n elif len(approx) == 4:\n (x, y, w, h) = cv2.boundingRect(approx)\n ar = w / float(h)\n shape = \"quadrado\" if ar >= 0.95 and ar <= 1.05 else \"retangulo\"\n\n elif len(approx) == 5:\n shape = \"pentagono\"\n\n else:\n shape = \"circulo\"\n\n return shape\n\n\nclass Box(object):\n def __init__(self, shape_contour):\n min_area_rect = cv2.minAreaRect(shape_contour)\n points = cv2.cv.BoxPoints(min_area_rect) if imutils.is_cv2(\n ) else cv2.boxPoints(min_area_rect)\n points_int = np.array(points, dtype=\"int\")\n self.points = perspective.order_points(points_int)\n\n\nclass ResultFrame(object):\n def paint(self, frame, box_points, reference_width, float_precision, shape_name):\n cv2.drawContours(\n frame, [box_points.astype(\"int\")], -1, Color.GREEN, 2)\n\n for (x, y) in box_points:\n cv2.circle(frame, (int(x), int(y)), 5, Color.BLUE, -1)\n\n (tl, tr, br, bl) = box_points\n (tltrX, tltrY) = Geometry.get_midpoint(tl, tr)\n (blbrX, blbrY) = Geometry.get_midpoint(bl, br)\n (tlblX, tlblY) = Geometry.get_midpoint(tl, bl)\n (trbrX, trbrY) = Geometry.get_midpoint(tr, br)\n\n # draw lines between the midpoints\n cv2.line(frame, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),\n Color.MAGENTA, 2)\n cv2.line(frame, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),\n Color.MAGENTA, 2)\n\n # draw the midpoints on the image\n cv2.circle(frame, (int(tltrX), int(tltrY)), 5, Color.RED, -1)\n cv2.circle(frame, (int(blbrX), int(blbrY)), 5, Color.RED, -1)\n cv2.circle(frame, (int(tlblX), int(tlblY)), 5, Color.RED, -1)\n cv2.circle(frame, (int(trbrX), int(trbrY)), 5, Color.RED, -1)\n\n # compute the Euclidean distance between the midpoints\n dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))\n dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))\n\n pixelsPerMetric = dB / reference_width\n\n # compute the size of the object\n dimA = dA / pixelsPerMetric\n dimB = dB / pixelsPerMetric\n\n # draw the object sizes on the image\n if float_precision <= 0:\n text_dimA = f'{dimA:.0f}cm'\n text_dimB = f'{dimB:.0f}cm'\n elif float_precision == 1:\n text_dimB = f'{dimB:.1f}cm'\n text_dimA = f'{dimA:.1f}cm'\n elif float_precision == 2:\n text_dimA = f'{dimA:.2f}cm'\n text_dimB = f'{dimB:.2f}cm'\n else:\n text_dimA = f'{dimA:.3f}cm'\n text_dimB = f'{dimB:.3f}cm'\n\n cv2.putText(frame, text_dimA,\n (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX,\n 0.65, Color.WHITE, 2)\n cv2.putText(frame, text_dimB,\n (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX,\n 0.65, Color.WHITE, 2)\n\n cv2.putText(frame, shape_name,\n (int(tr[0] + 10), int(tr[1]) - 20), cv2.FONT_HERSHEY_SIMPLEX,\n 0.65, Color.RED, 2)\n return frame\n\n\ndef main():\n app_control = AppControl()\n arguments = app_control.get_arguments()\n camera_id = arguments['camera']\n reference_width = arguments['width']\n float_precision = arguments['float']\n video = Video(camera_id)\n object_detector = ObjectDetector()\n result_frame = ResultFrame()\n while not app_control.stop_video:\n frame = video.get_frame()\n shapes_contours = object_detector.get_shapes_contours(frame)\n painted_frame = frame.copy()\n if shapes_contours != None:\n for shape_contour in shapes_contours:\n if cv2.contourArea(shape_contour) <= app_control.MINIMUN_SIZE_TOLERANCE:\n continue\n shape_name = object_detector.detect(shape_contour)\n box = Box(shape_contour)\n painted_frame = result_frame.paint(\n painted_frame, box.points, reference_width, float_precision, shape_name)\n 
video.update_window(painted_frame)\n        app_control.stop_video = video.stop_when_key_press('q')\n\n\nif __name__ == '__main__':\n    main()\n" }, { "alpha_fraction": 0.7115328311920166, "alphanum_fraction": 0.7264233827590942, "avg_line_length": 21.240259170532227, "blob_id": "2ad3c0d277de894b17aca49a14d1e26391def8ab", "content_id": "565f12c366b8fc15f78107a799f3a3d9d9975868", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3425, "license_type": "permissive", "max_line_length": 273, "num_lines": 154, "path": "/README.md", "repo_name": "alexandremendoncaalvaro/draw-size", "src_encoding": "UTF-8", "text": "# Draw Size\n\nBasic example of real-time size estimation from a camera with OpenCV\n\nThe width of the left-most object is the reference for measurement\n\n * MIT License\n * Requires Python 3.6+\n\n![](draw_paper_example.gif)\n\n# References\n\nBase examples from pyimagesearch:\n\n* https://www.pyimagesearch.com/2016/03/28/measuring-size-of-objects-in-an-image-with-opencv/\n* https://www.pyimagesearch.com/2016/02/08/opencv-shape-detection/\n\n# How to use\nafter install...\n\n## Example\n```bash\npython3 draw_size.py --camera 0 --width 2.0 --float 2\n```\narguments:\n* camera: source id (the script defaults to 1; a built-in webcam is usually 0) - optional\n* width: width in centimeters of the left-most object (for reference) - required\n* float: floating point precision\n\npress **q** to exit\n\n## For study and sharing purposes I added a basic shape detection algorithm\n\n![](shape_detector_example.png)\n\n## Example\n```bash\npython3 shape_detector.py --image images\\example_01.png\n```\narguments:\n* image: the image path\n\n# Install\n\n*if the **pip3** command is not working, use **pip** instead\n\n**if something does not work, try rebooting the terminal. 
:)\n\n## MacOS\n```bash\nxcode-select --install\n```\n```bash\n/usr/bin/ruby -e \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)\"\n```\n```bash\nbrew install git cmake python3\n```\n```bash\ngit clone https://github.com/alexandremendoncaalvaro/draw-size.git\ncd draw-size\n```\n```bash\npip3 install virtualenv\n```\n```bash\nvirtualenv cv\nsource cv/bin/activate\n```\n```bash\npip3 install -r requirements.txt\n```\n## Ubuntu\n```bash\nsudo apt update && sudo apt upgrade\n```\n```bash\nsudo apt-get install git build-essential cmake python3 python3-pip python3-dev python3-setuptools\n```\n```bash\ngit clone https://github.com/alexandremendoncaalvaro/draw-size.git &&\ncd draw-size\n```\n```bash\npip3 install virtualenv\n```\n```bash\nvirtualenv cv && source cv/bin/activate\n```\n```bash\npip3 install -r requirements.txt\n```\n## Windows\n```cmd\n@\"%SystemRoot%\\System32\\WindowsPowerShell\\v1.0\\powershell.exe\" -NoProfile -InputFormat None -ExecutionPolicy Bypass -Command \"iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))\" && SET \"PATH=%PATH%;%ALLUSERSPROFILE%\\chocolatey\\bin\"\n```\n* Install Visual Studio **with Visual C++ Build Tools 2015**\n\n  https://visualstudio.microsoft.com\n\n```cmd\nchoco install git cmake python3 -Y\n```\n```cmd\ngit clone https://github.com/alexandremendoncaalvaro/draw-size.git &&\ncd draw-size\n```\n```cmd\npip3 install virtualenv\n```\n```cmd\nvirtualenv cv &&\ncv\\Scripts\\activate\n```\n```cmd\npip3 install -r requirements.txt\n```\n\n## Virtual Enviroments\n*It's optional, but I strongly recommend that you learn about (and use) [Virtual Enviroments](https://www.geeksforgeeks.org/python-virtual-environment/)\n\nThe **virtualenv** lib is a tool to create isolated Python environments. virtualenv creates a folder which contains all the necessary executables to use the packages that a Python project would need.\n\n### pip command\n```bash\npip3 install virtualenv\n```\nYou can create a virtualenv using the following command (*choose a name, I used cv):\n```bash\nvirtualenv cv\n```\nActivate it:\n### MacOS & Ubuntu\n```bash\nsource cv/bin/activate\n```\n### Windows\n```cmd\ncv\\Scripts\\activate\n```\n\nNow **ALL** pip libraries will be installed in isolation inside the environment. It'll look like this:\n\n**(cv)$ pip3 install ...**\n\nIf you want to deactivate:\n\n**(cv)$ deactivate**\n\nCommand:\n```bash\ndeactivate\n```\n" } ]
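The measuring loop in draw_size.py boils down to one calibration step: divide a known real-world width by its apparent width in pixels, then reuse that ratio for every other edge. A stripped-down sketch of just that arithmetic, with made-up corner coordinates standing in for the detected contour points:

```python
# Pixels-per-metric calibration as used conceptually in draw_size.py above.
# The coordinates below are invented example values, not detector output.
import math

def euclidean(p, q):
    return math.hypot(p[0] - q[0], p[1] - q[1])

# Two corners of the reference object, known to be 2.0 cm apart.
ref_left, ref_right = (100.0, 240.0), (148.0, 240.0)
pixels_per_cm = euclidean(ref_left, ref_right) / 2.0  # 24 px per cm

# Any other pixel distance now converts directly to centimetres.
edge_px = euclidean((300.0, 120.0), (300.0, 216.0))
print(f"object edge: {edge_px / pixels_per_cm:.1f} cm")  # -> 4.0 cm
```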
2
peulsilva/fourth_sail
https://github.com/peulsilva/fourth_sail
b0729d90524e077bfec3eecf1a8f992aadf154b6
b61f2ef6a9ca44f8eba48ddfbcb0bb750eaa638b
f9135e968c0d48c38127d6608fbd9a0d3979c7c0
refs/heads/main
2023-08-28T08:57:53.528801
2021-10-28T16:35:16
2021-10-28T16:35:16
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5963636636734009, "alphanum_fraction": 0.5963636636734009, "avg_line_length": 24.090909957885742, "blob_id": "f950218625c42920fcaeec472d73f47bfe8fceaa", "content_id": "30388d16ea4fbc8bf31bdaf1f112834fc9355ec5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 275, "license_type": "permissive", "max_line_length": 66, "num_lines": 11, "path": "/utils.py", "repo_name": "peulsilva/fourth_sail", "src_encoding": "UTF-8", "text": "def exists(char, array):\n for element in array:\n if element.char == char:\n element.addCounter()\n return True\n return False\n\n\ndef printArray(array):\n for element in array:\n print(\"Letter: \"+ element.char+ \" Count: \", element.count)" }, { "alpha_fraction": 0.5065789222717285, "alphanum_fraction": 0.5197368264198303, "avg_line_length": 23, "blob_id": "d2027c9b5849684f1c66d1bd748598ab72005850", "content_id": "b640a032c0790cb2b42ed61d00760ae941e114d0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 152, "license_type": "permissive", "max_line_length": 29, "num_lines": 6, "path": "/CharObject.py", "repo_name": "peulsilva/fourth_sail", "src_encoding": "UTF-8", "text": "class CharObject:\n def __init__(self, char):\n self.char = char\n self.count = 1\n def addCounter(self):\n self.count+=1\n " }, { "alpha_fraction": 0.7472527623176575, "alphanum_fraction": 0.7505494356155396, "avg_line_length": 24.30555534362793, "blob_id": "404e78adbb51030a313a44454744c0a136c94415", "content_id": "10efa79ee45134c03870888355f4a1c5a92f3d0b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 921, "license_type": "permissive", "max_line_length": 276, "num_lines": 36, "path": "/README.md", "repo_name": "peulsilva/fourth_sail", "src_encoding": "UTF-8", "text": "# Resolução do case\n\nO problema proposto estava escondido nos elementos HTML da página fornecida. O problema era:\n\n\"Given an array which may contain duplicates, print all elements and their frequencies in descending order. Your code solution should be sent to the result printed.\"\n\n\n## Funcionamento\n\nO arquivo textFile.txt contém alguns caracteres utilizados para teste. Fique à vontade para modificá-lo. O separador de caracteres utilizado foi o espaço \" \". Caso queira mudar, altere o valor da variável sep no arquivo main.py. 
All characters must be on the same line.\n\n\n\n```python\nsep= ' '\n\nfileName= \"textFile.txt\"\n```\n\n\nThe standard output lists the characters that appeared most often, in descending order.\n\nThe following example produces the output below\n\n```\nb b b b c c a\n```\n\n``` bash\nLetter: b Count: 4\nLetter: c Count: 2\nLetter: a Count: 1\n```\n\n## License\n[MIT](https://choosealicense.com/licenses/mit/)" }, { "alpha_fraction": 0.7092437148094177, "alphanum_fraction": 0.7092437148094177, "avg_line_length": 23.83333396911621, "blob_id": "37df71eabe58795c2e9374c8f163ad1e192e1d0c", "content_id": "b786a26cfeac2024dac2bb891617ddc5a3062570", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 595, "license_type": "permissive", "max_line_length": 108, "num_lines": 24, "path": "/main.py", "repo_name": "peulsilva/fourth_sail", "src_encoding": "UTF-8", "text": "# Given an array which may contain duplicates, print all elements and their frequencies in descending order.\n# Your code solution should be sent to the result printed.\n\nfrom CharObject import CharObject\nfrom utils import exists, printArray\n\nsep= ' '\n\nfileName= \"textFile.txt\"\n\nfile = open(fileName,\"r\")\n\nline = file.readline()\n\ncharArray=[]\nfor char in line:\n    if not char == sep:\n        charObj = CharObject(char)\n        if not exists(char,charArray):\n            charArray.append(charObj)\n\ncharArray = sorted(charArray, key= lambda x: x.count, reverse= True)\n    \nprintArray(charArray)" } ]
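main.py above builds the frequency table by hand with CharObject and a linear exists scan; the standard library's collections.Counter gives the same descending-order report in a few lines. A minimal alternative sketch, assuming the same single-line, space-separated textFile.txt input:

```python
# Hypothetical Counter-based alternative to the CharObject bookkeeping above.
from collections import Counter

with open("textFile.txt") as f:
    chars = f.readline().split(" ")

counts = Counter(c for c in chars if c and not c.isspace())
for char, count in counts.most_common():  # already sorted descending
    print("Letter: " + char + " Count: ", count)
```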
4
marcus-johnson/work-tools
https://github.com/marcus-johnson/work-tools
9ada70ca7a9fdf35c35f6a118f85739aa76cfbf4
e8f5009597e9592136a38d8fe013df2b318c86c6
5d781f3d41d0bef64a779de264c3b6ccaab2b1b3
refs/heads/master
2016-08-17T06:01:48.176821
2016-05-22T06:26:59
2016-05-22T06:26:59
53,428,208
0
0
null
2016-03-08T16:39:38
2016-03-08T22:56:04
2016-03-10T07:00:51
Python
[ { "alpha_fraction": 0.5598617792129517, "alphanum_fraction": 0.5843473076820374, "avg_line_length": 28.71875, "blob_id": "ba5a7d0ae800b7338bf76c32cf9e70add848a969", "content_id": "faaa14e589ac81da47f696f63b292f5ade6e8d2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6657, "license_type": "no_license", "max_line_length": 105, "num_lines": 224, "path": "/inventory-usages/inventory_usage2.py", "repo_name": "marcus-johnson/work-tools", "src_encoding": "UTF-8", "text": "#!python3\n# pylint: disable-msg=C0103\n'''\nThis Script reads the text from an exported Inventory transaction report.\nParses the records into a SQLDB then returns a report of total transactions,\nusage divided into 1, 2, 3, 6, 9, 12, 15, and 18 month totals. And calculates\nwhat we should have on hand based on our last manual inventory date which can\nalso be found on the report\n'''\nimport datetime as dt\nimport pandas as pd\nimport re\nimport sqlite3\nrdate = None\nsdate = None\nsqty = None\nconn = sqlite3.connect('transactions.sqlite')\ncur = conn.cursor()\ncur.execute('''\nDROP TABLE IF EXISTS Counts''')\ncur.execute('''\nCREATE TABLE Counts (tranType TEXT, date TEXT, count INTEGER, itemNum TEXT,\n description TEXT, reqNum TEXT, PoNum TEXT)''')\n\n\ndef setup():\n '''\n Sets ups up SQLDB, opens transaction file,\n calls clean up functions with arg of trans file\n '''\n '''opens inv file'''\n\n file_handle = \"FD211RPT03.txt\"\n fname = input('Enter file name: ')\n if len(fname) < 1:\n fname = file_handle\n inv_file = open(fname)\n data_clean(inv_file)\n\n\ndef data_clean(report):\n '''\n Reads form, pulls out all transaction, gets run_date, inv_date and inv_qty\n Calls: make_trans_dict(transactions)\n returns run_date, inv_date, inv_qty, record_dict\n '''\n transactions = []\n run_date = None\n inv_date = None\n inv_qty = None\n for line in report:\n line = line.rstrip()\n if re.search(\"^RUN DATE\", line) and run_date is None:\n run_date = dt.datetime.strptime(line[9:12] + line[15:17], \"%m/%y\")\n elif re.search(\"^LAST INVENTORY DATE:\", line):\n idate = dt.datetime.strptime(line[21:27] + line[29:31], \"%m/%d/%y\")\n inv_date = idate.date()\n iqty = line[50:58].strip()\n inv_qty = float(iqty)\n elif re.search('^RC', line):\n transactions.append(line)\n elif re.search('^IS ', line):\n transactions.append(line)\n elif re.search('^AJ', line):\n transactions.append(line)\n elif re.search('^RT', line):\n transactions.append(line)\n record_list = make_trans_list(transactions)\n form_data = (run_date, inv_date, inv_qty, record_list)\n fill_db(form_data)\n return form_data\n\n\ndef make_trans_list(tlist):\n '''\n builds trans_dict with trans_type, trans_date, and Qty (for the month)\n '''\n trans_list = list()\n qbuild = str()\n for record in tlist:\n trans_type = record[0:2]\n try:\n trans_date = dt.datetime.strptime(record[6:14], \"%m/%d/%y\")\n except:\n continue\n #allows for negative qtys\n qbuild = record[95:96] + record[87:95]\n qbuild = qbuild.replace(\" \", \"\")\n qty = float(qbuild.strip())\n item_num = record[15:32].strip()\n desc = record[32:51].strip()\n req_num = record[52:66].strip()\n po_num = record[66:80].strip()\n tran_rec = (trans_type, trans_date, qty, item_num, desc, req_num, po_num)\n trans_list.append(tran_rec)\n return trans_list\n\n\ndef fill_db(form_input):\n '''\n Fills db with contents of the transaction lists generated by data_clean()\n '''\n global sdate\n global sqty\n global rdate\n run_date = form_input[0]\n rdate = run_date\n if 
form_input[1] is None:\n inv_date = dt.datetime.strptime(\"06/23/2015\", \"%m/%d/%Y\")\n else:\n inv_date = form_input[1]\n sdate = inv_date\n inv_qty = form_input[2]\n sqty = inv_qty\n record_list = form_input[3]\n '''\n pulled data now need to send to SQLDB\n '''\n for record in record_list:\n trans_type = record[0]\n tdate = record[1]\n count = record[2]\n item_num = record[3]\n desc = record[4]\n req_num = record[5]\n po_num = record[6]\n cur.execute('''INSERT INTO Counts (tranType, date, count, itemNum, description, reqNum, PoNum)\n VALUES (?, ?, ?, ?, ?, ?, ?)''', (trans_type, tdate, count, item_num, desc, req_num, po_num))\n conn.commit()\n\n\ndef filterDF(key_input, key='itemNum'):\n '''\n filter a df by a key\n '''\n\n #TODO:make it so the function differentiates between when key_input is passed\n #vs when it needs to be entered by user\n\n #key_input = input(\"Enter:\")\n\n nf = data_frame[data_frame[key] == str(key_input)]\n return nf\n\n\nsetup()\nsqlstr = 'SELECT * FROM Counts ORDER BY date'\ndata_frame = pd.read_sql_query(sqlstr, conn)\n\n\ndef add_date(date, year=0, month=0):\n year, month = divmod(year*12 + month, 12)\n if date.month > (12 - month):\n year = date.year + year + 1\n month = date.month + month - 12\n else:\n year = date.year + year\n month = date.month + month\n return date.replace(year=year, month=month)\n\n\ndef subtract_date(date, year=0, month=0):\n year, month = divmod(year*12 + month, 12)\n if date.month <= month:\n year = date.year - year - 1\n month = date.month - month + 12\n else:\n year = date.year - year\n month = date.month - month\n return date.replace(year=year, month=month)\n\n\ndef getUsages(df=data_frame):\n '''\n take in data_frame, extract issues and returns. displays usages(issue -\n return) for 1,2,3,6,9,12,15, and 18 months.\n '''\n x = subtract_date(rdate, 1, 6)\n y = add_date(x, 0, 1)\n monthlist = (1, 2, 3, 6, 9, 12, 15, 18)\n exportlist = list()\n j = 0\n df_is = df[df['tranType'] == 'IS']\n df_is = df_is[df_is['date'] > str(x)]\n df_rt = df[df['tranType'] == 'RT']\n df_rt = df_rt[df_rt['date'] > str(x)]\n exportlist.append(df['description'].iloc[0])\n exportlist.append(df['itemNum'].iloc[0])\n for i in range(18):\n df_is1 = df_is[(df_is['date'] > str(x)) & (df_is['date'] < str(y))]\n df_rt1 = df_rt[(df_rt['date'] > str(x)) & (df_rt['date'] < str(y))]\n is_sum = df_is1['count'].sum()\n rt_sum = df_rt1['count'].sum()\n if (i + 1) == monthlist[j]:\n exportlist.append(str(i + 1) + \" month : \" + str(is_sum - rt_sum))\n j += 1\n y = add_date(y, 0, 1)\n return exportlist\n\n\ndef getAllItems(df=data_frame):\n nf = df.drop_duplicates(['itemNum'])\n nums = nf['itemNum']\n for num in nums:\n yield str(num)\n\n\ndef getAllUsages(k):\n bf = filterDF(k, key='itemNum')\n r = getUsages(bf)\n return r\n\n\ndef main():\n finalframe = list()\n headers = ['item', 'item number', 'm1', 'm2', 'm3', 'm6', 'm9', 'm12', 'm15', 'm18']\n finalframe.append(headers)\n for i in getAllItems():\n finalframe.append(getAllUsages(i))\n df = pd.DataFrame(finalframe)\n df.to_csv('frames.csv')\n print(df)\n\nmain()\n" }, { "alpha_fraction": 0.7430278658866882, "alphanum_fraction": 0.7450199127197266, "avg_line_length": 26.88888931274414, "blob_id": "65fa66bc28675f686a14ed44117707e34e4aad11", "content_id": "d8be9d43fdd556bb339464049d4a3b054aa8ed44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 502, "license_type": "no_license", "max_line_length": 98, "num_lines": 18, "path": 
"/contact-merge/contact-merger.py", "repo_name": "marcus-johnson/work-tools", "src_encoding": "UTF-8", "text": "#!python3\n'''\nThis script reads in the labelfile csv that is created by the ivhreporting app\nand the contacts.xls file that susan maintains, it puts them both into\nthe same pandas dataframe, then it exports them in a format that can be uploaded to the usps site.\n'''\nimport pandas as pd\nimport re\nimport xlrd\n\nfile_handle = \"labelfile.txt\"\nxcel_file = \"contacts.xslx\"\n\nlabel_file = open(file_handle)\nexcel_file = open(xcel_file)\n\n#csv = pd.read_csv(label_file, sep=' ')\nxcl = pd.read_excel(excel_file)\n" } ]
2
ethan2000hao/ethan
https://github.com/ethan2000hao/ethan
0cda6a1a8293a86c85966d8511d93de521a2fb12
3211940e2b2f1cae655c2ed1d660a8be17f3cf2b
9de56a3eb5242e9d198f47703bea7ad1b7bedf3b
refs/heads/master
2021-06-12T01:32:16.067816
2021-05-10T08:18:11
2021-05-10T08:18:11
183,402,299
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.580152690410614, "alphanum_fraction": 0.6106870174407959, "avg_line_length": 23.625, "blob_id": "d9226e95f43435e0df9110011ab22bd906f8be5d", "content_id": "31d0a0a4c74e6243c2a7883c3897602fdc9a8d75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 393, "license_type": "no_license", "max_line_length": 47, "num_lines": 16, "path": "/copyPicture/copyPicture.py", "repo_name": "ethan2000hao/ethan", "src_encoding": "UTF-8", "text": "import os\nimport shutil\npath = 'C:/Users/86188/Desktop/record'\nnew_path = 'C:/Users/86188/Desktop/ppp'\ni = 0\nfor root, dirs, files in os.walk(path):\n # print(root)\n\n for sFile in files:\n\n i = i + 1\n srcname = os.path.join(root, sFile)\n desname = os.path.join(new_path, sFile)\n shutil.copy(srcname, desname)\n print(\"copying: NO \", i)\nprint(\"finished !\")" }, { "alpha_fraction": 0.5910979509353638, "alphanum_fraction": 0.6094955205917358, "avg_line_length": 33.408164978027344, "blob_id": "a4ed3b33a46b51bcb86866e495c2dee50491439b", "content_id": "c660b23d06abf70452db5409d0ed726de876365f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1879, "license_type": "no_license", "max_line_length": 101, "num_lines": 49, "path": "/pySendEmail/sendPicture.py", "repo_name": "ethan2000hao/ethan", "src_encoding": "UTF-8", "text": "import smtplib\nfrom email.mime.image import MIMEImage\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.header import Header\n# 第三方 SMTP 服务\nmail_host = \"smtp.qq.com\" # 设置服务器\nmail_user = \"[email protected]\" # 用户名\nmail_pass = \"qfwdbfpzdkxkbhab\" # 口令\nsender = '[email protected]'\nreceivers = ['[email protected]']\n# ***************************************************************\nmsgRoot = MIMEMultipart()\nmsgRoot['From'] = Header('Ethan Hao', 'utf-8')\nmsgRoot['To'] = Header('CHING MOBILE', 'utf-8')\nsubject = 'New York Daily '\nmsgRoot['Subject'] = Header(subject, 'utf-8')\n\nmail_msg = \"\"\"\n<p>New York Daily </p>\n<p><a href=\"http://www.baidu.com\">Just do it !</a></p>\n<p>演示:</p>\n<p><img src=\"cid:image1\"></p>\n\"\"\"\n\nmsgRoot.attach(MIMEText(mail_msg, 'html', 'utf-8'))\n# 指定图片为当前目录\nfp = open('C:/Users/86188/Desktop/PythonTest/t1/w4.jpg', 'rb')\nmsgImage = MIMEImage(fp.read())\nfp.close()\n# 定义图片 ID,在 HTML 文本中引用\nmsgImage.add_header('Content-ID', '<image1>')\nmsgRoot.attach(msgImage)\n# ************************传送文件***************************************\n# 构造附件1,传送当前目录下的 test.txt 文件\natt1 = MIMEText(open('C:/Users/86188/Desktop/PythonTest/t1/foo.txt', 'rb').read(), 'base64', 'utf-8')\natt1[\"Content-Type\"] = 'application/octet-stream'\n# 这里的filename可以任意写,写什么名字,邮件中显示什么名字\natt1[\"Content-Disposition\"] = 'attachment; filename=\"given.txt\"'\nmsgRoot.attach(att1)\n# ***************************************************************\ntry:\n smtpObj = smtplib.SMTP()\n smtpObj.connect(mail_host, 25) # 25 为 SMTP 端口号\n smtpObj.login(mail_user, mail_pass)\n smtpObj.sendmail(sender, receivers, msgRoot.as_string())\n print(\"邮件发送成功\")\nexcept smtplib.SMTPException:\n print(\"Error: 无法发送邮件\")" }, { "alpha_fraction": 0.690095841884613, "alphanum_fraction": 0.7100638747215271, "avg_line_length": 33.80555725097656, "blob_id": "b2a922f90b1ef0324538635e0c7767332a2aac4e", "content_id": "c4c2f646ff8df0e01833428b159397e7dcea3218", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1526, "license_type": 
"no_license", "max_line_length": 101, "num_lines": 36, "path": "/pySendEmail/sendEmail.py", "repo_name": "ethan2000hao/ethan", "src_encoding": "UTF-8", "text": "import smtplib\nfrom email.mime.image import MIMEImage\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.header import Header\n# 第三方 SMTP 服务\nmail_host = \"smtp.qq.com\" # 设置服务器\nmail_user = \"[email protected]\" # 用户名\nmail_pass = \"qfwdbfpzdkxkbhab\" # 口令\nsender = '[email protected]'\nreceivers = ['[email protected]'] # 接收邮件,可设置为你的QQ邮箱或者其他邮箱\n\n#创建一个带附件的实例\nmessage = MIMEMultipart()\nmessage['From'] = Header('Like The Wind', 'utf-8')\nmessage['To'] = Header('风', 'utf-8')\nsubject = 'New York Daily '\nmessage['Subject'] = Header(subject, 'utf-8')\n# 邮件正文内容\n# 三个参数:第一个为文本内容,第二个 plain 设置文本格式,第三个 utf-8 设置编码\nmail_msg ='New York Daily '\nmessage.attach(MIMEText(mail_msg, 'base64', 'utf-8'))\n# 构造附件1,传送当前目录下的 test.txt 文件\natt1 = MIMEText(open('C:/Users/86188/Desktop/PythonTest/t1/foo.txt', 'rb').read(), 'base64', 'utf-8')\natt1[\"Content-Type\"] = 'application/octet-stream'\n# 这里的filename可以任意写,写什么名字,邮件中显示什么名字\natt1[\"Content-Disposition\"] = 'attachment; filename=\"test.txt\"'\nmessage.attach(att1)\ntry:\n smtpObj = smtplib.SMTP()\n smtpObj.connect(mail_host, 25) # 25 为 SMTP 端口号\n smtpObj.login(mail_user, mail_pass)\n smtpObj.sendmail(sender, receivers, message.as_string())\n print(\"邮件发送成功\")\nexcept smtplib.SMTPException:\n print(\"Error: 无法发送邮件\")" }, { "alpha_fraction": 0.5772151947021484, "alphanum_fraction": 0.6151898503303528, "avg_line_length": 25.399999618530273, "blob_id": "3e5a56e234cdf44a8857102deb5cc90535a1b679", "content_id": "e2c4a6c5da7cecd474ca303d83cf3ac86bf4657c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 395, "license_type": "no_license", "max_line_length": 50, "num_lines": 15, "path": "/pycopyFiels/pycopyFiels.py", "repo_name": "ethan2000hao/ethan", "src_encoding": "UTF-8", "text": "import os\nimport shutil\n\nsrcPath = r\"C:\\Users\\86188\\Desktop\\test01\\ta\"\ndesPath = r\"C:\\Users\\86188\\Desktop\\test01\\te\"\n\nfor root, dirs, files in os.walk(srcPath):\n\n for sFile in files:\n if len(sFile) > 2:\n srcname = os.path.join(root, sFile)\n\n desname = os.path.join(desPath, sFile)\n shutil.copy(srcname, desname)\n print(\"copying: \" + desname)" }, { "alpha_fraction": 0.7948718070983887, "alphanum_fraction": 0.7948718070983887, "avg_line_length": 39, "blob_id": "4098bb9c48da4369a153beda10bcf5a0c6b58dc1", "content_id": "2e1c9facdf04acd4957cb5c64622b6258be90cd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 39, "license_type": "no_license", "max_line_length": 39, "num_lines": 1, "path": "/copyPicture/readme.txt", "repo_name": "ethan2000hao/ethan", "src_encoding": "UTF-8", "text": "the code only can use to copy pictures~" }, { "alpha_fraction": 0.42307692766189575, "alphanum_fraction": 0.46367520093917847, "avg_line_length": 18.54166603088379, "blob_id": "f0cb484085f1aa6365e65c7b7bf8d1c4e4b5702d", "content_id": "82aacafb1a1a9a7ca73446e22acc7915430973b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 538, "license_type": "no_license", "max_line_length": 46, "num_lines": 24, "path": "/inputAndTry/inputAndTry.py", "repo_name": "ethan2000hao/ethan", "src_encoding": "UTF-8", "text": "import types\nlist = [1,2,3,45,64,45]\ndef so():\n while True:\n try:\n str_num = input('input a number:')\n 
num = float(str_num)\n print(num)\n break # if the input is valid, exit the loop; on error, the code under except runs\n except:\n print('Invalid input, please enter again:')\n a = num\n if a > 8:\n del list[0]\n elif a <= 8:\n print('0000000')\n return list\ndef main():\n print(list)\n print('**************')\n c = so()\n print(c)\n\nmain()" } ]
6
codehutlabs/django_sqs
https://github.com/codehutlabs/django_sqs
b9fe476f27bfbbce9ad08740e8c65108179b2f80
7ff113cec7017fef09a92f6797153fd9207df5fc
42e263fd5401d717656934c4de3c8d6439116631
refs/heads/master
2022-07-09T19:07:24.325045
2019-09-07T12:34:33
2019-09-07T12:34:33
205,505,919
0
0
MIT
2019-08-31T06:31:42
2021-03-15T19:00:09
2023-08-28T17:21:23
Python
[ { "alpha_fraction": 0.6888657808303833, "alphanum_fraction": 0.7096773982048035, "avg_line_length": 31.03333282470703, "blob_id": "4e0bea09d44432a3dd400b27ae3d8db41a15c147", "content_id": "76e5c35ca7efca49246e2c821b947ca63e67d2f7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 961, "license_type": "permissive", "max_line_length": 72, "num_lines": 30, "path": "/djangosqs/apps/website/models.py", "repo_name": "codehutlabs/django_sqs", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass Topping(models.Model):\n\n title = models.CharField(max_length=255, blank=False, null=False)\n\n def __str__(self):\n return self.title\n\n\nclass Pizza(models.Model):\n\n title = models.CharField(max_length=255, blank=False, null=False)\n image = models.FileField(upload_to=\"uploads\", blank=True, null=True)\n toppings = models.ManyToManyField(Topping, blank=True)\n price = models.DecimalField(max_digits=4, decimal_places=2)\n\n def __str__(self):\n return self.title\n\n\nclass Order(models.Model):\n\n name = models.CharField(max_length=255, blank=False, null=False)\n address = models.CharField(max_length=255, blank=False, null=False)\n phone = models.CharField(max_length=255, blank=False, null=False)\n email = models.CharField(max_length=255, blank=False, null=False)\n pizza = models.ForeignKey(Pizza, on_delete=models.CASCADE)\n quantity = models.IntegerField(blank=False, null=False)\n" }, { "alpha_fraction": 0.5905852317810059, "alphanum_fraction": 0.5938931107521057, "avg_line_length": 30.440000534057617, "blob_id": "5e8e83aeb2aef1c5548e562fde249239b9559595", "content_id": "d1c3371c327f2dba31464cdc80973519b2a2209f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3930, "license_type": "permissive", "max_line_length": 86, "num_lines": 125, "path": "/djangosqs/apps/website/sqs.py", "repo_name": "codehutlabs/django_sqs", "src_encoding": "UTF-8", "text": "from djangosqs.apps.website.pdf import Pdf\nfrom djangosqs.apps.website.postmark import Postmark\nfrom djangosqs.settings import DEFAULT_FROM_EMAIL\n\nimport boto3\nimport json\nimport time\nimport typing as t\n\n\nclass Sqs:\n def __init__(\n self,\n region_name: str,\n queue_name: str,\n dl_queue_name: str,\n template_id: str = \"\",\n delay_seconds: int = 0,\n visibility_timeout: int = 20,\n max_receive_count: int = 5,\n wait_seconds: int = 20,\n sleep_seconds: int = 5,\n ) -> None:\n self.region_name = region_name\n self.queue_name = queue_name\n self.dl_queue_name = dl_queue_name\n self.template_id = template_id\n self.delay_seconds = delay_seconds\n self.delay_seconds_str = str(delay_seconds)\n self.visibility_timeout = visibility_timeout\n self.max_receive_count = max_receive_count\n self.wait_seconds = wait_seconds\n self.wait_seconds_str = str(wait_seconds)\n self.sleep_seconds = sleep_seconds\n\n sqs = boto3.resource(\"sqs\", region_name=self.region_name)\n\n dl_queue_attributes = {\"DelaySeconds\": self.delay_seconds_str}\n\n sqs.create_queue(QueueName=self.dl_queue_name, Attributes=dl_queue_attributes)\n\n dl_queue = sqs.get_queue_by_name(QueueName=self.dl_queue_name)\n dl_queue_arn = dl_queue.attributes[\"QueueArn\"]\n\n redrive_policy = {\n \"deadLetterTargetArn\": dl_queue_arn,\n \"maxReceiveCount\": self.max_receive_count,\n }\n queue_attributes = {\n \"DelaySeconds\": self.delay_seconds_str,\n \"ReceiveMessageWaitTimeSeconds\": self.wait_seconds_str,\n \"RedrivePolicy\": 
json.dumps(redrive_policy),\n }\n\n self.queue = sqs.create_queue(\n QueueName=self.queue_name, Attributes=queue_attributes\n )\n\n self.client = boto3.client(\"sqs\", region_name=self.region_name)\n\n def get_queue(self):\n return self.queue\n\n def get_client(self):\n return self.client\n\n def send_message(\n self, message_body: t.Dict[str, t.Union[str, t.Dict[str, str]]]\n ) -> t.Dict[str, t.Union[str, t.Dict[str, t.Union[int, str, t.Dict[str, str]]]]]:\n\n body = json.dumps(message_body, sort_keys=True)\n\n response = self.client.send_message(\n QueueUrl=self.queue.url, MessageBody=body, DelaySeconds=self.delay_seconds\n )\n\n return response\n\n def process_queue(self) -> None:\n\n response = self.client.receive_message(\n QueueUrl=self.queue.url,\n AttributeNames=[\"SentTimestamp\"],\n MaxNumberOfMessages=1,\n MessageAttributeNames=[\"All\"],\n VisibilityTimeout=self.visibility_timeout,\n WaitTimeSeconds=self.wait_seconds,\n )\n\n if response and \"Messages\" in response:\n response_message = response[\"Messages\"][0]\n message_id = response_message[\"MessageId\"]\n receipt_handle = response_message[\"ReceiptHandle\"]\n\n message = json.loads(response_message[\"Body\"])\n\n success = self.process_message(message)\n\n if success:\n print(\"Message {} processed.\".format(message_id))\n self.client.delete_message(\n QueueUrl=self.queue.url, ReceiptHandle=receipt_handle\n )\n else:\n print(\"There was an error with message {}.\".format(message_id))\n\n time.sleep(self.sleep_seconds)\n\n def process_message(self, message: dict) -> bool:\n\n pdf = Pdf()\n\n message[\"action_url\"] = pdf.receipt(message)\n\n postmark = Postmark(\n subject=\"\",\n body=\"\",\n from_email=DEFAULT_FROM_EMAIL,\n to=[message[\"to\"]],\n template_id=self.template_id,\n data=message,\n )\n num_sent = postmark.send()\n\n return num_sent > 0\n" }, { "alpha_fraction": 0.5693144202232361, "alphanum_fraction": 0.5705224871635437, "avg_line_length": 31.145631790161133, "blob_id": "e511297b13f76f0190904ec4dcb95d97d1fab8db", "content_id": "3cc394d2be7427c746b632a1df4e27224e90f9fa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3311, "license_type": "permissive", "max_line_length": 69, "num_lines": 103, "path": "/djangosqs/apps/website/views.py", "repo_name": "codehutlabs/django_sqs", "src_encoding": "UTF-8", "text": "from django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.views.generic import TemplateView\nfrom djangosqs.apps.website.forms import OrderForm\nfrom djangosqs.apps.website.models import Order\nfrom djangosqs.apps.website.models import Pizza\nfrom djangosqs.apps.website.sqs import Sqs\nfrom djangosqs.settings import MICRO_CONFIG\nfrom djangosqs.settings import TEMPLATE_ID\n\nimport datetime\n\n\nclass HomeView(TemplateView):\n\n template_name = \"home.html\"\n\n def get_context_data(self, **kwargs):\n context = super(HomeView, self).get_context_data(**kwargs)\n context[\"page_title\"] = \"Order a pizza\"\n context[\"pizzas\"] = Pizza.objects.all().order_by(\"id\")\n\n return context\n\n\nclass OrderView(TemplateView):\n\n template_name = \"order.html\"\n\n def get_context_data(self, **kwargs):\n context = super(OrderView, self).get_context_data(**kwargs)\n context[\"page_title\"] = \"Order a pizza\"\n\n return context\n\n def get(self, request, *args, **kwargs):\n context = self.get_context_data()\n pizza = Pizza.objects.get(pk=1)\n if \"pizza\" in kwargs:\n id = int(kwargs[\"pizza\"])\n pizza = 
Pizza.objects.get(pk=id)\n\n context[\"form\"] = OrderForm(None, initial={\"pizza\": pizza})\n return super(OrderView, self).render_to_response(context)\n\n def post(self, request, *args, **kwargs):\n context = self.get_context_data()\n form = OrderForm(self.request.POST)\n if form.is_valid():\n order = form.save()\n\n details = []\n quantity = order.quantity\n while quantity > 0:\n details.append(\n {\n \"description\": order.pizza.title,\n \"amount\": \"{} EUR\".format(order.pizza.price),\n }\n )\n quantity -= 1\n\n total = order.pizza.price * order.quantity\n\n message_body = {\n \"to\": order.email,\n \"name\": order.name,\n \"product_name\": \"Order a Pizza\",\n \"receipt_id\": \"#{}\".format(str(order.id).zfill(4)),\n \"date\": datetime.date.today().strftime(\"%B %d, %Y\"),\n \"receipt_details\": details,\n \"total\": \"{} EUR\".format(total),\n \"image\": \"{}\".format(order.pizza.image),\n \"action_url\": \"\",\n }\n\n region_name = str(MICRO_CONFIG[\"REGION_NAME\"])\n queue_name = str(MICRO_CONFIG[\"STANDARD_QUEUE\"])\n dl_queue_name = str(MICRO_CONFIG[\"DL_QUEUE\"])\n\n sqs = Sqs(\n region_name=region_name,\n queue_name=queue_name,\n dl_queue_name=dl_queue_name,\n template_id=TEMPLATE_ID,\n )\n sqs.send_message(message_body)\n\n return HttpResponseRedirect(reverse(\"website:orders\"))\n\n return super(OrderView, self).render_to_response(context)\n\n\nclass OrdersView(TemplateView):\n\n template_name = \"orders.html\"\n\n def get_context_data(self, **kwargs):\n context = super(OrdersView, self).get_context_data(**kwargs)\n context[\"page_title\"] = \"Pizza Orders\"\n context[\"orders\"] = Order.objects.all().order_by(\"id\")\n\n return context\n" }, { "alpha_fraction": 0.44586893916130066, "alphanum_fraction": 0.44586893916130066, "avg_line_length": 20.272727966308594, "blob_id": "33507e4588e3f0e3e3177ebeb6d95b4a177dbb96", "content_id": "cd38c05850bde9d2a9c14ba3de06108309d1db3b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 702, "license_type": "permissive", "max_line_length": 41, "num_lines": 33, "path": "/djangosqs/apps/website/postmark.py", "repo_name": "codehutlabs/django_sqs", "src_encoding": "UTF-8", "text": "from django.core.mail import EmailMessage\n\n\nclass Postmark(EmailMessage):\n def __init__(\n self,\n subject: str = \"\",\n body=\"\",\n from_email=None,\n to=None,\n bcc=None,\n connection=None,\n attachments=None,\n headers=None,\n cc=None,\n reply_to=None,\n template_id=\"\",\n data={},\n ):\n self.template_id = template_id\n self.merge_global_data = data\n super(Postmark, self).__init__(\n subject,\n body,\n from_email,\n to,\n bcc,\n connection,\n attachments,\n headers,\n cc,\n reply_to,\n )\n" }, { "alpha_fraction": 0.7626359462738037, "alphanum_fraction": 0.7639155387878418, "avg_line_length": 27.418182373046875, "blob_id": "56ebfa6cfeb9ee07b9ec83424043bad98142a9f9", "content_id": "60389387a1bb51a77a1e6d4f6baaae8f98334261", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 1563, "license_type": "permissive", "max_line_length": 213, "num_lines": 55, "path": "/Makefile", "repo_name": "codehutlabs/django_sqs", "src_encoding": "UTF-8", "text": "# Convenience makefile to build the dev env and run common commands\n# This example is for a virtualenv-based django project\n# Based on https://github.com/niteoweb/Makefile\n\n.PHONY: all\nall: .installed\n\n.PHONY: install\ninstall:\n\t@rm -f .installed # force re-install\n\t@make 
.installed\n\n.installed: requirements.txt djangosqs_backup djangosqs_media djangosqs_static\n\t@echo \"requirements.txt is newer than .installed, (re)installing\"\n\t@virtualenv -p python3.7 venv\n\t@venv/bin/pip install -r requirements-dev.txt\n\t@venv/bin/pre-commit install -f --hook-type pre-commit\n\t@venv/bin/pre-commit install -f --hook-type pre-push\n\t@echo \"This file is used by 'make' for keeping track of last install time. If requirements.txt is newer then this file (.installed) then all 'make *' commands that depend on '.installed' know they need to run.\" \\\n\t\t> .installed\n\t@cp djangosqs/local_settings.txt djangosqs/local_settings.py\n\ndjangosqs_backup:\n\t@mkdir djangosqs_backup\n\ndjangosqs_media:\n\t@mkdir djangosqs_media\n\t@mkdir djangosqs_media/uploads\n\t@mkdir djangosqs_media/receipt\n\ndjangosqs_static:\n\t@mkdir djangosqs_static\n\n.PHONY: collectstatic\ncollectstatic:\n\t@venv/bin/python manage.py collectstatic --noinput\n\n.PHONY: migrate\nmigrate:\n\t@venv/bin/python manage.py migrate\n\n.PHONY: load\nload:\n\t@venv/bin/python manage.py loaddata orderapizza.json\n\t@echo Copying images to media folder\n\t@rsync -rupE djangosqs/static/images/ djangosqs_media/uploads/\n\n.PHONY: run\nrun:\n\t@venv/bin/python manage.py runserver\n\n.PHONY: clean\nclean:\n\t@rm -rf venv/ htmlcov/\n\t@rm -f .installed .coverage\n" }, { "alpha_fraction": 0.7044444680213928, "alphanum_fraction": 0.7044444680213928, "avg_line_length": 33.61538314819336, "blob_id": "d87bacf45032a081e7c8d70442fe8ee318a16fec", "content_id": "1fcb073c1c33275a2bac4bde2bfb6a9a5a39f15a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 450, "license_type": "permissive", "max_line_length": 66, "num_lines": 13, "path": "/djangosqs/apps/website/urls.py", "repo_name": "codehutlabs/django_sqs", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom djangosqs.apps.website.views import HomeView\nfrom djangosqs.apps.website.views import OrdersView\nfrom djangosqs.apps.website.views import OrderView\n\napp_name = \"website\"\n\nurlpatterns = [\n path(\"\", HomeView.as_view(), name=\"home\"),\n path(\"order/\", OrderView.as_view(), name=\"order\"),\n path(\"order/<int:pizza>/\", OrderView.as_view(), name=\"order\"),\n path(\"orders/\", OrdersView.as_view(), name=\"orders\"),\n]\n" }, { "alpha_fraction": 0.7487002015113831, "alphanum_fraction": 0.7487002015113831, "avg_line_length": 22.079999923706055, "blob_id": "87bbfa393c6ac0cd6117b877792252fbaa7c204b", "content_id": "abf7e20cc7cee96f1a9bb851a46e453bbb22321e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 577, "license_type": "permissive", "max_line_length": 77, "num_lines": 25, "path": "/djangosqs/apps/website/admin.py", "repo_name": "codehutlabs/django_sqs", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom djangosqs.apps.website.models import Order\nfrom djangosqs.apps.website.models import Pizza\nfrom djangosqs.apps.website.models import Topping\n\n\nclass ToppingAdmin(admin.ModelAdmin):\n list_display = (\"title\",)\n\n\nadmin.site.register(Topping, ToppingAdmin)\n\n\nclass PizzaAdmin(admin.ModelAdmin):\n list_display = (\"title\", \"image\", \"price\")\n\n\nadmin.site.register(Pizza, PizzaAdmin)\n\n\nclass OrderAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"address\", \"phone\", \"email\", \"pizza\", \"quantity\")\n\n\nadmin.site.register(Order, OrderAdmin)\n" }, { "alpha_fraction": 
0.5751978754997253, "alphanum_fraction": 0.5798153281211853, "avg_line_length": 28.153846740722656, "blob_id": "870e6fb2e4e92964ed9c56ba31e26c655244c073", "content_id": "2a236d1410a5668f7d0690dba0e38ee3c26c3e08", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1516, "license_type": "permissive", "max_line_length": 75, "num_lines": 52, "path": "/djangosqs/apps/website/forms.py", "repo_name": "codehutlabs/django_sqs", "src_encoding": "UTF-8", "text": "from django import forms\nfrom djangosqs.apps.website.models import Order\nfrom djangosqs.apps.website.models import Pizza\n\n\nclass RequestForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n self.request = kwargs.pop(\"request\", None)\n super(RequestForm, self).__init__(*args, **kwargs)\n\n\nclass OrderForm(RequestForm):\n\n name = forms.CharField(\n widget=forms.TextInput(\n attrs={\"class\": \"form-control\", \"placeholder\": \"Your Name\"}\n )\n )\n address = forms.CharField(\n widget=forms.TextInput(\n attrs={\"class\": \"form-control\", \"placeholder\": \"Your Address\"}\n )\n )\n phone = forms.CharField(\n widget=forms.TextInput(\n attrs={\"class\": \"form-control\", \"placeholder\": \"Your Phone\"}\n )\n )\n email = forms.CharField(\n widget=forms.TextInput(\n attrs={\"class\": \"form-control\", \"placeholder\": \"Your Email\"}\n )\n )\n\n CHOICES = ((1, \"1 pizza\"), (2, \"2 pizzas\"), (3, \"3 pizzas\"))\n quantity = forms.ChoiceField(\n choices=CHOICES,\n initial=\"1\",\n widget=forms.Select(attrs={\"class\": \"form-control\"}),\n )\n\n pizza = forms.ModelChoiceField(\n queryset=Pizza.objects.all(),\n widget=forms.Select(attrs={\"class\": \"form-control\"}),\n )\n\n class Meta:\n model = Order\n fields = [\"name\", \"address\", \"phone\", \"email\", \"quantity\", \"pizza\"]\n\n def __init__(self, *args, **kwargs):\n super(OrderForm, self).__init__(*args, **kwargs)\n" }, { "alpha_fraction": 0.4871794879436493, "alphanum_fraction": 0.5641025900840759, "avg_line_length": 12, "blob_id": "6fe17479c50ae2b8ddd4302eef7aacbb102ee274", "content_id": "556ae4f320a23cf3338b934cc7b8fefbdd18b58d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 78, "license_type": "permissive", "max_line_length": 45, "num_lines": 6, "path": "/djangosqs/apps/website/tests/test_py.py", "repo_name": "codehutlabs/django_sqs", "src_encoding": "UTF-8", "text": "import pytest\n\n\ndef test_py():\n\n assert 1 + 1 == 2, \"1 + 1 should equal 2\"\n" }, { "alpha_fraction": 0.5687022805213928, "alphanum_fraction": 0.5687022805213928, "avg_line_length": 26.10344886779785, "blob_id": "316f9290a941e6486ac241e9e409d83c152c99a7", "content_id": "0f47d78ee92893f0c6204181508e556bd48a8639", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 786, "license_type": "permissive", "max_line_length": 56, "num_lines": 29, "path": "/djangosqs/apps/website/management/commands/sqsrunner.py", "repo_name": "codehutlabs/django_sqs", "src_encoding": "UTF-8", "text": "from django.core.management.base import BaseCommand\nfrom djangosqs.apps.website.sqs import Sqs\nfrom djangosqs.settings import MICRO_CONFIG\nfrom djangosqs.settings import TEMPLATE_ID\n\n\nclass Command(BaseCommand):\n\n help = \"SQS Runner\"\n\n def handle(self, *args, **options):\n\n print(\"========================\")\n region_name = str(MICRO_CONFIG[\"REGION_NAME\"])\n queue_name = str(MICRO_CONFIG[\"STANDARD_QUEUE\"])\n 
dl_queue_name = str(MICRO_CONFIG[\"DL_QUEUE\"])\n\n sqs = Sqs(\n region_name=region_name,\n queue_name=queue_name,\n dl_queue_name=dl_queue_name,\n template_id=TEMPLATE_ID,\n )\n\n while True:\n sqs.process_queue()\n\n print(\"========================\")\n print(\"Done!\")\n" }, { "alpha_fraction": 0.8058252334594727, "alphanum_fraction": 0.8058252334594727, "avg_line_length": 33.33333206176758, "blob_id": "79b1f940d8e8b5ebbb410bc52d34881ecdd2c29c", "content_id": "c283ea419102f7c0ead1bcd3b001f977d062b4d9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 103, "license_type": "permissive", "max_line_length": 46, "num_lines": 3, "path": "/pytest.ini", "repo_name": "codehutlabs/django_sqs", "src_encoding": "UTF-8", "text": "[pytest]\nDJANGO_SETTINGS_MODULE=djangosqs.test_settings\naddopts = --nomigrations --ignore=node_modules\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7196765542030334, "avg_line_length": 36.099998474121094, "blob_id": "8d906921fadbe5517372d521316e305cd6ad3983", "content_id": "76d9dfb61679bb2204c23290bad898428c55bf1c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1484, "license_type": "permissive", "max_line_length": 271, "num_lines": 40, "path": "/README.md", "repo_name": "codehutlabs/django_sqs", "src_encoding": "UTF-8", "text": "# ![Django + Amazon Simple Queue Service](logo.png)\n\nThis codebase was created to demonstrate a fully fledged fullstack application built with Django including some CRUD operations, authentication, routing, sending messages to AWS SQS, processing AWS SQS queues, generating a PDF and sending email using Postmarkapp service.\n\n# How it works\n\nStarting the Django server will bring up the Order a Pizza site.\n\n# Getting started\n\nYou need to have [virtualenv](https://virtualenv.pypa.io/) and Python 3.7 installed on your machine. 
Now run:\n\n $ make\n $ make collectstatic\n $ make migrate\n $ make load\n $ make run\n\nOr the traditional way:\n\n $ virtualenv -p python3.7 venv\n $ source venv/bin/activate\n (venv) $ pip install -r requirements.txt\n (venv) $ pre-commit install -f --hook-type pre-commit\n (venv) $ pre-commit install -f --hook-type pre-push\n (venv) $ mkdir djangosqs_backup\n (venv) $ mkdir djangosqs_media\n (venv) $ mkdir djangosqs_static\n (venv) $ mkdir djangosqs_media/receipt\n (venv) $ mkdir djangosqs_media/uploads\n (venv) $ cp djangosqs/local_settings.txt djangosqs/local_settings.py\n (venv) $ rsync -rupE djangosqs/static/images/ djangosqs_media/uploads/\n (venv) $ python manage.py collectstatic --noinput\n (venv) $ python manage.py migrate\n (venv) $ python manage.py loaddata orderapizza.json\n (venv) $ python manage.py runserver\n\nNow point your browser to:\n\n- http://localhost:8000/ -> DjangoSQS frontend app\n" }, { "alpha_fraction": 0.5135135054588318, "alphanum_fraction": 0.7117117047309875, "avg_line_length": 17.5, "blob_id": "40be8db92e83f9a362a531f9976d4e2f071de6a8", "content_id": "f11eef107a4c0b8a77a588581667d1fb9d7dec8f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 111, "license_type": "permissive", "max_line_length": 22, "num_lines": 6, "path": "/requirements.txt", "repo_name": "codehutlabs/django_sqs", "src_encoding": "UTF-8", "text": "boto3==1.9.217\nDjango==2.2.4\ndjango-anymail==6.1.0\ndjango-dbbackup==3.2.0\nmysqlclient==1.4.4\nreportlab==3.5.23\n" }, { "alpha_fraction": 0.5628811120986938, "alphanum_fraction": 0.5788872241973877, "avg_line_length": 28.81818199157715, "blob_id": "b6c318ab34d3cc049023b416344e22f12aff8ec1", "content_id": "73e3461ff837cf7d6c17cffd12ae853ce65efa9a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2624, "license_type": "permissive", "max_line_length": 107, "num_lines": 88, "path": "/djangosqs/apps/website/pdf.py", "repo_name": "codehutlabs/django_sqs", "src_encoding": "UTF-8", "text": "from djangosqs import settings\nfrom reportlab.lib.enums import TA_CENTER\nfrom reportlab.lib.enums import TA_JUSTIFY\nfrom reportlab.lib.pagesizes import A4\nfrom reportlab.lib.styles import getSampleStyleSheet\nfrom reportlab.lib.styles import ParagraphStyle\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom reportlab.platypus import Image\nfrom reportlab.platypus import Paragraph\nfrom reportlab.platypus import SimpleDocTemplate\nfrom reportlab.platypus import Spacer\n\nimport hashlib\n\n\nclass Pdf:\n receipt_url = None\n\n def __init__(self):\n font = \"{}/fonts/{}\".format(settings.STATIC_ROOT, \"RobotoSlab-Regular.ttf\")\n pdfmetrics.registerFont(TTFont(\"roboto\", font))\n\n def receipt(self, message):\n\n receipt = []\n\n styles = getSampleStyleSheet()\n\n styles.add(\n ParagraphStyle(\n name=\"pizza-title\",\n fontName=\"roboto\",\n fontSize=16,\n leading=18,\n alignment=TA_CENTER,\n )\n )\n styles.add(\n ParagraphStyle(\n name=\"pizza-center\",\n fontName=\"roboto\",\n fontSize=10,\n leading=12,\n alignment=TA_CENTER,\n )\n )\n\n styles.add(\n ParagraphStyle(\n name=\"pizza-normal\",\n fontName=\"roboto\",\n fontSize=10,\n leading=14,\n alignment=TA_JUSTIFY,\n )\n )\n\n text = \"Receipt {}\".format(message[\"receipt_id\"])\n receipt.append(Paragraph(text, styles[\"pizza-title\"]))\n receipt.append(Spacer(1, 25))\n\n text = \"Thanks for using {}. 
This PDF is the receipt for your purchase. No payment is due.\".format(\n message[\"product_name\"]\n )\n receipt.append(Paragraph(text, styles[\"pizza-center\"]))\n receipt.append(Spacer(1, 25))\n\n pizza_image = \"{}/{}\".format(settings.MEDIA_ROOT, message[\"image\"])\n receipt.append(Image(pizza_image))\n receipt.append(Spacer(1, 25))\n\n hash_object = hashlib.md5(message[\"receipt_id\"].encode())\n file_name = \"receipt/{}.pdf\".format(hash_object.hexdigest())\n\n doc = SimpleDocTemplate(\n \"{}/{}\".format(settings.MEDIA_ROOT, file_name),\n pagesize=A4,\n rightMargin=40,\n leftMargin=40,\n topMargin=150,\n bottomMargin=0,\n )\n doc.build(receipt)\n\n self.receipt_url = \"http://127.0.0.1:8000/media/{}\".format(file_name)\n\n return self.receipt_url\n" }, { "alpha_fraction": 0.7280701994895935, "alphanum_fraction": 0.7280701994895935, "avg_line_length": 18, "blob_id": "1133ba2ec27d98d73e049f04bf80baa85581d0f4", "content_id": "8459d4c22ed894d7ca4f44d879d33b038ac2876d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 228, "license_type": "permissive", "max_line_length": 29, "num_lines": 12, "path": "/mypy.ini", "repo_name": "codehutlabs/django_sqs", "src_encoding": "UTF-8", "text": "[mypy]\nfollow_imports = silent\ncheck_untyped_defs = True\nhtml_report = ./htmltypecov\nlinecount_report = ./typecov\nignore_missing_imports = True\n\n[mypy-*.migrations.*]\nignore_errors = True\n\n[mypy-*.models.*]\nignore_errors = True\n" }, { "alpha_fraction": 0.49238577485084534, "alphanum_fraction": 0.6852791905403137, "avg_line_length": 15.416666984558105, "blob_id": "3527d3f2e06aa6c3cb3b93cf7d4b543564520eab", "content_id": "2bb90be25d51c83cb02287fbabc3ed533715b439", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 197, "license_type": "permissive", "max_line_length": 23, "num_lines": 12, "path": "/requirements-dev.txt", "repo_name": "codehutlabs/django_sqs", "src_encoding": "UTF-8", "text": "-r requirements.txt\nblack==19.3b0\nflake8==3.7.8\nisort==4.3.21\nlxml==4.4.1\nmypy==0.720\npre-commit==1.18.3\npre-commit-hooks==2.3.0\npytest==5.1.1\npytest-cov==2.7.1\npytest-django==3.5.1\ntypecov==0.2.1\n" }, { "alpha_fraction": 0.39163821935653687, "alphanum_fraction": 0.4065699577331543, "avg_line_length": 30.675676345825195, "blob_id": "ca0b5b219f5389b3b597cc419c015faa6e904b90", "content_id": "8bc5b58fb53e18a8ca632d603e367712185833f5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2344, "license_type": "permissive", "max_line_length": 88, "num_lines": 74, "path": "/djangosqs/apps/website/migrations/0001_initial.py", "repo_name": "codehutlabs/django_sqs", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2019-08-29 13:44\n\nfrom django.db import migrations\nfrom django.db import models\n\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = []\n\n operations = [\n migrations.CreateModel(\n name=\"Topping\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"title\", models.CharField(max_length=255)),\n ],\n ),\n migrations.CreateModel(\n name=\"Pizza\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"title\", models.CharField(max_length=255)),\n (\"image\", 
models.FileField(blank=True, null=True, upload_to=\"uploads\")),\n (\"price\", models.DecimalField(decimal_places=2, max_digits=4)),\n (\"toppings\", models.ManyToManyField(blank=True, to=\"website.Topping\")),\n ],\n ),\n migrations.CreateModel(\n name=\"Order\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"name\", models.CharField(max_length=255)),\n (\"address\", models.CharField(max_length=255)),\n (\"phone\", models.CharField(max_length=255)),\n (\"email\", models.CharField(max_length=255)),\n (\"quantity\", models.IntegerField()),\n (\n \"pizza\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to=\"website.Pizza\"\n ),\n ),\n ],\n ),\n ]\n" } ]
17
chaoqun-wang/DjangoProjectDatabase
https://github.com/chaoqun-wang/DjangoProjectDatabase
e84561cd0cc3f35ce206b154829a09f76b71fda3
463a1e8c75d8697fddbecbf828fa07b64bf1db99
1517a0beb0950d51d4ca441ce44ecfad15862981
refs/heads/master
2021-05-22T02:46:21.455485
2020-04-06T07:15:01
2020-04-06T07:15:01
252,924,419
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5534173846244812, "alphanum_fraction": 0.5892501473426819, "avg_line_length": 25.456140518188477, "blob_id": "2015f2443cc95ce739ea3eb9d4d161b215344088", "content_id": "4dffba0fff53529d408ce69e49a9bd9ad90840d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1651, "license_type": "no_license", "max_line_length": 148, "num_lines": 57, "path": "/DangDang/order/tests.py", "repo_name": "chaoqun-wang/DjangoProjectDatabase", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nimport re, time, datetime, string\n\n\n# Create your tests here.\n\n# data = '18322099340'\n\n# if re.match(r\"^1[35678]\\d{9}$\", data):\n# print(1)\n# else:\n# print(0)\n\nclass AutoSerialNumber(object):\n \"\"\"创建OA单号\"\"\"\n\n def __init__(self):\n # J201906120001\n # self.fd_apply_no = ApplicationBasicFormModel.delete_objects.filter(fd_apply_no__contains=\"J\").order_by(\"-fd_apply_no\").first().fd_apply_no\n self.fd_apply_no = \"J20196120001\"\n self.date_str = self.fd_apply_no[1: 9] # 日期字符串\n self._serial_number = self.fd_apply_no[9:] # 流水号字符串\n self._serial_number = 0 # 流水号\n\n @property\n def serial_number(self):\n return self._serial_number\n\n @serial_number.setter\n def serial_number(self, value):\n if isinstance(value, int):\n self._serial_number = value\n else:\n self._serial_number = 1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n self.serial_number += 1\n # 生成一个固定4位数的流水号\n return \"{0:03d}\".format(self.serial_number)\n\n def __call__(self, *args, **kwargs):\n # 返回生成序列号(日期加流水号)\n return \"J\" + self.date_str + next(self)\n\n # 时间格式化,最好是用定时器来调用该方法\n def timed_clear_serial_number(self):\n \"\"\"用于每天定时清除流水号\"\"\"\n\n self.serial_number = 1\n self.date_str = time.strftime(\"%Y%m%d\", time.localtime(time.time()))\n\nprint(time.strftime('%Y-%m-%d %H:%M:%S'))\nprint(time.strftime('%Y%m%d%H%M%S'))\nprint(time.time())" }, { "alpha_fraction": 0.7821782231330872, "alphanum_fraction": 0.7821782231330872, "avg_line_length": 19.200000762939453, "blob_id": "9d524ffbd9d7d42199b9277fa7745799d6be5ff8", "content_id": "de58b39c041052f96e00c978edb0011121a3f7fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 101, "license_type": "no_license", "max_line_length": 37, "num_lines": 5, "path": "/DangDang/createCaptcha/apps.py", "repo_name": "chaoqun-wang/DjangoProjectDatabase", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass CreatecaptchaConfig(AppConfig):\n name = 'createCaptcha'\n" }, { "alpha_fraction": 0.669926643371582, "alphanum_fraction": 0.669926643371582, "avg_line_length": 33.08333206176758, "blob_id": "bb29df5e358ec2d18952c84c5fb8c6a30b9bdcd5", "content_id": "d0f54fec734e1a655c6c2f7235a1539620d13687", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 409, "license_type": "no_license", "max_line_length": 70, "num_lines": 12, "path": "/DangDang/cart/urls.py", "repo_name": "chaoqun-wang/DjangoProjectDatabase", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom cart import views\n\napp_name = 'cart'\n\nurlpatterns = [\n path('shopping_cart/', views.shopping_cart, name='shopping_cart'),\n path('add_cart/', views.add_cart, name='add_cart'),\n path('reduce_cart/', views.reduce_cart, name='reduce_cart'),\n path('change_number/', views.change_number, name='change_number'),\n path('del_cart/', views.del_cart, name='del_cart'),\n]\n" }, { "alpha_fraction": 
0.5213675498962402, "alphanum_fraction": 0.5897436141967773, "avg_line_length": 16.923076629638672, "blob_id": "59dacdfbc8a567473d98f8cfe8d12a0264d45838", "content_id": "6a855f24e483b8c029c4189f511b17754d771ba3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 240, "license_type": "no_license", "max_line_length": 46, "num_lines": 13, "path": "/DangDang/user/tests.py", "repo_name": "chaoqun-wang/DjangoProjectDatabase", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nimport re, uuid\n\n\n# Create your tests here.\n\n\ndef hehe():\n username = input('Please enter: ')\n # re_phone = r\"^1[35678]\\d{9}$\"\n if re.match(r\"^1[35678]\\d{9}$\", username):\n return 1\n return 0\n\n" }, { "alpha_fraction": 0.692967414855957, "alphanum_fraction": 0.7032589912414551, "avg_line_length": 28.149999618530273, "blob_id": "63e785fba85972ba8dd4a9d5ef5387141575d18e", "content_id": "f89ad605fa55ae0cc90cbabdde3beccced02a804", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 765, "license_type": "no_license", "max_line_length": 70, "num_lines": 20, "path": "/DangDang/createCaptcha/views.py", "repo_name": "chaoqun-wang/DjangoProjectDatabase", "src_encoding": "UTF-8", "text": "from django.shortcuts import HttpResponse\nfrom captcha.image import ImageCaptcha\nimport random, string\n\n\n# Create your views here.\n\ndef get_captcha(request):\n # 1. Create an ImageCaptcha() object\n image = ImageCaptcha()\n # 2. Randomly generate the captcha character list (upper/lowercase letters and digits)\n code_list = random.sample(string.ascii_letters + string.digits, 5)\n # 3. Join the character list into a string\n code = ''.join(code_list)\n print(code, 'generated captcha!!!')\n # 4. Store the captcha in the session for later verification\n request.session['code'] = code\n # 5. Draw the captcha string onto an image\n data = image.generate(code) # data is binary image data\n return HttpResponse(data, 'image/png')\n" }, { "alpha_fraction": 0.5127830505371094, "alphanum_fraction": 0.5153396725654602, "avg_line_length": 25.076190948486328, "blob_id": "329c8086de2b080e5b11fc56738061545a666382", "content_id": "202e70850954cafe93782b50d1daf7ffbfb4af00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3162, "license_type": "no_license", "max_line_length": 65, "num_lines": 105, "path": "/DangDang/cart/cart.py", "repo_name": "chaoqun-wang/DjangoProjectDatabase", "src_encoding": "UTF-8", "text": "from home.models import TBooks\n\n\n# A book item in the cart\nclass Book:\n def __init__(self, book_id, number):\n \"\"\"\n Initialize the attributes of a book in the cart\n :param book_id: the book id\n :param number: the book quantity\n \"\"\"\n # Fetch the book object from the database by its id\n book = TBooks.objects.filter(id=book_id)[0]\n # Initialize the book attributes\n self.book_id = book.id\n self.book_name = book.book_name\n self.book_price = book.dang_price\n self.book_number = number\n self.book_picture = book.picture_one\n self.total_price = self.book_price * self.book_number\n self.publisher = book.publisher\n\n\n# The shopping-cart object\nclass Cart:\n def __init__(self):\n \"\"\"\n Initialize the container that stores the book objects in the cart\n \"\"\"\n self.book_items = []\n\n def get_book(self, book_id):\n \"\"\"\n Get a book object from the cart\n :param book_id: the book id\n :return: the book object, or None if not found\n \"\"\"\n for book in self.book_items:\n print(book.book_id, 'line 37', type(book.book_id))\n print(book_id, 'line 38', type(book_id))\n if book.book_id == book_id:\n return book\n\n def add_book(self, book_id, number=1):\n \"\"\"\n Add a book to the cart\n :param book_id: the book id\n :param number: the quantity to add\n :return:\n \"\"\"\n print(book_id)\n book = self.get_book(book_id)\n if book:\n print('Book already in cart:', book)\n book.book_number += number\n 
book.total_price = book.book_number * book.book_price\n else:\n print('Book not yet in cart')\n book = Book(book_id, number)\n self.book_items.append(book)\n\n def reduce_book(self, book_id, number=1):\n \"\"\"\n Decrease the quantity of a book\n :param book_id: the book id\n :param number: the quantity to remove\n :return:\n \"\"\"\n book = self.get_book(book_id)\n if book:\n book.book_number -= number\n book.total_price = book.book_number * book.book_price\n\n def change_book(self, book_id, number):\n \"\"\"\n Set the quantity of a book directly\n :param book_id: the book id\n :param number: the new quantity\n :return:\n \"\"\"\n book = self.get_book(book_id)\n if book:\n book.book_number = number\n book.total_price = book.book_number * book.book_price\n\n def del_book(self, book_id):\n \"\"\"\n Remove a book from the cart\n :param book_id: the book id\n :return: True if the removal succeeded\n \"\"\"\n book = self.get_book(book_id)\n if book:\n self.book_items.remove(book)\n return True\n\n def add_shopping_cart(self, book_id, number):\n \"\"\"\n Load a stored cart item into this cart object\n :param book_id: the book id\n :param number: the book quantity\n :return:\n \"\"\"\n book = Book(book_id, number)\n self.book_items.append(book)\n" }, { "alpha_fraction": 0.5123980641365051, "alphanum_fraction": 0.5205546617507935, "avg_line_length": 40.41891860961914, "blob_id": "4666f5f9d0af6d43f4d22a07837589a9c3900172", "content_id": "6383eb2692ab1bd41bbe4a3c0720a6031b71050b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6440, "license_type": "no_license", "max_line_length": 113, "num_lines": 148, "path": "/DangDang/cart/views.py", "repo_name": "chaoqun-wang/DjangoProjectDatabase", "src_encoding": "UTF-8", "text": "from django.http import JsonResponse\nfrom django.shortcuts import render, HttpResponse\nfrom cart.cart import Cart\nfrom home.models import TUser, TShoppingCart\nfrom django.db import transaction\n\n\n# Create your views here.\n\n# Render the shopping-cart page\ndef shopping_cart(request):\n try:\n username = request.session.get('username')\n if request.session.get('login_status'):\n if '@' in username:\n user_id = TUser.objects.filter(username=username)[0].id\n else:\n user_id = TUser.objects.filter(phone_number=username)[0].id\n books = TShoppingCart.objects.filter(user_id=user_id)\n cart = Cart()\n for book in books:\n cart.add_shopping_cart(book.book_id, book.quantity)\n else:\n cart = request.session.get('cart')\n return render(request, 'cart/car.html', {\n 'username': username,\n 'cart': cart\n })\n except Exception as error:\n print(error)\n return HttpResponse('Oops! The server ran into an error~')\n\n\n# Add-to-cart logic\ndef add_cart(request):\n try:\n with transaction.atomic():\n book_id = request.POST.get('book_id')\n number = request.POST.get('num', '1')\n cart = request.session.get('cart')\n if str(number).isdigit() and int(number) in range(1, 201):\n if request.session.get('login_status'):\n username = request.session.get('username')\n if '@' in username:\n user_id = TUser.objects.filter(username=username)[0].id\n else:\n user_id = TUser.objects.filter(phone_number=username)[0].id\n\n user_book = TShoppingCart.objects.filter(book_id=int(book_id), user_id=user_id)\n if user_book:\n user_book[0].quantity += int(number)\n user_book[0].save()\n else:\n TShoppingCart.objects.create(quantity=int(number), book_id=int(book_id), user_id=user_id)\n else:\n if cart:\n cart.add_book(int(book_id), int(number))\n request.session['cart'] = cart\n else:\n cart = Cart()\n cart.add_book(int(book_id), int(number))\n request.session['cart'] = cart\n return JsonResponse({'msg': 'Added successfully, go take a look at your cart ^_^!', 'status': 1})\n return JsonResponse({'msg': 'Add failed: the quantity must be between 1 and 200', 'status': 0})\n except Exception as error:\n print(error)\n return HttpResponse('Oops! The server ran into an error~')\n\n\n# Reduce-from-cart logic\ndef reduce_cart(request):\n try:\n with transaction.atomic():\n book_id = request.POST.get('book_id')\n number = request.POST.get('num', '1')\n cart = request.session.get('cart')\n if str(number).isdigit() and int(number) in range(1, 201):\n if request.session.get('login_status'):\n username = request.session.get('username')\n if '@' in username:\n user_id = TUser.objects.filter(username=username)[0].id\n else:\n user_id = TUser.objects.filter(phone_number=username)[0].id\n user_book = TShoppingCart.objects.filter(book_id=int(book_id), user_id=user_id)\n if user_book:\n user_book[0].quantity -= int(number)\n user_book[0].save()\n else:\n cart.reduce_book(int(book_id), int(number))\n request.session['cart'] = cart\n return JsonResponse({'msg': 'Reduced successfully', 'status': 1})\n return JsonResponse({'msg': 'Reduce failed: the quantity must be between 1 and 200', 
'status': 0})\n except Exception as error:\n print(error)\n return HttpResponse('Oops! The server ran into an error~')\n\n\n# Re-set the quantity from the input box on the cart page\ndef change_number(request):\n try:\n with transaction.atomic():\n book_id = request.POST.get('book_id')\n number = request.POST.get('num')\n cart = request.session.get('cart')\n if str(number).isdigit() and int(number) in range(1, 201):\n if request.session.get('login_status'):\n username = request.session.get('username')\n if '@' in username:\n user_id = TUser.objects.filter(username=username)[0].id\n else:\n user_id = TUser.objects.filter(phone_number=username)[0].id\n\n user_book = TShoppingCart.objects.filter(book_id=int(book_id), user_id=user_id)\n if user_book:\n user_book[0].quantity = int(number)\n user_book[0].save()\n else:\n cart.change_book(int(book_id), int(number))\n request.session['cart'] = cart\n return JsonResponse({'msg': 'Updated successfully', 'status': 1})\n return JsonResponse({'msg': 'Update failed: the quantity must be between 1 and 200', 'status': 0})\n except Exception as error:\n print(error)\n return HttpResponse('Oops! The server ran into an error~')\n\n\n# Delete-from-cart logic\ndef del_cart(request):\n try:\n with transaction.atomic():\n book_id = request.POST.get('book_id')\n cart = request.session.get('cart')\n if request.session.get('login_status'):\n username = request.session.get('username')\n if '@' in username:\n user_id = TUser.objects.filter(username=username)[0].id\n else:\n user_id = TUser.objects.filter(phone_number=username)[0].id\n user_book = TShoppingCart.objects.filter(book_id=int(book_id), user_id=user_id)\n if user_book:\n user_book[0].delete()\n else:\n cart.del_book(int(book_id))\n request.session['cart'] = cart\n return JsonResponse({'msg': 'Deleted successfully', 'status': 1})\n except Exception as error:\n print(error)\n return HttpResponse('Oops! The server ran into an error~')\n" }, { "alpha_fraction": 0.6275510191917419, "alphanum_fraction": 0.7474489808082581, "avg_line_length": 16.81818199157715, "blob_id": "72b7c1cebf8ca13104f94c87628da31e5f325db2", "content_id": "3dfa2c6ecffe6c7dec3122756d580752318de7da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 392, "license_type": "no_license", "max_line_length": 62, "num_lines": 22, "path": "/DangDang/config.ini", "repo_name": "chaoqun-wang/DjangoProjectDatabase", "src_encoding": "UTF-8", "text": "[uwsgi]\nhttp = 192.168.64.128:9000\n\nsocket = 192.168.64.128:9001\n\nchdir = /usr/local/django_projects/DangDang\n\nwsgi-file = DangDang/wsgi.py\n\nprocesses = 4\n\nthreads = 2\n\nstats = 192.168.64.128:9002\n\nvacuum = true\n\npidfile = /usr/local/django_projects/DangDang/uwsgi.pid\n\ndaemonize = 
/usr/local/django_projects/DangDang/uwsgi.log\n\nstatic-map = /static=/usr/local/django_projects/DangDang/static\n" }, { "alpha_fraction": 0.5808980464935303, "alphanum_fraction": 0.5882038474082947, "avg_line_length": 41.19548797607422, "blob_id": "dd917f8aff700dba252d9db90e6d88dd427596f4", "content_id": "46a77fb60359da29e99d337a656ed861e73d5de1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5768, "license_type": "no_license", "max_line_length": 246, "num_lines": 133, "path": "/DangDang/order/views.py", "repo_name": "chaoqun-wang/DjangoProjectDatabase", "src_encoding": "UTF-8", "text": "from django.db.models import Sum\nfrom django.shortcuts import render, HttpResponse\nfrom django.http import JsonResponse\nfrom home.models import TUser, TAddress, TOrder, TOrderDetail, TShoppingCart\nfrom django.db import transaction\nfrom datetime import datetime\nfrom cart.cart import Cart\nimport re\nfrom order.generate_order_number import generate_order_number\n\n\n# Create your views here.\n\n\n# Render the order page\ndef indent_page(request):\n try:\n username = request.session.get('username')\n if '@' in username:\n user_id = TUser.objects.filter(username=username)[0].id\n else:\n user_id = TUser.objects.filter(phone_number=username)[0].id\n address = TAddress.objects.filter(user_id=user_id)\n cart = TShoppingCart.objects.filter(user_id=user_id)\n order = Cart()\n for book in cart:\n order.add_shopping_cart(book.book_id, book.quantity)\n total_price = 0\n for book in order.book_items:\n total_price += book.total_price\n request.session['total_price'] = total_price\n request.session['order'] = order\n return render(request, 'order/indent.html', {\n 'username': username,\n 'address': address,\n 'total_price': total_price,\n 'order': order,\n })\n except Exception as error:\n print(error)\n return HttpResponse('Oops! The server ran into an error~')\n\n\ndef select_address(request):\n try:\n addr_id = request.POST.get('addr_id')\n address = TAddress.objects.filter(id=addr_id)[0]\n\n def address_default(addr):\n if isinstance(addr, TAddress):\n return {\n 'id': addr.id,\n 'consignee': addr.consignee,\n 'address': addr.address,\n 'postcode': addr.postcode,\n 'phone_number': addr.phone_number,\n 'telephone': addr.telephone,\n 'user_id': addr.user_id\n }\n\n return JsonResponse({'msg': address, 'status': 1}, json_dumps_params={\"default\": address_default})\n except Exception as error:\n print(error)\n return HttpResponse('Oops! The server ran into an error~')\n\n\n# Submit address information\ndef submit_address(request):\n try:\n with transaction.atomic():\n addr_id = request.POST.get('addr_id')\n consignee = request.POST.get('ship_man')\n address = request.POST.get('address')\n zip_code = request.POST.get('zip_code')\n phone = request.POST.get('phone')\n telephone = request.POST.get('telephone')\n username = request.session.get('username')\n if '@' in username:\n user_id = TUser.objects.filter(username=username)[0].id\n else:\n user_id = TUser.objects.filter(phone_number=username)[0].id\n if addr_id:\n res = TAddress.objects.filter(id=int(addr_id), user_id=int(user_id))\n if res:\n address_obj = res[0]\n else:\n return JsonResponse({'msg': 'Nice try ^_^~!!!', 'status': 0})\n else:\n # The address must contain Chinese province/city ('省'/'市') and district/county ('区'/'县') markers,\n # the postcode must be six digits, and the phone or landline must match its pattern\n if consignee != '' and ('省' in address or '市' in address) and ('区' in address or '县' in address) and re.match(r\"[0-9]\\d{5}(?!\\d)\", zip_code) and (re.match(r\"^1[35678]\\d{9}$\", phone) or re.match(\"^0\\\\d{2,3}-\\\\d{7,8}$\", telephone)):\n address_obj = TAddress.objects.create(consignee=consignee, address=address, postcode=zip_code, 
phone_number=phone, telephone=telephone, user_id=user_id)\n else:\n return JsonResponse({'msg': 'Please check that the information is filled in correctly', 'status': 0})\n order_obj = TOrder.objects.create(order_number=generate_order_number(), generated_time=datetime.now(), total_price=request.session.get('total_price'), user_id=user_id, address_id=address_obj.id, status=0)\n order = request.session.get('order')\n for book in order.book_items:\n TOrderDetail.objects.create(book_id=book.book_id, price=book.book_price, books_number=book.book_number, order_id=order_obj.id)\n del request.session['order']\n TShoppingCart.objects.filter(user_id=user_id).delete()\n return JsonResponse({'msg': '/order/indent_ok/', 'status': 1})\n except Exception as error:\n print(error)\n return HttpResponse('Oops! The server ran into an error~')\n\n\n# Render the order-completed page\ndef indent_ok(request):\n try:\n username = request.session.get('username')\n if '@' in username:\n user_id = TUser.objects.filter(username=username)[0].id\n else:\n user_id = TUser.objects.filter(phone_number=username)[0].id\n order = TOrder.objects.filter(user_id=user_id, status=0)\n order_number = order[0].order_number\n total_price = order[0].total_price\n books_number = order[0].torderdetail_set.all().aggregate(books_number=Sum('books_number'))['books_number']\n address = TAddress.objects.filter(id=order[0].address_id)[0]\n books = Cart()\n for order_detail in TOrderDetail.objects.filter(order_id=order[0].id):\n books.add_shopping_cart(order_detail.book_id, order_detail.books_number)\n order[0].status = 1\n order[0].save()\n return render(request, 'order/indent ok.html', {\n 'username': username,\n 'order_number': order_number,\n 'total_price': total_price,\n 'books_number': books_number,\n 'address': address,\n 'books': books.book_items\n })\n except Exception as error:\n print(error)\n return HttpResponse('Oops! The server ran into an error~')\n" }, { "alpha_fraction": 0.6742021441459656, "alphanum_fraction": 0.6742021441459656, "avg_line_length": 43.235294342041016, "blob_id": "411d33c8b950555a5e4b51f60ea84abc1b997962", "content_id": "d9bd1e557fb182163177cad9c9d19afd2b574cbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 752, "license_type": "no_license", "max_line_length": 73, "num_lines": 17, "path": "/DangDang/user/urls.py", "repo_name": "chaoqun-wang/DjangoProjectDatabase", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom user import views\n\napp_name = 'user'\n\nurlpatterns = [\n path('register_page/', views.register_page, name='register_page'),\n path('check_username/', views.check_username, name='check_username'),\n path('check_captcha/', views.check_captcha, name='check_captcha'),\n path('register_logic/', views.register_logic, name='register_logic'),\n path('login_page/', views.login_page, name='login_page'),\n path('login_logic/', views.login_logic, name='login_logic'),\n path('register_ok/', views.register_ok, name='register_ok'),\n path('sign_out/', views.sign_out, name='sign_out'),\n path('send_email/', views.send_email, name='send_email'),\n path('check_code/', views.check_code, name='check_code'),\n]\n" }, { "alpha_fraction": 0.6035242080688477, "alphanum_fraction": 0.607929527759552, "avg_line_length": 19.636363983154297, "blob_id": "eb2a35ac36807bdae5f8326fc8e9ce468cddd9c9", "content_id": "26b736b90c5a88711ac082cd106c83d138140bd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1040, "license_type": "no_license", "max_line_length": 68, "num_lines": 44, "path": 
"/DangDang/user/encipher.py", "repo_name": "chaoqun-wang/DjangoProjectDatabase", "src_encoding": "UTF-8", "text": "import os, uuid, hashlib, django\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"DangDang.settings\")\ndjango.setup()\n\nfrom home.models import TUser\n\n\ndef generate_salt():\n \"\"\"\n 生成随机字符串(盐)\n :return: 返回随机字符串(盐)\n \"\"\"\n salt = hashlib.md5()\n salt.update(str(uuid.uuid4()).replace('-', '').encode())\n return str(salt.hexdigest())\n\n\ndef generate_password(password):\n \"\"\"\n 密码加密\n :param password: 获取明文密码\n :return: 返回加密后的密码\n \"\"\"\n sha = hashlib.sha1()\n sha.update(password.encode())\n return str(sha.hexdigest())\n\n\ndef get_salt(username):\n \"\"\"\n 获取数据库中的盐\n :param username: 获取用户名\n :return: 返回获取到的盐,没有返回None\n \"\"\"\n if '@' in username:\n res = TUser.objects.filter(username=username)\n else:\n res = TUser.objects.filter(phone_number=username)\n if res:\n salt = res[0].salt\n print(salt, '获取盐')\n return salt\n return str()\n" }, { "alpha_fraction": 0.6702898740768433, "alphanum_fraction": 0.6702898740768433, "avg_line_length": 26.600000381469727, "blob_id": "33661494c669b48f2381fed6f76c874d07c585d9", "content_id": "07faa91c69fb1ee082304ab026f75d9406aa8cdb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 276, "license_type": "no_license", "max_line_length": 67, "num_lines": 10, "path": "/DangDang/home/urls.py", "repo_name": "chaoqun-wang/DjangoProjectDatabase", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom home import views\n\napp_name = 'home'\n\nurlpatterns = [\n path('home_page/', views.home_page, name='home_page'),\n path('details_page/', views.details_page, name='details_page'),\n path('list_page/', views.list_page, name='list_page'),\n]\n" }, { "alpha_fraction": 0.5782738924026489, "alphanum_fraction": 0.5858970880508423, "avg_line_length": 37.26041793823242, "blob_id": "862456cf671b8f9eff97036780f8917b3bf21d23", "content_id": "735b8d51c2fb8f125ca4c8abc76d67df364f861e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3767, "license_type": "no_license", "max_line_length": 97, "num_lines": 96, "path": "/DangDang/home/views.py", "repo_name": "chaoqun-wang/DjangoProjectDatabase", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, HttpResponse\nfrom home.models import TBookClassification, TBooks\nfrom django.core.paginator import Paginator\n\n\n# Create your views here.\n\n\n# 渲染当当网首页\ndef home_page(request):\n try:\n username = request.session.get('username')\n level_one = TBookClassification.objects.filter(level=1)\n level_two = TBookClassification.objects.filter(level=2)\n new_book = TBooks.objects.all().order_by('-add_time')[0:8]\n recommend_book = TBooks.objects.all()[0:10]\n new_hot_top = TBooks.objects.all().order_by('-add_time', '-sales')[0:5]\n new_hot_bottom = TBooks.objects.all().order_by('-add_time', '-sales')[0:10]\n return render(request, 'home/index.html', {\n 'username': username,\n 'level_one': level_one,\n 'level_two': level_two,\n 'new_book': new_book,\n 'recommend_book': recommend_book,\n 'new_hot_top': new_hot_top,\n 'new_hot_bottom': new_hot_bottom,\n })\n except Exception as error:\n print(error)\n return HttpResponse('哎呀!服务器出错了~')\n\n\n# 渲染书籍详情页面\ndef details_page(request):\n try:\n username = request.session.get('username')\n book_id = request.GET.get('book_id')\n book = TBooks.objects.filter(id=book_id)\n return render(request, 'home/Book details.html', {'book': 
book[0], 'username': username})\n    except Exception as error:\n        print(error)\n        return HttpResponse('<h1>无此书籍信息</h1>')\n\n\n# 渲染书籍列表页面\ndef list_page(request):\n    try:\n        username = request.session.get('username')\n        level_one = TBookClassification.objects.filter(level=1)\n        level_two = TBookClassification.objects.filter(level=2)\n        id = request.GET.get('category_id')\n        level = request.GET.get('level')\n        if id and level:\n            request.session['id'] = id\n            request.session['level'] = level\n        else:\n            id = request.session.get('id')\n            level = request.session.get('level')\n        page_num = request.GET.get('number', default='1')\n        queryset = TBooks.objects.none()  # 空的图书查询集,便于按分类合并\n        if level == '1':\n            data = TBookClassification.objects.filter(parent_id=id)\n            for i in data:\n                book = TBooks.objects.filter(category_id=i.id)\n                queryset |= book\n        elif level == '2':\n            queryset = TBooks.objects.filter(category_id=id)\n        else:\n            queryset = TBooks.objects.none()\n        sort_select = request.GET.get('num')\n        if sort_select == '1':\n            html_file = 'home/booklist_default.html'\n        elif sort_select == '2':\n            queryset = queryset.order_by('-sales')\n            html_file = 'home/booklist_sale_desc.html'\n        elif sort_select == '3':\n            queryset = queryset.order_by('dang_price')\n            html_file = 'home/booklist_price_asc.html'\n        elif sort_select == '4':\n            queryset = queryset.order_by('-publishing_time')\n            html_file = 'home/booklist_pubdate_desc.html'\n        else:\n            html_file = 'home/booklist_default.html'\n        pager_obj = Paginator(queryset, per_page=4)\n        if page_num.isdigit() is not True or int(page_num) not in pager_obj.page_range:\n            page_num = 1\n        page_obj = pager_obj.page(int(page_num))\n        return render(request, html_file, {\n            'username': username,\n            'level_one': level_one,\n            'level_two': level_two,\n            'page_obj': page_obj,\n        })\n    except Exception as error:\n        print(error)\n        return HttpResponse('哎呀!服务器出错了~')\n" }, { "alpha_fraction": 0.7283236980438232, "alphanum_fraction": 0.7283236980438232, "avg_line_length": 20.625, "blob_id": "4776f45ff8c82cb5d97a1a8b470770ead4b82e20", "content_id": "0c0d791575162a576d158ea31f21231d004075bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 173, "license_type": "no_license", "max_line_length": 64, "num_lines": 8, "path": "/DangDang/createCaptcha/urls.py", "repo_name": "chaoqun-wang/DjangoProjectDatabase", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom createCaptcha import views\n\napp_name = 'createCaptcha'\n\nurlpatterns = [\n    path('get_captcha/', views.get_captcha, name='get_captcha'),\n]\n" }, { "alpha_fraction": 0.5710738897323608, "alphanum_fraction": 0.5741775035858154, "avg_line_length": 45.02857208251953, "blob_id": "4780be3d883926503ca993c862b36e85077ec682", "content_id": "5f89f6b37743f152773a13833e8837489d926fc2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3720, "license_type": "no_license", "max_line_length": 172, "num_lines": 70, "path": "/DangDang/DangDang/middleware.py", "repo_name": "chaoqun-wang/DjangoProjectDatabase", "src_encoding": "UTF-8", "text": "from django.shortcuts import redirect\nfrom django.utils.deprecation import MiddlewareMixin\nfrom home.models import TUser\n\n\nclass MyMiddleware(MiddlewareMixin):  # 自定义的中间件\n    def __init__(self, get_response):  # 初始化\n        super().__init__(get_response)\n\n    # 前端页面发请求 -> 视图函数\n\n    # view处理请求前执行\n    def process_request(self, request):  # 某一个view\n        # print(\"request:\", request, '中间件第13行打印~~~')\n        if
'/' == request.path: # 直接进入主页\n return redirect('home:home_page')\n # 从那儿来回哪儿去\n book_id = request.GET.get('book_id')\n if 'home_page' in request.path or 'list_page' in request.path or 'details_page' in request.path or 'shopping_cart' in request.path or 'indent_page' in request.path:\n if request.path == '/dangdang/details_page/':\n request.session['url'] = request.path + '?book_id=' + book_id\n else:\n request.session['url'] = request.path\n # 登录用户后,并点击了记住我(一周),下次打开当当网,实现自动登录\n if request.session.get('flag'): # 判断是否有标记,有标记不做任何事\n pass\n else: # 无标记验证是否有七天记住我\n if 'home_page' in request.path or 'list_page' in request.path or 'details_page' in request.path or 'shopping_cart' in request.path:\n username = request.COOKIES.get('username') # 取用户名\n password = request.COOKIES.get('password') # 取密码\n url = request.session.get('url')\n if username: # 如果用户名存在,验证用户名密码是否正确\n if '@' in username:\n result = TUser.objects.filter(username=username, password=password, status=1)\n else:\n result = TUser.objects.filter(phone_number=username, password=password, status=1)\n if result:\n request.session['flag'] = True # 存标记防止重定向过多\n request.session['login_status'] = True\n request.session['username'] = username\n return redirect(url)\n request.session['flag'] = True # 存标记防止重定向过多\n return redirect(url)\n request.session['flag'] = True # 存标记防止重定向过多\n return redirect(url)\n # 订单页,订单完成页强制登录验证\n if 'indent_page' in request.path or 'indent_ok' in request.path:\n if request.session.get('login_status'):\n pass\n else:\n return redirect('user:login_page')\n # 限制访问页面\n if 'register_ok' in request.path or 'indent_page' in request.path or 'indent_ok' in request.path:\n if request.META.get('HTTP_REFERER') is None:\n return redirect('home:home_page')\n\n # 在process_request之后View之前执行\n def process_view(self, request, view_func, view_args, view_kwargs):\n # print(\"view:\", request, view_func, view_args, view_kwargs, '中间件第34行打印~~~')\n pass\n\n # view执行之后,响应之前执行\n def process_response(self, request, response):\n # print(\"response:\", request, response, '中间件第38行打印~~~')\n return response # 必须返回response\n\n # 如果View中抛出了异常\n def process_exception(self, request, ex): # View中出现异常时执行\n print(\"exception:\", request, ex, '中间件第69行打印~~~')\n pass\n" }, { "alpha_fraction": 0.5370301604270935, "alphanum_fraction": 0.5457139015197754, "avg_line_length": 36.845069885253906, "blob_id": "c3b32da33a78e06a8fe063eaddef8bdc544b1f1d", "content_id": "2a6b1f68df4ffaf9184907e788e0c312c5a59833", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8553, "license_type": "no_license", "max_line_length": 114, "num_lines": 213, "path": "/DangDang/user/views.py", "repo_name": "chaoqun-wang/DjangoProjectDatabase", "src_encoding": "UTF-8", "text": "from django.core.mail import send_mail\nfrom django.db import transaction\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect, HttpResponse\nfrom home.models import TUser, TShoppingCart\nfrom user.encipher import generate_salt, generate_password, get_salt\nimport re, uuid\n\n\n# Create your views here.\n\n\n# 渲染注册页面\ndef register_page(request):\n try:\n return render(request, 'user/register.html')\n except Exception as error:\n print(error)\n return HttpResponse('哎呀!服务器出错了~')\n\n\n# 检查用户名\ndef check_username(request):\n try:\n username = request.POST.get('username')\n if username == '':\n return JsonResponse({'msg': '帐号不能为空', 'status': 0})\n if '@' in username:\n if 
re.match(r'^[a-zA-Z0-9_-]+(\\.[a-zA-Z0-9_-]+){0,4}@[a-zA-Z0-9_-]+(\\.[a-zA-Z0-9_-]+){0,4}$', username):\n                res = TUser.objects.filter(username=username)\n                if res:\n                    return JsonResponse({'msg': '此账号已存在', 'status': 0})\n                return JsonResponse({'msg': '邮箱可用', 'status': 1})\n            return JsonResponse({'msg': '邮箱格式错误', 'status': 0})\n        else:\n            if re.match(r\"^1[35678]\\d{9}$\", username):\n                res = TUser.objects.filter(phone_number=username)\n                if res:\n                    return JsonResponse({'msg': '此账号已存在', 'status': 0})\n                return JsonResponse({'msg': '手机号可用', 'status': 1})\n            return JsonResponse({'msg': '手机号格式错误', 'status': 0})\n    except Exception as error:\n        print(error)\n        return HttpResponse('哎呀!服务器出错了~')\n\n\n# 检查验证码\ndef check_captcha(request):\n    try:\n        captcha_code = request.POST.get('code')\n        if captcha_code.lower() == request.session.get('code').lower():\n            return JsonResponse({'msg': '验证码正确', 'status': 1})\n        return JsonResponse({'msg': '验证码错误', 'status': 0})\n    except Exception as error:\n        print(error)\n        return HttpResponse('哎呀!服务器出错了~')\n\n\n# 注册逻辑\ndef register_logic(request):\n    try:\n        with transaction.atomic():\n            username = request.POST.get('txt_username')\n            password = request.POST.get('txt_password')\n            salt = generate_salt()\n            password = generate_password(password)\n            if '@' in username:\n                user = TUser.objects.create(username=username, password=password + salt, salt=salt, status=1)\n            else:\n                user = TUser.objects.create(phone_number=username, password=password + salt, salt=salt, status=1)\n            request.session['login_status'] = True\n            request.session['username'] = username\n            cart = request.session.get('cart')\n            if cart:\n                for book in cart.book_items:\n                    user_book = TShoppingCart.objects.filter(book_id=book.book_id, user_id=user.id)\n                    if user_book:\n                        user_book[0].quantity += book.book_number\n                        user_book[0].save()\n                    else:\n                        TShoppingCart.objects.create(\n                            quantity=book.book_number,\n                            book_id=book.book_id,\n                            user_id=user.id\n                        )\n                del request.session['cart']\n            return JsonResponse({'msg': '注册成功', 'status': 1})\n    except Exception as error:\n        print(error)\n        return HttpResponse('哎呀!服务器出错了~')\n\n\n# 渲染注册成功页面\ndef register_ok(request):\n    try:\n        url = request.session.get('url')\n        if url is None:\n            url = '/dangdang/home_page/'\n        username = request.session.get('username')\n        return render(request, 'user/register ok.html', {'username': username, 'url': url})\n    except Exception as error:\n        print(error)\n        return HttpResponse('哎呀!服务器出错了~')\n\n\n# 渲染登录页面\ndef login_page(request):\n    try:\n        username = request.COOKIES.get('username')\n        password = request.COOKIES.get('password')\n        url = request.session.get('url')\n        if username:\n            if '@' in username:\n                result = TUser.objects.filter(username=username, password=password, status=1)\n            else:\n                result = TUser.objects.filter(phone_number=username, password=password, status=1)\n            if result:\n                request.session['login_status'] = True\n                request.session['username'] = username\n                return redirect(url)\n            return render(request, 'user/login.html')\n        return render(request, 'user/login.html')\n    except Exception as error:\n        print(error)\n        return HttpResponse('哎呀!服务器出错了~')\n\n\n# 登录逻辑\ndef login_logic(request):\n    try:\n        with transaction.atomic():\n            username = request.POST.get('username')\n            password = request.POST.get('password')\n            remember = request.POST.get('auto_login')\n            url = request.session.get('url')\n            if url is None:\n                url = '/dangdang/home_page/'\n            salt = get_salt(username)\n            password = generate_password(password) + salt\n            if '@' in username:\n                user = TUser.objects.filter(username=username, password=password,
status=1)\n            else:\n                user = TUser.objects.filter(phone_number=username, password=password, status=1)\n            if user:\n                request.session['login_status'] = True\n                request.session['username'] = username\n                response = JsonResponse({'msg': url, 'status': 1})\n                if remember:\n                    response.set_cookie('username', username, max_age=7 * 24 * 3600)\n                    response.set_cookie('password', password, max_age=7 * 24 * 3600)\n                cart = request.session.get('cart')\n                if cart:\n                    for book in cart.book_items:\n                        user_book = TShoppingCart.objects.filter(book_id=book.book_id, user_id=user[0].id)\n                        if user_book:\n                            user_book[0].quantity += book.book_number\n                            user_book[0].save()\n                        else:\n                            TShoppingCart.objects.create(\n                                quantity=book.book_number,\n                                book_id=book.book_id,\n                                user_id=user[0].id\n                            )\n                    del request.session['cart']\n                return response\n            return JsonResponse({'msg': '帐号或密码错误', 'status': 0})\n    except Exception as error:\n        print(error)\n        return HttpResponse('哎呀!服务器出错了~')\n\n\n# 退出登录\ndef sign_out(request):\n    try:\n        response = redirect('home:home_page')\n        response.delete_cookie('username')\n        response.delete_cookie('password')\n        request.session.flush()\n        return response\n    except Exception as error:\n        print(error)\n        return HttpResponse('哎呀!服务器出错了~')\n\n\n# 邮件发送的代码\ndef send_email(request):\n    try:\n        username = request.POST.get('username')\n        random_code = str(uuid.uuid4())\n        print(username, random_code)\n        request.session['random_code'] = random_code\n        res = send_mail('中期项目:当当-邮箱验证', random_code, '[email protected]', [username], fail_silently=False)\n        if res == 1:\n            return JsonResponse({'msg': '发送成功,请注意查收', 'status': 1})\n        else:\n            return JsonResponse({'msg': '发送失败', 'status': 0})\n    except Exception as error:\n        print(error)\n        return HttpResponse('哎呀!服务器出错了~')\n\n\n# 检查邮件随机验证码\ndef check_code(request):\n    try:\n        code = request.POST.get('code')\n        random_code = request.session.get('random_code')\n        url = request.session.get('url')\n        if code == random_code:\n            return JsonResponse({'msg': '恭喜您验证成功', 'status': 1, 'url': url})\n        return JsonResponse({'msg': '验证码错误', 'status': 0})\n    except Exception as error:\n        print(error)\n        return HttpResponse('哎呀!服务器出错了~')\n" }, { "alpha_fraction": 0.3227970600128174, "alphanum_fraction": 0.3428274989128113, "avg_line_length": 36.034324645996094, "blob_id": "c7c7604d78fb4b64ee5a842ace0499f9235167ab", "content_id": "cb0119fdfc074e372570197a8575669e4c2fc72f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 72290, "license_type": "no_license", "max_line_length": 387, "num_lines": 1952, "path": "/DangDang/static/js/collect.js", "repo_name": "chaoqun-wang/DjangoProjectDatabase", "src_encoding": "UTF-8", "text": "if (typeof JSON !== \"object\") {\n    JSON = {}\n}\n(function () {\n    \"use strict\";\n    var g = /^[\\],:{}\\s]*$/;\n    var h = /\\\\(?:[\"\\\\\\/bfnrt]|u[0-9a-fA-F]{4})/g;\n    var l = /\"[^\"\\\\\\n\\r]*\"|true|false|null|-?\\d+(?:\\.\\d*)?(?:[eE][+\\-]?\\d+)?/g;\n    var m = /(?:^|:|,)(?:\\s*\\[)+/g;\n    var o = /[\\\\\\\"\\u0000-\\u001f\\u007f-\\u009f\\u00ad\\u0600-\\u0604\\u070f\\u17b4\\u17b5\\u200c-\\u200f\\u2028-\\u202f\\u2060-\\u206f\\ufeff\\ufff0-\\uffff]/g;\n    var p = /[\\u0000\\u00ad\\u0600-\\u0604\\u070f\\u17b4\\u17b5\\u200c-\\u200f\\u2028-\\u202f\\u2060-\\u206f\\ufeff\\ufff0-\\uffff]/g;\n\n    function f(n) {\n        return n < 10 ? \"0\" + n : n\n    }\n\n    function this_value() {\n        return this.valueOf()\n    }\n\n    if (typeof Date.prototype.toJSON !== \"function\") {\n        Date.prototype.toJSON = function () {\n            return isFinite(this.valueOf()) ?
this.getUTCFullYear() + \"-\" + f(this.getUTCMonth() + 1) + \"-\" + f(this.getUTCDate()) + \"T\" + f(this.getUTCHours()) + \":\" + f(this.getUTCMinutes()) + \":\" + f(this.getUTCSeconds()) + \"Z\" : null\n };\n Boolean.prototype.toJSON = this_value;\n Number.prototype.toJSON = this_value;\n String.prototype.toJSON = this_value\n }\n var q;\n var r;\n var s;\n var t;\n\n function quote(b) {\n o.lastIndex = 0;\n return o.test(b) ? \"\\\"\" + b.replace(o, function (a) {\n var c = s[a];\n return typeof c === \"string\" ? c : \"\\\\u\" + (\"0000\" + a.charCodeAt(0).toString(16)).slice(-4)\n }) + \"\\\"\" : \"\\\"\" + b + \"\\\"\"\n }\n\n function str(a, b) {\n var i;\n var k;\n var v;\n var c;\n var d = q;\n var e;\n var f = b[a];\n if (f && typeof f === \"object\" && typeof f.toJSON === \"function\") {\n f = f.toJSON(a)\n }\n if (typeof t === \"function\") {\n f = t.call(b, a, f)\n }\n switch (typeof f) {\n case\"string\":\n return quote(f);\n case\"number\":\n return isFinite(f) ? String(f) : \"null\";\n case\"boolean\":\n case\"null\":\n return String(f);\n case\"object\":\n if (!f) {\n return \"null\"\n }\n q += r;\n e = [];\n if (Object.prototype.toString.apply(f) === \"[object Array]\") {\n c = f.length;\n for (i = 0; i < c; i += 1) {\n e[i] = str(i, f) || \"null\"\n }\n v = e.length === 0 ? \"[]\" : q ? \"[\\n\" + q + e.join(\",\\n\" + q) + \"\\n\" + d + \"]\" : \"[\" + e.join(\",\") + \"]\";\n q = d;\n return v\n }\n if (t && typeof t === \"object\") {\n c = t.length;\n for (i = 0; i < c; i += 1) {\n if (typeof t[i] === \"string\") {\n k = t[i];\n v = str(k, f);\n if (v) {\n e.push(quote(k) + (q ? \": \" : \":\") + v)\n }\n }\n }\n } else {\n for (k in f) {\n if (Object.prototype.hasOwnProperty.call(f, k)) {\n v = str(k, f);\n if (v) {\n e.push(quote(k) + (q ? \": \" : \":\") + v)\n }\n }\n }\n }\n v = e.length === 0 ? \"{}\" : q ? \"{\\n\" + q + e.join(\",\\n\" + q) + \"\\n\" + d + \"}\" : \"{\" + e.join(\",\") + \"}\";\n q = d;\n return v\n }\n }\n\n if (typeof JSON.stringify !== \"function\") {\n s = {\"\\b\": \"\\\\b\", \"\\t\": \"\\\\t\", \"\\n\": \"\\\\n\", \"\\f\": \"\\\\f\", \"\\r\": \"\\\\r\", \"\\\"\": \"\\\\\\\"\", \"\\\\\": \"\\\\\\\\\"};\n JSON.stringify = function (a, b, c) {\n var i;\n q = \"\";\n r = \"\";\n if (typeof c === \"number\") {\n for (i = 0; i < c; i += 1) {\n r += \" \"\n }\n } else if (typeof c === \"string\") {\n r = c\n }\n t = b;\n if (b && typeof b !== \"function\" && (typeof b !== \"object\" || typeof b.length !== \"number\")) {\n throw new Error(\"JSON.stringify\");\n }\n return str(\"\", {\"\": a})\n }\n }\n if (typeof JSON.parse !== \"function\") {\n JSON.parse = function (d, e) {\n var j;\n\n function walk(a, b) {\n var k;\n var v;\n var c = a[b];\n if (c && typeof c === \"object\") {\n for (k in c) {\n if (Object.prototype.hasOwnProperty.call(c, k)) {\n v = walk(c, k);\n if (v !== undefined) {\n c[k] = v\n } else {\n delete c[k]\n }\n }\n }\n }\n return e.call(a, b, c)\n }\n\n d = String(d);\n p.lastIndex = 0;\n if (p.test(d)) {\n d = d.replace(p, function (a) {\n return \"\\\\u\" + (\"0000\" + a.charCodeAt(0).toString(16)).slice(-4)\n })\n }\n if (g.test(d.replace(h, \"@\").replace(l, \"]\").replace(m, \"\"))) {\n j = eval(\"(\" + d + \")\");\n return (typeof e === \"function\") ? 
walk({\"\": j}, \"\") : j\n }\n throw new SyntaxError(\"JSON.parse\");\n }\n }\n}());\nvar DDF;\n(function (o, r) {\n var F = function (a, b) {\n return new F.prototype.init(a, b)\n };\n F.idSelectorReg = /^#[\\w-]+/i;\n F.classSelectorReg = /^\\.[\\w-]+$/i;\n F.attrSelectorReg = /^[\\w\\*]+\\[([\\w-]+)(=\\'(.*)\\')?\\]$/i;\n F.tagSelectorReg = /^[\\w\\*]+$/i;\n F.events = ['blur', 'change', 'click', 'dblclick', 'error', 'focus', 'keydown', 'keypress', 'keyup', 'load', 'mousedown', 'mousemove', 'mouseout', 'mouseover', 'mouseup', 'reset', 'resize', 'select', 'submit', 'unload', 'message'];\n F.bindElem = [];\n F.bindFn = {};\n F.fn = F.prototype = {\n init: function (a, b) {\n b = b || document;\n var c = this.qsa(b, a);\n if (c && !F.isEmptyObject(c)) {\n F.merge(this, c);\n this.selector = c\n }\n return this\n }, size: function () {\n return this.length\n }, find: function (d) {\n if (this.selector) {\n var e = false;\n var f = this;\n F.each(this.selector, function (a, b) {\n if (e === false) {\n f.clearDom.call(f);\n e = true\n }\n var c = f.qsa(b, d);\n if (c.length > 0) {\n f.selector = typeof f.selector == 'array' ? f.selector : [];\n F.merge(f.selector, c);\n F.merge(f, c)\n }\n })\n }\n return this\n }, qsa: function (a, b) {\n if (!b) {\n return null\n } else if (b.nodeType) {\n var c = [];\n c.push(b);\n return c\n } else if (b === \"body\" && document.body) {\n var c = [];\n c.push(document.body);\n return c\n } else if (b == o) {\n var c = [];\n c.push(o);\n return c\n } else if (typeof b == 'function') {\n return []\n } else if (typeof b === \"string\") {\n if (F.idSelectorReg.test(b)) {\n var c = [];\n c.push(document.getElementById(b.slice(1)));\n return c\n } else if (a.querySelectorAll) {\n return a.querySelectorAll(b)\n } else if (F.classSelectorReg.test(b)) {\n var d = document.getElementsByTagName(\"*\");\n var c = [];\n for (var i in d) {\n if (d[i].nodeType == 1) {\n var e = u(d[i], 'class');\n if (e) {\n var f = e.split(' ');\n if (F.inArray(b.slice(1), f) >= 0) {\n c.push(d[i])\n }\n }\n }\n }\n return c\n } else if (F.attrSelectorReg.test(b)) {\n var d = document.getElementsByTagName(\"*\");\n var c = [];\n for (var i in d) {\n if (d[i] && d[i].nodeType == 1) {\n var g = b.match(F.attrSelectorReg);\n if (g && typeof g[1] != 'undefined') {\n var h = g[1];\n var j = u(d[i], h);\n if (typeof g[3] != 'undefined' && g[3] !== null && g[3] !== '') {\n if (g[3] == j) {\n c.push(d[i])\n }\n } else if (typeof j != 'undefined' && j !== null) {\n c.push(d[i])\n }\n }\n }\n }\n return c\n } else if (F.tagSelectorReg.test(b)) {\n var d = document.getElementsByTagName(b);\n var c = [];\n for (var i in d) {\n if (d[i].nodeType == 1) {\n c.push(i)\n }\n }\n return c\n } else {\n return null\n }\n }\n }, bind: function (d, e) {\n if (F.inArray(d, F.events) >= 0) {\n F.each(this, function (k, a) {\n if (a.addEventListener) {\n a.addEventListener(d, e, false)\n } else if (a.attachEvent) {\n a.attachEvent('on' + d, e)\n } else {\n a[\"on\" + d] = e\n }\n var b = F.inArray(a, F.bindElem);\n b = F.inArray(a, F.bindElem) < 0 ? 
(F.bindElem.push(a) - 1) : b;\n if (F.bindFn[b]) {\n F.bindFn[b].push(e)\n } else {\n var c = [];\n c.push(e);\n F.bindFn[b] = c\n }\n })\n }\n }, unbind: function (c, d) {\n if (F.inArray(c, F.events) >= 0) {\n F.each(this, function (k, a) {\n if (d) {\n if (a.removeEventListener) {\n a.removeEventListener(c, d, false)\n } else if (a.detachEvent) {\n a.detachEvent('on' + c, d)\n } else {\n a[\"on\" + c] = null\n }\n } else {\n var b = F.inArray(a, F.bindElem);\n if (b >= 0) {\n for (var i = 0; i < F.bindFn[b].length; i++) {\n if (a.removeEventListener) {\n a.removeEventListener(c, F.bindFn[b][i], false)\n } else if (a.detachEvent) {\n a.detachEvent('on' + c, F.bindFn[b][i])\n }\n }\n a[\"on\" + c] = null\n }\n }\n })\n }\n }, attr: function (a, b) {\n if (arguments.length > 0 && typeof a == 'string') {\n if (arguments.length > 1) {\n _self = this;\n return F.each(_self, function () {\n b == null ? z(this, a) : w(this, a, b)\n })\n } else {\n return typeof this[0] != 'undefined' && this[0].nodeType == 1 ? u(this[0], a) : null\n }\n }\n return null\n }, children: function () {\n if (typeof this[0] != 'undefined' && this[0].nodeType == 1) {\n var a = this[0].childNodes;\n var b = [];\n var c = false;\n for (var i = 0; i < a.length; i++) {\n if (a[i].nodeType == 1) {\n if (c === false) {\n this.clearDom.call(this);\n c = true\n }\n b.push(a[i])\n }\n }\n this.selector = b;\n F.merge(this, b);\n return this\n }\n return null\n }, parent: function () {\n if (typeof this[0] != 'undefined' && this[0].nodeType == 1) {\n var a = [];\n a.push(this[0].parentNode);\n this.selector = a;\n this.clearDom.call(this);\n F.merge(this, a);\n return this\n }\n return null\n }, parents: function (a) {\n if (typeof this[0] != 'undefined' && this[0].nodeType == 1) {\n var b = this[0].parentNode;\n var c = [];\n if (a) {\n var d = null, regRes = null;\n if (typeof a == 'function') {\n d = 'function'\n } else if (regRes = a.match(F.idSelectorReg)) {\n d = 'id'\n } else if (regRes = a.match(F.classSelectorReg)) {\n d = 'class'\n } else if (regRes = a.match(F.attrSelectorReg)) {\n d = 'attr'\n } else if (regRes = a.match(F.tagSelectorReg)) {\n d = 'tag'\n }\n }\n while (b != null && b.tagName != null) {\n if (a) {\n if (d == 'id') {\n attrValue = F(b).attr('id');\n if (attrValue != null && attrValue == regRes[0].slice(1)) {\n c.push(b)\n }\n }\n if (d == 'class') {\n attrValue = F(b).attr('class');\n if (attrValue != null) {\n var e = attrValue.split(' ');\n if (F.inArray(regRes[0].slice(1), e) >= 0) c.push(b)\n }\n }\n if (d == 'attr') {\n if (typeof regRes[1] != 'undefined' && regRes[1] !== null) {\n var f = null, attrValue = null;\n f = regRes[1];\n if (typeof regRes[3] != 'undefined' && regRes[3] !== null) {\n attrValue = F(b).attr(f);\n if (regRes[3] == attrValue) c.push(b)\n } else {\n attrValue = F(b).attr(f);\n if (attrValue !== null) c.push(b)\n }\n }\n }\n if (d == 'tag') {\n if (typeof regRes[0] != 'undefined' && regRes[0] !== null) {\n if (F(b).tagName() === regRes[0]) c.push(b)\n }\n }\n if (d == 'function') {\n if (a(b)) {\n c.push(b)\n }\n }\n } else {\n c.push(b)\n }\n b = b.parentNode\n }\n this.clearDom.call(this);\n if (c.length > 0) {\n this.selector = c;\n F.merge(this, c)\n }\n return this\n }\n return null\n }, parentsUntil: function (a) {\n if (typeof this[0] != 'undefined' && this[0].nodeType == 1) {\n var b = this[0].parentNode;\n var c = [];\n var d = null, regRes = null;\n if (typeof a == 'function') {\n d = 'function'\n } else if (regRes = a.match(F.idSelectorReg)) {\n d = 'id'\n } else 
if (regRes = a.match(F.classSelectorReg)) {\n d = 'class'\n } else if (regRes = a.match(F.attrSelectorReg)) {\n d = 'attr'\n } else if (regRes = a.match(F.tagSelectorReg)) {\n d = 'tag'\n }\n while (b != null && b.tagName != null) {\n if (d == 'id') {\n attrValue = F(b).attr('id');\n if (attrValue != null && attrValue == regRes[0].slice(1)) {\n c.push(b);\n break\n }\n }\n if (d == 'class') {\n attrValue = F(b).attr('class');\n if (attrValue != null) {\n var e = attrValue.split(' ');\n if (F.inArray(regRes[0].slice(1), e) >= 0) {\n c.push(b);\n break\n }\n }\n }\n if (d == 'attr') {\n if (typeof regRes[1] != 'undefined' && regRes[1] !== null) {\n var f = null, attrValue = null;\n f = regRes[1];\n if (typeof regRes[3] != 'undefined' && regRes[3] !== null) {\n attrValue = F(b).attr(f);\n if (regRes[3] == attrValue) {\n c.push(b);\n break\n }\n } else {\n attrValue = F(b).attr(f);\n if (attrValue !== null) {\n c.push(b);\n break\n }\n }\n }\n }\n if (d == 'tag') {\n if (typeof regRes[0] != 'undefined' && regRes[0] !== null) {\n if (F(b).tagName() === regRes[0]) {\n c.push(b);\n break\n }\n }\n }\n if (d == 'function') {\n if (a(b)) {\n c.push(b);\n break\n }\n }\n b = b.parentNode\n }\n this.clearDom.call(this);\n if (c.length > 0) {\n this.selector = c;\n F.merge(this, c)\n }\n return this\n }\n return null\n }, each: function (a) {\n F.each(this, a);\n return this\n }, css: function (d, e) {\n if (typeof this[0] != 'undefined' && this[0].nodeType == 1) {\n var f = this[0];\n var g = /^(left|right|bottom|top)$/;\n var h = \"getComputedStyle\" in o;\n var j = function (c) {\n c = (c === \"float\") ? (h ? \"CSSFloat\" : \"styleFloat\") : c;\n c = c.replace(/\\-(\\w)/g, function (a, b) {\n return b.toUpperCase()\n });\n return c\n };\n if (arguments.length == 1 && e === r) {\n if (typeof d == \"object\") {\n for (var i in d) {\n var i = j(i);\n f.style[i] = d[i]\n }\n return this\n }\n d = j(d);\n return !!f.style[d] ? f.style[d] : h ? function () {\n var a = getComputedStyle(f, null)[d];\n if (g.test(d) && a === \"auto\") {\n return \"0px\"\n }\n return a\n }() : function () {\n var a = f.currentStyle[d];\n if (d === \"width\" || d === \"height\" && a === \"auto\") {\n var b = f.getBoundingClientRect();\n return (d === \"width\" ? b.right - b.left : b.bottom - b.top) + \"px\"\n }\n if (d === \"opacity\") {\n var c = f.currentStyle.filter;\n if (/opacity/.test(c)) {\n a = c.match(/\\d /)[0] / 100;\n return (a === 1 || a === 0) ? 
a.toFixed(0) : a.toFixed(1)\n } else if (a === r) {\n return \"1\"\n }\n }\n if (rPos.test(p) && a === \"auto\") {\n return \"0px\"\n }\n return a\n }()\n } else if (arguments.length == 2) {\n f.style[d] = e;\n return this\n }\n }\n }, width: function () {\n var a = this[0];\n var b;\n var c = document.documentElement, doBody = document.body;\n if (a) {\n if (typeof a == \"object\" && \"setInterval\" in a) {\n b = c.clientWidth || doBody.clientWidth\n } else if (a == document) {\n b = c.offsetWidth || doBody.offsetWidth\n } else {\n res = a.getBoundingClientRect();\n b = res.width || res.right - res.left\n }\n return b\n }\n }, height: function () {\n var a = this[0];\n var b;\n var c = document.documentElement, doBody = document.body;\n if (a) {\n if (typeof a == \"object\" && \"setInterval\" in a) {\n b = c.clientHeight || doBody.clientHeight\n } else if (a == document) {\n b = c.offsetHeight || doBody.offsetHeight\n } else {\n res = a.getBoundingClientRect();\n b = res.height || res.bottom - res.top\n }\n return b\n }\n }, location: function () {\n if (typeof this[0] != 'undefined') {\n var a = this[0];\n if (a && typeof a.getBoundingClientRect === 'function') {\n var b = a.getBoundingClientRect();\n var c = document.documentElement.clientTop;\n var d = document.documentElement.clientLeft;\n return {top: b.top - c, left: b.left - d, right: b.right - d, bottom: b.bottom - c}\n }\n return null\n }\n return null\n }, tagName: function () {\n if (this[0]) {\n return this[0].tagName.toLowerCase()\n }\n return false\n }, dom: function () {\n return this.selector ? this.selector[0] : null\n }, clearDom: function () {\n for (var i = 0; i < this.length; i++) {\n delete this[i]\n }\n this.length = 0;\n this.selector = ''\n }\n };\n F.prototype.init.prototype = F.prototype;\n var u = function (n, a) {\n if (typeof n != 'object' || typeof a != 'string') return;\n return a == 'class' ? n.className : n.getAttribute(a)\n };\n var w = function (n, a, v) {\n if (typeof n != 'object' || typeof a != 'string') return;\n a == 'class' ? 
n.className = v : n.setAttribute(a, v)\n };\n var z = function (n, a) {\n if (typeof n != 'object' || typeof a != 'string') return;\n n.removeAttribute(a);\n if (a == 'class') n.removeAttribute('className')\n };\n F.each = function (a, b, c) {\n var d, i = 0, length = a.length, isObj = length === r || typeof a == 'function';\n if (c) {\n if (isObj) {\n for (d in a) {\n if (b.apply(a[d], c) === false) {\n break\n }\n }\n } else {\n for (; i < length;) {\n if (b.apply(a[i++], c) === false) {\n break\n }\n }\n }\n } else {\n if (isObj) {\n for (d in a) {\n if (b.call(a[d], d, a[d]) === false) {\n break\n }\n }\n } else {\n for (var e = a[0]; i < length && b.call(e, i, e) !== false; e = a[++i]) {\n }\n }\n }\n return a\n };\n F.merge = function (a, b) {\n var i = a.length || 0, j = 0;\n if (typeof b.length === \"number\") {\n for (var l = b.length; j < l; j++) {\n a[i++] = b[j]\n }\n } else {\n while (b[j] !== r) {\n a[i++] = b[j++]\n }\n }\n a.length = i;\n return a\n };\n F.extend = function (a, b) {\n if (typeof a == 'object' && typeof b == 'object') {\n for (var i in b) {\n if (b.hasOwnProperty(i)) {\n a[i] = b[i]\n }\n }\n return a\n }\n return null\n };\n F.ajax = function (d) {\n var f = {\n url: o.location.href,\n type: \"GET\",\n async: true,\n data: {},\n dataType: 'json',\n jsonp: 'callback',\n jsonpCallback: 'ddf_' + F.createPid(),\n jsonpTimeout: 6000,\n success: function () {\n },\n error: function () {\n }\n };\n d = F.extend(f, d);\n d.type = (d.type || \"GET\").toUpperCase();\n if (o.XMLHttpRequest === r) {\n o.XMLHttpRequest = function () {\n try {\n return new ActiveXObject(\"MSXML2.XMLHttp.6.0\")\n } catch (e) {\n try {\n return new ActiveXObject(\"MSXML2.XMLHttp.3.0\")\n } catch (e1) {\n F.error(\"XMLHttpRequest is not supported\")\n }\n }\n }\n }\n var g = function (a) {\n if (typeof a == 'object') {\n var b = [];\n for (var k in a) {\n b.push(encodeURIComponent(k) + \"=\" + encodeURIComponent(a[k]))\n }\n return b.join(\"&\")\n }\n return a\n };\n if (d.dataType.toLowerCase() !== 'jsonp') {\n var h = new XMLHttpRequest();\n var i = d.data ? g(d.data) : '';\n if (d.type == \"GET\") {\n d.url = d.url.indexOf('?') >= 0 ? d.url + '&' + i : d.url + '?' + i;\n h.open(\"GET\", d.url);\n h.send(null)\n } else if (d.type == \"POST\") {\n h.open(\"POST\", d.url, true);\n h.setRequestHeader(\"Content-Type\", \"application/x-www-form-urlencoded\");\n h.send(i)\n }\n if (d.async === true) {\n h.onreadystatechange = function () {\n if (h.readyState == 4) {\n j()\n }\n }\n }\n if (d.async === false) {\n j()\n }\n var j = function () {\n var a = h.status;\n var b = d.dataType.toLowerCase();\n var c;\n switch (b) {\n case'html':\n c = h.responseText;\n break;\n case'json':\n c = JSON.parse(h.responseText);\n break;\n case'xml':\n c = h.responseXML;\n break\n }\n ;\n if (a == 200) {\n d.success && d.success(c)\n } else {\n d.error && d.error(a, c)\n }\n }\n } else {\n var i = d.data ? g(d.data) : '';\n var l = d.url.indexOf('?') >= 0 ? d.url + '&' + i : d.url + '?' 
+ i;\n l = l + '&' + d.jsonp + '=' + d.jsonpCallback;\n o[d.jsonpCallback] = function (a) {\n try {\n d.success && d.success(a)\n } finally {\n o[d.jsonpCallback] = null;\n try {\n delete o[d.jsonpCallback]\n } catch (e) {\n }\n document.body.removeChild(m)\n }\n };\n var m = document.createElement('script');\n m.src = l;\n document.body.appendChild(m);\n o.setTimeout(function () {\n if (typeof o[d.jsonpCallback] == 'function') {\n o[d.jsonpCallback] = function () {\n d.error && d.error()\n };\n document.body.removeChild(m)\n }\n }, d.jsonpTimeout)\n }\n };\n F.cookie = function (a) {\n var b = {key: '', value: '', expires: new Date('2999/1/1'), path: '/', domain: 'dangdang.com'};\n a = a || {};\n a = F.extend(b, a);\n if (a.value === '') {\n var c = null;\n if (document.cookie && document.cookie != '') {\n var d = document.cookie.split(';');\n for (var i = 0; i < d.length; i++) {\n var e = (d[i] || \"\").replace(/^\\s+|\\s+$/g, \"\");\n if (e.substring(0, (a.key.length) + 1) == (a.key + '=')) {\n c = decodeURIComponent(e.substring(a.key.length + 1));\n break\n }\n }\n }\n return c\n } else {\n if (a.value === null) {\n a.expires = -1\n }\n var f = '';\n if (a.expires && (typeof a.expires == 'number' || a.expires.toUTCString)) {\n var g;\n if (typeof a.expires == 'number') {\n g = new Date();\n g.setTime(g.getTime() + (a.expires * 24 * 60 * 60 * 1000))\n } else {\n g = a.expires\n }\n f = '; expires=' + g.toUTCString()\n }\n var h = a.path ? '; path=' + a.path : '', domain = a.domain ? '; domain=' + a.domain : '',\n secure = a.secure ? '; secure' : '';\n document.cookie = [a.key, '=', encodeURIComponent(a.value), f, h, domain, secure].join('')\n }\n };\n F.Deferred = function () {\n var c = {\n status: 'pending', doneList: [], failList: [], resolve: function (a) {\n this.status = 'resolved';\n for (var i = 0; i < this.doneList.length; i++) {\n this.doneList[i].call(this, a);\n this.doneList.shift()\n }\n }, reject: function (a) {\n this.status = 'rejected';\n for (var i = 0; i < this.failList.length; i++) {\n this.failList[i].call(this, a);\n this.doneList.shift()\n }\n }, done: function (a) {\n if (typeof a === 'function') {\n this.doneList.push(a)\n }\n return this\n }, fail: function (a) {\n if (typeof a === 'function') {\n this.failList.push(a)\n }\n return this\n }, then: function (a, b) {\n this.done(a).fail(b);\n return this\n }, always: function (a) {\n this.done(a).fail(a);\n return this\n }\n };\n return c\n };\n F.inArray = function (a, b) {\n if (b.indexOf) {\n return b.indexOf(a)\n }\n for (var i = 0, length = b.length; i < length; i++) {\n if (b[i] === a) {\n return i\n }\n }\n return -1\n };\n F.chunkArray = function (a, b) {\n var c = [];\n for (var x = 0; x < Math.ceil(a.length / b); x++) {\n var d = x * b;\n var e = d + b;\n c.push(a.slice(d, e))\n }\n return c\n };\n F.isArray = function (a) {\n return Object.prototype.toString.call(a) === '[object Array]'\n };\n F.isEmptyObject = function (a) {\n var i;\n for (i in a) {\n return false\n }\n return true\n };\n F.now = function () {\n return (new Date()).getTime()\n };\n F.error = function (a) {\n throw a;\n };\n F.md5 = function (k) {\n var l = {\n hexcase: 0, b64pad: \"\", chrsz: 8, binl2hex: function (a) {\n var b = this.hexcase ? 
\"0123456789ABCDEF\" : \"0123456789abcdef\";\n var c = \"\";\n for (var i = 0; i < a.length * 4; i++) {\n c += b.charAt((a[i >> 2] >> ((i % 4) * 8 + 4)) & 0xF) + b.charAt((a[i >> 2] >> ((i % 4) * 8)) & 0xF)\n }\n return c\n }, core_md5: function (x, e) {\n x[e >> 5] |= 0x80 << ((e) % 32);\n x[(((e + 64) >>> 9) << 4) + 14] = e;\n var a = 1732584193;\n var b = -271733879;\n var c = -1732584194;\n var d = 271733878;\n for (var i = 0; i < x.length; i += 16) {\n var f = a;\n var g = b;\n var h = c;\n var j = d;\n a = this.md5_ff(a, b, c, d, x[i + 0], 7, -680876936);\n d = this.md5_ff(d, a, b, c, x[i + 1], 12, -389564586);\n c = this.md5_ff(c, d, a, b, x[i + 2], 17, 606105819);\n b = this.md5_ff(b, c, d, a, x[i + 3], 22, -1044525330);\n a = this.md5_ff(a, b, c, d, x[i + 4], 7, -176418897);\n d = this.md5_ff(d, a, b, c, x[i + 5], 12, 1200080426);\n c = this.md5_ff(c, d, a, b, x[i + 6], 17, -1473231341);\n b = this.md5_ff(b, c, d, a, x[i + 7], 22, -45705983);\n a = this.md5_ff(a, b, c, d, x[i + 8], 7, 1770035416);\n d = this.md5_ff(d, a, b, c, x[i + 9], 12, -1958414417);\n c = this.md5_ff(c, d, a, b, x[i + 10], 17, -42063);\n b = this.md5_ff(b, c, d, a, x[i + 11], 22, -1990404162);\n a = this.md5_ff(a, b, c, d, x[i + 12], 7, 1804603682);\n d = this.md5_ff(d, a, b, c, x[i + 13], 12, -40341101);\n c = this.md5_ff(c, d, a, b, x[i + 14], 17, -1502002290);\n b = this.md5_ff(b, c, d, a, x[i + 15], 22, 1236535329);\n a = this.md5_gg(a, b, c, d, x[i + 1], 5, -165796510);\n d = this.md5_gg(d, a, b, c, x[i + 6], 9, -1069501632);\n c = this.md5_gg(c, d, a, b, x[i + 11], 14, 643717713);\n b = this.md5_gg(b, c, d, a, x[i + 0], 20, -373897302);\n a = this.md5_gg(a, b, c, d, x[i + 5], 5, -701558691);\n d = this.md5_gg(d, a, b, c, x[i + 10], 9, 38016083);\n c = this.md5_gg(c, d, a, b, x[i + 15], 14, -660478335);\n b = this.md5_gg(b, c, d, a, x[i + 4], 20, -405537848);\n a = this.md5_gg(a, b, c, d, x[i + 9], 5, 568446438);\n d = this.md5_gg(d, a, b, c, x[i + 14], 9, -1019803690);\n c = this.md5_gg(c, d, a, b, x[i + 3], 14, -187363961);\n b = this.md5_gg(b, c, d, a, x[i + 8], 20, 1163531501);\n a = this.md5_gg(a, b, c, d, x[i + 13], 5, -1444681467);\n d = this.md5_gg(d, a, b, c, x[i + 2], 9, -51403784);\n c = this.md5_gg(c, d, a, b, x[i + 7], 14, 1735328473);\n b = this.md5_gg(b, c, d, a, x[i + 12], 20, -1926607734);\n a = this.md5_hh(a, b, c, d, x[i + 5], 4, -378558);\n d = this.md5_hh(d, a, b, c, x[i + 8], 11, -2022574463);\n c = this.md5_hh(c, d, a, b, x[i + 11], 16, 1839030562);\n b = this.md5_hh(b, c, d, a, x[i + 14], 23, -35309556);\n a = this.md5_hh(a, b, c, d, x[i + 1], 4, -1530992060);\n d = this.md5_hh(d, a, b, c, x[i + 4], 11, 1272893353);\n c = this.md5_hh(c, d, a, b, x[i + 7], 16, -155497632);\n b = this.md5_hh(b, c, d, a, x[i + 10], 23, -1094730640);\n a = this.md5_hh(a, b, c, d, x[i + 13], 4, 681279174);\n d = this.md5_hh(d, a, b, c, x[i + 0], 11, -358537222);\n c = this.md5_hh(c, d, a, b, x[i + 3], 16, -722521979);\n b = this.md5_hh(b, c, d, a, x[i + 6], 23, 76029189);\n a = this.md5_hh(a, b, c, d, x[i + 9], 4, -640364487);\n d = this.md5_hh(d, a, b, c, x[i + 12], 11, -421815835);\n c = this.md5_hh(c, d, a, b, x[i + 15], 16, 530742520);\n b = this.md5_hh(b, c, d, a, x[i + 2], 23, -995338651);\n a = this.md5_ii(a, b, c, d, x[i + 0], 6, -198630844);\n d = this.md5_ii(d, a, b, c, x[i + 7], 10, 1126891415);\n c = this.md5_ii(c, d, a, b, x[i + 14], 15, -1416354905);\n b = this.md5_ii(b, c, d, a, x[i + 5], 21, -57434055);\n a = this.md5_ii(a, b, c, d, x[i + 12], 6, 1700485571);\n d = this.md5_ii(d, a, b, 
c, x[i + 3], 10, -1894986606);\n c = this.md5_ii(c, d, a, b, x[i + 10], 15, -1051523);\n b = this.md5_ii(b, c, d, a, x[i + 1], 21, -2054922799);\n a = this.md5_ii(a, b, c, d, x[i + 8], 6, 1873313359);\n d = this.md5_ii(d, a, b, c, x[i + 15], 10, -30611744);\n c = this.md5_ii(c, d, a, b, x[i + 6], 15, -1560198380);\n b = this.md5_ii(b, c, d, a, x[i + 13], 21, 1309151649);\n a = this.md5_ii(a, b, c, d, x[i + 4], 6, -145523070);\n d = this.md5_ii(d, a, b, c, x[i + 11], 10, -1120210379);\n c = this.md5_ii(c, d, a, b, x[i + 2], 15, 718787259);\n b = this.md5_ii(b, c, d, a, x[i + 9], 21, -343485551);\n a = this.safe_add(a, f);\n b = this.safe_add(b, g);\n c = this.safe_add(c, h);\n d = this.safe_add(d, j)\n }\n return Array(a, b, c, d)\n }, md5_cmn: function (q, a, b, x, s, t) {\n return this.safe_add(this.bit_rol(this.safe_add(this.safe_add(a, q), this.safe_add(x, t)), s), b)\n }, md5_ff: function (a, b, c, d, x, s, t) {\n return this.md5_cmn((b & c) | ((~b) & d), a, b, x, s, t)\n }, md5_gg: function (a, b, c, d, x, s, t) {\n return this.md5_cmn((b & d) | (c & (~d)), a, b, x, s, t)\n }, md5_hh: function (a, b, c, d, x, s, t) {\n return this.md5_cmn(b ^ c ^ d, a, b, x, s, t)\n }, md5_ii: function (a, b, c, d, x, s, t) {\n return this.md5_cmn(c ^ (b | (~d)), a, b, x, s, t)\n }, str2binl: function (a) {\n var b = Array();\n var c = (1 << this.chrsz) - 1;\n for (var i = 0; i < a.length * this.chrsz; i += this.chrsz) b[i >> 5] |= (a.charCodeAt(i / this.chrsz) & c) << (i % 32);\n return b\n }, safe_add: function (x, y) {\n var a = (x & 0xFFFF) + (y & 0xFFFF);\n var b = (x >> 16) + (y >> 16) + (a >> 16);\n return (b << 16) | (a & 0xFFFF)\n }, bit_rol: function (a, b) {\n return (a << b) | (a >>> (32 - b))\n }, hex_md5: function (s) {\n return this.binl2hex(this.core_md5(this.str2binl(s), s.length * this.chrsz))\n }\n };\n return l.hex_md5(k)\n };\n F.urlParam = function (a, b, c) {\n if (a) {\n if (b) {\n var d = a.split('#')[0];\n var e = new RegExp(\"([&|?]\" + b + \"=)([^&]*)\", \"g\");\n if (typeof c != 'undefined') {\n var f = /^https?:\\/\\/(.*?)($|\\/)/.exec(d);\n if (f && f[3] == '') {\n d += '/'\n }\n if (/\\?/g.test(d)) {\n if (e.test(d)) {\n d = d.replace(e, \"$1\" + c)\n } else {\n if (d.substr(-1, 1) == '&') {\n d += b + '=' + c\n } else {\n d += '&' + b + '=' + c\n }\n }\n } else {\n d += \"?\" + b + \"=\" + c\n }\n if (d.split('#')[1]) {\n d += d.split('#')[1]\n }\n return d\n } else {\n var g = e.exec(a);\n if (g != null && typeof g[2] != 'undefined') return g[2];\n return false\n }\n } else {\n var h = new Object();\n if (a.indexOf(\"?\") > 0) {\n var j = a.substring(a.indexOf(\"?\") + 1);\n if (j.indexOf(\"#\") > 0) {\n j = j.substring(0, j.indexOf(\"#\"))\n }\n var k = j.split(\"&\");\n for (var i = 0; i < k.length; i++) {\n h[k[i].split(\"=\")[0]] = k[i].split(\"=\")[1]\n }\n }\n return h\n }\n }\n return false\n };\n F.hashKey = 'DDClick521';\n F.hash = function (c) {\n var d = function (a, b) {\n return new Array(b + 1).join(a)\n };\n var e = parseInt(c.substr(0, 8), 16);\n var f = String(e).substr(0, 6);\n var g = f.length;\n if (g < 6) {\n f += d('0', Math.abs(6 - g))\n }\n return f\n };\n F.createPid = function () {\n var n = new Date();\n var y = n.getFullYear() + '';\n var m = n.getMonth() + 1;\n if (m < 10) m = \"0\" + m;\n var d = n.getDate();\n if (d < 10) d = \"0\" + d;\n var H = n.getHours();\n if (H < 10) H = \"0\" + H;\n var M = n.getMinutes();\n if (M < 10) M = \"0\" + M;\n var S = n.getSeconds();\n if (S < 10) S = \"0\" + S;\n var a = \"00\" + n.getMilliseconds();\n 
a = a.substr(a.length - 3, 3);\n var b = Math.floor(100000 + Math.random() * 900000);\n var c = Math.floor(100000 + Math.random() * 900000);\n var e = y + m + d + H + M + S + a + b + c + F.hashKey;\n var f = F.md5(e);\n f = F.hash(f);\n return y + m + d + H + M + S + a + f + b + c\n };\n F.checkPid = function (a) {\n if (a) {\n if (a.length != 35) {\n return false\n }\n return true\n }\n return false\n };\n o.DDF = F\n})(window);\nvar DDT;\n(function (j, l) {\n var F = DDF;\n var T = {\n __url: \"\",\n __referrer: \"\",\n __out_refer: '',\n __meta_data: '',\n __perm_id: '',\n __udid: '',\n __client_version: '',\n __page_id: '',\n __platform: '',\n __platform_name: 'platform',\n __meta_platform: '',\n __meta_viewport: '',\n __meta_listen_name: 'listen',\n __meta_listen: '',\n __ios_protocol: 'applewebdata://',\n __android_protocol: 'androidwebdata://',\n __pm_page: 'ddt-page',\n __pm_area: 'ddt-area',\n __pm_pit: 'ddt-pit',\n __pm_src: 'ddt-src',\n __pm_attrs: ['ddt-src', 'ddt-pit', 'ddt-area', 'ddt-page'],\n __pm_nested: ['ddt-area'],\n __pm_click: '',\n __rpm_name: 'rpm',\n __rpm_key: 'ddt-rpm',\n __rpm: '',\n __rpms: '',\n __rpms_line: '',\n __rpm_attrs: ['ddt-page', 'ddt-area', 'ddt-pit'],\n __rpm_max: 50,\n __rpm_split: '|',\n __pm_key: 'ddt-data-key',\n __pm_data: 'ddt-data',\n __data: '',\n __ddc_pid: '__permanent_id',\n __ddc_out_refer: '__out_refer',\n __ddc_rpm: '__rpm',\n __sites: ['dangdang.com', 'globaldangdang.hk', 'dangm.cn']\n };\n T.domainReg = function (a) {\n a = a ? a : T.__url || T.currentUrl();\n var b = /^((https?|androidwebdata|applewebdata):\\/\\/((([\\w\\-]+\\.)*)([\\w\\-]+\\.[A-Za-z]+))(\\:\\d+)?)\\/?(.)*?/;\n return b.exec(a)\n };\n T.domainProtocol = function (a) {\n a = a ? a : T.__url || T.currentUrl();\n var b = T.domainReg(a);\n return b[2] ? b[2] : null\n };\n T.domain = function (a) {\n a = a ? a : T.__url || T.currentUrl();\n var b = T.domainReg(a);\n return b[3] ? b[3] : null\n };\n T.topDomain = function (a) {\n a = a ? a : T.__url || T.currentUrl();\n var b = T.domainReg(a);\n return typeof b[6] != 'undefined' ? b[6] : null\n };\n T.getMeta = function () {\n var e = F(\"meta\");\n e.each(function () {\n var a = F(this);\n var b = a.attr('name'), content = a.attr('content');\n if (b && content) {\n if (b == T.__platform_name) {\n T.__meta_platform = content\n }\n if (b == 'viewport' && content.indexOf('width=device-width') >= 0) {\n T.__meta_viewport = 'touch'\n }\n var c = /ddclick_(.*)/i;\n var d = b.match(c);\n if (d) {\n T.__meta_data += d[1] + '=' + content + ';'\n }\n if (b == T.__meta_listen_name && content != '') {\n T.__meta_listen = content\n }\n }\n })\n };\n T.platform = function (a) {\n a = a ? a : T.__url || T.currentUrl();\n var b = 'pc';\n var c = navigator.userAgent.toLowerCase();\n if (T.__meta_platform != '') {\n return T.__meta_platform\n }\n if (c.indexOf('dangdang-android') >= 0) {\n return 'android'\n }\n if (c.indexOf('dangdang-ios') >= 0) {\n return 'iphone'\n }\n if (T.__meta_viewport != '') {\n return T.__meta_viewport\n }\n if (c.indexOf('miniProgram') >= 0) {\n return 'miniprogram'\n }\n if (c.indexOf('micromessenger') >= 0) {\n return 'wechat'\n }\n return b\n };\n T.serverUrl = function () {\n var a = T.domainProtocol();\n return a + '://' + 'databack.dangdang.com/ddt.php'\n };\n T.currentUrl = function () {\n return j.location.href || document.URL || document.location.href\n };\n T.decodeUrl = function (a) {\n a = a ? 
a : T.__url || T.currentUrl();\n var b;\n try {\n b = decodeURIComponent(a)\n } catch (e) {\n b = a.replace(/%3a/ig, \":\");\n b = a.replace(/%2f/ig, \"/\")\n }\n return b\n };\n T.referUrl = function (a) {\n a = a ? a : T.__url || T.currentUrl();\n var b = '';\n if (a.indexOf(\"#dd_refer=http\") > 0) {\n b = (a.substring(a.indexOf(\"#dd_refer=\") + \"#dd_refer=\".length));\n b = T.decodeUrl(b)\n } else {\n if (document.referrer) {\n b = document.referrer\n } else {\n try {\n if (j.opener && j.opener.location) {\n b = j.opener.location.href\n }\n } catch (e) {\n }\n }\n }\n return b\n };\n T.isOutRefer = function (a) {\n a = a ? a : T.__referrer || T.referUrl();\n return F.inArray(T.topDomain(a), T.__sites) >= 0 ? false : true\n };\n T.outRefer = function (a) {\n a = a ? a : T.__referrer || T.referUrl();\n if (T.isOutRefer(a)) {\n var b = Math.round(F.now() / 1000);\n var c = T.domain(a);\n var d = b + \"|!|\" + c;\n var e = [[\"baidu\", \"word\"], [\"baidu\", \"wd\"], [\"baidu\", \"w\"], [\"baidu\", \"kw\"], [\"google\", \"q\"], [\"soso\", \"w\"], [\"soso\", \"key\"], [\"sogou\", \"query\"], [\"sogou\", \"keyword\"], [\"youdao\", \"q\"], [\"bing\", \"q\"], [\"yahoo\", \"p\"], [\"ask\", \"q\"], [\"360\", \"q\"], [\"yahoo\", \"p\"], [\"ask\", \"q\"], [\"360\", \"q\"], [\"jike\", \"q\"], [\"ucweb\", \"keyword\"], [\"ucweb\", \"word\"], [\"so\", \"q\"], [\"haosou\", \"q\"]];\n var f = T.topDomain(a);\n var g = F.urlParam(a);\n for (var i = 0; i < e.length; i++) {\n if (e[i][0] == f && typeof g[e[i][1]] != 'undefined') {\n d += \"|!|\" + g[e[i][1]].substring(0, 10);\n return d;\n break\n }\n }\n return d\n }\n return ''\n };\n T.permId = function (a) {\n a = a || T.currentUrl();\n var b;\n if (a.indexOf(T.__android_protocol) >= 0 || a.indexOf(T.__ios_protocol) >= 0) {\n var c = new RegExp(\"PermanentID=([^&]*)\", 'i');\n var d = c.exec(a);\n if (d != null && typeof d[1] != 'undefined') {\n b = d[1]\n }\n } else {\n var e = {key: T.__ddc_pid, domain: T.topDomain()};\n if (F.urlParam(a, 'permanent_id')) {\n b = F.urlParam(a, 'permanent_id')\n } else {\n b = F.cookie(e);\n if (!b) {\n b = F.createPid();\n e.value = b;\n F.cookie(e)\n }\n }\n }\n return b\n };\n T.udId = function (a) {\n a = a || T.currentUrl();\n if (a.indexOf(T.__ios_protocol) >= 0 || a.indexOf(T.__ios_protocol) >= 0) {\n var b = new RegExp(\"UDID=([^&]*)\", 'i');\n var c = b.exec(a);\n if (c != null && typeof c[1] != 'undefined') {\n return c[1]\n }\n }\n if (F.urlParam(a, 'udid')) {\n var d = F.urlParam(a, 'udid');\n return d\n }\n return ''\n };\n T.clientVersion = function (a) {\n a = a || T.currentUrl();\n if (F.urlParam(a, 'client_version')) {\n var b = F.urlParam(a, 'client_version');\n return b\n }\n return ''\n };\n T.pageId = function () {\n var a = F('body').attr(T.__pm_page);\n if (a) {\n a = a.replace(/\\s+/g, '');\n return a\n }\n return ''\n };\n T.isClickElem = function (a) {\n if (a == null || a.nodeType !== 1) return false;\n var b = F(a);\n var c = b.tagName();\n var d = ['a', 'button', 'input', 'submit', 'reset', 'area', 'img'];\n if (b.attr('onclick') != null || b.attr('ddt-click') != null) {\n return true\n }\n if (F.inArray(c, d) >= 0) {\n return true\n }\n return false\n };\n T.getClickAttr = function (a) {\n var b = {};\n var c = F(a);\n var d = null;\n if (d = c.attr('id')) {\n b['oi'] = d\n }\n if (d = c.attr('name')) {\n b['on'] = d\n }\n if (d = c.attr('ddt-click') || c.attr('dd_name')) {\n b['dn'] = d\n }\n if (d = c.attr('ddt-src') || c.attr('dd_src')) {\n b['ds'] = d\n }\n if (c.tagName() == 'a' 
&& !b['ds']) {\n var e = a.getElementsByTagName('img');\n var f = /(<.*>)/;\n var g = (e.length == 1 && e[0].alt) || ((!f.test(a.innerHTML)) && a.innerHTML) || '';\n if (g) b['ds'] = g\n }\n if (d = c.attr('nname')) {\n b['nn'] = d\n }\n return F.isEmptyObject(b) ? false : b\n };\n T.getClickInfo = function (a) {\n var b = {\n objId: '',\n objName: '',\n objHref: a.href ? a.href : '',\n objTagName: a.tagName ? a.tagName : '',\n objDdName: '',\n objDdSrc: '',\n regionIds: '',\n regionDdNames: '',\n };\n var c = T.getClickAttr(a);\n if (c) {\n b['objId'] = c['oi'] || '';\n b['objName'] = c['on'] || c['oi'] || '';\n b['objDdName'] = c['dn'] || '';\n b['objDdSrc'] = c['ds'] || '';\n if (c['oi']) b['regionIds'] = c['oi'] + ',';\n if (c['on']) b['regionIds'] = c['on'] + ',';\n if (c['dn']) b['regionDdNames'] = c['dn'] + ','\n }\n F(a).parents().each(function () {\n c = T.getClickAttr(this);\n if (!b['objHref'] && this.href) {\n b['objHref'] = this.href\n }\n if (c) {\n if (c['oi']) {\n b['regionIds'] += c['oi'] + ','\n } else if (c['on']) {\n b['regionIds'] += c['on'] + ','\n }\n if (c['dn']) b['regionDdNames'] += c['dn'] + ',';\n if (!b['objDdSrc'] && c['ds']) {\n b['objDdSrc'] = c['ds']\n }\n }\n });\n return b\n };\n T.getMisc = function (e) {\n var a = 0, sHeight = 0;\n if (j.screen) {\n a = j.screen.width;\n sHeight = j.screen.height\n }\n var b = 0, dHeight = 0;\n if ((document.body) && (document.body.clientWidth)) {\n b = document.body.clientWidth\n }\n if ((document.body) && (document.body.scrollHeight)) {\n dHeight = document.body.scrollHeight\n }\n var c = '';\n if (e) {\n var d = (e.pageX || (e.clientX + (document.documentElement.scrollLeft || document.body.scrollLeft))) || 'null';\n var f = (e.pageY || (e.clientY + (document.documentElement.scrollTop || document.body.scrollTop))) || 'null';\n if (d != null && f != null) {\n c = d + ',' + f\n }\n }\n return a + ',' + sHeight + '|' + c + '|' + b + ',' + dHeight\n };\n T.lastRpm = function (a) {\n var b = '';\n a = a ? a : F.cookie({key: T.__ddc_rpm});\n if (a && (T.__referrer || T.referUrl()) != '') {\n b = a.slice(a.indexOf(T.__rpm_split) + 1)\n }\n return b\n };\n T.getPm = function (a) {\n var b = {};\n var c = T.__pm_attrs;\n var d = F.merge([a], F(a).parents());\n F.each(d, function (k, v) {\n for (var i = 0; i < c.length; i++) {\n if (F(v).attr(c[i]) !== null) {\n if (c[i] == T.__pm_nested) {\n b[c[i]] = b[c[i]] ? 
b[c[i]] : [];\n b[c[i]].push(F(v).attr(c[i]).replace(/\\s+/g, ''))\n } else {\n b[c[i]] = F(v).attr(c[i]).replace(/\\s+/g, '')\n }\n }\n }\n });\n return b\n };\n T.getClickPm = function (a) {\n var b = '';\n if (a) {\n var c = T.__pm_attrs.slice().reverse();\n for (var i = 0; i < c.length; i++) {\n if (a[c[i]]) {\n if (F.isArray(a[c[i]])) {\n b += a[c[i]].slice().reverse().join(',') + '.'\n } else {\n b += a[c[i]] + '.'\n }\n } else {\n b += '.'\n }\n }\n }\n return b\n };\n T.createRpm = function (a) {\n var b = '', lrpm = T.__rpm, rAttrs = T.__rpm_attrs;\n if (a) {\n for (var i = 0; i < rAttrs.length; i++) {\n if (a[rAttrs[i]]) {\n if (F.isArray(a[rAttrs[i]])) {\n b += a[rAttrs[i]].slice().reverse().join(',') + '.'\n } else {\n b += a[rAttrs[i]] + '.'\n }\n } else {\n b += '.'\n }\n }\n b += new Date().getTime()\n }\n return b\n };\n T.getSrc = function (a) {\n var b = '';\n if (a && a[T.__pm_src]) {\n b = a[T.__pm_src]\n }\n return b\n };\n T.collectData = function (b) {\n var c = {};\n if (b) {\n var d = F(b);\n if (d.attr(T.__pm_key)) {\n c[d.attr(T.__pm_key)] = d.attr(T.__pm_data)\n }\n } else {\n F(\"*[\" + T.__pm_key + \"]\").each(function () {\n var a = F(this);\n if (a.attr(T.__pm_key)) {\n if (typeof c[a.attr(T.__pm_key)] != 'undefined') {\n c[a.attr(T.__pm_key)] += ', ' + a.attr(T.__pm_data)\n } else {\n c[a.attr(T.__pm_key)] = a.attr(T.__pm_data)\n }\n }\n })\n }\n return F.isEmptyObject(c) ? '' : JSON.stringify(c)\n };\n T.server = function (a) {\n if (top.location == self.location) {\n var b = {\n ctr_id: '',\n ctr_ids: '',\n refer_url: encodeURIComponent(T.__referrer || T.referUrl()),\n out_refer: encodeURIComponent(T.__out_refer || T.outRefer()),\n url: encodeURIComponent(T.__url || T.currentUrl()),\n to_url: '',\n type: 1,\n title: encodeURIComponent(document.title),\n ctr_type: '',\n charset: document.charset,\n perm_id: T.__perm_id || T.permId(),\n udid: T.__udid || T.udId(),\n client_version: T.__client_version || T.clientVersion(),\n meta_data: encodeURIComponent(T.__meta_data),\n misc: '',\n ctr_dns: '',\n ctr_dn: '',\n ctr_src: '',\n cif: '',\n platform: T.__platform || T.platform(),\n page_id: T.__page_id || T.pageId(),\n data: '',\n website: T.topDomain()\n };\n a = F.extend(b, a);\n F.each(a, function (k, v) {\n if (v == '') {\n delete a[k]\n }\n });\n F.ajax({\n url: T.serverUrl(), dataType: 'jsonp', data: a, jsonp: 'callback', success: function () {\n }\n })\n }\n };\n T.setBasic = function () {\n T.__url = T.currentUrl();\n T.__referrer = T.referUrl(T.__url);\n T.__out_refer = T.outRefer(T.__referrer);\n T.getMeta();\n T.__perm_id = T.permId();\n T.__udid = T.udId();\n T.__client_version = T.clientVersion();\n T.__page_id = T.pageId();\n T.__platform = T.platform();\n T.__rpm = T.lastRpm();\n T.__data = T.collectData()\n };\n T.trackLoad = function (a) {\n var b = {type: 1, misc: T.getMisc(), data: T.__data || T.collectData()};\n if (a) b.url = a;\n T.server(b)\n };\n T.trackClick = function () {\n F(document).bind('click', function (e) {\n e = e || j.event;\n var a = e.target || e.srcElement;\n if (!T.isClickElem(a) && a != document) {\n a = F(a).parentsUntil(function (o) {\n if (T.isClickElem(o)) return true;\n return false\n }).dom()\n }\n if (a && a.nodeType == 1) {\n var b = T.getClickInfo(a);\n var c = T.getPm(a);\n var d = T.createRpm(c);\n var f = T.__rpm;\n var g = f + T.__rpm_split + d;\n var h = new Date();\n h.setTime(h.getTime() + 24 * 60 * 60 * 1000);\n F.cookie({key: T.__ddc_rpm, value: g, expires: h, domain: T.topDomain()});\n T.__url = 
T.currentUrl();\n T.__perm_id = T.permId();\n T.__udid = T.udId();\n T.__client_version = T.clientVersion();\n T.__page_id = T.pageId();\n T.__platform = T.platform();\n T.__pm_click = T.getClickPm(c);\n var i = {\n ctr_id: encodeURIComponent(b['objName']),\n ctr_ids: encodeURIComponent(b['regionIds']),\n to_url: encodeURIComponent(b['objHref']),\n type: 2,\n ctr_type: encodeURIComponent(b['objTagName']),\n misc: T.getMisc(e),\n ctr_dns: encodeURIComponent(b['regionDdNames']),\n ctr_dn: encodeURIComponent(b['objDdName']),\n ctr_src: encodeURIComponent(T.getSrc(c) || b['objDdSrc']),\n cif: T.__pm_click,\n data: T.collectData(a)\n };\n T.server(i)\n }\n })\n };\n T.trackSpa = function () {\n if (T.__meta_listen == 'hash') {\n if (\"onhashchange\" in j) {\n j.onhashchange = function (e) {\n T.__url = e.newURL;\n T.__referrer = e.oldURL;\n var a = {type: 1, misc: T.getMisc(), data: T.__data || T.collectData()};\n T.server(a)\n }\n }\n }\n };\n T.init = function () {\n T.setBasic();\n T.trackLoad();\n T.trackClick();\n T.trackSpa()\n };\n T.api = function () {\n this.trackPingPHP = function (a) {\n var b = T.referUrl(a);\n var c = {refer_url: b, url: a, type: 5};\n T.server(c)\n };\n this.trackRecommend = function (a) {\n var b = T.referUrl(a);\n var c = {refer_url: b, url: a, type: 6};\n T.server(c)\n };\n this.track_transfer = function (a) {\n var b = T.referUrl(a);\n if (a.indexOf('#dd_refer') == -1) {\n a += '#dd_refer=' + b\n }\n var c = {refer_url: b, url: a, type: 0};\n T.server(c);\n return a\n }\n };\n j.DDT = T;\n DDT.Api = new T.api();\n DDT.init()\n})(window);\nvar DDE;\n(function (j, l) {\n var F = DDF;\n var T = DDT;\n var E = {\n __expose_attr: 'ddt-expose',\n __server_url: 'databack.dangdang.com/dde.php',\n __doms: [],\n __doms_all: [],\n __doms_attr: {},\n __doms_relative: {},\n __dom_time: 500,\n __time: 600,\n __epm: {},\n __error_count: 0,\n __max_error_count: 10,\n __epm_attrs: ['ddt-page', 'ddt-area', 'ddt-pit', 'ddt-src'],\n __dom_pid: '',\n __loop_pid: ''\n };\n E.attrReg = /^attr(\\[(.*)\\])?/;\n E.relativeReg = /^relative(\\[(.*)\\])?/;\n E.serverUrl = function () {\n var a = T.domainProtocol();\n E.__server_url = a + '://' + E.__server_url;\n return E.__server_url\n };\n E.isVisible = function (a) {\n var b = F(a);\n if (!b.location()) return false;\n var c = b.location().right;\n var d = b.location().bottom;\n var e = b.height();\n var f = b.width();\n var g = F(j).height();\n var h = F(j).width();\n if (d >= (e / 2) && d <= g + (e / 2)) {\n if (c >= (f / 2) && c <= (f / 2) + h) {\n return true\n }\n }\n return false\n };\n E.getDoms = function () {\n var d = F(\"*[\" + E.__expose_attr + \"]\");\n if (d.length > 0) {\n F.merge(E.__doms, d.selector);\n d.each(function (k, a) {\n var b = E.getEpm(a);\n var c = E.__doms_all.push(a) - 1;\n if (c >= 0) E.__epm[c] = b;\n E.__doms_attr[c] = F(a).attr(E.__expose_attr)\n });\n d.attr(E.__expose_attr, null)\n }\n };\n E.getEpm = function (a) {\n var b = T.getPm(a), attrs = E.__epm_attrs, epm = '';\n if (b) {\n for (var i = 0; i < attrs.length; i++) {\n if (b[attrs[i]]) {\n if (F.isArray(b[attrs[i]])) {\n epm += b[attrs[i]].slice().reverse().join(',') + '.'\n } else {\n epm += b[attrs[i]] + '.'\n }\n } else {\n epm += '.'\n }\n }\n }\n return epm.substring(0, epm.length - 1)\n };\n E.isExpose = function (a) {\n var b = F.inArray(a, E.__doms_all);\n if (E.isVisible(a) && b >= 0) {\n var c = E.__doms_attr[b];\n if (c == \"on\") {\n return true\n } else if (E.attrReg.test(c)) {\n return E.byAttr(a, c)\n } else if 
(E.relativeReg.test(c)) {\n return E.byRelative(a, c)\n } else {\n return false\n }\n }\n return false\n };\n E.byAttr = function (a, b) {\n var c = 0, attr, $el = F(a);\n var d = E.attrReg.exec(b);\n if (!d && !d[2]) return false;\n var e = d[2].split(\",\");\n for (var i = 0, len = e.length; i < len; i++) {\n attr = e[i].split(\"=\");\n if ($el.css(attr[0]) == attr[1]) {\n c++\n }\n }\n return (c == e.length) ? true : false\n };\n E.byRelative = function (a, b) {\n var c = E.relativeReg.exec(b);\n var d = F(a);\n var e;\n if (c && c[1]) {\n if (E.__doms_relative[c[1]]) {\n e = E.__doms_relative[c[1]]\n } else {\n e = F(a).parentsUntil(\"*\" + c[1]);\n E.__doms_relative[c[1]] = e\n }\n } else {\n e = d.parent()\n }\n if (e) {\n var f = d.location().left, _pLeft = e.location().left;\n var g = d.location().top, _pTop = e.location().top;\n var h = d.height(), _domWidth = d.width();\n var i = e.height(), _pWidth = e.width();\n if (f + (_domWidth / 2) >= _pLeft && f + (_domWidth / 2) - _pLeft <= _pWidth) {\n if (g + (h / 2) >= _pTop && g + (h / 2) - _pTop <= i) {\n return true\n }\n }\n }\n return false\n };\n E.server = function (a) {\n if (top.location == self.location) {\n var b = {\n platform: T.__platform || T.platform(),\n type: 3,\n url: encodeURIComponent(T.__url || T.currentUrl()),\n charset: document.charset,\n perm_id: T.__perm_id || T.permId(),\n page_id: T.__page_id || T.pageId(),\n website: T.topDomain(),\n expose: '',\n };\n a = F.extend(b, a);\n F.ajax({\n url: E.__server_url, dataType: 'jsonp', data: a, jsonp: 'callback', error: function () {\n E.__error_count++\n }\n });\n if (E.__error_count >= E.__max_error_count) {\n clearInterval(E.__dom_pid);\n clearInterval(E.__loop_pid)\n }\n }\n };\n E.trackExpose = function () {\n var c = {}, exposeDoms = [], expose = [];\n F.each(E.__doms, function (k, a) {\n if (E.isExpose(a)) {\n var b = F.inArray(a, E.__doms_all);\n if (b >= 0 && E.__epm[b]) {\n expose.push(E.__epm[b])\n }\n exposeDoms.push(a)\n }\n });\n for (var i in exposeDoms) {\n E.__doms.splice(F.inArray(exposeDoms[i], E.__doms), 1)\n }\n if (expose.length > 0) {\n var d = F.chunkArray(expose, 10);\n F.each(d, function (k, a) {\n c['expose'] = encodeURIComponent(JSON.stringify(a));\n E.server(c)\n })\n }\n };\n E.loop = function () {\n E.getDoms();\n E.trackExpose();\n E.__dom_pid = j.setInterval(function () {\n E.getDoms()\n }, E.__dom_time);\n E.__loop_pid = j.setInterval(function () {\n E.trackExpose()\n }, E.__time)\n };\n E.init = function () {\n E.serverUrl();\n E.loop()\n };\n j.DDE = E;\n DDE.init()\n})(window);\n(function (e, f) {\n var F = DDF;\n var T = DDT;\n var L = {\n __listen_attr: 'ddt-listen',\n __server_url: 'databack.dangdang.com/ddl.php',\n __time: 500,\n __error_count: 0,\n __max_error_count: 10,\n __loop_pid: '',\n __data: ''\n };\n L.serverUrl = function () {\n var a = T.domainProtocol();\n L.__server_url = a + '://' + L.__server_url;\n return L.__server_url\n };\n L.server = function (a) {\n if (top.location == self.location) {\n var b = {\n platform: T.__platform || T.platform(),\n type: 4,\n url: encodeURIComponent(T.__url || T.currentUrl()),\n charset: document.charset,\n perm_id: T.__perm_id || T.permId(),\n page_id: T.__page_id || T.pageId(),\n website: T.topDomain(),\n data: ''\n };\n a = F.extend(b, a);\n F.ajax({\n url: L.__server_url, dataType: 'jsonp', data: a, jsonp: 'callback', error: function () {\n L.__error_count++\n }\n });\n if (L.__error_count >= L.__max_error_count) {\n clearInterval(L.__loop_pid)\n }\n }\n };\n L.listen = 
function () {\n var b = F(\"*[\" + L.__listen_attr + \"]\");\n if (b.length > 0) {\n var c = {};\n b.each(function () {\n var a = F(this);\n if (a.attr(T.__pm_key)) {\n if (typeof c[a.attr(T.__pm_key)] != 'undefined') {\n c[a.attr(T.__pm_key)] += ', ' + a.attr(T.__pm_data)\n } else {\n c[a.attr(T.__pm_key)] = a.attr(T.__pm_data)\n }\n }\n });\n if (!F.isEmptyObject(c) && JSON.stringify(c) != L.__data) {\n var d = {};\n d.data = JSON.stringify(c);\n L.server(d)\n }\n L.__data = JSON.stringify(c)\n }\n };\n L.loop = function () {\n L.__loop_pid = e.setInterval(function () {\n L.listen()\n }, L.__time)\n };\n L.init = function () {\n L.serverUrl();\n L.loop()\n };\n L.init()\n})(window);" } ]
17
boylewang/python_tools
https://github.com/boylewang/python_tools
664e5bfd10ee819dce3ea4e64f020a93ab5e39fa
2c9b5224b7d0c797360ebe89ff86e302dd322cfa
cccdc9149912d9efb5b0bcf03a7b7a68743a18be
refs/heads/master
2020-03-26T21:51:46.134338
2018-08-22T14:26:42
2018-08-22T14:26:42
145,413,193
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8153846263885498, "alphanum_fraction": 0.8153846263885498, "avg_line_length": 15.5, "blob_id": "e91b6296ba55517b76ccad1b847b75cfd182d604", "content_id": "0332b429f3cd6e9e92e1e18bada0135621d57eb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 65, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/web_browse.py", "repo_name": "boylewang/python_tools", "src_encoding": "UTF-8", "text": "import webbrowser\nimport shutil\n\nwebbrowser.open(\"www.baidu.com\")" }, { "alpha_fraction": 0.6761032342910767, "alphanum_fraction": 0.6835970282554626, "avg_line_length": 25.711111068725586, "blob_id": "88bf6eca5eb694c0e77e20e60b636088ec650552", "content_id": "d9b03735ed736bf5adb2c863a44aa8ed091da278", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1519, "license_type": "no_license", "max_line_length": 138, "num_lines": 45, "path": "/copy_files.py", "repo_name": "boylewang/python_tools", "src_encoding": "UTF-8", "text": "import os\nimport shutil\n#标准库shutils提供了文件和目录管理接口\ndef newfile(path):\n path=path.strip()\n path=path.rstrip(\"\\\\\")\n # 判断路径是否存在\n isExists=os.path.exists(path)\n # 不存在\n if not isExists:\n # 创建目录操作函数\n os.makedirs(path)\n print(path+' 创建成功')\n return True\n #存在\n else:\n print(path+' 目录已存在')\n return False\n\n# 定义要创建的目录\nnewpath=\"F:\\\\14\"\n# 调用函数\nnewfile(newpath)\nsrc_files = \"F:/ut/gtest\"\ndst_files = \"F:/24\"\n\n#递归拷贝文件夹下的文件,不包含该文件夹\n#shutil.copytree('F:/ut/gtest', 'F:/24')\n\n#只copy文件内容:shutil.copyfile(src, dst)\n#shutil.copyfile(\"test.txt\",\"test_copyfile.txt\")\n\n#拷贝文件和权限:shutil.copy(src, dst) \n#shutil.copy(\"test.txt\",\"test_copy.txt\")\n\n#拷贝文件和状态信息:shutil.copy2(src, dst)\n#shutil.copy2(\"test.txt\",\"test_cpoy2.txt\")\n\n#shutil.ignore_patterns(*patterns)  (忽略哪个文件,有选择性的拷贝,用于拷贝时的ignore选项)\n#shutil.ignore_patterns(\"test.txt\",\"*.py\")\n#递归的去拷贝文件夹:shutil.copytree(src, dst, symlinks=False, ignore=None)\n#shutil.copytree('E:\\Git\\git_tools\\python_tools','E:\\Git\\git_tools\\python_tools_new',symlinks=True, ignore=shutil.ignore_patterns('*.py'))\n\n#shutil.rmtree(path[, ignore_errors[, onerror]])递归的去删除文件,文件夹内可以有文件加,但不能有文件\nshutil.rmtree('E:\\Git\\git_tools\\python_tools_new')" } ]
2
sindhu819/Strings-3
https://github.com/sindhu819/Strings-3
3afddf18532955dcae8b8445e06366ae5a3aec37
b1dc747c0a9754f1b44320ac6803b1e21a2f4298
4a64e35652de6351cb7317b0dfe0a4902d7f6b29
refs/heads/master
2020-09-20T18:01:39.487653
2019-11-29T22:48:26
2019-11-29T22:48:26
224,554,387
0
0
null
2019-11-28T02:30:04
2019-08-18T12:17:16
2019-11-28T01:48:30
null
[ { "alpha_fraction": 0.390193372964859, "alphanum_fraction": 0.39917126297950745, "avg_line_length": 27.02083396911621, "blob_id": "478ea582d8baad84a3267cabdd51e36ad018abba", "content_id": "1dec16abfd3e612c1a746c5617a868208b7c118f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1448, "license_type": "no_license", "max_line_length": 158, "num_lines": 48, "path": "/Problem-98.py", "repo_name": "sindhu819/Strings-3", "src_encoding": "UTF-8", "text": "'''\nleet code- 227 - Basic Calculator -II - https://leetcode.com/problems/basic-calculator-ii/\ntime complexity - O(N)\nspace complexity -0(N)\nApproach - Stacks for addition and subtraction we sppend the elements into stack, for \"*\" and \"/\" we pop the lement form stack and multiply with next element.\n'''\n\n\n\n\n\nclass Solution(object):\n def calculate(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n stack=[]\n num=0\n sign='+'\n \n for i in range(len(s)):\n c=s[i]\n if c.isdigit():\n \n num=num*10 +int(c)\n if not c.isdigit() and c!= \" \" or i==len(s)-1:\n if sign=='+':\n stack.append(num)\n elif sign=='-':\n stack.append(-num)\n elif sign=='*':\n temp=stack.pop()\n stack.append(temp*num)\n elif sign==\"/\":\n temp=stack.pop()\n if temp//num<0 and temp%num!=0:\n stack.append(int(temp /num+1))\n else:\n stack.append(int(temp /num))\n sign=c\n num=0\n \n print(stack)\n res=0\n while stack:\n res=res+stack.pop()\n return res\n \n \n \n \n \n " } ]
1
pratham-pay/Dockertest
https://github.com/pratham-pay/Dockertest
bc34a0a8540ea486505facc2ed18fbb45526beb5
1d79f57dd82eca16ea99fb5b3c586af191240a9f
c921a43828f06e6f2115c76c2f035e3e56a4e49d
refs/heads/master
2022-12-30T21:22:37.691443
2020-06-02T18:47:44
2020-06-02T18:47:44
268,785,276
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5312675833702087, "alphanum_fraction": 0.5430985689163208, "avg_line_length": 27.174602508544922, "blob_id": "15c3cbd04bce48c4e168429e96404734bfb0033c", "content_id": "bb82948e73f5a918a95ee76a2e45a710d729b30f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3550, "license_type": "no_license", "max_line_length": 74, "num_lines": 126, "path": "/main_code.py", "repo_name": "pratham-pay/Dockertest", "src_encoding": "UTF-8", "text": "import json\nimport pandas as pd\nfrom datetime import datetime\nimport re\nimport numpy as np\n\n\nVALID_ACCOUNT_TYPES = {\n 123: \"Personal Loan\"\n}\n\ndef parse(json_obj):\n out_dict = {}\n\n for customer_id, account_list in json_obj.items():\n out_list = []\n if account_list:\n account_list = json.loads(account_list)\n if isinstance(account_list, list):\n for account in account_list:\n parsed_account = parse_account(account)\n if parsed_account:\n out_list.append(parsed_account)\n out_dict[customer_id] = out_list\n else:\n out_dict[customer_id] = \"No accounts found\"\n return out_dict\n\ndef get_nper(open_dt, bal_dt):\n nper = 12*(bal_dt.year-open_dt.year) + (bal_dt.month-open_dt.month) -1\n return nper\n\ndef parse_account(account):\n try:\n account_id = account['ACCOUNT_NB']\n account_type = int(account['ACCT_TYPE_CD'])\n open_date = datetime.strptime(account['OPEN_DT'], \"%Y/%m/%d\")\n balance_dt = datetime.strptime(account['BALANCE_DT'], \"%Y/%m/%d\")\n amount = int(account['ORIG_LOAN_AM'])\n except:\n return \"Error in input data\"\n\n if account_type not in VALID_ACCOUNT_TYPES:\n return \"invalid account type\"\n\n total_paid_period = get_nper(open_date, balance_dt)\n\n p1 = re.compile('BALANCE_AM_\\d{2}')\n bal_keys = sorted(list(filter(p1.match, account.keys())))\n\n p2 = re.compile('DAYS_PAST_DUE_\\d{2}')\n dpd_keys = sorted(list(filter(p2.match, account.keys())))\n\n bal_array = []\n\n for b, d in zip(bal_keys, dpd_keys):\n \n if not pd.isna(account[d]) and account[d] == 0:\n bal = account[b]\n if not pd.isna(bal) and bal > 0 and bal < amount:\n per = total_paid_period - int(b[-2:]) + 1\n bal_array.append([bal, per])\n \n if len(bal_array) > 0:\n ret = calc_emi(open_date, amount, bal_array)\n if ret:\n parsed_account = {}\n parsed_account['account_id'] = str(account_id)\n rate, tenure, emi = ret\n parsed_account['rate'] = float(rate)\n parsed_account['tenure'] = int(tenure)\n parsed_account['emi']= int(emi)\n return parsed_account\n else:\n return \"Model returned no values\"\n else:\n return \"Insufficient data for this account\"\n\n\ndef calc_emi(open_date, amount, bal):\n np_rates = np.arange(10, 40, 0.5) \n np_tenure = np.arange(1, 60, 1)\n \n rt_pairs = [(r,t) for r in np_rates for t in np_tenure]\n \n new_bal_array = []\n \n for tup in bal:\n balance = tup[0]\n nper = tup[1]\n \n new_bal_array.append((balance, nper))\n \n def multiproc(pair):\n r,t = pair\n nper_ = [1 if y>t else 0 for x,y in new_bal_array]\n \n if any(nper_) > 0:\n return None\n \n rate = r/1200\n emi = -np.pmt(rate, t, amount)\n diff=0\n\n for tup in new_bal_array:\n bal, nper = tup\n calc_balance = np.fv(rate, nper, emi, -amount)\n diff = diff+ (bal-calc_balance)**2\n\n balance_diff = np.sqrt(diff/len(new_bal_array))\n return balance_diff\n \n res = map(multiproc, rt_pairs)\n \n min_diff = float('inf')\n \n for tup, diff in zip(rt_pairs, res):\n if diff and diff < min_diff:\n min_diff = diff\n r,t=tup\n\n if r and t:\n emi = -np.pmt(r/1200, t, amount)\n return (r,t,emi)\n else:\n None\n" }, { 
"alpha_fraction": 0.7080292105674744, "alphanum_fraction": 0.7299270033836365, "avg_line_length": 21.83333396911621, "blob_id": "f16e5c58042d6ca09f0e002b05fe4326cfaa59fa", "content_id": "9b6088b8e72043c00bb7e1f2d5b075236ccc7404", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 137, "license_type": "no_license", "max_line_length": 36, "num_lines": 6, "path": "/Dockerfile", "repo_name": "pratham-pay/Dockertest", "src_encoding": "UTF-8", "text": "FROM python:3.7\nCOPY requirements.txt /\nRUN pip install -r /requirements.txt\nCOPY . /app\nWORKDIR /app\nCMd [\"python\", \"prodcode_api2.py\"]\n" }, { "alpha_fraction": 0.6114285588264465, "alphanum_fraction": 0.6399999856948853, "avg_line_length": 19.58823585510254, "blob_id": "a3c903cd7f0f2a78928f7f82d86f9c2a9bc95fbb", "content_id": "5f5e1937b585d53a53499d8cb155d46ac576d2b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 350, "license_type": "no_license", "max_line_length": 40, "num_lines": 17, "path": "/api_code.py", "repo_name": "pratham-pay/Dockertest", "src_encoding": "UTF-8", "text": "import flask\nfrom flask import request\n\nfrom main_code import parse\n\napp= flask.Flask(__name__)\napp.config[\"DEBUG\"]=True\n\[email protected]('/', methods=['POST', 'GET'])\ndef api_method():\n if request.is_json:\n return parse(request.get_json())\n else:\n print(request)\n return \"Input not right\"\n\napp.run(host='127.0.0.1', port=8000)\n" } ]
3
thesteady/myprojecteuler
https://github.com/thesteady/myprojecteuler
b9f653d4825c1a4c860be82a46733ec84bd9bf19
95939c9e9aaf7fb59dbf248c1b790b11667b8d86
b13f58313b2f961a2f80e6480fe19ce17ab02266
refs/heads/master
2020-06-06T10:48:18.758857
2012-11-26T17:29:46
2012-11-26T17:29:46
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6733466982841492, "alphanum_fraction": 0.7174348831176758, "avg_line_length": 51.578948974609375, "blob_id": "668806e0970031b1fcae2b9feea1d0137eb412e4", "content_id": "c1a407d6756b4637b76950b87b555d08b65e01bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 998, "license_type": "no_license", "max_line_length": 141, "num_lines": 19, "path": "/peuler6.py", "repo_name": "thesteady/myprojecteuler", "src_encoding": "UTF-8", "text": "#Project Euler, Problem 6 \"http://projecteuler.net/problem=6\":\n#The sum of the squares of the first ten natural numbers is 12 + 22 + ... + 102 = 385\n#The square of the sum of the first ten natural numbers is (1 + 2 + ... + 10)2 = 552 = 3025\n#Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025 - 385 = 2640.\n#Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.\n\n#Declare empty variables so I know what variables I have:\nx=0\ny=0\nans=0\n#for all integers in range, add them appropriately to the sum of squares(x) or sum (y)\nfor i in range(1,101):\n #add the squared value of the integer to the previous ones\n x=x + i**2\n #add the value to the previous values\n y=y+i\n#Find the answer to the question:\nans= (y**2)-x\nprint \"The difference between the sum of the squares of the first one hundred natural numbers and the square of the sum is \" + str(ans) + \".\"" }, { "alpha_fraction": 0.6202783584594727, "alphanum_fraction": 0.6878727674484253, "avg_line_length": 37.769229888916016, "blob_id": "f1a7b9c00f2e5bc4729fed121e1f2dc2c8ca9e9f", "content_id": "dbd638f3d197bac8f115bb20d82c15e2c351a628", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 503, "license_type": "no_license", "max_line_length": 96, "num_lines": 13, "path": "/peuler1.py", "repo_name": "thesteady/myprojecteuler", "src_encoding": "UTF-8", "text": "#Project Euler problem 1 \"http://projecteuler.net/problem=1\":\n#If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9.\n#The sum of these multiples is 23.\n#Find the sum of all the multiples of 3 or 5 below 1000.\n\n#Declare empty variable to hold sum:\nnum=0\n#For all numbers below 1000, if the number is a multiple of 3 or 5, add it to others.\nfor i in range(1,1000):\n if i%3 == 0 or i%5 == 0:\n num = num + i\n#Print answer\nprint \"The answer is \" + str(num) + \".\"" }, { "alpha_fraction": 0.6064400672912598, "alphanum_fraction": 0.663685142993927, "avg_line_length": 27, "blob_id": "293d2b7a8f473eeb2a53edd6b0d437f254e2bcc6", "content_id": "6d3d0ed79cb5cfcf0b10d26046da40f40f5e4c98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 559, "license_type": "no_license", "max_line_length": 141, "num_lines": 20, "path": "/peuler2.py", "repo_name": "thesteady/myprojecteuler", "src_encoding": "UTF-8", "text": "#Each new term in the Fibonacci sequence is generated by adding the previous two terms. 
By starting with 1 and 2, the first 10 terms will be:\n#1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...\n#By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.\n\n#initial variables\nx=1\ny=2\nz=0\nevenNums=2\n\n#start loop\nwhile z <4000000:\n z=x+y\n x=y\n y=z\n #if y is even number, add to variable.\n if y%2==0:\n evenNums+= y\n \nprint \"The sum of even numbers is \" + str(evenNums) + \".\"" } ]
3
Thezap/AdventOfCode
https://github.com/Thezap/AdventOfCode
3af9be0ef39b14d1e6a97fb4300c5ac0cca500a3
0a18872810ab5fc0aa2aa94ece0237baa7f81955
b4b45bd87d1ad38a621381900e334ad0564da872
refs/heads/master
2023-02-12T02:41:12.649948
2020-12-25T17:27:41
2020-12-25T17:27:41
319,792,052
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3768656849861145, "alphanum_fraction": 0.4011194109916687, "avg_line_length": 25.799999237060547, "blob_id": "ca1077cba6c0032c13c4bbe0a236ed862530eafe", "content_id": "089ccadaa5c1868422b273955259de30b3ccf85e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 536, "license_type": "no_license", "max_line_length": 52, "num_lines": 20, "path": "/day8/main.py", "repo_name": "Thezap/AdventOfCode", "src_encoding": "UTF-8", "text": "with open('input') as f:\n lines = [line.rstrip().split(' ') for line in f]\n print(lines)\n stop = False\n pc = 0\n acc = 0\n while not stop:\n if lines[pc][0] == 'YO':\n break\n if lines[pc][0] == 'acc':\n lines[pc][0] = 'YO'\n acc += int(lines[pc][1])\n pc += 1\n elif lines[pc][0] == 'nop':\n lines[pc][0] = 'YO'\n pc += 1\n elif lines[pc][0] == 'jmp':\n lines[pc][0] = 'YO'\n pc += int(lines[pc][1])\n print(acc)\n" }, { "alpha_fraction": 0.4429967403411865, "alphanum_fraction": 0.46254071593284607, "avg_line_length": 28.95121955871582, "blob_id": "086755e2ca1b373689dc7a48d0537550857c87dc", "content_id": "7ff668243cacc23406fa92862e160ec4eadb7466", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1228, "license_type": "no_license", "max_line_length": 66, "num_lines": 41, "path": "/day8/main2.py", "repo_name": "Thezap/AdventOfCode", "src_encoding": "UTF-8", "text": "import copy\n\nwith open('input2') as f:\n lines_raw = [line.rstrip().split(' ') + [False] for line in f]\n lines = copy.deepcopy(lines_raw)\n print(lines)\n stop = False\n pc = 0\n acc = 0\n\n modif_instruction_line = 0\n while not stop:\n if pc == len(lines) - 1:\n print('WIN')\n print(lines)\n print(pc)\n break\n if lines[pc][2] == True:\n lines = copy.deepcopy(lines_raw)\n while lines[modif_instruction_line][0] == 'acc':\n modif_instruction_line += 1\n if lines[modif_instruction_line][0] == 'jmp':\n lines[modif_instruction_line][0] = 'nop'\n else:\n lines[modif_instruction_line][0] = 'jmp'\n modif_instruction_line += 1\n pc = 0\n acc = 0\n print('New Try ', modif_instruction_line)\n if lines[pc][0] == 'acc':\n lines[pc][2] = True\n acc += int(lines[pc][1])\n pc += 1\n elif lines[pc][0] == 'nop':\n lines[pc][2] = True\n pc += 1\n elif lines[pc][0] == 'jmp':\n lines[pc][2] = True\n pc += int(lines[pc][1])\n print(lines)\n print(acc)\n" }, { "alpha_fraction": 0.33879780769348145, "alphanum_fraction": 0.3639344274997711, "avg_line_length": 25.171428680419922, "blob_id": "03750b07ccc73ac37cb8a1b46ae449ef2232477a", "content_id": "02aeb01dfce8b0d7a43de18168dff58a266a008d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 915, "license_type": "no_license", "max_line_length": 72, "num_lines": 35, "path": "/day9/main.py", "repo_name": "Thezap/AdventOfCode", "src_encoding": "UTF-8", "text": "import copy\nimport os\nimport sys\n\nwith open('input') as f:\n lines_raw = [[int(line.rstrip())] + [False] for line in f]\n lines = copy.deepcopy(lines_raw)\n print(lines)\n i = 4\n while i < len(lines) - 1:\n print(lines[i])\n i += 1\n target_sum = lines[i][0]\n\n x = i - 25\n y = i - 25\n while x < i:\n if lines[x][1]:\n x += 1\n continue\n while y < i:\n if y == x:\n y += 1\n continue\n if lines[y][1]:\n y += 1\n continue\n if lines[x][0] + lines[y][0] == target_sum:\n print(\"Found\", lines[x][0], lines[y][0], target_sum)\n lines[x][1] = True\n lines[y][1] = True\n y += 1\n y = i - 25\n x += 1\n print(lines[i])" } ]
3
Aaryan-8684/my_python_project
https://github.com/Aaryan-8684/my_python_project
6b749d9b578f79cfb1aaa6ca896eec48a19beca8
9e1f5954cfd7886e60be8c58218ed89004fb951d
dcac1336beb35c3d8e8cf9dfcb909cc054136dbe
refs/heads/main
2023-02-05T02:21:28.329075
2020-12-31T08:58:07
2020-12-31T08:58:07
310,375,757
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6487758755683899, "alphanum_fraction": 0.6813559532165527, "avg_line_length": 39.227272033691406, "blob_id": "9cf84b64c3bed050cdff6b5d9f47259e9b9ae692", "content_id": "96ed6554612dc2a91d312e19d7fd158bbd8cc91b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5310, "license_type": "no_license", "max_line_length": 107, "num_lines": 132, "path": "/my_project.py", "repo_name": "Aaryan-8684/my_python_project", "src_encoding": "UTF-8", "text": "\"\"\"\nFilename: my_project.py\nUsage: This script will measure dimension(in cm) of different objects in the frame\nusing a reference object of known dimension.\nThe object with known dimension must be the leftmost object.\nAuthor: Aryan Gupta\n\"\"\"\n# Import necessary packages...\nfrom scipy.spatial.distance import euclidean\nfrom imutils import perspective\nfrom imutils import contours\nimport numpy as np\nimport imutils\nimport cv2\n\n# midpoint method will compute the midpoint between two set of (x,y) coordinate...\ndef midpoint(ptA, ptB):\n return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)\n\n# img_path variable will hold the path of our image and\n# load the input image and display the sample3.jpg...\nimg_path = \"sample4.jpg\"\nimage = cv2.imread(img_path)\ncv2.imshow(\"Input image\", image)\ncv2.waitKey(0)\n\n\n# convert image to grayscale, and blur(smooth) it slightly to remove noise...\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ngray = cv2.GaussianBlur(gray, (3, 3), 0)\ncv2.imshow(\"Blurred image\", gray)\ncv2.waitKey(0)\n\n# perform edge detection, then perform a dilation + erosion to\n# close gaps in between object edges in the edge map...\nedged = cv2.Canny(gray, 50, 100)\n\ncv2.imshow(\"canny image\", edged)\ncv2.waitKey(0)\nedged = cv2.dilate(edged, None, iterations=1)\ncv2.imshow(\"dilate image\", edged)\ncv2.waitKey(0)\nedged = cv2.erode(edged, None, iterations=1)\ncv2.imshow(\"erode image\", edged)\ncv2.waitKey(0)\n\n# find contours(i.e, outlines) that corresponds to the objects in our edge map...\nitems = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\ncnts = imutils.grab_contours(items)\n\n# sort the contours from left-to-right (hepls us to ditact our reference object) and\n# initialize the 'pixels_Per_cm' calibration variable to None...\n(cnts, _) = contours.sort_contours(cnts)\npixels_Per_cm = None\n\n# loop over the contours individually...\nfor c in cnts:\n# if the contour is not sufficiently large, discard it...\n if cv2.contourArea(c) < 100:\n continue\n rect = cv2.minAreaRect(c)\n BoxPoints = cv2.boxPoints(rect)\n BoxPoints = np.array(BoxPoints, dtype=\"int\")\n\n\t# arrange our rotated bounding rectangle coordinates such that they appear\n\t# in top-left, top-right, bottom-right, and bottom-left order(i.e, clockwise)...\n BoxPoints = perspective.order_points(BoxPoints)\n\n # draw the outline of the object in black colour...\n cv2.drawContours(image, [BoxPoints.astype(\"int\")], -1, (0, 0, 0), 2)\n\n\t# This line will draw small red circles as a vertices of the bounding rectangular BoxPoints\n # by looping over the original points...\n for (x, y) in BoxPoints:\n cv2.circle(image, (int(x), int(y)), 5, (0, 0, 255), -1, cv2.FILLED)\n\n \t# unpack the ordered bounding BoxPoints, then compute the midpoint\n\t# between the top-left and top-right coordinates, followed by\n\t# the midpoint between bottom-left and bottom-right coordinates...\n (tl, tr, br, bl) = BoxPoints\n (tltrX, tltrY) = midpoint(tl, tr)\n 
(blbrX, blbrY) = midpoint(bl, br)\n\n\t# also compute the midpoint between the top-left + bottom-left points,\n\t# followed by the midpoint between the top-right and bottom-right...\n (tlblX, tlblY) = midpoint(tl, bl)\n (trbrX, trbrY) = midpoint(tr, br)\n\n\t# it will draw the blue midpoints on our image...\n cv2.circle(image, (int(tltrX), int(tltrY)), 5, (89, 33, 32), -1, cv2.FILLED)\n cv2.circle(image, (int(blbrX), int(blbrY)), 5, (89, 33, 32), -1, cv2.FILLED)\n cv2.circle(image, (int(tlblX), int(tlblY)), 5, (89, 33, 32), -1, cv2.FILLED)\n cv2.circle(image, (int(trbrX), int(trbrY)), 5, (89, 33, 32), -1, cv2.FILLED)\n # cv2.imshow(\"image image\", image)\n # cv2.waitKey(0)\n\n\t# it will connect the blue midpoints by drawing pink lines between them...\n cv2.line(image, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)), (255, 0, 255), 1) # Vertical line\n cv2.line(image, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)), (255, 0, 255), 1) # Horizontal line\n\n\t# compute the Euclidean distance between the blue midpoints,\n # dA variable will contain the height distance in pixels and\n # dB will hold width distance in pixels\n dA = euclidean((tltrX, tltrY), (blbrX, blbrY))\n dB = euclidean((tlblX, tlblY), (trbrX, trbrY))\n\n # here we make a check on Line 101 to see\n\t# if our pixels_Per_cm variable has not been initialized, then\n\t# compute it as the ratio of pixels to supplied metric\n\t# (in this case, cm)...\n if pixels_Per_cm is None:\n width_in_cm = 3\n pixels_Per_cm = dB / width_in_cm\n \"\"\"\n pixels_Per_cm = object_width / known_width.\n The reference square box has a known width of 3 cm (width_in_cm above).\n \"\"\"\n\t# compute the dimension of the object (in cm) by\n # dividing the respective euclidean distance by the pixels_Per_cm value...\n dimA = dA / pixels_Per_cm\n dimB = dB / pixels_Per_cm\n\n # draw the dimension of objects in brown color on our output image...\n cv2.putText(image, \"{:.1f}cm\".format(dimA), (int(tltrX - 15), int(tltrY -10)),\n cv2.FONT_HERSHEY_SIMPLEX, 0.45, (33, 67, 101), 1)\n cv2.putText(image, \"{:.1f}cm\".format(dimB), (int(trbrX + 10), int(trbrY)),\n cv2.FONT_HERSHEY_SIMPLEX, 0.45, (33, 67, 101), 1)\n\n # display the output image...\n cv2.imshow(\"Output image\", image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n" } ]
1
sfuerte/zbx-elastic
https://github.com/sfuerte/zbx-elastic
ec71ce431bba7d44a2b5839e11e72e39d6dabdfb
437da72f23181380a6c9b30909b0ef19312d05c8
73831b278277448d8675ae8d5fffeb256065ee76
refs/heads/master
2020-03-29T17:24:05.422524
2018-09-24T20:48:00
2018-09-24T20:48:00
150,160,474
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6201353669166565, "alphanum_fraction": 0.6319797039031982, "avg_line_length": 22.396039962768555, "blob_id": "e4ee812093c7ed435a951a513e2d5afe697e0f52", "content_id": "31743d5114dd389efa689c6c7ec6fc227056eb86", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2364, "license_type": "permissive", "max_line_length": 73, "num_lines": 101, "path": "/agent/elastic.py", "repo_name": "sfuerte/zbx-elastic", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os\nimport sys\nimport json\nimport urllib2\nimport time\nimport errno\n\nttl = 60\n\nstats = {\n\t'cluster': 'http://{0}:{1}/_cluster/stats',\n\t'nodes': 'http://{0}:{1}/_nodes/stats',\n\t'indices': 'http://{0}:{1}/_stats',\n\t'health': 'http://{0}:{1}/_cluster/health'\n}\n\n\ndef created_file(name):\n\ttry:\n\t\tfd = os.open(name, os.O_WRONLY | os.O_CREAT | os.O_EXCL)\n\t\tos.close(fd)\n\t\treturn True\n\texcept OSError, e:\n\t\tif e.errno == errno.EEXIST:\n\t\t\treturn False\n\t\traise\n\n\ndef is_older_then(name, ttl):\n\tage = time.time() - os.path.getmtime(name)\n\treturn age > ttl\n\ndef get_cache(eshost, esport, api):\n\tcache = '/tmp/elastizabbix-{0}.json'.format(api)\n\tlock = '/tmp/elastizabbix-{0}.lock'.format(api)\n\tshould_update = (not os.path.exists(cache)) or is_older_then(cache, ttl)\n\tif should_update and created_file(lock):\n\t\ttry:\n\t\t\td = urllib2.urlopen(stats[api].format(eshost, esport)).read()\n\t\t\twith open(cache, 'w') as f:\n\t\t\t\tf.write(d)\n\t\texcept Exception as e:\n\t\t\tpass\n\t\tif os.path.exists(lock):\n\t\t\tos.remove(lock)\n\tif os.path.exists(lock) and is_older_then(lock, 300):\n\t\tos.remove(lock)\n\tret_data = {}\n\ttry:\n\t\twith open(cache) as data_file:\n\t\t\tret_data = json.load(data_file)\n\texcept Exception as e:\n\t\tret_data = json.loads(\n\t\t\turllib2.urlopen(stats[api].format(eshost, esport)).read())\n\treturn ret_data\n\n\ndef get_stat(eshost, esport, api, stat):\n\td = get_cache(eshost, esport, api)\n\tkeys = []\n\tfor i in stat.split('.'):\n\t\tkeys.append(i)\n\t\tkey = '.'.join(keys)\n\t\tif key in d:\n\t\t\td = d.get(key)\n\t\t\tkeys = []\n\treturn d\n\n\ndef discover_nodes(eshost, esport):\n\td = {'data': []}\n\tfor k, v in get_stat(eshost, esport, 'nodes', 'nodes').iteritems():\n\t\td['data'].append({'{#NAME}': v['name'], '{#NODE}': k})\n\treturn json.dumps(d)\n\ndef discover_indices(eshost, esport):\n\td = {'data': []}\n\tfor k, v in get_stat(eshost, esport, 'indices', 'indices').iteritems():\n\t\td['data'].append({'{#NAME}': k})\n\treturn json.dumps(d)\n\n\nif __name__ == '__main__':\n\tapi = sys.argv[1]\n\tstat = sys.argv[2]\n\teshost = sys.argv[3] if len(sys.argv) > 3 else \"localhost\"\n\tesport = sys.argv[4] if len(sys.argv) > 4 else \"9200\"\n\n\tif api == 'discover':\n\t\tif stat == 'nodes':\n\t\t\tprint discover_nodes(eshost, esport)\n\t\tif stat == 'indices':\n\t\t\tprint discover_indices(eshost, esport)\n\n\telse:\n\t\tstat = get_stat(eshost, esport, api, stat)\n\t\tif isinstance(stat, dict):\n\t\t\tprint ''\n\t\telse:\n\t\t\tprint stat\n\n" } ]
1
jpatts/DOTA2-predictor
https://github.com/jpatts/DOTA2-predictor
029b7748ef3922698a671b2adfca93e80225b52a
34b6aacc3cc527b2f3ace4e92bbb84c60545188a
f0dbeb76a8a580031007f1c8c542c8bf058cb8ca
refs/heads/master
2020-03-09T08:13:02.522442
2018-04-19T17:28:10
2018-04-19T17:28:10
128,683,732
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.5875810980796814, "alphanum_fraction": 0.6218721270561218, "avg_line_length": 26.423728942871094, "blob_id": "3c68d06fbf6a2b1f037ac4d34449e50ea81f4d10", "content_id": "7149520940875bced671adc8288ae17dc5eb07f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3237, "license_type": "no_license", "max_line_length": 702, "num_lines": 118, "path": "/utils/preprocessing.py", "repo_name": "jpatts/DOTA2-predictor", "src_encoding": "UTF-8", "text": "\nimport requests, json, os\nimport pandas as pd\n\nfrom random import randint\n\n\ndef get_data(normalize=False):\n '''\n Author: Jordan Patterson\n\n Function to set parameters on data and get it\n\n Parameters\n ----------\n\n Optional\n ---------\n normalize: boolean\n Whether data to be returned should be normalized or not\n \n Returns\n ----------\n data: Object\n Contains the data requested ('match_id', 'win', 'estimate', 'score')\n\n '''\n\n # specify number of matches to parse\n matches = 50000\n # find most recent matches\n max_match = call_api('https://api.opendota.com/api/explorer?sql=SELECT%0Amax(match_id)%0AFROM%20matches')['max'][0]\n\n # get N matches in range < match_id\n data = call_api('https://api.opendota.com/api/explorer?sql=SELECT%0Amatches.match_id%2C%0A((player_matches.player_slot%20%3C%20128)%20%3D%20matches.radiant_win)%20win%2C%0Ammr_estimates.estimate%2C%0Ahero_ranking.score%0AFROM%20matches%0AJOIN%20player_matches%20using(match_id)%0AJOIN%20heroes%20on%20heroes.id%20%3D%20player_matches.hero_id%0AJOIN%20mmr_estimates%20on%20mmr_estimates.account_id%20%3D%20player_matches.account_id%0AJOIN%20hero_ranking%20on%20(hero_ranking.hero_id%20%3D%20heroes.id%20and%20hero_ranking.account_id%20%3D%20player_matches.account_id)%0AWHERE%20match_id%20%3C%20' + str(max_match) + '%0AORDER%20BY%20matches.match_id%20DESC%20NULLS%20LAST%0ALIMIT%20' + str(matches))\n\n data.to_csv(\"openDotaNew.csv\")\n\n # normalize data\n if normalize:\n data = (data - data.min()) / (data.max() - data.min())\n \n return data\n\n \ndef call_api(api):\n '''\n Author: Jordan Patterson\n\n Function to get data from OpenDota api\n\n Parameters\n ----------\n api: string\n The api being called\n \n Returns\n ----------\n data: Object\n Contains the data requested in pandas table\n\n '''\n\n # call endpoint\n r = requests.get(api)\n\n # ensure successful call\n while r.status_code != 200:\n print('Failed to access OpenDota servers: try decreasing number of matches')\n r = requests.get(api)\n\n # convert to json\n json = r.json()\n\n # put data in pandas format after parsing JSON\n return pd.DataFrame(json['rows'], columns=[x['name'] for x in json['fields']])\n\n \ndef read_csv(name, dtypes, normalize=False):\n '''\n Author: Jordan Patterson\n\n Function to search csv files for desired information\n\n Parameters\n ----------\n name: string\n Name/path of the .csv file being parsed\n \n dtypes: dictionary\n Key / value pair specifying type of each column/header\n \n Optional\n ---------\n normalize: boolean\n Whether data to be returned should be normalized or not\n\n Returns\n ----------\n data: Object\n Contains the data requested\n\n '''\n\n # ensures valid .csv file was passed\n if os.path.isfile(name) and name[name.rfind('.'):] == '.csv':\n \n # reads .csv into table\n data = pd.read_csv(name, dtype=dtypes)\n\n # normalize data\n if normalize:\n data = (data - data.min()) / (data.max() - data.min())\n\n return data\n\n else:\n print(\"Error: invalid .csv file\")\n sys.exit(0)\n" }, 
{ "alpha_fraction": 0.7537523508071899, "alphanum_fraction": 0.7593808770179749, "avg_line_length": 32.3125, "blob_id": "e7195db1e0084a2cdf7353f9d99441b3f9ebcbb2", "content_id": "7256625d4cef6e1f16a78b29f91fc71a6767ab72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2132, "license_type": "no_license", "max_line_length": 179, "num_lines": 64, "path": "/README.md", "repo_name": "jpatts/DOTA2-predictor", "src_encoding": "UTF-8", "text": "# Description\n\nUses scikit-learns random forest classifier to predict the outcome of a DOTA2 match before it has begun\n\nTo run:\n\n```\npython3 prediction.py\n```\n\nOptional arguments:\n\n```\npython3 prediction.py --new\n```\n\nThis specifies to train on new data from OpenDota, rather than the default data in openDota.csv\n\nHowever, testing will then be done on openDota.csv to avoid bias (if training is done on openDota.csv, testing is done on new OpenDota data)\n\n\n#### Requirements\n\n* a working Python 3.6 development environment\n* [pip3](https://pip.pypa.io/en/latest/installing.html) to install Python dependencies (must be latest version -> pip install --upgrade pip)\n* [pipenv](https://github.com/pypa/pipenv) to manage dependencies\n\n#### Pipfile Requirements\n\n* [requests](http://docs.python-requests.org/en/master/) to get the data\n* [pandas](https://pandas.pydata.org/pandas-docs/stable/install.html) to parse data\n* [scikit-learn](http://pytorch.org/) for the random regression classifier\n* [tqdm](https://pypi.python.org/pypi/tqdm) to view progress throughout runtime\n\npipenv will install all of the Pipfile required packages.\n\nTo do so, run the following command:\n```\npipenv install\n```\n\n#### Dataset\n\nUses the [OpenDota](https://docs.opendota.com/) API\n\nGets 'match_id', 'win', 'estimate', 'score' with an api call\n\nA valid match will have 10 identical match_ids, as each represents a players data in the match\n\nprediction.py uses 'win', 'estimate', and 'score' for each player, grouping players on a team by whether they won/lost in a given match\n\n'win' is True/False\n\n'estimate' is an estimation of the players MMR\n\n'score' is determined by OpenDota, based on how good a player has been playing the past several games\n\n#### Results\n\nTraining on openDota.csv and testing on new data from the OpenDota server, accuracy is consistently ~69%\n\nTraining on new data from the OpenDota server and testing on openDota.csv, accuracy is consistently ~65%\n\nUnfortunately, we were unable to test on the new OpenDota data with the OpenDota trained model, as we could not find enough complete and different matches to get reliable accuracy\n" }, { "alpha_fraction": 0.5761759281158447, "alphanum_fraction": 0.5882164239883423, "avg_line_length": 25.84482765197754, "blob_id": "4b74f42a0f56790816f5abda41265f8168cce914", "content_id": "5a95b4dc56e3785d8a72bb15361331f9781c8b54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6229, "license_type": "no_license", "max_line_length": 105, "num_lines": 232, "path": "/prediction.py", "repo_name": "jpatts/DOTA2-predictor", "src_encoding": "UTF-8", "text": "\nimport os, argparse\nimport numpy as np\n\nfrom tqdm import tqdm\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.externals import joblib\n\nfrom utils.preprocessing import get_data, read_csv\n\n\ndef process(data):\n '''\n Author: Jordan Patterson\n\n Function to format pandas data into two datasets, data and 
labels\n\n Parameters\n ----------\n data: Object\n Contains the data parsed from the OpenDota api\n\n Returns\n ----------\n data: npArray\n Shape (N, 20), where N is number of matches, and 20 is 10 players 'estimate' and 'score' data\n\n labels: npArray\n Shape (N,) where N in number of matches, and contains 1.0 if first team won, and 0.0 if they lost\n\n '''\n\n matches_win = {}\n matches_lose = {}\n # loops through .csv file\n for row in tqdm(range(len(data))):\n # sets match_id column to key, and data type to list\n key = data.iloc[row, 0]\n matches_win.setdefault(key, [])\n matches_lose.setdefault(key, [])\n # group data based on outcome\n if data.iloc[row, 1] == 1.0:\n matches_win[key].insert(0, data.iloc[row, 3])\n matches_win[key].insert(0, data.iloc[row, 2])\n matches_lose[key].append(data.iloc[row, 2])\n matches_lose[key].append(data.iloc[row, 3])\n else:\n matches_lose[key].insert(0, data.iloc[row, 3])\n matches_lose[key].insert(0, data.iloc[row, 2])\n matches_win[key].append(data.iloc[row, 2])\n matches_win[key].append(data.iloc[row, 3])\n\n # loop through all matches\n for match in list(matches_win.keys()):\n # remove incomplete matches (less than 10 players -> 10*2 features)\n if len(matches_win[match]) != 20 or len(matches_lose[match]) != 20:\n del matches_win[match]\n del matches_lose[match]\n\n # create labels\n labels_win = np.ones(np.asarray(list(matches_win)).shape[0])\n labels_lose = np.zeros(np.asarray(list(matches_lose)).shape[0])\n labels = np.concatenate((labels_win, labels_lose), axis=0)\n # merge match datasets\n data = np.asarray(list(matches_win.values()) + list(matches_lose.values()))\n\n return data, labels\n\n\ndef train(data, labels):\n '''\n Author: Jordan Patterson\n\n Function to train models\n\n Parameters\n ----------\n data: nparray\n Contains the data parsed from the OpenDota api\n\n labels: nparray\n Contains the labels for the parsed data\n\n Returns\n ----------\n model: Object\n The model created with the highest accuracy\n\n '''\n\n # size of train partition\n N = int(len(data) - (0.1 * len(data)))\n # parameters and output placeholders\n min_acc = 100\n max_acc = 0\n avg_acc = 0\n num_epocs = 100\n best_model = RandomForestClassifier().fit(X=data[0:N, :], y=labels[0:N])\n # generates models\n print(\"Beginning training...\")\n for i in tqdm(range(num_epocs)):\n # create array of random values\n s = np.arange(data.shape[0])\n np.random.shuffle(s)\n\n # shuffle data\n labels = labels[s]\n data = data[s]\n\n # train on model\n model = RandomForestClassifier().fit(X=data[0:N, :], y=labels[0:N])\n\n # create test data from data split\n test_data = data[N:, :]\n test_labels = labels[N:]\n\n # get test accuracy\n acc = model.score(test_data, test_labels) * 100\n\n # update output\n if acc > max_acc:\n max_acc = acc\n best_model = model\n if acc < min_acc:\n min_acc = acc\n \n avg_acc += acc\n\n avg_acc /= num_epocs\n\n print(\"Minimum accuracy: \" + str(min_acc)[0:4] + \"%\")\n print(\"Average accuracy: \" + str(avg_acc)[0:4] + \"%\")\n print(\"Best model accuracy: \" + str(max_acc)[0:4] + \"%\")\n return best_model\n\n\ndef test(model, new, dtypes):\n '''\n Author: Jordan Patterson\n\n Function to test live data on a pretrained model\n\n Parameters\n ----------\n model: Object\n Any scikit learn generated model\n\n new: boolean\n Whether we trained on new data from OpenDota or not\n\n dtypes: dictionary\n Key / value pair specifying type of each column/header\n\n '''\n\n print(\"Getting test data...\")\n # don't test on same data we trained on\n if 
new == True:\n data = read_csv('data/openDota.csv', dtypes, True)\n else:\n data = get_data(True)\n \n print(\"Processing test data...\")\n data, labels = process(data)\n\n print(\"Number of complete matches: \" + str(data.shape[0]))\n\n # create array of random values\n s = np.arange(data.shape[0])\n np.random.shuffle(s)\n\n # shuffle data\n test_labels = labels[s]\n test_data = data[s]\n\n # get test accuracy\n acc = model.score(test_data, test_labels) * 100\n print(\"Test accuracy: \" + str(acc))\n\n\ndef main():\n \"\"\"The main function.\"\"\"\n\n # parse command line arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('--new', action='store_true')\n args = parser.parse_args()\n\n # tells pandas what data types the columns of the .csv file are\n dtypes = {\n 'match_id': int,\n 'win': bool,\n 'estimate': int,\n 'score': float\n }\n\n # use new data\n if args.new == True:\n print('Using new OpenDota api data')\n print('Gathering data...' )\n # from preprocessing.py\n data = get_data(True)\n \n # defaults to old data\n else:\n print('Using openDota.csv data')\n print('Gathering data...' )\n # get data from csv file\n data = read_csv('data/openDota.csv', dtypes, True)\n\n print('Processing data...')\n # process data into required shapes (N, 20) and (N, )\n data, labels = process(data)\n\n print(\"Number of complete matches: \" + str(data.shape[0]))\n while data.shape[0] < 1000:\n print(\"Not enough matches, trying again\")\n data = get_data(True)\n data, labels = process(data)\n\n best_model = train(data, labels)\n\n print(\"Beginning testing on new data...\")\n test(best_model, args.new, dtypes)\n\n # path to save your model\n open('best_model.pkl', 'w')\n path = os.getcwd() + '/best_model.pkl'\n joblib.dump(best_model, path)\n\n\nif __name__ == \"__main__\":\n main()\n" } ]
3
becer89/resolution_checker
https://github.com/becer89/resolution_checker
9974f9e1d425b4241ec8fec19d347407804d23dd
bafcf97bca8d4974febbad911026dc31f6fc5836
f175ec350f839facd01cb8653d7ed146ae69fc09
refs/heads/master
2020-06-25T00:38:54.996571
2019-07-30T18:01:35
2019-07-30T18:01:35
199,142,491
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5766806602478027, "alphanum_fraction": 0.5997899174690247, "avg_line_length": 31.27118682861328, "blob_id": "46e973c29cba70a5e0fb6894d19519a56b0a5512", "content_id": "020a5ef8566da06ef61d2ae3ef559dc1db964ec1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1904, "license_type": "no_license", "max_line_length": 111, "num_lines": 59, "path": "/resolution3.py", "repo_name": "becer89/resolution_checker", "src_encoding": "UTF-8", "text": "import imghdr\nfrom PIL import Image\nimport os\n\ndir_name = 'Z:\\\\1_modeling\\\\Vanguard\\\\VANG0119\\\\Textures\\\\HDPIPELINE\\\\' # directory to check\nresolution_format = 4096 # 4096 for 4k, 2048 for 2k etc.\n\n\ndef correct_resolution(width, height, resolution):\n if width != resolution or height != resolution:\n return False\n return True\n\n\ndef is_png_file(img_file):\n if imghdr.what(img_file) == \"png\":\n return True\n else:\n return False\n\n\ndef get_list_of_files(dir_name):\n list_of_files = os.listdir(dir_name)\n all_files = list()\n for subfolder in list_of_files:\n full_path = os.path.join(dir_name, subfolder)\n if os.path.isdir(full_path):\n all_files = all_files + get_list_of_files(full_path)\n else:\n all_files.append(full_path)\n return all_files\n\n\ndef check_dir(log):\n for i, img_file in enumerate(get_list_of_files(dir_name)):\n img_type = (\".jpg\", \".png\", \".jpx\", \".gif\", \".webp\", \".cr2\", \".tif\", \".bmp\", \".jxr\", \".ico\", \".heic\")\n if not img_file.endswith(img_type):\n continue # skip non-img files\n img = Image.open(img_file)\n width = img.size[0]\n height = img.size[1]\n is_correct = correct_resolution(width, height, resolution_format)\n if is_correct is False or is_png_file is False:\n print(\"| %147s | %4s %1s %4s | %18s | %16s |\" %\n (img_file, width, \"x\", height, is_correct, is_png_file(img_file)))\n log.write('{}\\t{}x{}\\t{}\\t{}\\n'.format(img_file, width, height, is_correct, is_png_file(img_file)))\n\n\ntable_width = 205\nprint(\"-\" * table_width)\nprint(\"| %147s | %11s | %18s | %16s |\" % (\"Path\", \"Resolution\", \"Correct Resolution\", \"Is PNG File\"))\nprint(\"*\" * table_width)\nlog = open('log.tsv', 'w')\nlog.write('Path\\tResolution\\tCorrect Resolution\\tIs PNG File\\n')\ncheck_dir(log)\n\n\nlog.close()\nprint(\"-\" * table_width)\n" } ]
1
jesseyu0323/python-challenge
https://github.com/jesseyu0323/python-challenge
8d73d5ec526f54f969566a377cf2e4c859553db7
42ee49dd500a3a142dece092882f45e10421a94c
a560ae7598b007c1e8ba23330966ad31eeab8b78
refs/heads/master
2020-03-25T19:24:35.001241
2018-08-10T00:06:40
2018-08-10T00:06:40
143,352,570
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6121723651885986, "alphanum_fraction": 0.6183918118476868, "avg_line_length": 35.88524627685547, "blob_id": "c81237ca7dda1438117d5c3fdc5a77c33726fd2c", "content_id": "798aa1bde96b945a92172eb2f94ff6350d97e3c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2251, "license_type": "no_license", "max_line_length": 118, "num_lines": 61, "path": "/PyBank/main.py", "repo_name": "jesseyu0323/python-challenge", "src_encoding": "UTF-8", "text": "import os\nimport csv\n\n\nwith open(\"budget_data.csv\", 'r') as csvfile:\n\n csvreader = csv.reader(csvfile, delimiter=',')\n\n header = next(csvreader)\n\n total_months = 0 \n total_profits_losses = 0\n first_month = 0\n change_in_profitslosses = 0\n total_change = 0\n average_change = 0\n greatest_increase = 0\n greatest_increase_date = ''\n greatest_decrease = 0\n greatest_decrease_date = ''\n\n for row in csvreader:\n \ttotal_months = total_months + 1\n \ttotal_profits_losses = total_profits_losses + int(row[1])\n \tchange_in_profitslosses = (int(row[1]) - first_month) \n \tfirst_month = int(row[1])\n \ttotal_change = change_in_profitslosses + total_change\n \n \tif change_in_profitslosses > greatest_increase:\n \t\tgreatest_increase = change_in_profitslosses\n \t\tgreatest_increase_date = row[0]\n\n \tif change_in_profitslosses < greatest_decrease:\n \t\tgreatest_decrease = change_in_profitslosses\n \t\tgreatest_decrease_date = row[0]\n\n average_change = total_change/total_months\n\n print()\n print(\"Financial Analysis\")\n print(\"----------------------------\")\n print(\"Total Months: \" + str(total_months))\n print(\"Total Revenue: \" + \"$\" + str(total_profits_losses))\n print(\"Average Change: \" + \"$\" + str(average_change))\n print(\"Greatest Increase in Profits:\" + greatest_increase_date + \" ($\" + str(greatest_increase) + \")\")\n print(\"Greatest Decrease in Profits:\" + greatest_decrease_date + \" ($\" + str(greatest_decrease) + \")\")\n\n with open(\"output_file.txt\" , \"w\") as text_file:\n \ttext_file.write(\"Financial Analysis\")\n \ttext_file.write(\"\\n\")\n \ttext_file.write(\"----------------------------\")\n \ttext_file.write(\"\\n\")\n \ttext_file.write(\"Total Months: \" + str(total_months))\n \ttext_file.write(\"\\n\")\n \ttext_file.write(\"Total Revenue: \" + \"$\" + str(total_profits_losses))\n \ttext_file.write(\"\\n\")\n \ttext_file.write(\"Average Change: \" + \"$\" + str(average_change))\n \ttext_file.write(\"\\n\")\n \ttext_file.write(\"Greatest Increase in Profits: \" + greatest_increase_date + \" ($\" + str(greatest_increase) + \")\")\n \ttext_file.write(\"\\n\")\n \ttext_file.write(\"Greatest Decrease in Profits: \" + greatest_decrease_date + \" ($\" + str(greatest_decrease) + \")\")\n\n" }, { "alpha_fraction": 0.5107582211494446, "alphanum_fraction": 0.5174180269241333, "avg_line_length": 28.044776916503906, "blob_id": "717d7a9e9dc1884f0025f98ab30eba9d177d36c1", "content_id": "605a680734b7bc29ec836ea1b8d70ede0685a19f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1952, "license_type": "no_license", "max_line_length": 112, "num_lines": 67, "path": "/PyPoll/main.py", "repo_name": "jesseyu0323/python-challenge", "src_encoding": "UTF-8", "text": "import os\nimport csv\n\n\nwith open(\"election_data.csv\", 'r') as csvfile:\n\n csvreader = csv.reader(csvfile, delimiter=',')\n\n header = next(csvreader)\n\n total_votes = 0\n list_of_candidates = []\n x = []\n percent_won = 
{}\n winner = ''\n most_voted = 0\n\n for row in csvreader:\n \n \ttotal_votes = total_votes + 1\n \tcandidate_name = row[2]\n\n \tif candidate_name not in list_of_candidates:\n \t\tlist_of_candidates.append(candidate_name)\n \t\tpercent_won[candidate_name] = 1\n \telse:\n \t\tpercent_won[candidate_name] = percent_won[candidate_name] + 1\n\t\n print(\"Election Results\")\n print(\"---------------------------\")\n print(\"Total Votes \" + str(total_votes))\n print(\"---------------------------\")\n \n for candidate in percent_won:\n \tnumber_of_votes = percent_won.get(candidate)\n \tvote_percentage = number_of_votes / total_votes * 100\n\n \tif (number_of_votes > most_voted):\n \t\tmost_voted = number_of_votes\n \t\twinner = candidate\n \t\n \tx.append(candidate + \": \" + \"{0:.3f}\".format(vote_percentage) + \"%\" + \" \" \"(\" + str(number_of_votes) + \")\")\n \tprint(candidate + \": \" + \"{0:.3f}\".format(vote_percentage) + \"%\" + \" \" \"(\" + str(number_of_votes) + \")\")\n \n print(\"---------------------------\")\n print(\"Winner: \" + winner)\n print(\"---------------------------\")\n\nwith open(\"output_file.txt\" , \"w\") as text_file:\n text_file.write(\"Election Results\")\n text_file.write(\"\\n\")\n text_file.write(\"---------------------------\")\n text_file.write(\"\\n\")\n text_file.write(\"Total Votes \" + str(total_votes))\n text_file.write(\"\\n\")\n text_file.write(\"---------------------------\")\n for xx in x:\n \tprint()\n \ttext_file.write(\"\\n\")\n \ttext_file.write(xx)\n\n text_file.write(\"\\n\")\n text_file.write(\"---------------------------\")\n text_file.write(\"\\n\")\n text_file.write(\"Winner: \" + winner)\n text_file.write(\"\\n\")\n text_file.write(\"---------------------------\")\n \n\n" } ]
2
dyarnall/python_immersive
https://github.com/dyarnall/python_immersive
f138af0c3d178764f280ae85c23aed71bfdf7253
0af616060bc28acd2cfff65af78e7749dc367e54
c2492bf331c40837dc4fed0a5514298800a18104
refs/heads/master
2020-03-24T09:00:39.356391
2018-07-29T21:26:33
2018-07-29T21:26:33
142,615,979
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6320000290870667, "alphanum_fraction": 0.6520000100135803, "avg_line_length": 25.263158798217773, "blob_id": "aeb10b4204bb8152e2b0809a670a690786a16158", "content_id": "a59117ae6a13a15f0756f7d4ea6626b64fc05a36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 500, "license_type": "no_license", "max_line_length": 43, "num_lines": 19, "path": "/class_one_ex_9.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "user_word = input(\"Please enter a word: \")\nuser_word = user_word.lower()\n# word_list1 = []\n# word_list2 = []\n\n# for char in user_word:\n# \tword_list1.append(char)\n# for char in user_word[-1::-1]:\n# \tword_list2.append(char)\n# if word_list1 == word_list2:\n# \tprint(user_word, \"is a palindrome.\")\n# else:\n# \tprint(user_word, \"is not a palindrome.\")\n\nbackwards_word = user_word[-1::-1]\nif user_word == backwards_word:\n\tprint(user_word, \"is a palindrome.\")\nelse:\n\tprint(user_word, \"is not a palindrome.\")\n\n" }, { "alpha_fraction": 0.5930232405662537, "alphanum_fraction": 0.6511628031730652, "avg_line_length": 16.399999618530273, "blob_id": "f44f1bb851be412733208211ec3a4a6c9646332b", "content_id": "9fee1f1a68c44db333821f2232810c981c642347", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 86, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/built_in_functions.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "alist = [1,2,3,4,5]\nblist = \"apple\"\n\nzip_list = list(zip(alist,blist))\nprint(zip_list)" }, { "alpha_fraction": 0.6022099256515503, "alphanum_fraction": 0.6353591084480286, "avg_line_length": 20.352941513061523, "blob_id": "37234a345c09fee6ccd5151efb26a3f02adca9e0", "content_id": "409f32748d4bd9d0409f4459ce6c69ae8884ba2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 362, "license_type": "no_license", "max_line_length": 52, "num_lines": 17, "path": "/map.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "alist = [1,2,3,4,5]\nblist = [1,2,3,4,5]\n\ndef add_one(item):\n\t#item = item + 1\n\treturn item + 1\n\nnew_list = list(map(add_one,alist))\nprint(\"new list\", new_list,\"\\nold list\", alist)\n\ndef add_lists(a,b):\n\t# a + b\n\treturn a + b\n\n#new_new_list = list(map(add_lists,alist,blist))\nnew_new_list = list(map(lambda a,b:a+b,alist,blist))\nprint(\"new new list\", new_new_list)" }, { "alpha_fraction": 0.5173454284667969, "alphanum_fraction": 0.5716440677642822, "avg_line_length": 17.44444465637207, "blob_id": "935794623eda621f1f29f131e0fd5514f592834c", "content_id": "7d0b38e1342329a01e8ae72c0aca92d97cfdf956", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 663, "license_type": "no_license", "max_line_length": 50, "num_lines": 36, "path": "/anagrams.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "# word1 = input(\"enter word 1: \")\n# word2 = input(\"enter word 2: \")\n# d1 = {}\n# d2 = {}\n\n# for i in word1:\n# \td1[i] = d1.get(i,1) + 1\n# for i in word2:\n# \td2[i] = d2.get(i,1) + 1\n# if d1 == d2:\n# \tprint(word1, \"and\", word2, \"are anagrams.\")\n# else:\n# \tprint(word1, \"and\", word2, \"are not anagrams.\")\n\n\ndef counter(word):\n\td = {}\n\tfor i in word:\n\t\td[i] = d.get(i,1) + 1\n\treturn d\n\t\ndef anagrams():\n\tword1 = 
user_input()\n\tword2 = user_input()\n\n\td1 = counter(word1)\n\td2 = counter(word2)\n\n\tif d1 == d2:\n\t\tprint(word1, \"and\", word2, \"are anagrams.\")\n\telse:\n\t\tprint(word1, \"and\", word2, \"are not anagrams.\")\n\ndef user_input():\n\tword = input(\"enter a word: \")\n\treturn word" }, { "alpha_fraction": 0.6063829660415649, "alphanum_fraction": 0.6329787373542786, "avg_line_length": 20.823530197143555, "blob_id": "c04c1546aa1ab9ffef649b9b52afd99d32fc14a0", "content_id": "7f3eaf7b6d504f40b55546f6045a22a5ee2e1536", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 376, "license_type": "no_license", "max_line_length": 51, "num_lines": 17, "path": "/big_.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "number = int(input(\"enter a number one to seven \"))\nalist = [1,2,3,5,6,7,8]\nend = len(alist)-1\nstart = 0\nflag = False\n\n# binary search: shrink [start, end] past the midpoint so the loop always makes progress\nwhile not flag and start <= end:\n\tmiddle_index = (start + end) // 2\n\n\tif number == alist[middle_index]:\n\t\tprint(\"your number is in the list\")\n\t\tflag = True\n\telse:\n\t\tif number < alist[middle_index]:\n\t\t\tend = middle_index - 1\n\t\telse:\n\t\t\tstart = middle_index + 1\n\nif not flag:\n\tprint(\"your number is not in the list\")\n" }, { "alpha_fraction": 0.5303030014038086, "alphanum_fraction": 0.560606062412262, "avg_line_length": 15.117647171020508, "blob_id": "d0e4a1d0b3ed00741d2801cfe7abe628d36e20db", "content_id": "e703666cbebe402d75ff999bc3dd8693ffb82c4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 264, "license_type": "no_license", "max_line_length": 32, "num_lines": 17, "path": "/class_one_ex_6.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "#star = \"*\"\n#star_range = []\n\n#for var in range(1,6):\n#\tstar_range.append(star * var)\n\n#for var in star_range:\n#\tprint(var)\n\n# for var in star_range[-2::-1]:\n#\tprint(var)\n\nfor var in range (6):\n\tprint(var * \"*\")\n\nfor var in range(6,0,-1):\n\tprint(var * \"*\")\n\n\n\n\n\t\n\n" }, { "alpha_fraction": 0.4871794879436493, "alphanum_fraction": 0.5333333611488342, "avg_line_length": 17.636363983154297, "blob_id": "add91bda6a97f66c4b10deee8f7afae2fa8bd3fe", "content_id": "78f624408d6a22997f96bab71cc149fc9aa0e921", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 195, "license_type": "no_license", "max_line_length": 25, "num_lines": 11, "path": "/class_one_ex_8.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "# start_list = []\n\n# for var in range(0,11):\n# \tstart_list.append(var)\n# \tprint(var, end = ' ')\n# print(\"\\n\")\n\nfor i in range(1,11):\n\tfor w in range (1,11):\n\t\tprint(i * w, end=\" \")\n\tprint(\"\\n\")\t\n" }, { "alpha_fraction": 0.6240000128746033, "alphanum_fraction": 0.6240000128746033, "avg_line_length": 24.100000381469727, "blob_id": "f219ee1b79f735d33982652e74528766cab62c86", "content_id": "7a916844245ff07d03d5980810b53eb236cad1c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 250, "license_type": "no_license", "max_line_length": 41, "num_lines": 10, "path": "/class_one_ex_5.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "letter = input(\"Please enter a letter: \")\nletter = letter.lower()\nvowel = [\"a\", \"e\", \"i\", \"o\", \"u\"]\n\nif letter in vowel:\n\tprint(letter, \"is a vowel.\")\nelif letter == \"y\":\n\tprint(letter, \"is sometimes a 
vowel.\")\nelse:\n\tprint(letter, \"is a consonant.\")" }, { "alpha_fraction": 0.4956217110157013, "alphanum_fraction": 0.5464097857475281, "avg_line_length": 18.724138259887695, "blob_id": "9e896addc3904408f08599cb886b1543e8afd439", "content_id": "f965b723b3199cc59ec0620f33c2c1ba8df2bb67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 571, "license_type": "no_license", "max_line_length": 48, "num_lines": 29, "path": "/list_comprehensions.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "# old_list = [1,2,3,4,2,1,1]\n# new_list = [i for i in old_list if i==1]\n# print(new_list)\n\n# def cond(i):\n# \tif i % 3 == 0 and i % 5 == 0:\n# \t\treturn \"FizzBuzz\"\n# \telif i % 3 == 0:\n# \t\treturn \"Fizz\"\n# \telif i % 5 == 0:\n# \t\treturn \"Buzz\"\n# \telse:\n# \t\treturn i\n\n# new_list = [cond(i) for i in range(1,16)]\n\n# print(new_list)\n\ndef odd_or_even(i):\n\tif i % 10 == 0:\n\t\treturn \"{} is a Multiple of 10\".format(i)\n\telif i % 2 == 0:\n\t\treturn \"{} Is Even\".format(i)\n\telse:\n\t\treturn \"{} Is Odd\".format(i)\n\nnew_list = [odd_or_even(i) for i in range(1,31)]\nfor i in new_list:\n\tprint(i)" }, { "alpha_fraction": 0.634765625, "alphanum_fraction": 0.64453125, "avg_line_length": 23.428571701049805, "blob_id": "032172c6ecb7631dd79e52fcaaf8fb7c52b967b4", "content_id": "9264d44eded7c4cb45e66e878293f852c6f39606", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 512, "license_type": "no_license", "max_line_length": 50, "num_lines": 21, "path": "/if_else.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "# weather = input(\"What is the weather outside? \")\n# weather = weather.lower()\n\n# if weather == \"sunny\":\n# \tprint(\"Wear sunglasses!\")\n# elif weather == \"rainy\":\n# \tprint(\"Please take your umbrella.\")\n# elif weather == \"chilly\":\n# \tprint(\"Please take a sweater.\")\n# else:\n# \tprint(\"Have a great day!\")\n\n\nage = int(input(\"Please enter your age: \"))\n\nif age < 21 and age > 0:\n\tprint(\"Sorry, you are too young to enter.\")\nelif age >= 21:\n\tprint(\"Welcome to the club!\")\nelse:\n\tprint(\"The age you entered is invalid.\")" }, { "alpha_fraction": 0.7621247172355652, "alphanum_fraction": 0.7806004881858826, "avg_line_length": 47.22222137451172, "blob_id": "76d9d3979c0d9f28ef82b2a95e949cebfc6d47a3", "content_id": "848d4654df01dc476a665c77642a911196f01d4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 433, "license_type": "no_license", "max_line_length": 77, "num_lines": 9, "path": "/class_one_ex_4.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "temperature_celsius = float(input(\"What is the temperature Celsius? \"))\ntemperature_fahrenheit = temperature_celsius * 9/5 + 32\n\nif temperature_fahrenheit > 90:\n\tprint(temperature_fahrenheit, \"degrees fahrenheit. There is a heat warning\")\nelif temperature_fahrenheit < 30:\n\tprint(temperature_fahrenheit, \"degrees fahrenheit. There is a cold warning\")\nelse:\n\tprint(temperature_fahrenheit, \"degrees fahrenheit. 
It is a beautiful day!\")" }, { "alpha_fraction": 0.3970588147640228, "alphanum_fraction": 0.43382352590560913, "avg_line_length": 10.166666984558105, "blob_id": "0f9b783d05b37f0b59aab5c5f08a15976d31bd0e", "content_id": "14a924b531daae490d8770c361ef32289b8dcf05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 136, "license_type": "no_license", "max_line_length": 22, "num_lines": 12, "path": "/dictionaries.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "d = {}\nword = \"applleeee\"\ncnt = 0\n\nfor i in word:\n\td[i] = d.get(i,1) + 1\n\n# \tif i in d:\n# \t\td[i] += 1\n# \telse:\n# \t\td[i] = 1\t\nprint(d)\n\n\t" }, { "alpha_fraction": 0.5138248801231384, "alphanum_fraction": 0.5276497602462769, "avg_line_length": 13.5, "blob_id": "ab1bf19c23a8892aa0862a7e6609ff3d4563262f", "content_id": "ba9130f94fd70326289bab8d15ddba5aec124e46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 434, "license_type": "no_license", "max_line_length": 79, "num_lines": 30, "path": "/functions.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "# def add(a,b):\n# \ttotal = a + b\n# \treturn total\n\n# # print(add(9,1))\n\n# def sub(a,b):\n# \ttotal = a - b\n# \treturn total\n\n# def main(a,b):\n# \tx = add(a,b)\n# \ts = sub(a,b)\n# \tprint(\"I am adding {} and {} and sub the results {} and {}\".format(a,b,x,s))\n\n# main(6,7)\n\n\ndef triangles(n):\n\t'''\n\tThis makes triangles of width n\n\t'''\n\n\n\tfor i in range(n):\n\t\tprint(i * \"*\")\n\tfor i in range(n,0,-1):\n\t\tprint(i * \"*\")\n\ntriangles(int(input(\">\")))" }, { "alpha_fraction": 0.6555891036987305, "alphanum_fraction": 0.6676737070083618, "avg_line_length": 22.714284896850586, "blob_id": "3536bac4e72d24869a959a04f2b8d9d6146d1661", "content_id": "94124127c2eba7a7a59036ca0b13ba8cad74a4d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 331, "license_type": "no_license", "max_line_length": 55, "num_lines": 14, "path": "/tax_and_tip_calculator.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "cost_of_meal = float(input(\"How much was your meal? 
\"))\n# print(\"Before\", type(cost_of_meal))\n# cost_of_meal = float(cost_of_meal)\n# print(\"After\", type(cost_of_meal))\n\ntax_rate = .08\ntip = .18\n\ntax_total = cost_of_meal * tax_rate\ntip_total = cost_of_meal * tip\n\nmeal_total = cost_of_meal + tax_total + tip_total\n\nprint(meal_total)" }, { "alpha_fraction": 0.6399999856948853, "alphanum_fraction": 0.6399999856948853, "avg_line_length": 19.18181800842285, "blob_id": "360b69496f76f9ce46cde2fd15f16746768b4868", "content_id": "68beb95ba1fa0520910c60f0ed1023fab40d2828", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 225, "license_type": "no_license", "max_line_length": 43, "num_lines": 11, "path": "/duplicates.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "word_list = []\nflag = True\n\nwhile flag:\n\tuser_word = input(\"Please enter a word: \")\n\tif user_word == \"quit\":\n\t\tflag = False\n\telif user_word not in word_list:\t\n\t\tword_list.append(user_word)\n for i in word_list:\n\t\t\tprint(i)\n\n\n\t" }, { "alpha_fraction": 0.5755724906921387, "alphanum_fraction": 0.5877862572669983, "avg_line_length": 18.454545974731445, "blob_id": "dfb51c4118736ffd4a3c32a59fe2f2230860ce38", "content_id": "f054d603ddf66597f830d0d2b74fc08bbd5b4426", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 655, "license_type": "no_license", "max_line_length": 59, "num_lines": 33, "path": "/text_files.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "dic = {}\nfakelist = [\"wonder.\", \"help!\", \".nope\"]\n\ndef split_char(word):\n\tmy_string = [\"?\", \".\", \",\", \"!\"]\n\n\tfor f in word:\n\t\tif f in my_string:\n\t\t\t#print(\"f\", f)\n\t\t\t#print(\"word\", word)\n\t\t\tword = word.strip(f)\n\t\t\t#print(\"final\",word)\n\treturn word\n\ndef unpacking(tup):\n\treturn tup[1]\n\ndef counter(data):\n\tfor word in data:\n\t\tword = split_char(word)\n\t\tdic[word] = dic.get(word,0) + 1\n\ndef main():\n\twith open('article.txt','r', encoding=\"utf-8\") as article:\n\t\tdata = article.read().lower().split()\n\t\t#print(data[:30])\n\t\tcounting = counter(data)\n\n\t\talist = list(dic.items())\n\t\talist.sort(key=unpacking, reverse=True)\n\t\tprint(alist[:20])\n\nmain()\n\n\n\n\t\t\n\t\t\n\n\n\n\n" }, { "alpha_fraction": 0.5880597233772278, "alphanum_fraction": 0.6059701442718506, "avg_line_length": 17.61111068725586, "blob_id": "cd20c2546958ef06d788d0bdbb5cb0cde45101c4", "content_id": "8f971a2edfcf9ce1e09053c545bcf2ffef67b93f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 335, "license_type": "no_license", "max_line_length": 51, "num_lines": 18, "path": "/class_two_ex_x2.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "num = int(input(\"Enter the first number number: \"))\nsize_list = []\nflag = True\n\nwhile flag:\n\tnum2 = int(input(\"Enter the next number: \"))\n\tif num2 == 0:\n\t\tflag = False\n\telif num2 == num:\n\t\tsize = \"Same\"\n\telif num2 > num:\n\t\tsize = \"Up\"\n\telse:\n\t\tsize = \"Down\"\n\tsize_list.append(size)\n\tnum = num2\nfor i in size_list:\n\tprint(i, end = \" \")\n" }, { "alpha_fraction": 0.5597609281539917, "alphanum_fraction": 0.6015936136245728, "avg_line_length": 16.85714340209961, "blob_id": "94a14b365b7b746a79f2f3de836c6960651c63fe", "content_id": "0c18c10d4c8d0a5967749188dd17ee2f3918c3d2", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 502, "license_type": "no_license", "max_line_length": 65, "num_lines": 28, "path": "/sets.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "# a = [1,2,3,3,2,1,5,6,5,5,5]\n# b = [1,4,6,6,7,3,9,0,0,0]\n# new_set_a = set(a)\n# new_set_b = set(b)\n# print(new_set_a, new_set_b)\n\n# intersect = new_set_a.intersection(new_set_b)\n# print(intersect)\n\nnew_set = set()\n\ndef add_to_set(word):\n\tnew_set.add(word)\n\tif word == \"quit\":\n\t\tflag = False\n\telse: \n\t\tflag = True\n\treturn flag\n\ndef main():\n\tflag = True\n\n\twhile flag:\n\t\tword = input(\"Add any word: \")\n\t\tflag = add_to_set(word)\n\nmain()\nprint(\"These are all of the unique words you entered: \", new_set)\n\n\n" }, { "alpha_fraction": 0.47093021869659424, "alphanum_fraction": 0.5406976938247681, "avg_line_length": 18.11111068725586, "blob_id": "807f3c6abec5cdb7584d500e7265de32a3537604", "content_id": "cae15f4135c0690b19e2e9438f55581ce3d60720", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 172, "license_type": "no_license", "max_line_length": 34, "num_lines": 9, "path": "/class_one_ex_7.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "for var in range(1,101):\n\tif var % 3 == 0 and var % 5 == 0:\n\t\tprint(\"FizzBuzz\")\n\telif var % 5 == 0:\n\t\tprint(\"Buzz\")\n\telif var % 3 == 0:\n\t\tprint(\"Fizz\")\n\telse:\n\t\tprint(var)\n" }, { "alpha_fraction": 0.6446540951728821, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 14.699999809265137, "blob_id": "0ed64b54bc48f110c8402c5845b4217482b63d31", "content_id": "aacd4edad13eecd4d485fdb67677925500ed3527", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 318, "license_type": "no_license", "max_line_length": 33, "num_lines": 20, "path": "/enumerate.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "alist = [1,2,3,4,5]\nelist = list(enumerate(alist))\n\nprint(alist)\nprint(elist)\n\nprint(\"change the count:\")\ndlist = list(enumerate(alist,10))\nprint(dlist)\n\n\nprint(\"the range way:\")\nfor i in range(len(alist)):\n\tprint(alist[i],end=\"\")\n\nprint()\n\nprint(\"enumerate way\")\nfor count,i in enumerate(alist):\n\tprint(i,end=\"\")\n\n\n\n\n" }, { "alpha_fraction": 0.6040955781936646, "alphanum_fraction": 0.6092150211334229, "avg_line_length": 23.45833396911621, "blob_id": "397d10c29e1d17783d385be9ce8f492d5e295c74", "content_id": "9a5c517f6b4ebd88ca8f28cf90212357f6583cfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 586, "license_type": "no_license", "max_line_length": 65, "num_lines": 24, "path": "/lists.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "# name = \"David\"\n\n# time = int(input(\"What time is it? \"))\n# friends = [\"Mark\", \"Emily\", \"John\"]\n# friends = \"David\"\n\n# for name in friends:\n\t# if time < 7:\n\t# \tprint(\"Hello,\",name.upper(), \"Party starts at 7.\")\n\t# elif time == 7:\n\t# \tprint(\"Hello,\", name, \"you are right on time.\")\n\t# else:\n\t# \tprint(name, \"You are late.\")\n\n\nshopping_list = [\"bananas\", \"Oranges\", \"Apples\", \"Coconut Water\"]\nnew_item = input(\"What else should I get? 
\")\nshopping_list.append(new_item)\n\nfor name in shopping_list:\n\tif name == \"bananas\":\n\t\tprint(\"Hmm, I like\", name)\n\telse:\n\t\tprint(name, \"is also fine.\")" }, { "alpha_fraction": 0.517808198928833, "alphanum_fraction": 0.5424657464027405, "avg_line_length": 10.806451797485352, "blob_id": "a259f76c1261de7f1b06ae256070a340b8c272d9", "content_id": "70473a24ece79de5bca709277e1a39943db33ee5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 365, "license_type": "no_license", "max_line_length": 36, "num_lines": 31, "path": "/while_loops.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "food = [\"apple\", \"banana\", \"tomato\"]\n\nflag = True\nindex = 0\n\nwhile flag:\n\tprint(food[index])\n\tindex += 1\n\tif food[index][0] == \"t\":\n\t\tflag = False\n\telif index == len(food)-1:\n\t\tflag = False\n\t\n\n\n\n\n\n\n# value = 0\n# flag = True\n\n# while flag:\n# \tprint(value)\n# \tvalue += 1\n# \tif value == 3:\n# \t\tflag = False\n\n# while value != 7:\n# \tprint(value, end = ' ')\n# \tvalue += 1" }, { "alpha_fraction": 0.6704545617103577, "alphanum_fraction": 0.6704545617103577, "avg_line_length": 28.5, "blob_id": "7bee18909b07949eca6f3464878a6c335ec79a61", "content_id": "5747510244a7f0e4d6267109ef4318234d41f41a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 176, "license_type": "no_license", "max_line_length": 55, "num_lines": 6, "path": "/area_calculator.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "width = float(input(\"How wide is your room? \"))\nlength = float(input(\"How long is your room? \"))\n\narea = width * length\n\nprint(\"The area of your room is\", area, \"square feet.\")" }, { "alpha_fraction": 0.5789473652839661, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 23, "blob_id": "8cb741e0a0b55808bb0fee27e092898eafb69180", "content_id": "e6b75898f5340b99ee68730c5002b11aad73ab89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95, "license_type": "no_license", "max_line_length": 49, "num_lines": 4, "path": "/class_one_ex_3.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "word_list = [\"python\", \"star\", \"green\", \"yellow\"]\n\nfor word in word_list:\n\t print(word[-1::-1])" }, { "alpha_fraction": 0.6217948794364929, "alphanum_fraction": 0.6346153616905212, "avg_line_length": 25.16666603088379, "blob_id": "a5f80cc1fcd31a5dc1c765c7d03ddcc5ef17b98d", "content_id": "00e5a129f93361687682f9b25e0eff8ed1889abb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 156, "license_type": "no_license", "max_line_length": 48, "num_lines": 6, "path": "/even_or_odd.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "number = int(input(\"Please enter an integer: \"))\n\nif number % 2 == 0:\n\tprint(\"The number\", number, \"is even.\")\nelse:\n\tprint(\"The number\", number, \"is odd.\")" }, { "alpha_fraction": 0.7035176157951355, "alphanum_fraction": 0.7075377106666565, "avg_line_length": 25.91891860961914, "blob_id": "1d548ee0ee2822c26a630456102cd85ab427e189", "content_id": "dee3d8b361c712076d9f28705cd3ece0d6b3a19b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 995, "license_type": "no_license", "max_line_length": 84, "num_lines": 37, "path": 
"/classes.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "class Customer:\n\tdef __init__(self, name, balance=0.0):\n\t\tself.name = name\n\t\tself.balance = balance\n\n\tdef withdraw(self, amount):\n\t\tif amount > self.balance:\n\t\t\traise RuntimeErro('Amount greater than balance')\n\t\tself.balance -= amount\n\t\treturn self.balance\n\n\tdef deposit(self, amount):\n\t\tself.balance += amount\n\t\treturn self.balance\n\ndef new_account():\n\tnew_self = input(\"Please enter a user id: \")\n\tnew_name = input(\"Please enter your name: \")\n\tnew_balance = input(\"Please enter the beginning balance: \")\t\n\n\tnew_self = Customer(new_name, new_balance)\n\treturn new_self.name, new_self.balance\n\ndef deposit_to_account():\n\tuserid = input(\"what is your user id? \")\n\tdep_amount = int(input(\"How much to deposit? \"))\n\tprint(\"your old balance was $\", userid.balance)\n\tuserid.deposit(dep_amount)\n\tprint(\"your new balance is $\", userid.balance)\n\treturn userid.balance\n\n\n\nnewaccount = new_account()\nprint(\"your name is\", newaccount[0], \"and your beginning balance is\", newaccount[1])\n\ndeposit_to_account()" }, { "alpha_fraction": 0.5692199468612671, "alphanum_fraction": 0.606465220451355, "avg_line_length": 21.203125, "blob_id": "2d176550a75736759827b5073bbf5ea2874fb42a", "content_id": "0cc9cc26234a93de8ff6a003914c92c2b59c17e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1423, "license_type": "no_license", "max_line_length": 40, "num_lines": 64, "path": "/credit_card.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "class Credit_card:\n\tdef __init__(self, number):\n\t\tself.number = number\n\t\tself.length = self.check_length()\n\t\tself.type = self.determine_card_type()\n\t\tself.luhn = self.luhn_check()\n\n\tdef check_length(self):\n\t\tif len(self.number) == 16 or 15:\n\t\t\tself.length = True\t\n\t\telse:\n\t\t\tprint(\"card is not valid\")\n\t\t\tself.length = False\n\t\treturn self.length\n\n\tdef determine_card_type(self):\n\t\tvisa = [\"4\"]\n\t\tmast = [\"51\",\"52\",\"53\",\"54\",\"55\"]\n\t\tamex = [\"34\",\"37\"]\n\t\tdisc = [\"6011\"]\n\t\t\t\n\t\tif self.number[0] in visa:\n\t\t\tself.type = \"visa\"\n\t\telif self.number[:2] in mast:\n\t\t\tself.type = \"mast\"\n\t\telif self.number[:2] in amex:\n\t\t\tself.type = \"amex\"\n\t\telif self.number[:4] in disc:\n\t\t\tself.type = \"disc\"\n\t\telse:\n\t\t\tself.type = \"not valid\"\n\t\treturn self.type\n\n\tdef luhn_check(self):\n\t\treverse_number = self.number[::-1]\n\t\tluhn_modulo = 0\n\t\tif self.type != \"not valid\":\n\t\t\tfor i in reverse_number:\n\t\t\t\ti = int(i)\n\t\t\t\t# print(\"modulo\", luhn_modulo)\n\t\t\t\tif i % 2 != 0:\n\t\t\t\t\tdouble_it = i * 2\n\t\t\t\t\t# print(\"double it\", double_it)\n\t\t\t\t\tif double_it > 9:\n\t\t\t\t\t\tdouble_it -= 9\n\t\t\t\t\tluhn_modulo += double_it\n\t\t\t\telse:\n\t\t\t\t\tluhn_modulo += i\n\t\tif luhn_modulo % 10 == 0:\n\t\t\tprint (\"card is valid\")\n\t\t\tself.luhn = True\n\t\t\treturn self.luhn\n\t\telse:\n\t\t\tself.luhn = False\n\t\t\tprint(\"card not valid\")\n\t\t\treturn self.luhn\n\n\n\nuser = Credit_card(\"5515460934365316\")\nuser.check_length()\nuser.determine_card_type()\nuser.luhn_check()\nprint(user.number, user.type)\n\n\n" }, { "alpha_fraction": 0.5330396294593811, "alphanum_fraction": 0.6035242080688477, "avg_line_length": 21.5, "blob_id": "cd2e94b695a7f8b4a1f0ba96764f8b20ea7cffa5", "content_id": 
"e7fe0d4e5de939a7b773f22d16a3b04bd605fab2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 227, "license_type": "no_license", "max_line_length": 58, "num_lines": 10, "path": "/class_one_ex_1.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "a_list = [1,2,55,1,9,4,56,2,1,8]\n\n# print(\"There are\", a_list.count(1), \"ones in the list.\")\n\ntotal_ones = 0\n\nfor var in a_list:\n\tif var == 1:\n\t\ttotal_ones = total_ones + 1\nprint(\"There are\", total_ones, \"ones in the list.\")\n\t\n" }, { "alpha_fraction": 0.6952381134033203, "alphanum_fraction": 0.699999988079071, "avg_line_length": 26.032258987426758, "blob_id": "230858f0d4c80f4cd8d8dcafe051564947863569", "content_id": "3bfbcb83ecde0c2eda4ce647d09f8f58fe200a16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 840, "license_type": "no_license", "max_line_length": 76, "num_lines": 31, "path": "/class_two_ex_2.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "def capitalize_first_letter(user_string):\n\tuser_string = user_string.capitalize()\n\treturn user_string\n\ndef capitalize_new_sentence(user_string):\n\tspecial_char = \".?!\"\n\tstr_for_user = \"\"\n\tfor index in range(0, len(user_string)):\n\t\tif user_string[index] in special_char:\n\t\t\tuser_string = user_string[0:index+2] + user_string[index+2:].capitalize()\n\t\t\tstr_for_user += user_string[index]\n\t\telse:\n\t\t\tstr_for_user += user_string[index]\n\treturn str_for_user\n\ndef capitalize_i(str_for_user):\n\tlow_i = \" i \"\n\tcap_i = \" I \"\n\n\tif low_i in str_for_user:\n\t\t\tnew_string = str_for_user.replace(low_i, cap_i)\n\t\t\tprint(new_string)\n\ndef main():\n\tuser_string = input(\"Please enter something: \")\n\tcapitalized_string = capitalize_first_letter(user_string)\n\tcapitalized_all = capitalize_new_sentence(capitalized_string)\n\tcapitalize_i(capitalized_all)\n\n\nmain()\n\n\n" }, { "alpha_fraction": 0.5228070020675659, "alphanum_fraction": 0.621052622795105, "avg_line_length": 19.35714340209961, "blob_id": "bfd60ae52748539b5b9e3e6b16897543c9a3a8d0", "content_id": "9ca109321db303c98eef605d81a02b5fd6a90eac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 285, "license_type": "no_license", "max_line_length": 49, "num_lines": 14, "path": "/bubble_swap.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "alist = [4,9,7,101,44,2,11,43,65,23,2,4,99,34,21]\nlength = len(alist)\n\ndef swap_numbers(alist):\n\t\n\tfor a in range(length-1):\n\t\tfor a in range(length-1):\n\t\t\tb = a + 1\n\t\t\tif alist[a] > alist[b]:\n\t\t\t\talist[a], alist[b] = alist[b],alist[a]\n\treturn alist\t\n\nswap_numbers(alist)\nprint(alist)\n" }, { "alpha_fraction": 0.6103739738464355, "alphanum_fraction": 0.6550060510635376, "avg_line_length": 24.121212005615234, "blob_id": "af8cc41ca28295ce04e6a7bbebf8174c2970f8b5", "content_id": "365c23871aafdf2662862f6465dec0ad84bce3b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 829, "license_type": "no_license", "max_line_length": 65, "num_lines": 33, "path": "/insertion_sort.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "# alist = [1,9,4,7,2,2]\n# sorted_portion = [alist[0]]\n# unsorted_portion = alist[1:]\n\n# for s in range(0,len(alist)-1):\n# \tindex_to_put_it = 0\n# \tfor i in 
range(len(sorted_portion)-1,-1,-1):\n# \t\tif unsorted_portion[0] > sorted_portion[i]:\n# \t\t\tindex_to_put_it = i+1\n\n# \t\t\tbreak\n\n# \tsorted_portion.insert(index_to_put_it,unsorted_portion.pop(0))\n\t\n# # print(\"sorted_portion\",sorted_portion)\n\n# print(\"final sorted_portion\",sorted_portion)\n# \t#print(\"unsorted_portion\",unsorted_portion)\n\n\n\nalist = [4,9,7,101,44,2]\nunsorted_list = alist[1:]\nsorted_list = [alist[0]]\nfor s in range(0,len(alist)-1):\n\tindex_to_put_it = 0\n\tfor i in range(len(sorted_list)-1,-1,-1):\n\t\tif unsorted_list[0] > sorted_list[i]:\n\t\t\tindex_to_put_it = i+1\n\t\t\tbreak\n\tsorted_list.insert(index_to_put_it,unsorted_list.pop(0))\n\nprint(unsorted_list,sorted_list)\n" }, { "alpha_fraction": 0.5514285564422607, "alphanum_fraction": 0.5600000023841858, "avg_line_length": 13.583333015441895, "blob_id": "d8b5efaa7fff0a558507d7f3d79209b4348bc765", "content_id": "6adbd4058cfa98b0370f6d1dfb18b3b45e42c178", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 350, "license_type": "no_license", "max_line_length": 33, "num_lines": 24, "path": "/class_three_ex_2.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "def main():\n\tvalue = input(\"enter a word: \")\n\treverse = reverse_look_up(value)\n\ndef reverse_look_up(value):\n\tkeys = list(d.items())\n\tfor i in keys:\n\t\tif value == i[1]:\n\t\t\tprint(i[0])\n\ndef unpacking(tup):\n\treturn tup[0]\n\n\nd = {}\n\nd[\"le\"] = \"the\"\nd[\"chat\"] = \"cat\"\nd[\"livre\"] = \"book\"\nd[\"pomme\"] = \"apple\"\nd[\"wrong\"] = \"the\"\nd[\"again\"] = \"the\"\n\nmain()\n" }, { "alpha_fraction": 0.6077057719230652, "alphanum_fraction": 0.6199649572372437, "avg_line_length": 16.84375, "blob_id": "6f005d2bc97e716107c50bcfd4d8e99a475cf687", "content_id": "07f4fdf99578c7f50ab30c4078eb0e5832955fc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 571, "license_type": "no_license", "max_line_length": 43, "num_lines": 32, "path": "/pig_latin.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "def is_a_vowel(letter):\n\tmy_string = \"aeiou\"\n\n\tif letter in my_string:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef assess(word):\n\tif is_a_vowel(word[0]):\n\t\tpig_word = word + \"yay\"\n\t\t# return new_word\n\telse:\n\t\tfor i in word:\n\t\t\tif is_a_vowel(i):\n\t\t\t\tlog = word.index(i)\n\t\t\t\tnew_word1 = word[log:]\n\t\t\t\tnew_word2 = word[0:log]\n\t\t\t\tpig_word = new_word1 + new_word2 + \"ay\"\n\t\t\t\t# return new_word\n\t\t\t\tbreak\n\treturn pig_word\n\ndef validation():\n\tword = input(\"Please enter a word: \")\n\tif word.isalpha() and len(word) > 1:\n\t\tprint(assess(word))\n\telse:\n\t\tprint(\"error\")\n\n\nvalidation()\n" }, { "alpha_fraction": 0.6305084824562073, "alphanum_fraction": 0.6372881531715393, "avg_line_length": 21.69230842590332, "blob_id": "adf169a0ed1c99210775fcdc7b41a11cc41d705b", "content_id": "c637b6c19b3c09b707cc4e447d9eb25efe4de335", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 295, "license_type": "no_license", "max_line_length": 53, "num_lines": 13, "path": "/class_one_ex_10.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "user_word = input(\"Please enter a word to encrypt: \")\nshift = int(input(\"Please enter a shift amount: \"))\nshift = shift % 26\nencrypt = []\n\nfor i in user_word:\n\tif i.isalpha() == 
True:\n\t\tif i.islower():\t# wrap within the alphabet instead of running past 'z'\n\t\t\ti = (ord(i) - ord('a') + shift) % 26 + ord('a')\n\t\telse:\n\t\t\ti = (ord(i) - ord('A') + shift) % 26 + ord('A')\n\t\tencrypt.append(chr(i))\n\telse:\n\t\tencrypt.append(i)\nfor i in encrypt:\n\tprint(i, end = '')\n" }, { "alpha_fraction": 0.6499999761581421, "alphanum_fraction": 0.6653845906257629, "avg_line_length": 25.100000381469727, "blob_id": "8ac93df1a2d2174e5f3b4263ce4ebce310b72c02", "content_id": "6d342d3f903f809cbd79a3a2ec73f789e35bacc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 260, "license_type": "no_license", "max_line_length": 63, "num_lines": 10, "path": "/class_one_ex_2.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "new_string = input(\"Please enter something: \")\nletters = 0\nnumbers = 0\n\nfor var in new_string:\n\tif var.isalpha() == True:\n\t\tletters = letters + 1\n\tif var.isdigit() == True:\n\t\tnumbers = numbers + 1\nprint(\"There are\", letters, \"letters and\", numbers, \"numbers.\")" }, { "alpha_fraction": 0.5396825671195984, "alphanum_fraction": 0.5608465671539307, "avg_line_length": 16.272727966308594, "blob_id": "ef653549e850378c931d8b2b7e955fa9893573d4", "content_id": "a147da5761fb6eb0b247e5f6d30ff13ceefb7111", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 189, "license_type": "no_license", "max_line_length": 33, "num_lines": 11, "path": "/class_three_ex_3.py", "repo_name": "dyarnall/python_immersive", "src_encoding": "UTF-8", "text": "def store_string():\n\tstring = input(\"Enter string: \")\n\td = {}\n\n\tfor i in string:\n\t\td[i] = d.get(i,0) + 1\n\toutput = list(d.items())\n\tfor x in output:\n\t\tprint(x[0], \",\",x[1])\n\nstore_string()" } ]
36
daniellapombo/ML379
https://github.com/daniellapombo/ML379
93a8194557b60aa8c4553aa89076ebe22923ec6f
7ded5d65dc861a4a341f91761623565731fc5ddd
79baa38f354c19e2a6ac50f52053952984395426
refs/heads/master
2022-03-28T18:06:26.003859
2020-01-13T18:41:54
2020-01-13T18:41:54
205613130
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5853211283683777, "alphanum_fraction": 0.5913761258125305, "avg_line_length": 41.8870964050293, "blob_id": "1ba243addc6318bde70929decdc0dc8201693f54", "content_id": "38b879656d16f223bc3dffd262936ce5b98c7ad5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5450, "license_type": "no_license", "max_line_length": 163, "num_lines": 124, "path": "/TitanicDataProcessing.py", "repo_name": "daniellapombo/ML379", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Sep 21 12:41:13 2019\r\n\r\n@author: danie\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pan\r\nimport seaborn as sns\r\n\r\nclass TitanicData():\r\n def __init__(self, fileName, runStats, runGraphs):\r\n self.fileName = fileName #File name as string\r\n self.runStats = runStats #Boolean indicates if would like to generate statistical table\r\n self.runGraphs = runGraphs #Boolean indicates if would liek to generate graphs/diagrams\r\n \r\n self.import_clean_TitanicData() #Exectues uploading & cleaning of the data\r\n \r\n if self.runStats:#Execute stats function if True\r\n self.stats()\r\n \r\n if self.runGraphs: #Execute stats function if True\r\n self.scatterPlots()\r\n \r\n \r\n def import_clean_TitanicData(self): #returns tuple (np.array(train['Sex']), np.array(train_labels), np.array(test['Sex']), np.array(test_labels))\r\n ti_file =pan.read_csv(self.fileName)\r\n \r\n \"\"\"col = list(ti_file.columns) \r\n ['PassengerId', 'Survived', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp', \r\n 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked']\"\"\"\r\n \r\n #self.tiDat = ti_file.iloc[:, [0,1,2,4,5,6,7,9]] #Keeping only columns/features required\r\n #New data frame has columns ['PassengerId', 'Survived', 'Pclass','Sex', 'Age', 'SibSp', 'Parch', 'Fare']\r\n self.tiDat = ti_file[['PassengerId', 'Survived', 'Pclass','Sex', 'Age', 'SibSp', 'Parch', 'Fare']]\r\n lbls = [] #initalizing list to store label values (survival information)\r\n \r\n convert = self.tiDat.iloc[:,1:7] #Creates new data frame w/ features ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']\r\n\r\n #Change strings to binary integer counterparts\r\n #FEMALE IS 0 and MALE IS 1\r\n \r\n convert.loc[convert['Sex'] == 'male', 'Sex'] = 0\r\n convert.loc[convert['Sex'] == 'female', 'Sex'] = 1\r\n \r\n \"\"\"colP = list(convert.columns)\r\n ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch']\"\"\"\r\n \r\n #Convert nan values to mean value of given column\r\n \r\n #Find mean value of each column\r\n class_avg = convert[\"Pclass\"].mean()\r\n sex_avg = convert['Sex'].mean()\r\n age_avg = convert['Age'].mean()\r\n sib_avg = convert['SibSp'].mean()\r\n parch_avg = convert['Parch'].mean()\r\n\r\n \r\n #Replace nan value w/ mean values\r\n convert[\"Pclass\"].fillna(class_avg, inplace = True)\r\n convert[\"Sex\"].fillna(sex_avg, inplace = True)\r\n convert[\"Age\"].fillna(age_avg, inplace = True)\r\n convert[\"SibSp\"].fillna(sib_avg, inplace = True)\r\n convert[\"Parch\"].fillna(parch_avg, inplace = True)\r\n \r\n \r\n self.size = convert.count().max() #Find the maximum length of column\r\n \r\n wholeData = convert #Storage cleaned and corrected all of training data w/out labels\r\n \r\n \r\n np.random.shuffle(wholeData.values) #Makes the selection of samples random for creating training data set\r\n #print(wholeData)\r\n \r\n train = [] #Initalize list that will store only a fragement of the training data\r\n\r\n train = 
wholeData.sample(frac=0.7)\r\n\r\n test = wholeData.loc[~wholeData.index.isin(train.index)] #Take the rest of wholeData that was Not used in train and uses those unuses values to create test\r\n \r\n \r\n #print(train.columns.values) #Prints the values as nparray format\r\n \r\n train_labels = train[\"Survived\"] #Extract training labels and create new np.dataFrame train_labels\r\n \r\n train.drop(\"Survived\", inplace=True, axis=1)\r\n \r\n test_labels = test[\"Survived\"] #Extract testing labels and creates new np.dataFrame test_labels\r\n \r\n test.drop(\"Survived\" , inplace=True, axis=1)\r\n \r\n return(np.array(train['Sex']), np.array(train_labels), np.array(test['Sex']), np.array(test_labels))\r\n \r\n def stats(self): #Generates cross tabulated table \r\n dat = self.tiDat.groupby(['Survived', 'Pclass', 'Sex'])['Survived'].count()\r\n #Generates a cross tabulated table based on the survival of passengers\r\n print(dat) \r\n \r\n tisum = self.tiDat['Survived'].sum() \r\n print(\"Total passengers survived\", tisum)\r\n \r\n def scatterPlots(self): #Generates various statistical diagrams\r\n \r\n print(\"1 indicates survived, 0 indicates death\")\r\n \r\n #Generates categorical scatter plot (looks like bar chart and scatter plot hybrid)\r\n clas = sns.catplot(x='Pclass', y=\"PassengerId\", hue = 'Survived', data=self.tiDat)\r\n clas\r\n \r\n #Generates categorical scatter plot (looks like bar chart and scatter plot hybrid)\r\n gen = sns.catplot(x='Sex', y=\"PassengerId\", hue = 'Survived', data=self.tiDat)\r\n gen\r\n \r\n #Generates categorical scatter plot (looks like bar chart and scatter plot hybrid)\r\n gen_age = gen = sns.catplot(x='Sex', y=\"Age\", hue = 'Survived', data=self.tiDat)\r\n gen_age\r\n \r\n clas_age = sns.catplot(x='Pclass', y=\"Age\", hue = 'Survived', data=self.tiDat)\r\n clas_age\r\n \r\n #Generates categorical scatter plot w/ boundry lines\r\n age = sns.lmplot(x=\"PassengerId\", y=\"Age\", hue = 'Survived', data=self.tiDat)\r\n age\r\n " }, { "alpha_fraction": 0.5677486062049866, "alphanum_fraction": 0.5762407779693604, "avg_line_length": 41.26530456542969, "blob_id": "551352f73b45b597660819ed21bfbda4beab561a", "content_id": "ccb5679f710d82c0a4aea003a737b10032f4be06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10598, "license_type": "no_license", "max_line_length": 130, "num_lines": 245, "path": "/workingPerceptronHw2-RobertTAHelp.py", "repo_name": "daniellapombo/ML379", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 26 15:36:02 2019\r\n\r\n@author: danie\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pan\r\nimport seaborn as sns\r\n\r\n#Perceptron single layer neural network\r\nclass Perceptron:\r\n def __init__(self, tr_inpt, labels, epoch, Lr):\r\n #Initalize paramters\r\n self.epoch = epoch #Number of iterations through the whole data set\r\n self.tr_inpt = tr_inpt #Training data set w/out labels\r\n self.Lr = Lr #Learning rate\r\n self.labels = labels #Training data labels\r\n print(self.tr_inpt.shape)\r\n if self.tr_inpt.ndim == 1:\r\n self.sz = 1\r\n else:\r\n self.sz = self.tr_inpt.shape[1] #Length of row (number of features per sample)\r\n #.shape returns a tuple and element at index 1 indicates the length of the row\r\n \r\n self.w = self.weights() #Initalizing weights to random numbers\r\n self.fit() #Calling execution of learning algorithm\r\n self.plotErrors() #Plots the errors per epoch to demonstrate 
convergency of data\r\n \r\n\r\n def z_input(self, x): \r\n #generate dot product between w and features x\r\n return np.dot(self.w[1:], x) + self.w[0] #Returns dot product of weights, bias and sample\r\n # return np.dot(np.transpose(self.w),x)\r\n \r\n def weights(self):\r\n self.w = np.random.random(self.sz+1) #Creates a weight vector of size self.sz+1 composed of random variables\r\n return self.w\r\n \r\n def predict(self, z): #Step function:Activation function\r\n if z >= 0.0:\r\n return 1.0\r\n else:\r\n return 0\r\n \r\n def fit(self):\r\n self.updates = [] #Initalize vector to store update number per epoch, the update is dependent on the error\r\n \r\n for m in range(self.epoch):\r\n update_num = 0 #Initialize total update per epoch to 0\r\n for k in range(self.tr_inpt.shape[0]): #Iterates through each row within data set\r\n z = self.z_input(self.tr_inpt[k]) #Net input\r\n prediction = self.predict(z) #Activation function\r\n target = self.labels[k] \r\n error = target - prediction\r\n \r\n dw = self.Lr*error*self.tr_inpt[k] #Value to update the weights by\r\n \r\n #self.w += dw\r\n self.w[1:] += dw #Update the weights\r\n self.w[0] += self.Lr*error #Update the bias\r\n \r\n update_num += int(self.Lr*error != 0.0) #Increments the updates \r\n \r\n self.updates.append(update_num) #Store the total updates for the epoch\r\n \r\n\r\n \r\n def testIt(self, testDat, testLabels): #Test after train\r\n test_result = [] #Initalize storage of predictions for test data\r\n right = 0 #Initalize number of right predictions \r\n for k in range(testDat.shape[0]): #Iterate through the whole test data set sample by sample\r\n \r\n z = self.z_input(testDat[k]) #Net input\r\n \r\n prediction = int(self.predict(z)) #Step function\r\n \r\n test_result.append(prediction) #Storge the results in vector\r\n \r\n if prediction == testLabels[k]: #Count the number of correct predictions\r\n right += 1\r\n \r\n return (right/len(test_result))*100 #Returns the accuracy of the perpectron on the training data set\r\n \r\n def plotErrors(self):\r\n errorFig = plt.figure() #Initalizes new plot\r\n plt.title(\"Number of updates vs Epochs\") \r\n plt.plot(range(1,len(self.updates)+1), self.updates) #range(1,len(self.updates)+1) is the epochs\r\n #x = epochs, y = self.updates (number of updates per epoch)\r\n plt.xlabel('Epochs')\r\n plt.ylabel(\"Number of updates\")\r\n plt.show() #Generates/shows the plot\r\n \r\nclass TitanicData():\r\n def __init__(self, fileName, runStats, runGraphs):\r\n self.fileName = fileName #File name as string\r\n self.runStats = runStats #Boolean indicates if would like to generate statistical table\r\n self.runGraphs = runGraphs #Boolean indicates if would liek to generate graphs/diagrams\r\n \r\n self.import_clean_TitanicData() #Exectues uploading & cleaning of the data\r\n \r\n if self.runStats:#Execute stats function if True\r\n self.stats()\r\n \r\n if self.runGraphs: #Execute stats function if True\r\n self.scatterPlots()\r\n \r\n \r\n def import_clean_TitanicData(self):\r\n ti_file =pan.read_csv(self.fileName)\r\n \r\n \"\"\"col = list(ti_file.columns) \r\n ['PassengerId', 'Survived', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp', \r\n 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked']\"\"\"\r\n \r\n #self.tiDat = ti_file.iloc[:, [0,1,2,4,5,6,7,9]] #Keeping only columns/features required\r\n #New data frame has columns ['PassengerId', 'Survived', 'Pclass','Sex', 'Age', 'SibSp', 'Parch', 'Fare']\r\n self.tiDat = ti_file[['PassengerId', 'Survived', 'Pclass','Sex', 'Age', 'SibSp', 'Parch', 
'Fare']]\r\n lbls = [] #initalizing list to store label values (survival information)\r\n \r\n convert = self.tiDat.iloc[:,1:7] #Creates new data frame w/ features ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']\r\n\r\n #Change strings to binary integer counterparts\r\n #FEMALE IS 0 and MALE IS 1\r\n \r\n convert.loc[convert['Sex'] == 'male', 'Sex'] = 0\r\n convert.loc[convert['Sex'] == 'female', 'Sex'] = 1\r\n \r\n \"\"\"colP = list(convert.columns)\r\n ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch']\"\"\"\r\n \r\n #Convert nan values to mean value of given column\r\n \r\n #Find mean value of each column\r\n class_avg = convert[\"Pclass\"].mean()\r\n sex_avg = convert['Sex'].mean()\r\n age_avg = convert['Age'].mean()\r\n sib_avg = convert['SibSp'].mean()\r\n parch_avg = convert['Parch'].mean()\r\n\r\n \r\n #Replace nan value w/ mean values\r\n convert[\"Pclass\"].fillna(class_avg, inplace = True)\r\n convert[\"Sex\"].fillna(sex_avg, inplace = True)\r\n convert[\"Age\"].fillna(age_avg, inplace = True)\r\n convert[\"SibSp\"].fillna(sib_avg, inplace = True)\r\n convert[\"Parch\"].fillna(parch_avg, inplace = True)\r\n \r\n \r\n self.size = convert.count().max() #Find the maximum length of column\r\n \r\n wholeData = convert #Storage cleaned and corrected all of training data w/out labels\r\n \r\n \r\n np.random.shuffle(wholeData.values) #Makes the selection of samples random for creating training data set\r\n #print(wholeData)\r\n \r\n train = [] #Initalize list that will store only a fragement of the training data\r\n\r\n train = wholeData.sample(frac=0.7)\r\n\r\n test = wholeData.loc[~wholeData.index.isin(train.index)]\r\n \r\n print(\"Train samples\", len(train))\r\n print(\"Test samples\", len(test))\r\n \r\n #print(train.columns.values) #Prints the values as nparray format\r\n \r\n train_y = train[\"Survived\"]\r\n \r\n train.drop(\"Survived\", inplace=True, axis=1)\r\n \r\n test_y = test[\"Survived\"]\r\n \r\n test.drop(\"Survived\" , inplace=True, axis=1)\r\n \r\n return(np.array(train['Sex']), np.array(train_y), None , np.array(test['Sex']), np.array(test_y))\r\n \r\n def stats(self):\r\n dat = self.tiDat.groupby(['Survived', 'Pclass', 'Sex'])['Survived'].count()\r\n #Generates a cross tabulated table based on the survival of passengers\r\n print(dat) \r\n \r\n tisum = self.tiDat['Survived'].sum() \r\n print(\"Total passengers survived\", tisum)\r\n \r\n def scatterPlots(self): #Generates various statistical diagrams\r\n \r\n print(\"1 indicates survived, 0 indicates death\")\r\n \r\n #Generates categorical scatter plot (looks like bar chart and scatter plot hybrid)\r\n clas = sns.catplot(x='Pclass', y=\"PassengerId\", hue = 'Survived', data=self.tiDat)\r\n clas\r\n \r\n #Generates categorical scatter plot (looks like bar chart and scatter plot hybrid)\r\n gen = sns.catplot(x='Sex', y=\"PassengerId\", hue = 'Survived', data=self.tiDat)\r\n gen\r\n \r\n #Generates categorical scatter plot (looks like bar chart and scatter plot hybrid)\r\n gen_age = gen = sns.catplot(x='Sex', y=\"Age\", hue = 'Survived', data=self.tiDat)\r\n gen_age\r\n \r\n clas_age = sns.catplot(x='Pclass', y=\"Age\", hue = 'Survived', data=self.tiDat)\r\n clas_age\r\n \r\n #Generates categorical scatter plot w/ boundry lines\r\n age = sns.lmplot(x=\"PassengerId\", y=\"Age\", hue = 'Survived', data=self.tiDat)\r\n age\r\n \r\ndef main():\r\n \r\n learning_rate = 0.0003#float(input(\"Enter in learning rate:\"))\r\n \r\n num_epochs = 500#int(input(\"Enter in max epochs:\"))\r\n \r\n typeData = 'titanic' #input(\"Enter 
type of data would like to run program on: linear, nonlinear, titanic, TBadaline \").lower()\r\n if typeData == \"linear\":\r\n seperable = LinSep_data() #testD, testL, trainD, trainL\r\n \r\n print(\"Shape\")\r\n print(seperable[0].shape)\r\n print(\"High\")\r\n pDogs = Perceptron(seperable[3], seperable[4], num_epochs, learning_rate) #self, tr_inpt, labels, epoch, Lr\r\n print(\"Accuracy\", pDogs.testIt(seperable[0], seperable[1]), \"%\")\r\n \r\n elif typeData == \"nonlinear\":\r\n notSep = Not_linSep_data() #testD, testL, sz, trainD, trainL\r\n \r\n pets = Perceptron(notSep[3], notSep[4], num_epochs, learning_rate)\r\n print(\"Accuracy\", pets.testIt(notSep[0], notSep[1]), \"%\")\r\n \r\n elif typeData == \"titanic\": \r\n #\"C:\\\\Users\\\\danie\\\\Documents\\\\School\\\\CS\\\\COMP 379\\Hw\\\\train.csv\"\r\n #tncFile = input(\"Enter in titanic file name\")\r\n TD = TitanicData(\"C:\\\\Users\\\\danie\\\\Documents\\\\School\\\\CS\\\\COMP 379\\Hw\\\\train.csv\", False, False) \r\n \"\"\"Returns tuple of (trainingData, trainingLabels, size, testData, testDataLabels)\"\"\"\r\n \r\n titanic = TD.import_clean_TitanicData() #Returns tuple of (trainingData, trainingLabels, size, testData, testDataLabels)\r\n \r\n p = Perceptron(titanic[0], titanic[1], num_epochs, learning_rate) #self, tr_inpt, labels, epoch, Lr\r\n print(\"Accuracy\", p.testIt(titanic[3], titanic[4]), \"%\")\r\n \r\nmain()" }, { "alpha_fraction": 0.5327844619750977, "alphanum_fraction": 0.5559438467025757, "avg_line_length": 37.176334381103516, "blob_id": "8bd6378244abcfe82185928299d42f912c43dd45", "content_id": "3b2162fe563947ce2e99cbb4ddb82d93f3fdfd0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16883, "license_type": "no_license", "max_line_length": 170, "num_lines": 431, "path": "/Pombo_Hw2Perceptron.py", "repo_name": "daniellapombo/ML379", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Sep 21 12:36:45 2019\r\n\r\n@author: danie\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pan\r\nimport seaborn as sns\r\n\r\nclass AdalineSGD:\r\n def __init__(self, tr_inpt, labels, epoch, Lr):\r\n #Intialize parameters\r\n self.epoch = epoch\r\n self.tr_inpt = tr_inpt\r\n \r\n if self.tr_inpt.ndim == 1:#If self.tr_input.shape == (a, ) where a member of integer set\r\n self.sz = 1 #For 1D array there is 1 column = column length/width is 1 rather than nothing\r\n else:\r\n self.sz = self.tr_inpt.shape[1] #Length of row (number of features per sample)\r\n #.shape returns a tuple and element at index 1 indicates the length of the row\r\n \r\n self.w = self.weights() #weights \r\n self.Lr = Lr #Learning rate\r\n self.labels = labels #Training data labels\r\n \r\n self.learning() #Learning algorithm execution\r\n self.plotErrors_Cost() #Generate log(cost) vs epoch graph\r\n \r\n \r\n def weights(self):\r\n #where sz is the size of x (number of x)\r\n self.w = np.random.random(self.sz+1)\r\n\r\n #random.randfl ? 
generate float of random wieght\r\n return self.w\r\n \r\n def z(self, x):\r\n #generate dot product between w and features x\r\n return np.dot(self.w[1:], x) + self.w[0]\r\n # return np.dot(np.transpose(self.w),x)\r\n \r\n def Id(self,z_): #Identity function - Activation function\r\n return z_\r\n \r\n def learning(self):\r\n self.cost = []\r\n for e in range(self.epoch):\r\n cst = 0\r\n for k in range(self.tr_inpt.shape[0]):\r\n \r\n X = self.tr_inpt[k] #Row k\r\n Z = self.z(X) #Net input\r\n \r\n error = (self.labels[k]-self.Id(Z)) #\r\n dw = self.Lr*error*X \r\n \r\n self.w[1:] += dw\r\n self.w[0] += self.Lr*error\r\n \r\n cst += .5*(error**2)\r\n \r\n self.cost.append(cst)\r\n \r\n def quantizer(self, z):\r\n if z >= 0.0:\r\n return 1\r\n else:\r\n return 0\r\n \r\n def testIt(self, testDat, testLabels):\r\n test_result = []\r\n right = 0\r\n for k in range(testDat.shape[0]):\r\n \r\n z = self.z(testDat[k])\r\n prediction = self.quantizer(z)\r\n \r\n test_result.append(prediction)\r\n \r\n if prediction == testLabels[k]:\r\n right += 1\r\n \r\n return (right/len(test_result))*100\r\n \r\n def plotErrors_Cost(self):\r\n costFig = plt.figure() #Initalizes new plot\r\n plt.title(\"Number of updates vs Epochs\") \r\n plt.plot(range(1,len(self.cost)+1), np.log10(self.cost)) #range(1,len(self.updates)+1) is the epochs\r\n #x = epochs, y = self.updates (number of updates per epoch)\r\n plt.xlabel('Epochs')\r\n plt.ylabel(\"Log(cost)\")\r\n plt.show() #Generates/shows the plot\r\n \r\n\r\n#Perceptron single layer neural network\r\nclass Perceptron:\r\n def __init__(self, tr_inpt, labels, epoch, Lr):\r\n #Initalize paramters\r\n self.epoch = epoch #Number of iterations through the whole data set\r\n self.tr_inpt = tr_inpt #Training data set w/out labels\r\n self.Lr = Lr #Learning rate\r\n self.labels = labels #Training data labels\r\n\r\n if self.tr_inpt.ndim == 1:#If self.tr_input.shape == (a, ) where a member of integer set\r\n self.sz = 1 #For 1D array there is 1 column = column length/width is 1 rather than nothing\r\n else:\r\n self.sz = self.tr_inpt.shape[1] #Length of row (number of features per sample)\r\n #.shape returns a tuple and element at index 1 indicates the length of the row\r\n \r\n self.w = self.weights() #Initalizing weights to random numbers\r\n self.fit() #Calling execution of learning algorithm\r\n self.plotErrors() #Plots the errors per epoch to demonstrate convergency of data\r\n \r\n\r\n def z_input(self, x): \r\n #generate dot product between w and features x\r\n return np.dot(self.w[1:], x) + self.w[0] #Returns dot product of weights, bias and sample\r\n # return np.dot(np.transpose(self.w),x)\r\n \r\n def weights(self):\r\n self.w = np.random.random(self.sz+1) #Creates a weight vector of size self.sz+1 composed of random variables\r\n return self.w\r\n \r\n def predict(self, z): #Step function:Activation function\r\n if z >= 0.0:\r\n return 1\r\n else:\r\n return 0\r\n \r\n def fit(self):\r\n self.updates = [] #Initalize vector to store update number per epoch, the update is dependent on the error\r\n \r\n for m in range(self.epoch):\r\n update_num = 0 #Initialize total update per epoch to 0\r\n for k in range(self.tr_inpt.shape[0]): #Iterates through each row within data set\r\n \r\n z = self.z_input(self.tr_inpt[k]) #Net input\r\n prediction = self.predict(z) #Activation function\r\n target = self.labels[k] \r\n error = target - prediction\r\n \r\n dw = self.Lr*error*self.tr_inpt[k] #Value to update the weights by\r\n \r\n #self.w += dw\r\n self.w[1:] += dw 
#Update the weights\r\n self.w[0] += self.Lr*error #Update the bias; inspired by the text book \"Python Machine Learning\" by Sebastian Raschka\r\n \r\n update_num += int(self.Lr*error != 0.0) #Increments the updates, inspired by textbook \"Python Machine Learning\" by Sebastian Raschka\r\n \r\n self.updates.append(update_num) #Store the total updates for the epoch\r\n \r\n\r\n \r\n def testIt(self, testDat, testLabels): #Test after train\r\n test_result = [] #Initalize storage of predictions for test data\r\n right = 0 #Initalize number of right predictions \r\n for k in range(testDat.shape[0]): #Iterate through the whole test data set sample by sample\r\n \r\n z = self.z_input(testDat[k]) #Net input\r\n \r\n prediction = int(self.predict(z)) #Step function\r\n \r\n test_result.append(prediction) #Storge the results in vector\r\n \r\n if prediction == testLabels[k]: #Count the number of correct predictions\r\n right += 1\r\n \r\n return (right/len(test_result))*100 #Returns the accuracy of the perpectron on the training data set\r\n \r\n def plotErrors(self):\r\n errorUpdateFig = plt.figure() #Initalizes new plot\r\n plt.title(\"Number of updates vs Epochs\") \r\n plt.plot(range(1,len(self.updates)+1), self.updates) #range(1,len(self.updates)+1) is the epochs\r\n #x = epochs, y = self.updates (number of updates per epoch)\r\n plt.xlabel('Epochs')\r\n plt.ylabel(\"Number of updates\")\r\n plt.show() #Generates/shows the plot\r\n \r\n \r\nclass TitanicData():\r\n def __init__(self, fileName, runStats, runGraphs):\r\n self.fileName = fileName #File name as string\r\n self.runStats = runStats #Boolean indicates if would like to generate statistical table\r\n self.runGraphs = runGraphs #Boolean indicates if would liek to generate graphs/diagrams\r\n \r\n self.import_clean_TitanicData() #Exectues uploading & cleaning of the data\r\n \r\n if self.runStats:#Execute stats function if True\r\n self.stats()\r\n \r\n if self.runGraphs: #Execute stats function if True\r\n self.scatterPlots()\r\n \r\n \r\n def import_clean_TitanicData(self):\r\n ti_file =pan.read_csv(self.fileName)\r\n \r\n \"\"\"col = list(ti_file.columns) \r\n ['PassengerId', 'Survived', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp', \r\n 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked']\"\"\"\r\n \r\n self.tiDat = ti_file[['PassengerId', 'Survived', 'Pclass','Sex', 'Age', 'SibSp', 'Parch', 'Fare']]\r\n \r\n \r\n convert = self.tiDat.iloc[:,1:7] #Creates new data frame w/ features ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']\r\n\r\n #Change strings to binary integer counterparts\r\n #FEMALE IS 0 and MALE IS 1\r\n \r\n convert.loc[convert['Sex'] == 'male', 'Sex'] = 0\r\n convert.loc[convert['Sex'] == 'female', 'Sex'] = 1\r\n \r\n \"\"\"colP = list(convert.columns)\r\n ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch']\"\"\"\r\n \r\n #Convert nan values to mean value of given column\r\n \r\n #Find mean value of each column\r\n class_avg = convert[\"Pclass\"].mean()\r\n sex_avg = convert['Sex'].mean()\r\n age_avg = convert['Age'].mean()\r\n sib_avg = convert['SibSp'].mean()\r\n parch_avg = convert['Parch'].mean()\r\n\r\n \r\n #Replace nan value w/ mean values\r\n convert[\"Pclass\"].fillna(class_avg, inplace = True)\r\n convert[\"Sex\"].fillna(sex_avg, inplace = True)\r\n convert[\"Age\"].fillna(age_avg, inplace = True)\r\n convert[\"SibSp\"].fillna(sib_avg, inplace = True)\r\n convert[\"Parch\"].fillna(parch_avg, inplace = True)\r\n \r\n \r\n self.size = convert.count().max() #Find the maximum length of column\r\n \r\n wholeData = convert 
#Storage cleaned and corrected all of training data w/out labels\r\n \r\n \r\n np.random.shuffle(wholeData.values) #Makes the selection of samples random for creating training data set\r\n #print(wholeData)\r\n \r\n train = [] #Initalize list that will store only a fragement of the training data\r\n\r\n train = wholeData.sample(frac=0.7)\r\n\r\n test = wholeData.loc[~wholeData.index.isin(train.index)] #Take the rest of wholeData that was Not used in train and uses those unuses values to create test\r\n \r\n \r\n #print(train.columns.values) #Prints the values as nparray format\r\n \r\n train_labels = train[\"Survived\"] #Extract training labels and create new np.dataFrame train_labels\r\n \r\n train.drop(\"Survived\", inplace=True, axis=1)\r\n \r\n test_labels = test[\"Survived\"] #Extract testing labels and creates new np.dataFrame test_labels\r\n \r\n test.drop(\"Survived\" , inplace=True, axis=1)\r\n \r\n return(np.array(train['Sex']), np.array(train_labels), None , np.array(test['Sex']), np.array(test_labels))\r\n \r\n def stats(self):\r\n dat = self.tiDat.groupby(['Survived', 'Pclass', 'Sex'])['Survived'].count()\r\n #Generates a cross tabulated table based on the survival of passengers\r\n print(dat) \r\n \r\n tisum = self.tiDat['Survived'].sum() \r\n print(\"Total passengers survived\", tisum)\r\n \r\n def scatterPlots(self): #Generates various statistical diagrams\r\n \r\n print(\"1 indicates survived, 0 indicates death\")\r\n \r\n #Generates categorical scatter plot (looks like bar chart and scatter plot hybrid)\r\n clas = sns.catplot(x='Pclass', y=\"PassengerId\", hue = 'Survived', data=self.tiDat)\r\n clas\r\n \r\n #Generates categorical scatter plot (looks like bar chart and scatter plot hybrid)\r\n gen = sns.catplot(x='Sex', y=\"PassengerId\", hue = 'Survived', data=self.tiDat)\r\n gen\r\n \r\n #Generates categorical scatter plot (looks like bar chart and scatter plot hybrid)\r\n gen_age = gen = sns.catplot(x='Sex', y=\"Age\", hue = 'Survived', data=self.tiDat)\r\n gen_age\r\n \r\n clas_age = sns.catplot(x='Pclass', y=\"Age\", hue = 'Survived', data=self.tiDat)\r\n clas_age\r\n \r\n #Generates categorical scatter plot w/ boundry lines\r\n age = sns.lmplot(x=\"PassengerId\", y=\"Age\", hue = 'Survived', data=self.tiDat)\r\n age\r\n \r\n \r\ndef LinSep_data():\r\n #sex, height, species\r\n #male 0 female 1, 15-110 cm, toy dog species 0 regular/largedog species 1\r\n #linearly sperable\r\n \r\n #Create array of dog data where index 0 indicates PID, index 1 is gender, index 2 is height, index 3 is species\r\n d = [[45, 1], [37, 1], [123, 1],[134,1],\r\n [48,1],[99, 1], [78, 1], [35, 1], \r\n [88,1], [67,1], [40,1],[56,1], \r\n [34, 1],[89, 1], [69,1], [18, 0], \r\n [23, 0], [30, 0], [20, 0], [19, 0], \r\n [18, 0], [16,0],[32, 0], [25,0], \r\n [24, 0],[13,0], [12,0], [13,0]]\r\n \r\n dogs = pan.DataFrame(d, columns =[\"Height\", \"Type\"]) #Create pandas data frame\r\n \r\n #Generates categorical scatter plot (looks like bar chart and scatter plot hybrid)\r\n dds = sns.catplot(x='Type', y=\"Height\", hue = 'Type', data = dogs)\r\n dds\r\n \r\n #Generates categorical scatter plot w/ boundry lines\r\n sh = sns.lmplot(x = 'Type', y=\"Height\", hue = 'Type', data = dogs)\r\n sh\r\n \r\n \r\n testD = []\r\n testL = []\r\n trainD =[]\r\n trainL =[]\r\n\r\n \r\n np.random.shuffle(d)\r\n \r\n #Creating test data\r\n for k in range(0,len(d), 3):\r\n testD.append(np.array(d[k][0]))\r\n testL.append(np.array(d[k][1]))\r\n #testnp = np.append(testnp, np.array(d[k][0]), axis = 1)\r\n rshp_test 
= len(testD)\r\n \r\n np.random.shuffle(d)\r\n #Creating training data\r\n for k in range(0,int(len(d)*.70)):\r\n trainD.append(np.array(d[k][0]))\r\n trainL.append(np.array(d[k][1]))\r\n\r\n rshp_train = len(trainD)\r\n \r\n sz = len(testD)\r\n\r\n #print(testDnp)\r\n return(np.array(testD).reshape(rshp_test,1), np.array(testL).reshape(rshp_test,1), sz, np.array(trainD).reshape(rshp_train,1), np.array(trainL).reshape(rshp_train,1))\r\n\r\ndef Not_linSep_data():\r\n #male 0 female 1, 15-110 cm, cats 0 dogs 1\r\n #NOT linearly sperable\r\n p = [[45, 1], [20, 1], [123, 1], [45, 1], \r\n [78, 1], [35, 1], [34, 1], [89, 1], \r\n [69,1], [18, 0], [23, 0], [30, 1], \r\n [20, 0], [19, 0], [18, 1], [16,1], \r\n [32, 0], [25,0], [24, 1], [13,1], \r\n [12,1], [13,1], [134,1], [56,1], \r\n [48,1], [23,0],[23,0], [25,0], \r\n [34,0], [27,0], [16,0], [34, 0], \r\n [13,1], [10, 1], [42,0], [21,0]]\r\n \r\n pets = pan.DataFrame(p, columns =[\"Height\", \"Type\"]) #Create pandas data frame\r\n \r\n #Generates categorical scatter plot (looks like bar chart and scatter plot hybrid)\r\n ls = sns.catplot(x='Type', y=\"Height\", hue = 'Type', data = pets)\r\n ls\r\n \r\n \r\n #Generates categorical scatter plot w/ boundry lines\r\n hs = sns.lmplot(x=\"Type\", y=\"Height\", hue = 'Type', data = pets)\r\n hs\r\n \r\n testD = []\r\n testL = []\r\n trainD =[]\r\n trainL =[]\r\n \r\n \r\n np.random.shuffle(p)\r\n \r\n #Creating training data sets\r\n for k in range(0, int(len(p)*.70)):\r\n trainD.append(np.array(p[k][0]))\r\n trainL.append(np.array(p[k][1]))\r\n \r\n rshp_train = len(trainD)\r\n \r\n #Creating test data sets\r\n for k in range(int(len(p)*.70),len(p)):\r\n testD.append(np.array(p[k][0]))\r\n testL.append(np.array(p[k][1]))\r\n \r\n rshp_test = len(testD)\r\n \r\n sz = len(trainD)\r\n \r\n return(np.array(testD).reshape(rshp_test,1), np.array(testL).reshape(rshp_test,1), sz, np.array(trainD).reshape(rshp_train,1), np.array(trainL).reshape(rshp_train,1))\r\n\r\ndef main():\r\n \"\"\"learning_rate = 0.0003#float(input(\"Enter in learning rate:\"))\r\n \r\n num_epochs = 500#int(input(\"Enter in max epochs:\"))\"\"\"\r\n \r\n print(\"Titanic\")\r\n TD = TitanicData(\"train.csv\", False, False) \r\n \"\"\"Returns tuple of (trainingData, trainingLabels, size, testData, testDataLabels)\"\"\"\r\n \r\n titanic = TD.import_clean_TitanicData() #Returns tuple of (trainingData, trainingLabels, size, testData, testDataLabels)\r\n \r\n print(\"Perceptron\")\r\n \r\n p = Perceptron(titanic[0], titanic[1], 1000, 0.003) #self, tr_inpt, labels, epoch, Lr\r\n print(\"Titanic: Perceptron accuracy\", p.testIt(titanic[3], titanic[4]), \"%\")\r\n \r\n print(\"Adaline Stochastic Gradient Descent\")\r\n \r\n a = AdalineSGD(titanic[0], titanic[1], 1000, 0.0001)\r\n print(\"Titanic: AdalineSGD accuracy\", a.testIt(titanic[3], titanic[4]), \"%\")\r\n \r\n print(\"Linearly Seperable\")\r\n seperable = LinSep_data() #testD, testL, trainD, trainL\r\n\r\n pDogs = Perceptron(seperable[3], seperable[4], 1000, 0.001) #self, tr_inpt, labels, epoch, Lr\r\n print(\"linear: Perceptron accuracy\", pDogs.testIt(seperable[0], seperable[1]), \"%\")\r\n \r\n print(\"Nonlinearly Seperable\") \r\n notSep = Not_linSep_data() #testD, testL, sz, trainD, trainL\r\n \r\n pets = Perceptron(notSep[3], notSep[4], 250, 0.001)\r\n print(\"Nonlinear: Perceptron accuracy\", pets.testIt(notSep[0], notSep[1]), \"%\")\r\n \r\nmain()" }, { "alpha_fraction": 0.613172709941864, "alphanum_fraction": 0.6371084451675415, "avg_line_length": 32.20329666137695, 
"blob_id": "49e1861c32c5e65e8db90c1e99d764c7872ff5b2", "content_id": "ff312879cfdcbe1a6d1306a11ee61c86d87a62b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6225, "license_type": "no_license", "max_line_length": 129, "num_lines": 182, "path": "/FinalProjectDraft.py", "repo_name": "daniellapombo/ML379", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Nov 18 21:55:52 2019\r\n\r\n@author: danie\r\n\"\"\"\r\n\r\n#Preprocessing\r\nfrom sklearn.metrics import f1_score\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.metrics import precision_recall_curve\r\nfrom sklearn.metrics import auc\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn import preprocessing\r\n\r\n#Classifiers\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n\r\n#import seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n#for data import and visualization\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import classification_report\r\n\r\n\r\n\r\n\r\nsns.set_style(\"dark\")\r\ncolors = [\"#800000\", \"#45ada8\", \"#2a363b\", \"#fecea8\", \"#99b898\", \"#e5fcc2\"]\r\nsns.set_palette(sns.color_palette(colors))\r\n\r\nbreast_data = pd.read_csv('data.csv')\r\n#breast_data = breast_data.drop(['ID','Unnamed: 32'],axis=1)\r\n\r\n#drop diagnosis, create X and Y\r\ny = breast_data['Diagnosis']\r\nx_ = breast_data.drop('Diagnosis', axis=1)\r\nx = x_.drop('ID', axis = 1)\r\n\r\n#replace M and B with 1s and 0s\r\ny = y.replace(['M', 'B'], [1, 0])\r\ncolumns = x.columns\r\n\r\nx = x.replace(0, np.nan)\r\n\r\n#replace missing values with mean\r\nfor col in x.columns:\r\n x[col].fillna(x[col].mean(), inplace=True)\r\n\r\n#standardize the dataset to have a mean of 0, allows us to compare different scales\r\nscaler = StandardScaler()\r\nstandardized_data = x.copy()\r\n\r\nstandardized_data[columns] = pd.DataFrame(scaler.fit_transform(standardized_data[columns]))\r\n\r\n#split the dataset, 70% training, 15% test, 15% development\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(x, y,test_size=0.3, random_state = 0)\r\n\r\ndef model_eval(clfr_var):\r\n prediction = clfr_var.predict(X_test)\r\n cm = confusion_matrix(y_test,prediction)\r\n acc_s = accuracy_score(y_test, prediction)\r\n #prc = precision_recall_curve(y_test, prediction, pos_label = 1)\r\n PRE, REC, _ = precision_recall_curve(y_test, prediction, pos_label = 1)\r\n AUC = auc(REC, PRE)\r\n f_s = f1_score(y_test, prediction)\r\n cr = classification_report(y_test, prediction)\r\n \r\n \r\n# print('Accuracy score on test set:', acc_s)\r\n# print('f1 score, combo of precision and recall, on test set:', f_s)\r\n# print('Confusion matrix on test set:', cm)\r\n# print(\"\")\r\n# #print('Precision recall curve on test set:', prc)\r\n# print('Area under the curve', AUC)\r\n \r\n return (AUC, f_s, acc_s, cr, clfr_var)\r\n 
\r\n\r\ndef mod_select_train(clfr_var, hypprm):\r\n #Grid search\r\n gs = GridSearchCV(clfr_var, param_grid = hypprm, cv = 10, scoring = 'f1', refit = True) #Verbose shows u wats going on\r\n gs.fit(X_train, y_train)\r\n# print('Best score from grid search:', gs.best_score_)\r\n# print('Best parameters', gs.best_params_)\r\n gs = gs.best_estimator_\r\n \r\n #k fold cross validation\r\n avg_acc = cross_val_score(estimator = gs, X = X_train, y = y_train, cv = 10, scoring = 'f1').mean()\r\n# print('Average accuracy on training data', gs, ':', avg_acc)\r\n return gs\r\n\r\ndef SupVM():\r\n print('Trial SVM')\r\n #grid_param = {'C' :[0.1, 1, 5, 10, 50], 'kernel' :['linear', 'rbf']}\r\n #grid_param = {'C' :[1, 5, 10, 50], 'kernel' :['linear', 'rbf']}\r\n grid_param = {'C' :[1], 'kernel' :['linear']}\r\n \r\n #Support vector machines\r\n s_run = SVC()\r\n s_run.fit(X_train, y_train)\r\n \r\n return model_eval(mod_select_train(s_run, grid_param))\r\n \r\ndef lr():\r\n #Logistic Regression\r\n grid_param = {'C' :[0.00001, 0.001, 1, 3, 5, 10, 50, 100, 1000]}\r\n lr_run = LogisticRegression()\r\n lr_run.fit(X_train, y_train)\r\n \r\n return model_eval(mod_select_train(lr_run,grid_param))\r\n \r\ndef SGD():\r\n #Adaline SGD\r\n grid_param = {'penalty' :['l1', 'l2'], 'max_iter':[25, 50, 100]}\r\n sgA_run = SGDClassifier()\r\n sgA_run.fit(X_train, y_train)\r\n #mod_select_train(sgA_run, grid_param)\r\n return model_eval(mod_select_train(sgA_run, grid_param))\r\n\r\ndef nnP():\r\n #Perceptron Neural Network\r\n grid_param = {'hidden_layer_sizes' : [(100,3), (5,2)], 'max_iter':[25, 50, 100], 'solver': ['adam'], 'activation': ['relu'] }\r\n nnP_run = MLPClassifier()\r\n nnP_run.fit(X_train, y_train)\r\n return model_eval(mod_select_train(nnP_run, grid_param))\r\n\r\ndef DT():\r\n grid_param = {'criterion' : ['gini', 'entropy'], 'max_depth' : [2, 4, 7, 10, 15], 'random_state' : [0]}\r\n tree = DecisionTreeClassifier()\r\n tree.fit(X_train, y_train)\r\n \r\n return model_eval(mod_select_train(tree, grid_param))\r\n \r\ndef knn():\r\n grid_param = {'algorithm' : ['brute', 'ball_tree', 'kd_tree'], 'n_neighbors' : [3, 5, 10, 15, 20]}\r\n knn_run = KNeighborsClassifier()\r\n knn_run.fit(X_train, y_train)\r\n return model_eval(mod_select_train(knn_run, grid_param))\r\n \r\ndef classifiers():\r\n \r\n best = [lr(), SGD(), DT(), nnP(), SupVM(), knn()]\r\n #(AUC, f_s, acc_s, cr, clfr_var)\r\n best.sort()\r\n print(\"\")\r\n \r\n \r\n out_file = open('FinalProjectModelEvaluationReport.txt', 'w')\r\n out_file.writelines('From most significant classifier to least' + '\\n')\r\n \r\n while len(best) > 0:\r\n report = best.pop()\r\n out_file.writelines('Classifier w/ optimal hyperameters: ' + '\\n')\r\n out_file.writelines(str(report[-1])+ '\\n')\r\n out_file.writelines('Confusion matrix'+ '\\n')\r\n out_file.writelines(str(report[-2])+ '\\n')\r\n out_file.writelines('Area under Precision and Recall Curve: ' + str(report[0])+ '\\n')\r\n out_file.writelines('F1s score: ' + str(report[1])+ '\\n')\r\n out_file.writelines('Accuracy: ' + str(report[2]) + '\\n')\r\n out_file.writelines('\\n')\r\n \r\n out_file.close()\r\n \r\nclassifiers()\r\n" }, { "alpha_fraction": 0.48958176374435425, "alphanum_fraction": 0.5009886026382446, "avg_line_length": 31.050251007080078, "blob_id": "155dfecfd9d1170d0ac9cfcd4b4be14da16c8054", "content_id": "84e53360af7d54f111a24b96359b5064e2fb074d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6575, "license_type": "no_license", 
"max_line_length": 148, "num_lines": 199, "path": "/Adaline.py", "repo_name": "daniellapombo/ML379", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Sep 22 20:55:49 2019\r\n\r\n@author: danie\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pan\r\nimport seaborn as sns\r\nimport random\r\n\r\nclass AdalineSGD:\r\n def __init__(self, tr_inpt, labels, epoch, Lr):\r\n self.epoch = epoch\r\n self.tr_inpt = tr_inpt\r\n self.sz = self.tr_inpt.shape[1] #.shape returns a tuple and element at index 1 indicates the length of the row\r\n self.w = self.weights()\r\n self.Lr = Lr\r\n self.labels = labels\r\n self.learning()\r\n \r\n \r\n def weights(self):\r\n self.w = np.random.random(self.sz+1)\r\n\r\n #random.randfl ? generate float of random wieght\r\n return self.w\r\n \r\n def z(self, x):\r\n #generate dot product between w and features x\r\n return np.dot(self.w[1:], x) + self.w[0]\r\n # return np.dot(np.transpose(self.w),x)\r\n \r\n def Id(self,x):\r\n\r\n return self.z(x)\r\n \r\n def learning(self):\r\n self.cost = []\r\n for e in range(self.epoch):\r\n cst = 0\r\n for k in range(self.tr_inpt.shape[0]):\r\n X = self.tr_inpt[k]\r\n error = (self.labels[k]-self.Id(X))\r\n dw = self.Lr*error*X\r\n \r\n self.w[1:] += dw\r\n self.w[0] += self.Lr*error\r\n \r\n cst += .5*(error**2)\r\n \r\n self.cost.append(cst)\r\n \r\n def quantizer(self, z):\r\n if z >= 0:\r\n return 1\r\n elif z < 0:\r\n return 0\r\n \r\n def testIt(self, testDat, testLabels):\r\n test_result = []\r\n right = 0\r\n for k in range(testDat.shape[0]):\r\n z = self.z(testDat[k])\r\n prediction = self.quantizer(z)\r\n \r\n test_result.append(prediction)\r\n \r\n if prediction == testLabels[k]:\r\n right += 1\r\n \r\n return (right/len(test_result))*100\r\n \r\n \r\nclass TitanicData():\r\n def __init__(self, fileName, runStats, runGraphs):\r\n self.fileName = fileName\r\n self.runStats = runStats\r\n self.runGraphs = runGraphs\r\n \r\n self.import_clean_TitanicData()\r\n \r\n if self.runStats:\r\n self.stats()\r\n \r\n if self.runGraphs:\r\n self.scatterPlots()\r\n \r\n \r\n def import_clean_TitanicData(self):\r\n ti_file =pan.read_csv(self.fileName)\r\n \r\n \"\"\"col = list(ti_file.columns) \r\n ['PassengerId', 'Survived', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp', \r\n 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked']\"\"\"\r\n \r\n self.tiDat = ti_file.iloc[:, [0,1,2,4,5,6,7,9]] \r\n \r\n \"\"\"up_col = list(tiDat.columns) #['PassengerId', 'Survived', 'Pclass',\r\n 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']\"\"\"\r\n \r\n lbls = []\r\n \r\n for t in self.tiDat.iloc[:,1]:\r\n lbls.append(np.array(t))\r\n \r\n convert = self.tiDat.iloc[:,2:]\r\n #FEMALE IS 0 and MALE IS 1\r\n convert.loc[convert['Sex'] == 'male', 'Sex'] = 0\r\n convert.loc[convert['Sex'] == 'female', 'Sex'] = 1\r\n \r\n \"\"\"colP = list(convert.columns)\r\n ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']\"\"\"\r\n \r\n class_avg = convert[\"Pclass\"].mean()\r\n sex_avg = convert['Sex'].mean()\r\n age_avg = convert['Age'].mean()\r\n sib_avg = convert['SibSp'].mean()\r\n parch_avg = convert['Parch'].mean()\r\n fare_avg = convert['Fare'].mean()\r\n \r\n convert[\"Pclass\"].fillna(class_avg, inplace = True)\r\n convert[\"Sex\"].fillna(sex_avg, inplace = True)\r\n convert[\"Age\"].fillna(age_avg, inplace = True)\r\n convert[\"SibSp\"].fillna(sib_avg, inplace = True)\r\n convert[\"Parch\"].fillna(parch_avg, inplace = True)\r\n convert[\"Fare\"].fillna(fare_avg, inplace = True)\r\n \r\n 
self.size = convert.count().max() \r\n \r\n wholeData = []\r\n #['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']\r\n for k in range(self.size):\r\n wholeData.append(np.array(convert.iloc[k,:]))\r\n \r\n train = []\r\n labels = []\r\n random.shuffle(wholeData)\r\n #Create split training data\r\n# \r\n for j in range(1, int(self.size*.70)): #!!! Why is it that when change increment to 3 accuracy goes down to about 40% but is 68% when at 10?\r\n train.append(wholeData[j])\r\n labels.append(lbls[j])\r\n print(len(train))\r\n \r\n #Convert input data into numpy array that is 2d\r\n #Make the training data into numpy arrays\r\n self.trainingData = np.array(train)\r\n self.trainingLabels = np.array(labels)\r\n random.shuffle(wholeData)\r\n \r\n test = []\r\n test_labels = []\r\n \r\n #Create split training data\r\n for j in range(1, int(self.size*.3)):\r\n test.append(wholeData[j])\r\n test_labels.append(lbls[j])\r\n \r\n print(len(test))\r\n \r\n self.testData = np.array(test)\r\n self.testDataLabels = np.array(test_labels)\r\n \r\n return(self.trainingData, self.trainingLabels, self.size, self.testData, self.testDataLabels)\r\n \r\n def stats(self):\r\n dat = self.tiDat.groupby(['Survived', 'Pclass', 'Sex'])['Survived'].count()\r\n print(dat)\r\n \r\n \r\n tisum = self.tiDat['Survived'].sum()\r\n print(\"Total passengers survived\", tisum)\r\n \r\n def scatterPlots(self):\r\n print(\"1 indicates survived, 0 indicates death\")\r\n clas = sns.catplot(x='Pclass', y=\"PassengerId\", hue = 'Survived', data=self.tiDat)\r\n clas\r\n \r\n gen = sns.catplot(x='Sex', y=\"PassengerId\", hue = 'Survived', data=self.tiDat)\r\n gen\r\n \r\n age = sns.lmplot(x=\"PassengerId\", y=\"Age\", hue = 'Survived', data=self.tiDat)\r\n age\r\n \r\n gen_age = gen = sns.catplot(x='Sex', y=\"Age\", hue = 'Survived', data=self.tiDat)\r\n gen_age\r\n \r\ndef main():\r\n TD = TitanicData(\"train.csv\", False, False) \r\n \"\"\"Returns tuple of (trainingData, trainingLabels, size, testData, testDataLabels)\"\"\"\r\n titanic = TD.import_clean_TitanicData()\r\n \r\n p = AdalineSGD(titanic[0], titanic[1], 100, 0.0001) #self, tr_inpt, labels, epoch, Lr\r\n print(\"Accuracy\", p.testIt(titanic[3], titanic[4]), \"%\")\r\n \r\nmain()" }, { "alpha_fraction": 0.5287445783615112, "alphanum_fraction": 0.5364171862602234, "avg_line_length": 34.84016418457031, "blob_id": "e6306efb372cf145e66357a93f090a7c91bc2370", "content_id": "0b46b77073fbc0803cb99d4063b16fb288a782f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8993, "license_type": "no_license", "max_line_length": 213, "num_lines": 244, "path": "/tr.py", "repo_name": "daniellapombo/ML379", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 12 23:57:28 2019\r\n\r\n@author: danie\r\n\"\"\"\r\n\r\n#Preprocessing\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem import PorterStemmer\r\n\r\n#Classifiers\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.svm import SVC, LinearSVC\r\n\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.model_selection import cross_val_score\r\n\r\n#Functions\r\nimport re\r\nimport random\r\nimport math\r\n\r\n#Objects\r\nimport numpy as np\r\nimport pandas as pan\r\n\r\nclass NLP_:\r\n def 
__init__(self, text1, class1, text2, class2, are_labeled):\r\n \r\n \r\n self.text1 = text1 #Is the first input file\r\n self.text2 = text2 #Is the second input file\r\n \r\n self.class1 = class1 #One unique type class/label for the samples\r\n self.class2 = class2 #One unique type class/label for the samples\r\n \r\n self.C = 0 #Best chosen classifier hyperparameter\r\n self.k = 0 #Best chosen n fold cross validation variable, the fold number\r\n #self.gamma = 0\r\n self.final_hyper = False\r\n \r\n self.are_labeled = are_labeled #Indicates is the input data came w/ labels, if false indicates that lables must be assigned\r\n \r\n #self.run_model(True) #Runs MLA \r\n \r\n \r\n \r\n def proprocessing(self): \r\n \"\"\"Imports, converts, cleans and splits input data for train and test sets\"\"\"\r\n \r\n if not self.are_labeled:\r\n pos = self.label_raw_dat(self.text1, self.class1) #Labels data \r\n neg = self.label_raw_dat(self.text2, self.class2) #Labels data\r\n #print(pos.head())\r\n #print(neg.head())\r\n \r\n dat = pan.concat([pos, neg]) #Concatenates 2 input data sets into one large one\r\n dat['Text'].str.lower() #Converts all words into lowercase characters\r\n dat.replace(to_replace = r'[^a-zA-Z]', value = ' ', regex = True, inplace = True) #Get help w/ Regex!!\r\n \r\n #dat.replace(to_replace = r'[^a-zA-Z0-9]', value = ' ', regex = True, inplace = True)\r\n #dat.replace(r'[^a-zA-Z0-9]', ' ',line)\r\n \r\n self.dat = dat.sample(frac=1).reset_index(drop=True)\r\n \r\n #print(self.dat.head())\r\n \r\n self.labels = np.array(self.dat['Label'])\r\n self.words = np.array(self.dat['Text']) \r\n \r\n self.train_txt, self.test_txt, self.train_label, self.test_label = train_test_split(self.words[:len(self.words)//1], self.labels[:len(self.labels)//1], test_size= .20, train_size = .80, shuffle = True)\r\n \r\n self.num_docs = self.train_txt.shape[0]\r\n \r\n print('Number of total documents w/ in train set', self.num_docs)\r\n \r\n #print(train_txt[1:10])\r\n #print(train_label[1:10])\r\n \r\n return (self.train_txt, self.train_label, self.test_txt, self.test_label) #Return sparse matrix of word frequencies w/ in document\r\n \r\n else: \r\n \r\n print('Must not include labels')\r\n return False\r\n \r\n def label_raw_dat(self, text1, class_):\r\n \"\"\"Transforms/assigns data numeric labels corresponding to string class labels\r\n Returns pandas dataframe w/ data mapped to corresponding class label\"\"\"\r\n \r\n txt = pan.read_csv(text1, sep = '\\n', names = ['Text', 'Label']) #Import and reads file\r\n \r\n if class_ == '+': #Positive class\r\n txt['Label'] = 1 #Assigns corresponding label\r\n \r\n elif class_ == '-': #Negative class\r\n txt['Label'] = 0 #Assigns corresponding label\r\n \r\n return txt \r\n \r\n def vectorizer(self, txt_dat):\r\n \"\"\"Tokenizes the data aka splits txt into list of words\"\"\"\r\n \r\n splt_words = [] #List of all words that appear in data\r\n \r\n for t in txt_dat:\r\n splt_words.extend(t.split()) #Adds words to splt_words\r\n \r\n return splt_words\r\n \r\n def unique_words(self, splt, txt_dat, n, m):\r\n \r\n ps = PorterStemmer() \r\n splt = [ps.stem(w) for w in splt] #List of root words\r\n \r\n stop_words_eng = set(stopwords.words('english')) #Set of english 'filler' words, non indicative words\r\n stop_words_esp = set(stopwords.words('spanish')) #Set of spanish 'filler' words, non indicative words\r\n \r\n stop_words = stop_words_eng.union(stop_words_esp) #Concatinates/united set of 'filler' words\r\n \r\n unique_words = {w for w in splt if w not in 
stop_words} #List of unique words (no duplicates/repetitions)\r\n        \r\n        list_unq = []\r\n        \r\n        #print('unique words', unique_words)\r\n        \r\n        for key in unique_words:\r\n            \r\n            \r\n            doc_freq = splt.count(key)\r\n            \r\n            #print('key', key, 'count', doc_freq)\r\n            \r\n            if n <= doc_freq <= m:\r\n                \r\n                list_unq.append(key)\r\n\r\n            else:\r\n                continue\r\n        \r\n        #print('Unique words dictionary', unq_w_dic)\r\n        \r\n        #print('List of unique words', list_unq)\r\n        \r\n        \r\n        return list_unq\r\n    \r\n    def coder(self, dic_keys):\r\n        \r\n        incode = {}\r\n        i = 0\r\n        \r\n        for key in dic_keys:\r\n            incode[key] = i\r\n            i += 1 #Increment so each word gets its own column; without this every word maps to column 0\r\n        \r\n        return incode\r\n    \r\n    def elem_freq(self, txt_dat, unique_words, sparse_index):\r\n        \"\"\"Converts data into unigram form - sparse matrix where the indices of the words\r\n        are indicated by the parameter sparse_index\"\"\"\r\n        \r\n        print('in')\r\n        \r\n        doc_frq = dict()\r\n        \r\n        print('Length unique words', len(unique_words))\r\n        \r\n        b_of_w = np.zeros((txt_dat.shape[0], len(unique_words))) #Initialize sparse matrix\r\n        \r\n        ps = PorterStemmer()\r\n        txt_dat = [[ps.stem(w) for w in txt.split()] for txt in txt_dat] #Convert input data into matrix of list of root words\r\n        \r\n        i = 0 #Index of row in sparse matrix b_of_w\r\n        \r\n        for txt in txt_dat: #Iterate over data\r\n            for key in unique_words: #Iterate over key words\r\n                #ls = re.escape(str(key))\r\n                #print('txt', txt, 'key', key)\r\n                #count = len(re.findall(r'\\A'+ re.escape(str(key)) + '.*\\b', txt))\r\n                if key in txt:\r\n                    #count = txt.count(key) #Count how many times key word appears in document txt\r\n                    index = sparse_index[key] #Index of key word in sparse matrix b_of_w\r\n                    b_of_w[i, index] = 1 #Mark presence of key word in sparse matrix\r\n                else:\r\n                    index = sparse_index[key] #Index of key word in sparse matrix b_of_w\r\n                    b_of_w[i, index] = 0 #Explicitly leave absent key words at 0\r\n            \r\n            i += 1 #Increment index\r\n        \r\n        print(\"\")\r\n        print('bag of words', b_of_w, len(b_of_w))\r\n        \r\n        return b_of_w\r\n    \r\n    \r\n    def elem_unigram(self, txt_dat, n, m, if_FinalTrain = False, if_TestSet = False):\r\n        \"\"\"Converts input data into proper unigram form depending on what data type\r\n        the input data is\"\"\"\r\n        \r\n        txt_dat, txt_lab, tst_dat, tst_lab = self.proprocessing()\r\n        \r\n        \r\n        split = self.vectorizer(txt_dat)\r\n        \r\n        \r\n        if not if_FinalTrain and not if_TestSet:\r\n            unq_list = self.unique_words(split, txt_dat, n, m)\r\n            \r\n            train_unigram = self.elem_freq(txt_dat, unq_list, self.coder(unq_list))\r\n            \r\n            return train_unigram\r\n        \r\n        if if_FinalTrain:\r\n            self.unq_list = self.unique_words(split, txt_dat, n, m)\r\n            self.decoder_tool = self.coder(self.unq_list)\r\n            self.unigram = self.elem_freq(txt_dat, self.unq_list, self.decoder_tool)\r\n            print('Pre fitting')\r\n            self.r_ = LogisticRegression(C = 100)\r\n            self.r_.fit(self.unigram, txt_lab)\r\n            print('Post fitting')\r\n            \r\n\r\n            #return self.unigram\r\n        \r\n        if if_TestSet:\r\n            print('Test')\r\n            test_unigram = self.elem_freq(tst_dat, self.unq_list, self.decoder_tool)\r\n            prediction = self.r_.predict(test_unigram)\r\n            print('Accuracy score!', accuracy_score(prediction, tst_lab))\r\n            \r\n            return test_unigram\r\n    \r\n    \r\ndef main():\r\n    \r\n    run_ = NLP_(\"rt-polarity.pos.txt\", '+', \"rt-polarity.neg.txt\", '-', False)\r\n    run_.elem_unigram(None, 50, 700, True, True)\r\n    \r\nmain()\r\n " }, { "alpha_fraction": 0.6597937941551208, "alphanum_fraction": 0.6872852444648743, "avg_line_length": 21.33333396911621, "blob_id": "717b1fb249b68a2a100f5f95b3a20330f94b6", "content_id":
"46b1fc245c6361bcb7c7dc55533272a96369fc24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 873, "license_type": "no_license", "max_line_length": 52, "num_lines": 39, "path": "/StandardizeEx.py", "repo_name": "daniellapombo/ML379", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 12 23:57:28 2019\r\n\r\n@author: danie\r\n\"\"\"\r\n\r\nfrom sklearn import datasets\r\nimport numpy as np\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\niris = datasets.load_iris()\r\nX = iris.data[:, [2, 3]]\r\ny = iris.target\r\n\r\nprint('Class labels:', np.unique(y))\r\n\r\n\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(\r\n X, y, test_size=0.3, random_state=0)\r\n\r\nprint(X_train.shape)\r\nprint(y_train.shape)\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\nsc = StandardScaler()\r\nsc.fit(X_train)\r\nprint(\"X_Train\", X_train.shape, X_train.size)\r\nprint()\r\nX_train_std = sc.transform(X_train)\r\nX_test_std = sc.transform(X_test)\r\n\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\nlr = LogisticRegression(C=1000.0, random_state=0)\r\nlr.fit(X_train_std, y_train)\r\n\r\n" }, { "alpha_fraction": 0.526324987411499, "alphanum_fraction": 0.5390515923500061, "avg_line_length": 27.577320098876953, "blob_id": "4edef472485357e7e88b76a7f0e97e4136af4f1e", "content_id": "340c22e2c1ca996590f123e7b61fc82612e9a05a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5736, "license_type": "no_license", "max_line_length": 117, "num_lines": 194, "path": "/hw1.py", "repo_name": "daniellapombo/ML379", "src_encoding": "UTF-8", "text": "#https://www.kaggle.com/c/titanic\r\n#import NumPy\r\n#load dataset into NumPy array and then try to ID the attributes that will lead to the passengers to surive or die\r\n# as long as make resonable attempt to solve it will do fine, will not be grades on completion of solving the problem\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pan\r\n\r\n#conda update pandas\r\n\r\n\r\nti_file =pan.read_csv(\"C:\\\\Users\\\\danie\\\\Documents\\\\School\\\\CS\\\\COMP 379\\Hw\\\\train.csv\")\r\ncol = list(ti_file.columns)\r\nprint(col)\r\n\r\ntiDat = ti_file.iloc[:, [0,1,2,4,5,6,7,9]]\r\nup_col = list(tiDat.columns)\r\n#print(up_col)\r\n#['PassengerId', 'Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']\r\n\r\ndef stats():\r\n dat = tiDat.groupby(['Survived', 'Pclass', 'Sex'])['Survived'].count()\r\n print(dat)\r\n \r\n \r\n tisum = tiDat['Survived'].sum()\r\n print(\"Total passengers survived\", tisum)\r\n\r\n#not Survived column is an integer\r\ndef scat():\r\n print(\"\")\r\n print(\"Blue survival, red is death\")\r\n\r\n x = tiDat['PassengerId']\r\n y = tiDat['Pclass']\r\n color = ('red', 'blue')\r\n groups = (int(0), int(1))\r\n plt.scatter(x, y, marker = 'o', label = groups, color = color)\r\n plt.xlabel(\"PassengerId\")\r\n plt.ylabel(\"Pclass\")\r\n plt.show()\r\n \r\n\r\n x = tiDat['Pclass']\r\n y = tiDat['Fare']\r\n color = ('red', 'blue')\r\n groups = (int(0), int(1))\r\n plt.scatter(x, y, marker = 'o', label = groups, color = color)\r\n plt.xlabel(\"Pclass\")\r\n plt.ylabel(\"Fare\")\r\n plt.show()\r\n\r\n x = tiDat['PassengerId']\r\n y = tiDat['Age']\r\n color = ('red', 'blue')\r\n groups = (int(0), int(1))\r\n plt.scatter(x, y, marker = 'o', label = groups, color = color)\r\n 
plt.xlabel(\"Passenger\")\r\n plt.ylabel(\"Age\")\r\n plt.show()\r\n \r\n \r\n x = tiDat['Age']\r\n y = tiDat['Fare']\r\n color = ('red', 'blue')\r\n groups = (int(0), int(1))\r\n plt.scatter(x, y, marker = 'o', label = groups, color = color)\r\n #plt.scatter(x, y, marker = 'o', label = 'Survived')\r\n #plt.title(\"Pclass\", \"vs. survived\")\r\n plt.xlabel(\"Age\")\r\n plt.ylabel(\"Fare\")\r\n plt.show()\r\n \r\n x = tiDat['Sex']\r\n y = tiDat['Fare']\r\n color = ('red', 'blue')\r\n groups = (int(0), int(1))\r\n plt.scatter(x, y, marker = 'o', label = groups, color = color)\r\n #plt.scatter(x, y, marker = 'o', label = 'Survived')\r\n #plt.title(\"Pclass\", \"vs. survived\")\r\n plt.xlabel(\"Sex\")\r\n plt.ylabel(\"Fare\")\r\n plt.show()\r\n \r\n#Print head of the input data\r\n#print(tiDat.head())\r\n\r\nlbls = []\r\nfor t in tiDat.iloc[:,1]:\r\n lbls.append(np.array(t))\r\n\r\nconvert = tiDat.iloc[:,2:]\r\n#FEMALE IS 0 and MALE IS 1\r\nconvert.loc[convert['Sex'] == 'male', 'Sex'] = 0\r\nconvert.loc[convert['Sex'] == 'female', 'Sex'] = 1\r\n#print(convert.head())\r\ncolP = list(convert.columns)\r\n#['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']\r\n\r\nsz = convert.count().max()\r\n\r\ntraining = []\r\n#['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']\r\nfor k in range(sz):\r\n training.append(np.array(convert.iloc[k,:]))\r\n\r\nclass Perceptron:\r\n def __init__(self, tr_inpt,epoch, Lr, labels, si):\r\n self.epoch = epoch\r\n self.tr_inpt = tr_inpt\r\n self.sz = self.tr_inpt.shape[1]\r\n #.shape returns a tuple and I want only the element in the 0 position of that tuple\r\n self.w = self.weights(self.sz)\r\n self.Lr = 0.001\r\n self.labels = labels\r\n\r\n def z_input(self, x):\r\n #generate dot product between w and features x\r\n return np.dot(self.w[1:], x) + self.w[0]\r\n # return np.dot(np.transpose(self.w),x)\r\n \r\n \r\n def weights(self, sz):\r\n #where sz is the size of x (number of x)\r\n self.w = np.random.random(self.sz+1)\r\n #random.randfl ? 
generate float of random weight\r\n        return self.w\r\n    \r\n    def predict(self, z):\r\n        if z >= 0:\r\n            return 1\r\n        else:\r\n            return 0\r\n    \r\n    def fit(self, test):\r\n        num_rt = []\r\n        count = 0\r\n        for m in range(self.epoch):\r\n            right = 0\r\n            update = 0\r\n            for k in range(self.tr_inpt.shape[0]): #iterate over the rows of the training data\r\n                z = self.z_input(self.tr_inpt[k])\r\n                prediction = self.predict(z)\r\n                target = self.labels[k]\r\n                #Perceptron learning rule: w <- w + Lr*(target - prediction)*x\r\n                error = target - prediction\r\n                dw = self.Lr*error*self.tr_inpt[k]\r\n                #print(dw) #debug: per-sample weight update\r\n                self.w[1:] += dw\r\n                self.w[0] += self.Lr*error\r\n                count += 1\r\n                if prediction == target:\r\n                    right += 1\r\n            \r\n            num_rt.append(right)\r\n        accurate_percent = (sum(num_rt)/count)*100\r\n        print(\"Num right\", num_rt)\r\n        print(\"Count\", count)\r\n        if test:\r\n            test_result = []\r\n            for k in range(self.tr_inpt.shape[0]):\r\n                z = self.z_input(self.tr_inpt[k]) #recompute the net input for this sample instead of reusing a stale z\r\n                prediction = self.predict(z)\r\n                test_result.append(prediction)\r\n            return test_result\r\n        else:\r\n            return accurate_percent\r\n    \r\n\r\ntr1 = []\r\nlb1 = []\r\n\r\n#Create split training data\r\nfor j in range(1, sz, 100):\r\n    tr1.append(training[j])\r\n    lb1.append(lbls[j])\r\n\r\n#Convert input data into numpy array that is 2d\r\n#Make the training data into numpy arrays\r\nt1 = np.array(tr1)\r\nl1 = np.array(lb1)\r\n\r\ndef main():\r\n    #Run the Graphs \r\n\r\n    #def __init__(self, tr_inpt,epoch, Lr, labels, si)\r\n    p = Perceptron(t1, 100, 0.001, l1, sz)\r\n    print(\"Accuracy\", (p.fit(False)), \"%\")\r\n\r\n\r\n\r\nmain()" }, { "alpha_fraction": 0.5707652568817139, "alphanum_fraction": 0.5800520181655884, "avg_line_length": 34.32432556152344, "blob_id": "39a1d3295a43c08a65fcd0a687efde08e504729e", "content_id": "5c2187a568d4eaa6a029ac93af050907d9898dc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5384, "license_type": "no_license", "max_line_length": 163, "num_lines": 148, "path": "/Trying.py", "repo_name": "daniellapombo/ML379", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Oct 13 08:34:15 2019\r\n\r\n@author: danie\r\n\"\"\"\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.svm import SVC\r\n#from mlxtend.plotting import plot_decision_regions\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pan\r\n\r\n#Train - fit() , partialfit() - partial training do not do all the training at once\r\ndef run_LinReg(dat, lab, D, dLab, tst, tstLab, graph_decision = False):\r\n    #L = LogisticRegression(C = 10, random_state = 1)\r\n    L = LogisticRegression()\r\n    L.fit(dat, lab)\r\n    if graph_decision:\r\n        plot_decision_regions(dat, lab, clf = L) #L is the fitted LogisticRegression; requires the mlxtend import above to be uncommented\r\n        plt.xlabel(\"Passenger Sex & Pclass\")\r\n        plt.ylabel(\"Survived\")\r\n        plt.legend(loc = \"upper right\")\r\n        plt.show()\r\n    Lpred = L.predict(D)\r\n    score = accuracy_score(Lpred, dLab)\r\n    print(\"Logistic Regression Accuracy:\", score)\r\n\r\nclass TitanicData():\r\n    def __init__(self, fileName):\r\n        self.fileName = fileName #File name as string\r\n        \r\n        self.import_clean_TitanicData() #Executes uploading & cleaning of the data\r\n        \r\n    def import_clean_TitanicData(self):\r\n        ti_file =pan.read_csv(self.fileName)\r\n        \r\n        \"\"\"col
= list(ti_file.columns) \r\n ['PassengerId', 'Survived', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp', \r\n 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked']\"\"\"\r\n \r\n self.tiDat = ti_file[['PassengerId', 'Survived', 'Pclass','Sex', 'Age', 'SibSp', 'Parch', 'Fare']]\r\n \r\n \r\n convert = self.tiDat.iloc[:,1:7] #Creates new data frame w/ features ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']\r\n\r\n #Change strings to binary integer counterparts\r\n #FEMALE IS 0 and MALE IS 1\r\n \r\n convert.loc[convert['Sex'] == 'male', 'Sex'] = 0\r\n convert.loc[convert['Sex'] == 'female', 'Sex'] = 1\r\n \r\n \"\"\"colP = list(convert.columns)\r\n ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch']\"\"\"\r\n \r\n #Convert nan values to mean value of given column\r\n \r\n #Find mean value of each column\r\n class_avg = convert[\"Pclass\"].mean()\r\n sex_avg = convert['Sex'].mean()\r\n age_avg = convert['Age'].mean()\r\n sib_avg = convert['SibSp'].mean()\r\n parch_avg = convert['Parch'].mean()\r\n\r\n \r\n #Replace nan value w/ mean values\r\n convert[\"Pclass\"].fillna(class_avg, inplace = True)\r\n convert[\"Sex\"].fillna(sex_avg, inplace = True)\r\n convert[\"Age\"].fillna(age_avg, inplace = True)\r\n convert[\"SibSp\"].fillna(sib_avg, inplace = True)\r\n convert[\"Parch\"].fillna(parch_avg, inplace = True)\r\n \r\n \r\n self.size = convert.count().max() #Find the maximum length of column\r\n \r\n wholeData = convert #Storage cleaned and corrected all of training data w/out labels\r\n \r\n \r\n np.random.shuffle(wholeData.values) #Makes the selection of samples random for creating training data set\r\n #print(wholeData)\r\n \r\n train = [] #Initalize list that will store only a fragement of the training data\r\n\r\n train = wholeData.sample(frac=0.7)\r\n\r\n test = wholeData.loc[~wholeData.index.isin(train.index)] #Take the rest of wholeData that was Not used in train and uses those unuses values to create test\r\n \r\n \r\n #print(train.columns.values) #Prints the values as nparray format\r\n \r\n train_labels = train[\"Survived\"] #Extract training labels and create new np.dataFrame train_labels\r\n \r\n train.drop(\"Survived\", inplace=True, axis=1)\r\n \r\n test_labels = test[\"Survived\"] #Extract testing labels and creates new np.dataFrame test_labels\r\n \r\n test.drop(\"Survived\" , inplace=True, axis=1)\r\n \r\n \r\n dev = test.iloc[0:test.shape[0]//2, :]\r\n \r\n dev_labels = test_labels.iloc[0:test_labels.shape[0]//2]\r\n \r\n test = test.iloc[test.shape[0]//2:, :]\r\n \r\n test_labels = test_labels.iloc[(test_labels.shape[0]//2):]\r\n #print(train)\r\n \r\n train = np.array(train['Sex'])\r\n \r\n train_labels = np.array(train_labels)\r\n \r\n dev = np.array(dev['Sex'])\r\n \r\n dev_labels = np.array(dev_labels)\r\n \r\n test = np.array(test['Sex'])\r\n \r\n test_labels = np.array(test_labels)\r\n\r\n stand = StandardScaler()\r\n stand.fit(train.reshape(-1,1)) #JUSt make sure to reshape w/in or before the fit function\r\n \r\n train_std = stand.transform(train.reshape(-1,1))\r\n #Dont need to do standardization for 0s and 1s cuz its only 0 and 1\r\n# \r\n# stand.fit(test)\r\n# \r\n# test_std = stand.transform(test)\r\n#\r\n#\r\n# stand.fit(dev)\r\n# \r\n# dev_std = stand.transform(dev)\r\n \r\n \r\n #return(train_std, train_labels, dev_std, dev_labels, test_std, test_labels)\r\n \r\n \r\ndef main():\r\n Tipas = TitanicData(\"train.csv\")\r\n passengers = Tipas.import_clean_TitanicData()\r\n #run_LinReg(passengers[0], passengers[1], passengers[2], passengers[3], passengers[4], passengers[5], 
False)\r\n\r\nmain()\r\n " } ]
9
Cre-Lis/Pyton_01
https://github.com/Cre-Lis/Pyton_01
849f02927c77aab66a12524a766cdb17ff025647
01b62b028363fae7e0965e9186ab444b9b761864
2e5da502940b12ce763a6f0fb9b83bc5c79596b3
refs/heads/main
2023-01-09T15:08:15.890882
2020-11-12T10:36:32
2020-11-12T10:36:32
312,192,435
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.44117647409439087, "alphanum_fraction": 0.47058823704719543, "avg_line_length": 13.5, "blob_id": "440a157a2afa85984793b068ec598856c5108a4a", "content_id": "705d7a36cd532f0c5b5037563d0980855adf099f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 80, "license_type": "no_license", "max_line_length": 22, "num_lines": 4, "path": "/string_1.py", "repo_name": "Cre-Lis/Pyton_01", "src_encoding": "UTF-8", "text": "s = '' # пустая строка\n\nfor x in range (10):\n print(x)\n\n\n\n\n\n \n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 11.25, "blob_id": "516fc44b02e8d3671c775e11b2029f97366491ae", "content_id": "b48c4821f676ab5033e2209b54cf15878c55ba7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 147, "license_type": "no_license", "max_line_length": 30, "num_lines": 12, "path": "/curcle_01.py", "repo_name": "Cre-Lis/Pyton_01", "src_encoding": "UTF-8", "text": "import turtle\nturtle.reset()\nturtle.speed(0)\nturtle.hideturtle()\n\n\n\n \n\nfor y in range (360):\n turtle.circle(150,360,360)\n turtle.right(1)\n" }, { "alpha_fraction": 0.3071160912513733, "alphanum_fraction": 0.36329588294029236, "avg_line_length": 26.55555534362793, "blob_id": "a6293f02b2067a882e4eb44e222c7cfaa5ba0088", "content_id": "3cb962a4e4707af42ddc3b123d60fc29f5c1a2fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 280, "license_type": "no_license", "max_line_length": 47, "num_lines": 9, "path": "/Rebus_05.py", "repo_name": "Cre-Lis/Pyton_01", "src_encoding": "UTF-8", "text": "L = 1000 # глубина поиска\r\nfor x in range (-L,L+1):\r\n print(x)\r\n for y in range (-L,L+1):\r\n for z in range (0,L+1):\r\n\r\n #if( x**3 + y**3 + z**3 == 10 ):\r\n if( x*x*x + y*y*y + z*z*z == 10 ):\r\n print(x,y,z)\r\n \r\n" }, { "alpha_fraction": 0.650337815284729, "alphanum_fraction": 0.6993243098258972, "avg_line_length": 41.21428680419922, "blob_id": "2ac7b7a2a84c6d5c71d84a6391cee3e14bc6211d", "content_id": "9605a76fef626ac9a829eaa7ee9200a68e030b5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2391, "license_type": "no_license", "max_line_length": 139, "num_lines": 42, "path": "/timer.py", "repo_name": "Cre-Lis/Pyton_01", "src_encoding": "UTF-8", "text": "import time\nimport calendar # печать календарика\n\ncurrentTime = time.time()\n\nprint(\"Секунды с начала эпохи =\", currentTime) # Печатает абсолютное время, выраженное в секундах с начала эпохи\n # 1 января 1970, 00:00:00\n\n# time.sleep(2) # засыпаем на 2 секунды\ntime.sleep(0.1) # засыпаем на 0.1 секунду\n\n\ncurrentTime = time.time()\nprint(\"Секунды с начала эпохи =\", currentTime) # Печатает абсолютное время\n\n\n# local_time = time.ctime(currentTime) # может перевести во временной формат секунды\nlocal_time = time.ctime() # # может перевести во временной формат текущее время\nprint(\"Местное время:\", local_time) # Местное время: Sat Aug 29 09:51:02 2020\n\nresult = time.localtime(currentTime) # Разбирает абсолютное время, выраженное в секундах на части\n# result = time.localtime() # Разбирает текущую дату на части\nprint(\"год\\t\", result.tm_year)\nprint(\"месяц 1-12\\t\", result.tm_mon)\nprint(\"день 1-31\\t\", result.tm_mday)\nprint(\"час 0-23\\t\", result.tm_hour)\nprint(\"минуты 0-59\\t\", result.tm_min)\nprint(\"секунды 0-59\\t\", result.tm_sec)\nprint(\"день 
недели 0-6\\t\", result.tm_wday) # понедельник 0\nprint(\"день в году 1-366\\t\", result.tm_yday)\nprint(\"0, 1, -1 код DST\\t\", result.tm_isdst) # Целочисленный флаг tm_isdst для учета перехода на летнее время (daylight saving time, DST):\n # 1 – переход на летнее время учитывается, 0 – не учитывается, -1 – неизвестно\n\nt = (2019, 12, 7, 14, 30, 30, 5, 341, 0) # Можно собрать время из частей\nresult = time.asctime(t)\nprint(\"Результат:\", result)\n\ntime_string = \"21 August, 1966\" # Функция strptime() делает разбор строки python, в которой упоминается время и возвращает struct_time\nresult = time.strptime(time_string, \"%d %B, %Y\")\nprint(result)\n\ncalendar.prmonth(2019, 12)\n\n\n\n" }, { "alpha_fraction": 0.32692307233810425, "alphanum_fraction": 0.42307692766189575, "avg_line_length": 19, "blob_id": "0d2fd951243894f6974ce90d92a8a369dc0395a3", "content_id": "948624a7d89f0e78472b31b4fffbbcc8f359f09f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 52, "license_type": "no_license", "max_line_length": 26, "num_lines": 2, "path": "/Rebus_04.py", "repo_name": "Cre-Lis/Pyton_01", "src_encoding": "UTF-8", "text": "for x in range (-10,11,2):\r\n print(x)\r\n \r\n" }, { "alpha_fraction": 0.3407643437385559, "alphanum_fraction": 0.4904458522796631, "avg_line_length": 28.600000381469727, "blob_id": "c2f04a08a20ab03bfb088f65ce135df8a14e59d3", "content_id": "c11de94dc56229a8cb0b3065747f2a9b857d501d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 332, "license_type": "no_license", "max_line_length": 98, "num_lines": 10, "path": "/Rebus_01.py", "repo_name": "Cre-Lis/Pyton_01", "src_encoding": "UTF-8", "text": "for k in range (10):\r\n for n in range (10):\r\n for i in range (10):\r\n for g in range (10):\r\n for a in range (10):\r\n for y in range (10):\r\n if( (k*10000 + n*1000 + i*100 + g*10 + a*1)*3 == n*10000 + a*1000 + y*100 + k*10 + a*1 ):\r\n print(k,n,i,g,a,y)\r\n\r\n# Пароль на сообщество 1234 \r\n" }, { "alpha_fraction": 0.5584725737571716, "alphanum_fraction": 0.5966587066650391, "avg_line_length": 17.045454025268555, "blob_id": "9baaaaa6954e7a216c8166c991bde851ab3159af", "content_id": "ccc29691be60dbfb199eb9ecc648de47d674500b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 439, "license_type": "no_license", "max_line_length": 48, "num_lines": 22, "path": "/square.py", "repo_name": "Cre-Lis/Pyton_01", "src_encoding": "UTF-8", "text": "import turtle\r\nturtle.reset()\r\nturtle.speed(0)\r\nturtle.hideturtle()\r\n\r\n\r\ndef draw_square(a): # Рисует квадрат сторона \r\n #turtle.down()\r\n turtle.forward(a) \r\n turtle.right(90) \r\n turtle.forward(a)\r\n turtle.right(90)\r\n turtle.forward(a)\r\n turtle.right(90)\r\n turtle.forward(a)\r\n turtle.right(90)\r\n #turtle.up()\r\n\r\n \r\nfor y in range (360):\r\n draw_square(200)\r\n turtle.right(1)\r\n" }, { "alpha_fraction": 0.6283618807792664, "alphanum_fraction": 0.6503667235374451, "avg_line_length": 23.5625, "blob_id": "082080e4693f799860c6a5427f8d4a2c39faf11f", "content_id": "26d241467fee54ac26c3d92d1184258d18727456", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 584, "license_type": "no_license", "max_line_length": 84, "num_lines": 16, "path": "/Comp_Var_02.py", "repo_name": "Cre-Lis/Pyton_01", "src_encoding": "UTF-8", "text": "A = {\"Щука\", \"Щука\", \"Карась\", \"Окунь\"} # Множество содержит 
повторяющиеся элементы\r\nprint(A)\r\nprint(len(A))\r\n\r\nA = {1,2,3,4,5,6} # Множество не содержит повторяющиеся элементы\r\nprint(A)\r\nprint(len(A))\r\n\r\nB = {6,7,8} # Множество не содержит повторяющиеся элементы\r\nprint(B)\r\nprint(len(B))\r\n\r\nprint(A | B) # Объединение множеств\r\nprint(len(A | B))\r\nprint(A & B) # Пересечение множеств\r\nprint(len(A & B))\r\n" }, { "alpha_fraction": 0.529411792755127, "alphanum_fraction": 0.5568627715110779, "avg_line_length": 13.6875, "blob_id": "91c311d8f2889705f23f39fbc703b25f096bbdbb", "content_id": "704a01bc025689ed179ec3d4453cbb98afa6e7db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 318, "license_type": "no_license", "max_line_length": 72, "num_lines": 16, "path": "/Comp_Var_01.py", "repo_name": "Cre-Lis/Pyton_01", "src_encoding": "UTF-8", "text": "a = 4\r\nb = 2\r\nc = 1\r\nd = 4\r\ne = 5\r\nf = 6\r\n\r\n\r\nComp_var = {a,b,c,d,e,f} # Множество не содержит повторяющиеся элементы\r\nprint(Comp_var)\r\nprint(len(Comp_var))\r\n\r\nif ( len(Comp_var) == 6 ):\r\n print(\"Все разные\")\r\nelse:\r\n print(\"Есть одинаковые\") \r\n" }, { "alpha_fraction": 0.4609929025173187, "alphanum_fraction": 0.4903748631477356, "avg_line_length": 20.44186019897461, "blob_id": "e27ae10a20fae8b1953782810a8780652d6784de", "content_id": "0ae2ba7d64d9a2f8c21c6054770adc476568de8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1096, "license_type": "no_license", "max_line_length": 74, "num_lines": 43, "path": "/test3.py", "repo_name": "Cre-Lis/Pyton_01", "src_encoding": "UTF-8", "text": "import turtle\r\nturtle.reset()\r\n\r\ndef draw_square(a, pen, brush): # Рисует квадрат сторона, перо, кисть\r\n\r\n turtle.color(pen, brush) # цвет пера , цвет заливки\r\n turtle.down();\r\n turtle.begin_fill();\r\n turtle.forward(a); \r\n turtle.right(90); \r\n turtle.forward(a)\r\n turtle.right(90)\r\n turtle.forward(a)\r\n turtle.right(90)\r\n turtle.forward(a)\r\n turtle.right(90)\r\n turtle.end_fill()\r\n turtle.up()\r\n\r\nfill = 0 # переключатель типа заливки\r\nPen = \"black\" # цвет пера\r\n\r\n\r\nfor y in range (0 , 320, 40):\r\n\r\n if fill == 0:\r\n fill = 1\r\n Brush = \"red\"\r\n else:\r\n fill = 0\r\n Brush = \"white\"\r\n \r\n for x in range (0 , 320, 40):\r\n \r\n turtle.goto(x,y) \r\n draw_square(30, Pen, Brush) # Рисует квадрат сторона, перо, кисть\r\n \r\n if fill == 0:\r\n fill = 1\r\n Brush = \"red\"\r\n else:\r\n fill = 0\r\n Brush = \"white\"\r\n\r\n\r\n\r\n\r\n\r\n \r\n \r\n" }, { "alpha_fraction": 0.5384615659713745, "alphanum_fraction": 0.6153846383094788, "avg_line_length": 22, "blob_id": "00d871744f31a45b83f5cf411d16cf3fb08c0998", "content_id": "e3baa94c4ea69927e92b8f9e24454b5945ce001e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 78, "license_type": "no_license", "max_line_length": 23, "num_lines": 3, "path": "/range_1.py", "repo_name": "Cre-Lis/Pyton_01", "src_encoding": "UTF-8", "text": "\nprint(list(range(10)))\nprint(tuple(range(10)))\nprint(set(range(10)))\n\n\n\n \n" }, { "alpha_fraction": 0.5384615659713745, "alphanum_fraction": 0.5384615659713745, "avg_line_length": 11.5, "blob_id": "af3cc4532122b9711544e0101775cd03d15fe47a", "content_id": "0ade5a3e30e3943fc51e1bde10be8cffd5637477", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26, "license_type": "no_license", "max_line_length": 14, "num_lines": 2, 
"path": "/test4.py", "repo_name": "Cre-Lis/Pyton_01", "src_encoding": "UTF-8", "text": "for i in 'mo':\n print(i) \n" }, { "alpha_fraction": 0.44999998807907104, "alphanum_fraction": 0.550000011920929, "avg_line_length": 2.3333332538604736, "blob_id": "7d91960a97869cd7737d7f6544120d1f5822c92c", "content_id": "13095a18c1d33e4cfb937b6710d464c2e3912cd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 10, "num_lines": 6, "path": "/README.md", "repo_name": "Cre-Lis/Pyton_01", "src_encoding": "UTF-8", "text": "# Pyton_01\n\n\n\n\nRead\n" }, { "alpha_fraction": 0.31908831000328064, "alphanum_fraction": 0.4415954351425171, "avg_line_length": 29.18181800842285, "blob_id": "794872256362c861a828a479382bd7a26981dd0d", "content_id": "3fc72eacac42e7614b5aa128354604425031c968", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 369, "license_type": "no_license", "max_line_length": 92, "num_lines": 11, "path": "/Rebus_03.py", "repo_name": "Cre-Lis/Pyton_01", "src_encoding": "UTF-8", "text": "for a in range (10):\r\n for b in range (10):\r\n for c in range (10):\r\n for d in range (10):\r\n for e in range (10):\r\n for f in range (10):\r\n if( (a+b+c+d+e+f == 13) and\r\n ( (a*100000 + b*10000 + c*1000 + d*100 + e*10 + f*1 + 1) % 13 == 0 ) ):\r\n print(a,b,c,d,e,f)\r\n\r\n# Пароль на сообщество 1234 \r\n" }, { "alpha_fraction": 0.3378378450870514, "alphanum_fraction": 0.4189189076423645, "avg_line_length": 15, "blob_id": "2bd660b424e5ffee874b4a97b2adde55c1b0971e", "content_id": "735469e22d8e8eac45e5f81021b8a2df240b7ae5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 86, "license_type": "no_license", "max_line_length": 26, "num_lines": 4, "path": "/for_2.py", "repo_name": "Cre-Lis/Pyton_01", "src_encoding": "UTF-8", "text": "s = '' # пустая строка\n\nfor x in [ 1, 3, 456, 1]:\n print(x)\n\n\n\n\n\n \n" }, { "alpha_fraction": 0.4747474789619446, "alphanum_fraction": 0.47727271914482117, "avg_line_length": 16, "blob_id": "e04c2e4781a583941dce12a8ef9951ebda821aa0", "content_id": "fcec6976564b7d71994c6238d3013ccf7237bd2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 465, "license_type": "no_license", "max_line_length": 43, "num_lines": 23, "path": "/dict_01.py", "repo_name": "Cre-Lis/Pyton_01", "src_encoding": "UTF-8", "text": "my_dict = {\"собака\":\"dog\", \"кот\":\"cat\",\n \"лиса\":\"fox\",\n\"медведь\":\"bear\",\n \"слон\":\"elefant\",\n\"слон\":\"ele\",\n \"слон\":\"elefa\",\n \"кенгуру\":\"kenguru\",}\n\n\nwhile (1):\n\n print(\"введите слово:\")\n word = input()\n\n if word == \"стоп\":\n break\n\n if word in my_dict:\n print( my_dict[word] )\n print(\"\")\n else:\n print(\"слово не найдено\")\n print(\"\")\n\n\n\n\n\n" } ]
16
cldavis3burnsmcd/FE-EMS_Dump
https://github.com/cldavis3burnsmcd/FE-EMS_Dump
f6f0045848bc562c4afa4d6ba25028eed0da1d9c
013244b616d15d3bf5a3e202b6d74df62be9ce1a
fc68b9d2b7dc72df9229a3f60f88637691221247
refs/heads/master
2020-03-24T09:08:51.010595
2018-06-18T21:43:00
2018-06-18T21:43:00
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5220018029212952, "alphanum_fraction": 0.5361586809158325, "avg_line_length": 40.53932571411133, "blob_id": "1a2654b6effe408edad06cd2b0278abbdfa5311f", "content_id": "4ce1a799c421da815ad42df372b0a6d5bf73c561", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11090, "license_type": "no_license", "max_line_length": 202, "num_lines": 267, "path": "/EMS_Parser.py", "repo_name": "cldavis3burnsmcd/FE-EMS_Dump", "src_encoding": "UTF-8", "text": "import os\nimport time\nimport xlrd\nfrom tkinter import Tk\nfrom tkinter.filedialog import askopenfilename\n\ndef grab_rtu_list(worksheet):\n rtus_raw = worksheet.col(2)\n rtus = []\n for rtu in rtus_raw:\n if rtu.value != '':\n rtus.append(rtu.value)\n rtus.pop(0)\n rtus.sort()\n #print(rtus)\n return rtus\n\n\ndef status_parse(region, date, worksheet, rtus):\n rtu_dict = {}\n for rtu in rtus:\n rtu_dict[rtu] = []\n\n for i, entry in enumerate(worksheet.col(1)):\n if i:\n try:\n rtu_dict[entry.value].append(i)\n except:\n pass\n\n for i, rtu in enumerate(rtus):\n print('status {}/{}'.format(i+1, len(rtus)))\n # print(rtu)\n outfile_dir = 'Z:\\\\Clients\\\\TND\\\\FirstEnr\\\\82568_EtfScadaSupprt\\\\Design\\\\Substation Projects\\\\EMS MODEL SCREEN DUMPS\\\\' + region + '\\\\' + rtu + '\\\\'\n if not os.path.exists(outfile_dir):\n os.makedirs(outfile_dir)\n\n outfile_name = date + '_' + rtu + '_STATUS.csv'\n outfile = outfile_dir + outfile_name\n\n # print(rtu_dict[rtu])\n with open(outfile, 'w+') as output_file:\n output_file.write('STATION, RTU, TYPE_RTU, RTU_STATUS, PHYADR, EMS POINT, PRI SITE, SEC SITE2, SINVT, XINVT, MCD, CONCAT, CONV, ID_DEVICE (short), NAME_DEVICE (descriptive)\\n')\n for row in rtu_dict[rtu]:\n output_file.write('{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\\n'.format(\n worksheet.cell(row, 0).value,\n worksheet.cell(row, 1).value,\n worksheet.cell(row, 2).value,\n worksheet.cell(row, 3).value,\n worksheet.cell(row, 4).value,\n worksheet.cell(row, 5).value,\n worksheet.cell(row, 6).value,\n worksheet.cell(row, 7).value,\n worksheet.cell(row, 8).value,\n worksheet.cell(row, 9).value,\n worksheet.cell(row, 10).value,\n worksheet.cell(row, 11).value,\n worksheet.cell(row, 12).value,\n worksheet.cell(row, 13).value,\n worksheet.cell(row, 14).value,\n ))\n # print(rtu + ' completed')\n\n\ndef control_parse(region, date, worksheet, rtus):\n rtu_dict = {}\n for rtu in rtus:\n rtu_dict[rtu] = []\n\n for i, entry in enumerate(worksheet.col(1)):\n if i:\n try:\n rtu_dict[entry.value].append(i)\n except:\n pass\n\n for i, rtu in enumerate(rtus):\n print('control {}/{}'.format(i + 1, len(rtus)))\n outfile_dir = 'Z:\\\\Clients\\\\TND\\\\FirstEnr\\\\82568_EtfScadaSupprt\\\\Design\\\\Substation Projects\\\\EMS MODEL SCREEN DUMPS\\\\' + region + '\\\\' + rtu + '\\\\'\n if not os.path.exists(outfile_dir):\n os.makedirs(outfile_dir)\n\n outfile_name = date + '_' + rtu + '_CONTROL.csv'\n outfile = outfile_dir + outfile_name\n\n with open(outfile, 'w+') as output_file:\n output_file.write('STATION,RTU,TYPE_RTU,RTU CONTROL,CONTROL,PHYADR_RELAY,EMS CONTROL,ID_CTRL,CTRLFUNC,COMMAND,SEXP,OPTIME,WAIT,TIMEOUT,ID_DEVICE (short),NAME_DEVICE (descriptive)\\n')\n for row in rtu_dict[rtu]:\n output_file.write('{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\\n'.format(\n worksheet.cell(row, 0).value,\n worksheet.cell(row, 1).value,\n worksheet.cell(row, 2).value,\n worksheet.cell(row, 3).value,\n worksheet.cell(row, 4).value,\n worksheet.cell(row, 5).value,\n worksheet.cell(row, 
6).value,\n worksheet.cell(row, 7).value,\n worksheet.cell(row, 8).value,\n worksheet.cell(row, 9).value,\n worksheet.cell(row, 10).value,\n worksheet.cell(row, 11).value,\n worksheet.cell(row, 12).value,\n worksheet.cell(row, 13).value,\n worksheet.cell(row, 14).value,\n ))\n\n\ndef analog_parse(region, date, worksheet, rtus):\n rtu_dict = {}\n for rtu in rtus:\n rtu_dict[rtu] = []\n\n for i, entry in enumerate(worksheet.col(1)):\n if i:\n try:\n rtu_dict[entry.value].append(i)\n except:\n pass\n\n for i, rtu in enumerate(rtus):\n print('analog {}/{}'.format(i + 1, len(rtus)))\n outfile_dir = 'Z:\\\\Clients\\\\TND\\\\FirstEnr\\\\82568_EtfScadaSupprt\\\\Design\\\\Substation Projects\\\\EMS MODEL SCREEN DUMPS\\\\' + region + '\\\\' + rtu + '\\\\'\n if not os.path.exists(outfile_dir):\n os.makedirs(outfile_dir)\n\n outfile_name = date + '_' + rtu + '_ANALOG.csv'\n outfile = outfile_dir + outfile_name\n\n with open(outfile, 'w+') as output_file:\n output_file.write('STATION,RTU,TYPE_RTU,RTU ANALOG,PHYADR,EMS ANALOG,PRI SITE,SEC SITE2,loreas,hireas,RAW LOW,RAW HIGH,ENG LOW,ENG HIGH,NEGATE,ID_DEVICE (short),NAME_DEVICE (descriptive)\\n')\n for row in rtu_dict[rtu]:\n output_file.write('{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\\n'.format(\n worksheet.cell(row, 0).value,\n worksheet.cell(row, 1).value,\n worksheet.cell(row, 2).value,\n worksheet.cell(row, 3).value,\n worksheet.cell(row, 4).value,\n worksheet.cell(row, 5).value,\n worksheet.cell(row, 6).value,\n worksheet.cell(row, 7).value,\n worksheet.cell(row, 8).value,\n worksheet.cell(row, 9).value,\n worksheet.cell(row, 10).value,\n worksheet.cell(row, 11).value,\n worksheet.cell(row, 12).value,\n worksheet.cell(row, 13).value,\n worksheet.cell(row, 14).value,\n worksheet.cell(row, 15).value,\n worksheet.cell(row, 16).value,\n ))\n\n\ndef accum_parse(region, date, worksheet, rtus):\n rtu_dict = {}\n for rtu in rtus:\n rtu_dict[rtu] = []\n\n for i, entry in enumerate(worksheet.col(1)):\n if i:\n try:\n rtu_dict[entry.value].append(i)\n except:\n pass\n\n for i, rtu in enumerate(rtus):\n print('accumulator {}/{}'.format(i + 1, len(rtus)))\n outfile_dir = 'Z:\\\\Clients\\\\TND\\\\FirstEnr\\\\82568_EtfScadaSupprt\\\\Design\\\\Substation Projects\\\\EMS MODEL SCREEN DUMPS\\\\' + region + '\\\\' + rtu + '\\\\'\n if not os.path.exists(outfile_dir):\n os.makedirs(outfile_dir)\n\n outfile_name = date + '_' + rtu + '_ACCUM.csv'\n outfile = outfile_dir + outfile_name\n\n with open(outfile, 'w+') as output_file:\n output_file.write('STATION,RTU,TYPE_RTU,RTU ACCUMULATOR,PHYADR_PULSE,EMS ACCUMULATOR,PRI SITE,SEC SITE2,SCALE_PULSE,ID_DEVICE (short),NAME_DEVICE (descriptive)\\n')\n for row in rtu_dict[rtu]:\n output_file.write('{},{},{},{},{},{},{},{},{},{},{}\\n'.format(\n worksheet.cell(row, 0).value,\n worksheet.cell(row, 1).value,\n worksheet.cell(row, 2).value,\n worksheet.cell(row, 3).value,\n worksheet.cell(row, 4).value,\n worksheet.cell(row, 5).value,\n worksheet.cell(row, 6).value,\n worksheet.cell(row, 7).value,\n worksheet.cell(row, 8).value,\n worksheet.cell(row, 9).value,\n worksheet.cell(row, 10).value,\n ))\n\n\ndef anout_parse(region, date, worksheet, rtus):\n rtu_dict = {}\n for rtu in rtus:\n rtu_dict[rtu] = []\n\n for i, entry in enumerate(worksheet.col(1)):\n if i:\n try:\n rtu_dict[entry.value].append(i)\n except:\n pass\n\n for i, rtu in enumerate(rtus):\n print('analog out {}/{}'.format(i + 1, len(rtus)))\n outfile_dir = 'Z:\\\\Clients\\\\TND\\\\FirstEnr\\\\82568_EtfScadaSupprt\\\\Design\\\\Substation Projects\\\\EMS MODEL 
SCREEN DUMPS\\\\' + region + '\\\\' + rtu + '\\\\'\n        if not os.path.exists(outfile_dir):\n            os.makedirs(outfile_dir)\n\n        outfile_name = date + '_' + rtu + '_ANOUT.csv'\n        outfile = outfile_dir + outfile_name\n\n        with open(outfile, 'w+') as output_file:\n            # NOTE: the original reused the 11-column ACCUM header here even though only 8 fields are written below; the analog-output column names used instead are assumed\n            output_file.write('STATION,RTU,TYPE_RTU,RTU ANOUT,PHYADR,EMS ANOUT,PRI SITE,SEC SITE2\\n')\n            for row in rtu_dict[rtu]:\n                output_file.write('{},{},{},{},{},{},{},{}\\n'.format(\n                    worksheet.cell(row, 0).value,\n                    worksheet.cell(row, 1).value,\n                    worksheet.cell(row, 2).value,\n                    worksheet.cell(row, 3).value,\n                    worksheet.cell(row, 4).value,\n                    worksheet.cell(row, 5).value,\n                    worksheet.cell(row, 6).value,\n                    worksheet.cell(row, 7).value,\n                ))\n\n\ndef ems_parse(region, date, workbook, rtus):\n    status_parse(region, date, workbook.sheet_by_name('BMCD_STATUS'), rtus)\n    control_parse(region, date, workbook.sheet_by_name('BMCD_CONTROL'), rtus)\n    analog_parse(region, date, workbook.sheet_by_name('BMCD_ANALOG'), rtus)\n    accum_parse(region, date, workbook.sheet_by_name('BMCD_ACCUM'), rtus)\n    anout_parse(region, date, workbook.sheet_by_name('BMCD_ANOUT'), rtus)\n\n\nif __name__ == '__main__':\n    s_time = time.time()\n    Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing; withdraw() returns None, so nothing useful can be assigned from it\n    file_full_path = askopenfilename(title='Select EMS Dump Excel file') # show an \"Open\" dialog box and return the path to the selected file\n    # file_full_path = 'Z:/Clients/TND/FirstEnr/82568_EtfScadaSupprt/Design/Substation Projects/EMS MODEL SCREEN DUMPS/20180301 - SOUTH - SNAPSHOT - TELEMETRY CROSS-REF'\n    # file_full_path = 'C:/Users/machristiansen/Desktop/20180301 - SOUTH - SNAPSHOT - TELEMETRY CROSS-REF.xlsx'\n\n    #find where excel file name starts and grab the file name + date of EMS upload dump\n    fname_index = file_full_path.rfind('/')\n    filename = file_full_path[fname_index+1:]\n    dump_date = filename[:8]\n\n    #decide if east, west, or south\n    if filename[11:12] == 'E':\n        ems_region = 'EAST'\n    elif filename[11:12] == 'W':\n        ems_region = 'WEST'\n    else:\n        ems_region = 'SOUTH'\n\n    #load EMS dump excel workbook and create List of RTU names\n    print('Opening workbook...')\n    wbook = xlrd.open_workbook(file_full_path)\n    wsheet = wbook.sheet_by_index(0)\n    print('Grabbing RTU list...')\n    rtu_list = grab_rtu_list(wsheet)\n\n    #parse through dump file for each RTU\n    print('Beginning parse of spreadsheet...')\n    ems_parse(ems_region, dump_date, wbook, rtu_list)\n    f_time = time.time()\n    print('Elapsed seconds:', f_time - s_time)" } ]
1
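The four *_parse functions in the ems-parser record above differ only in their progress label, output-file suffix, CSV header, and the number of worksheet columns written per row. A minimal table-driven sketch of the same grouping-and-export logic, assuming xlrd-style worksheet objects; the sheet_parse name, BASE_DIR constant, and SHEETS table are illustrative additions, not part of the original script, and the headers are abbreviated here for brevity:

----
import os

# Assumed constants for illustration; the real script inlines the full
# output path and the full CSV headers at each call site.
BASE_DIR = 'Z:\\Clients\\TND\\FirstEnr\\82568_EtfScadaSupprt\\Design\\Substation Projects\\EMS MODEL SCREEN DUMPS'
SHEETS = {
    # kind: (CSV header, number of worksheet columns written per row)
    'ANALOG': ('STATION,RTU,TYPE_RTU,RTU ANALOG,PHYADR,...', 17),
    'ACCUM': ('STATION,RTU,TYPE_RTU,RTU ACCUMULATOR,PHYADR_PULSE,...', 11),
}


def sheet_parse(kind, region, date, worksheet, rtus):
    """Group rows by the RTU name in column 1, then write one CSV per RTU."""
    header, n_cols = SHEETS[kind]
    rows_by_rtu = {rtu: [] for rtu in rtus}
    for i, entry in enumerate(worksheet.col(1)):
        if i:  # skip the header row
            try:
                rows_by_rtu[entry.value].append(i)
            except KeyError:
                pass  # RTU not in the requested list
    for rtu, rows in rows_by_rtu.items():
        out_dir = os.path.join(BASE_DIR, region, rtu)
        os.makedirs(out_dir, exist_ok=True)
        out_path = os.path.join(out_dir, '{}_{}_{}.csv'.format(date, rtu, kind))
        with open(out_path, 'w') as out:
            out.write(header + '\n')
            for row in rows:
                out.write(','.join(str(worksheet.cell(row, c).value)
                                   for c in range(n_cols)) + '\n')
----

With one entry per sheet in the table, ems_parse reduces to a loop over (sheet name, kind) pairs, and a header/column-count mismatch like the one noted in anout_parse cannot creep in silently.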
kyclark/configcode
https://github.com/kyclark/configcode
c0b0cdea957f26ceadf0f1118836c8736ec3e135
ca060d5a6a00d64a277165302f90691067a9925e
3cd1052f5b288d15e88b24cf5132dbeeddf47148
refs/heads/master
2022-11-15T11:49:03.412872
2020-07-08T19:57:01
2020-07-08T19:57:01
277,911,360
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7419354915618896, "alphanum_fraction": 0.7419354915618896, "avg_line_length": 30, "blob_id": "77ac19d4c4416398158d876b02190abf5283552d", "content_id": "14be84124456a4c30afe4f93a99a9f3d448b6ba2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 62, "license_type": "permissive", "max_line_length": 55, "num_lines": 2, "path": "/dhall/Makefile", "repo_name": "kyclark/configcode", "src_encoding": "UTF-8", "text": "json:\n\tdhall-to-json --file config.dhall --output config.json\n" }, { "alpha_fraction": 0.7028571367263794, "alphanum_fraction": 0.7028571367263794, "avg_line_length": 28.08333396911621, "blob_id": "6acbd97d4db6ea7e7dddc689615730c62e7edd66", "content_id": "0684894f09b0387f954c72180a41112a1deaa2d0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 350, "license_type": "permissive", "max_line_length": 52, "num_lines": 12, "path": "/type/test.py", "repo_name": "kyclark/configcode", "src_encoding": "UTF-8", "text": "import algorithm\nfrom config import Config\nfrom typing import List\n\ndef test_config():\n conf = algorithm.config()\n assert conf\n assert type(conf) == Config\n assert type(conf.version) == str\n assert type(conf.algorithm_author) == list\n assert type(conf.algorithm_author_email) == list\n assert type(conf.write_betydb_csv) == bool\n\n" }, { "alpha_fraction": 0.7010752558708191, "alphanum_fraction": 0.7010752558708191, "avg_line_length": 24.83333396911621, "blob_id": "f157534a96413dc90cc8bc272cc91b6d4aaf81df", "content_id": "54707d31b735c1ebb3423dbd97d8e540469a0168", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 465, "license_type": "permissive", "max_line_length": 37, "num_lines": 18, "path": "/type/config.py", "repo_name": "kyclark/configcode", "src_encoding": "UTF-8", "text": "from typing import NamedTuple, List\n\n\nclass Config(NamedTuple):\n version: str\n algorithm_author: List[str]\n algorithm_author_email: List[str]\n algorithm_contributors: List[str]\n algorithm_name: str\n algorithm_description: str\n citation_author: str\n citation_title: str\n citation_year: str\n variable_names: List[str]\n variable_units: List[str]\n variable_labels: List[str]\n write_betydb_csv: bool\n write_geostreams_csv: bool\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.724252462387085, "avg_line_length": 29.100000381469727, "blob_id": "bcd67390d28d8f2eceb32f27e200834233a4d897", "content_id": "01721ae38cd47f74fe73dc7c687a208bcedc5418", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 301, "license_type": "permissive", "max_line_length": 63, "num_lines": 10, "path": "/type/use_type.py", "repo_name": "kyclark/configcode", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport algorithm\n\nconfig = algorithm.config()\ntmpl = '{:25} => {}'\nprint(tmpl.format('VERSION', config.version))\nprint(tmpl.format('AUTHOR', config.author))\nprint(tmpl.format('AUTHOR_EMAIL', config.author_email))\nprint(tmpl.format('WRITE_BETYDB_CSV', config.write_betydb_csv))\n" }, { "alpha_fraction": 0.7058823704719543, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 16, "blob_id": "b2e82fc2c3a68efc8ca84c93d956ad757de27f2b", "content_id": "dba9d03ccdefcd864987f5a247fe0d539cd39d8c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", 
"length_bytes": 85, "license_type": "permissive", "max_line_length": 51, "num_lines": 5, "path": "/testing/Makefile", "repo_name": "kyclark/configcode", "src_encoding": "UTF-8", "text": "test:\n\tpytest -xv test.py\n\njson:\n\tdhall-to-json --file meta.dhall --output meta.json\n" }, { "alpha_fraction": 0.7352941036224365, "alphanum_fraction": 0.75, "avg_line_length": 12.600000381469727, "blob_id": "bbe65b9be86700c96b7d843daa8dfd1e976df0be", "content_id": "1b5a1e3dd02351a5f6ca2909ba51dd3168437552", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 68, "license_type": "permissive", "max_line_length": 25, "num_lines": 5, "path": "/json1/main.py", "repo_name": "kyclark/configcode", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport algorithm\n\nprint(algorithm.config())\n" }, { "alpha_fraction": 0.3833162784576416, "alphanum_fraction": 0.42118731141090393, "avg_line_length": 45.52381134033203, "blob_id": "a8dbd3dca3a3ec04dd20c65034c32f5d80e566df", "content_id": "a6bac1759383dfa9aa8c067664c48a35bdeb2563", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1954, "license_type": "permissive", "max_line_length": 76, "num_lines": 42, "path": "/type/algorithm.py", "repo_name": "kyclark/configcode", "src_encoding": "UTF-8", "text": "from config import Config\n\n\ndef config() -> Config:\n return Config(version='1.0',\n algorithm_author=[\n 'Chris Schnaufer', 'Clairessa Brown', 'David Lebauer'\n ],\n algorithm_author_email=[\n '[email protected]',\n '[email protected]',\n '[email protected]'\n ],\n algorithm_contributors=[\"Jacob van der Leeuw\"],\n algorithm_name='Greenness Transformer',\n algorithm_description=(\n 'This algorithm performs a variety of '\n 'calculations using RGB pixels from images in order '\n 'to assess plant and crop health and growth'),\n citation_author='Clairessa Brown',\n citation_title='Woebbecke, D.M. 
et al',\n citation_year='2020',\n variable_names=[\n 'excess greenness index', 'green leaf index', 'cive',\n 'normalized difference index', 'excess red', 'exgr',\n 'combined indices 1', 'combined indices 2',\n 'vegetative index', 'normalized green-red difference',\n 'percent green'\n ],\n variable_units=[\n '[-510:510]', '[-1:1]', '[-255:255]', '[-127:129]',\n '[-255:255]', '[-255:332]', '[-1000:1000]',\n '[-1000:1000]', '[-255:255]', '[-255:255]', '[0:100]'\n ],\n variable_labels=[\n 'excess_greenness_index', 'green_leaf_index', 'cive',\n 'normalized_difference_index(pxarray)', 'excess_red',\n 'exgr', 'combined_indices_1', 'combined_indices_2',\n 'vegetative_index', 'ngrdi', 'percent_green'\n ],\n write_betydb_csv=True,\n write_geostreams_csv=True)\n" }, { "alpha_fraction": 0.6504854559898376, "alphanum_fraction": 0.6504854559898376, "avg_line_length": 21.88888931274414, "blob_id": "19af04c47ada7e297e39663d0adb399eb001537e", "content_id": "bdbe9975cb59ac0773d25d083c7f92e9e73693f5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 206, "license_type": "permissive", "max_line_length": 65, "num_lines": 9, "path": "/json2/algorithm.py", "repo_name": "kyclark/configcode", "src_encoding": "UTF-8", "text": "import json\nimport os\nfrom config import Config\n\n\ndef config() -> Config:\n file = os.path.join(os.path.dirname(__file__), 'config.json')\n with open(file) as fh:\n return Config(**json.load(fh))\n" }, { "alpha_fraction": 0.6190476417541504, "alphanum_fraction": 0.6190476417541504, "avg_line_length": 20, "blob_id": "3ada42517438fe439c40663d393fbcadf5ae8a70", "content_id": "7d799aa3d0e6a6b58e9e084f9b26aba6cce72240", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 168, "license_type": "permissive", "max_line_length": 65, "num_lines": 8, "path": "/json1/algorithm.py", "repo_name": "kyclark/configcode", "src_encoding": "UTF-8", "text": "import json\nimport os\n\n\ndef config() -> dict:\n file = os.path.join(os.path.dirname(__file__), 'config.json')\n with open(file) as fh:\n return json.load(fh)\n" }, { "alpha_fraction": 0.6962025165557861, "alphanum_fraction": 0.6962025165557861, "avg_line_length": 18.75, "blob_id": "ac86a8fe3a8dfc185586c66300813504bcae926f", "content_id": "c4c2ea9c81f092cfdee6abbec6b97286fec377d8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 158, "license_type": "permissive", "max_line_length": 35, "num_lines": 8, "path": "/testing/config.py", "repo_name": "kyclark/configcode", "src_encoding": "UTF-8", "text": "from typing import NamedTuple, List\n\n\nclass Config(NamedTuple):\n version: str\n author: List[str]\n author_email: List[str]\n write_betydb_csv: bool\n" }, { "alpha_fraction": 0.6352941393852234, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 33, "blob_id": "9e068a77fdcdc051cb44843172e7766d97d116b5", "content_id": "00c1847cc97bd839deb468a5791133a8c1bcf032", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 170, "license_type": "permissive", "max_line_length": 47, "num_lines": 5, "path": "/attr/algorithm.py", "repo_name": "kyclark/configcode", "src_encoding": "UTF-8", "text": "VERSION = '1.0'\nALGORITHM_AUTHOR = 'Chris Schnaufer, Ken Youens-Clark'\n# ALGORITHM_AUTHOR_EMAIL = '[email protected], [email protected]'\nALGORITHM_AUTHOR_EMAIL = ['[email protected]', '[email 
protected]']\nWRITE_BETYDB_CSV = True\n" }, { "alpha_fraction": 0.4884755313396454, "alphanum_fraction": 0.4896886348724365, "avg_line_length": 28.094118118286133, "blob_id": "9d8899ac7edbb208e797713596f6c82b9b3d5c28", "content_id": "762112ed2b2e56e1c449ddc9f0152655f2680411", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2473, "license_type": "permissive", "max_line_length": 77, "num_lines": 85, "path": "/testing/testing.py", "repo_name": "kyclark/configcode", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\"\"\"\nTest algorithm_rgb\n\"\"\"\n\nimport algorithm_rgb\nimport argparse\nimport osgeo.gdal as gdal\nimport numpy as np\nimport sys\n\n\n# --------------------------------------------------\ndef get_args():\n \"\"\"Get command-line arguments\"\"\"\n\n parser = argparse.ArgumentParser(\n description='Test algorithm',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('file',\n help='Input file(s)',\n metavar='FILE',\n type=argparse.FileType('r'),\n nargs='+')\n\n return parser.parse_args()\n\n\n# --------------------------------------------------\ndef main():\n \"\"\"Make a jazz noise here\"\"\"\n\n args = get_args()\n\n for fh in args.file:\n fh.close()\n run_test(fh.name)\n\n\n# --------------------------------------------------\ndef run_test(filename):\n \"\"\"Runs the extractor code using pixels from the file\n Args:\n filename(str): Path to image file\n Return:\n The result of calling the extractor's calculate() method\n Notes:\n Assumes the path passed in is valid. An error is reported if\n the file is not an image file.\n \"\"\"\n try:\n if fh := gdal.Open(filename):\n # Get the pixels and call the calculation\n pix = np.array(fh.ReadAsArray())\n calc_val = algorithm_rgb.calculate(np.rollaxis(pix, 0, 3))\n\n # Check for unsupported types\n if isinstance(calc_val, set):\n raise RuntimeError(\n \"A 'set' type of data was returned and isn't supported. 
\"\n \"Please use a list or a tuple instead\"\n )\n\n # Perform any type conversions to a printable string\n if isinstance(calc_val, str):\n print_val = calc_val\n else:\n # Check if the return is iterable and \n # comma separate the values if it is\n try:\n _ = iter(calc_val)\n print_val = \",\".join(map(str, calc_val))\n except Exception:\n print_val = str(calc_val)\n\n print(filename + \",\" + print_val)\n except Exception as ex:\n sys.stderr.write(\"Exception caught: \" + str(ex) + \"\\n\")\n sys.stderr.write(\" File: \" + filename + \"\\n\")\n\n\n# --------------------------------------------------\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.43020594120025635, "alphanum_fraction": 0.43135011196136475, "avg_line_length": 22.62162208557129, "blob_id": "98d274a609877bcb553305f31feca4c21059dff9", "content_id": "fec06bf3351edacde3de07f34d26e0870a6cc69a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 874, "license_type": "permissive", "max_line_length": 63, "num_lines": 37, "path": "/json1/use_json.py", "repo_name": "kyclark/configcode", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\"\"\" Use JSON config \"\"\"\n\nimport argparse\nimport json\n\n\n# --------------------------------------------------\ndef get_args():\n \"\"\"Get command-line arguments\"\"\"\n\n parser = argparse.ArgumentParser(\n description='Use JSON config',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('-f',\n '--file',\n help='JSON file',\n metavar='FILE',\n type=argparse.FileType('rt'),\n default='config.json')\n\n return parser.parse_args()\n\n\n# --------------------------------------------------\ndef main():\n \"\"\"Make a jazz noise here\"\"\"\n\n args = get_args()\n config = json.load(args.file)\n print(config)\n\n\n# --------------------------------------------------\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6112679839134216, "alphanum_fraction": 0.6309170126914978, "avg_line_length": 37.07628631591797, "blob_id": "93e737e287ba932d86ac45db948d353590db4562", "content_id": "c7bcf5dc4bb514303d40c8b5084d12b390357105", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "AsciiDoc", "length_bytes": 41427, "license_type": "permissive", "max_line_length": 564, "num_lines": 1088, "path": "/README.adoc", "repo_name": "kyclark/configcode", "src_encoding": "UTF-8", "text": "= Configuration and Testing Ideas for algorithm_rbg.py\n\nA review of the code and tests in https://github.com/AgPipeline/transformer-rgb-indices with suggestions for using `pytest` to create unit tests as well as using the `typing` module to create classes/types to more finely describe data.\n\t\n== Encoding metainformation\n\nCurrently the `algorithm_rbg` uses top-level, global variables to define meta-information about the module, e.g.:\n\n----\n# Definitions\nVERSION = '1.0'\n\n# Information on the creator of this algorithm\nALGORITHM_AUTHOR = 'Chris Schnaufer, Clairessa Brown, David Lebauer'\nALGORITHM_AUTHOR_EMAIL = '[email protected], [email protected], [email protected]'\nALGORITHM_CONTRIBUTORS = [\"Jacob van der Leeuw\"]\n----\n\nQuestion: Why is `ALGORITHM_CONTRIBUTORS` defined as a `list` but `ALGORITHM_AUTHOR`/`ALGORITHM_AUTHOR_EMAIL` are defined as a `str`?\nIt would seem all of these would be lists.\n\nOne immediate danger is that these are still *variables* and as such are *mutable*:\n\n----\n>>> import algorithm\n>>> algorithm.VERSION\n'1.0'\n>>> 
algorithm.VERSION = 'foobar'\n>>> algorithm.VERSION\n'foobar'\n----\n\nNOTE: There are many instances of lines exceeding 79 characters in length such as the `ALGORITHM_AUTHOR_EMAIL` or this:\n\n----\nALGORITHM_DESCRIPTION = 'This algorithm performs a variety of calculations using RGB pixels from images in order' \\\n 'to assess plant and crop health and growth'\n----\n\nAccording to PEP8 (https://www.python.org/dev/peps/pep-0008/#maximum-line-length): \"Limit all lines to a maximum of 79 characters.\"\n\nOne way to fix this is to use the implicit concatenation of adjacent strings (https://www.python.org/dev/peps/pep-3126/):\n\n----\nALGORITHM_DESCRIPTION = ('This algorithm performs a variety of '\n 'calculations using RGB pixels from images in order'\n 'to assess plant and crop health and growth')\n----\n\nOr use `+` to join shorter strings:\n\n----\nALGORITHM_DESCRIPTION = 'This algorithm performs a variety of ' + \\\n 'calculations using RGB pixels from images in order' + \\\n 'to assess plant and crop health and growth'\n----\n\nOr a join:\n\n----\nALGORITHM_DESCRIPTION = ''.join([\n 'This algorithm performs a variety of'\n 'calculations using RGB pixels from images in order'\n 'to assess plant and crop health and growth'])\n----\n\nTo access these values, the `testing.py` program uses the `hasattr()` function, e.g.:\n\n----\nif not hasattr(algorithm_rgb, 'VARIABLE_NAMES')\n----\n\nThe `hasattr()` function is problematic as it calls `getattr()` and checks for an exception.\nThe article https://hynek.me/articles/hasattr/ does a good job explaining why this is not an ideal way to check for the existence of a property or method using this function, so I would recommend discontinuing its use.\n\nAt the least, it would seem better to replace all `hasattr()` calls with `getattr()` using a default value.\n\nAlso, why have 14 different variables, all of which are `str` types with the exception of the `ALGORITHM_CONTRIBUTORS` which is a `list` (cf \"stringly typed\" code: https://wiki.c2.com/?StringlyTyped, https://www.techopedia.com/definition/31876/stringly-typed)?\n\n----\nVERSION\nALGORITHM_AUTHOR\nALGORITHM_AUTHOR_EMAIL\nALGORITHM_CONTRIBUTORS\nALGORITHM_NAME\nALGORITHM_DESCRIPTION\nCITATION_AUTHOR\nCITATION_TITLE\nCITATION_YEAR\nVARIABLE_NAMES\nVARIABLE_UNITS\nVARIABLE_LABELS\nWRITE_BETYDB_CSV\nWRITE_GEOSTREAMS_CSV\n----\n\nIf you used a `dict` for this information, then you'd only be exporting one value:\n\n----\nCONFIG = {\n 'VERSION': '',\n 'ALGORITHM_AUTHOR': '',\n 'ALGORITHM_AUTHOR_EMAIL': '',\n 'ALGORITHM_CONTRIBUTORS': '',\n 'ALGORITHM_NAME': '',\n 'ALGORITHM_DESCRIPTION': '',\n 'CITATION_AUTHOR': '',\n 'CITATION_TITLE': '',\n 'CITATION_YEAR': '',\n 'VARIABLE_NAMES': '',\n 'VARIABLE_UNITS': '',\n 'VARIABLE_LABELS': '',\n 'WRITE_BETYDB_CSV': '',\n 'WRITE_GEOSTREAMS_CSV': ''}\n----\n\nOr eschew `getattr()` to directly access a module's value and rather call a function that returns this?\n\n----\ndef config() -> dict:\n return {\n 'VERSION': '',\n 'ALGORITHM_AUTHOR': '',\n 'ALGORITHM_AUTHOR_EMAIL': '',\n 'ALGORITHM_CONTRIBUTORS': '',\n 'ALGORITHM_NAME': '',\n 'ALGORITHM_DESCRIPTION': '',\n 'CITATION_AUTHOR': '',\n 'CITATION_TITLE': '',\n 'CITATION_YEAR': '',\n 'VARIABLE_NAMES': '',\n 'VARIABLE_UNITS': '',\n 'VARIABLE_LABELS': '',\n 'WRITE_BETYDB_CSV': '',\n 'WRITE_GEOSTREAMS_CSV': ''}\n----\n\nOr define a `type`/`class` to represent this as an immutable `NamedTuple`:\n\n----\nfrom typing import NamedTuple, List\n\n\nclass Config(NamedTuple):\n version: str\n algorithm_author: 
List[str]\n algorithm_author_email: List[str]\n algorithm_contributors: List[str]\n algorithm_name: str\n algorithm_description: str\n citation_author: str\n citation_title: str\n citation_year: str\n variable_names: List[str]\n variable_units: List[str]\n variable_labels: List[str]\n write_betydb_csv: bool\n write_geostreams_csv: bool\n----\n\nAnd then return a `Config` from the function which can by type-checked by `mypy`:\n\n----\nfrom config import Config\n\n\ndef config() -> Config:\n return Config(version='1.0',\n algorithm_author=[\n 'Chris Schnaufer', 'Clairessa Brown', 'David Lebauer'\n ],\n algorithm_author_email=[\n '[email protected]',\n '[email protected]',\n '[email protected]'\n ],\n algorithm_contributors=[\"Jacob van der Leeuw\"],\n algorithm_name='Greenness Transformer',\n algorithm_description=(\n 'This algorithm performs a variety of '\n 'calculations using RGB pixels from images in order '\n 'to assess plant and crop health and growth'),\n citation_author='Clairessa Brown',\n citation_title='Woebbecke, D.M. et al',\n citation_year='2020',\n variable_names=[\n 'excess greenness index', 'green leaf index', 'cive',\n 'normalized difference index', 'excess red', 'exgr',\n 'combined indices 1', 'combined indices 2',\n 'vegetative index', 'normalized green-red difference',\n 'percent green'\n ],\n variable_units=[\n '[-510:510]', '[-1:1]', '[-255:255]', '[-127:129]',\n '[-255:255]', '[-255:332]', '[-1000:1000]',\n '[-1000:1000]', '[-255:255]', '[-255:255]', '[0:100]'\n ],\n variable_labels=[\n 'excess_greenness_index', 'green_leaf_index', 'cive',\n 'normalized_difference_index(pxarray)', 'excess_red',\n 'exgr', 'combined_indices_1', 'combined_indices_2',\n 'vegetative_index', 'ngrdi', 'percent_green'\n ],\n write_betydb_csv=True,\n write_geostreams_csv=True)\n----\n\nMuch easier to test, too:\n\n----\nimport algorithm\nfrom config import Config\nfrom typing import List\n\ndef test_config():\n conf = algorithm.config()\n assert conf\n assert type(conf) == Config\n assert type(conf.version) == str\n assert type(conf.algorithm_author) == list\n assert type(conf.algorithm_author_email) == list\n assert type(conf.write_betydb_csv) == bool\n----\n\nWhich leads me to ask if it's necessary to encode this metadata into the module.\nThis is static information that essentially is configuration.\nFurther, nothing inside the `algorithm_rgb` module uses this information (but maybe it should?).\nSo perhaps this would be better encoded as JSON that lives in the same directory as the module?\n\nYou could still have this available from a function:\n\n----\nimport json\nimport os\n\n\ndef config() -> dict:\n file = os.path.join(os.path.dirname(__file__), 'config.json')\n with open(file) as fh:\n return json.load(fh)\n----\n\nCalled like so:\n\n----\nimport algorithm\n\nprint(algorithm.config())\n----\n\nYou could even have this structure be typed.\nConsider a small example:\n\n----\n$ cat config.json\n{\n \"version\": \"1.0\",\n \"author\": [\"Chris Schnaufer\", \"Ken Youens-Clark\"],\n \"author_email\": [\"[email protected]\", \"[email protected]\"],\n \"write_betydb_csv\": true\n}\n----\n\nWhere we define a `Config` type like so:\n\n----\n$ cat config.py\nfrom typing import NamedTuple, List\n\n\nclass Config(NamedTuple):\n version: str\n author: List[str]\n author_email: List[str]\n write_betydb_csv: bool\n----\n\nWhich is used by the \"algorithm\":\n\n----\nimport json\nimport os\nfrom config import Config\n\n\ndef config() -> Config:\n file = os.path.join(os.path.dirname(__file__), 
'config.json')\n with open(file) as fh:\n return Config(**json.load(fh))\n----\n\nWhich we can call like so:\n\n----\n$ cat main.py\n#!/usr/bin/env python3\n\nimport algorithm\n\nprint(algorithm.config())\n----\n\nWhich will produce a typed, immutable object:\n\n----\n$ ./main.py\nConfig(version='1.0', author=['Chris Schnaufer', 'Ken Youens-Clark'], author_email=['[email protected]', '[email protected]'], write_betydb_csv=True)\n----\n\nWere this information to be stored as JSON, it still begs the question of how to produce valid JSON, so it would be good to consider a proper configuration language like Dhall.\nIn this version, I create the \"author\" as a structure that includes both the \"name\" and \"email\" so that it cannot be possible to generate a configuration that leaves out one of these values.\nThe same could/should be done for the variable name/label/unit:\n\n----\n$ cat config.dhall\n-- ./config.dhall\n\nlet Prelude =\n https://prelude.dhall-lang.org/v11.1.0/package.dhall sha256:99462c205117931c0919f155a6046aec140c70fb8876d208c7c77027ab19c2fa\n\n\nlet Author = { name : Text, email : Text }\n\nlet authors\n : List Author\n = [ { name = \"Chris Schnaufer\", email = \"[email protected]\" }\n , { name = \"Ken Youens-Clark\", email = \"[email protected]\" }\n ]\n\nin { authors = authors\n , version = \"1.0\"\n , write_betydb_csv = True\n }\n----\n\nFrom which we can derive JSON:\n\n----\n$ dhall-to-json --file config.dhall --output config.json\n$ cat config.json\n{\n \"authors\": [\n {\n \"email\": \"[email protected]\",\n \"name\": \"Chris Schnaufer\"\n },\n {\n \"email\": \"[email protected]\",\n \"name\": \"Ken Youens-Clark\"\n }\n ],\n \"version\": \"1.0\",\n \"write_betydb_csv\": true\n}\n----\n\nThe `Config` class would likewise need to be changed to reflect this.\n\nNOTE: Should every algorithm return the same structure/metadata. That is, are the 14 above listed fields exhaustive or just the minimal set? Can an algorithm return other/more/less data?\n\nInside \"testing.py\" is the function `check_configuration()` which checks if the `VARIABLE_NAMES` variable exists in the package, so it's only check one of the 14 values:\n\n----\ndef check_configuration():\n \"\"\"Checks if the configuration is setup properly for testing\n \"\"\"\n if not hasattr(algorithm_rgb, 'VARIABLE_NAMES') or not algorithm_rgb.VARIABLE_NAMES:\n sys.stderr.write(\"Variable names configuration variable is not defined yet. 
Please define and try again\")\n sys.stderr.write(\" Update configuration.py and set VALUE_NAMES variable with your variable names\")\n return False\n\n return True\n----\n\nFurther the `_get_variables_header_fields()` inspects the `VARIABLE_NAMES`, `VARIABLE_LABELS`, and `VARIABLE_UNITS` to ensure they are all the same length:\n\n----\ndef _get_variables_header_fields() -> str:\n \"\"\"Returns a string representing the variable header fields\n Return:\n Returns a string representing the variables' header fields\n \"\"\"\n variables = algorithm_rgb.VARIABLE_NAMES.split(',') <1>\n labels = algorithm_rgb.VARIABLE_LABELS.split(',')\n labels_len = len(labels)\n units = algorithm_rgb.VARIABLE_UNITS.split(',')\n units_len = len(units)\n\n if labels_len != len(variables): <2>\n sys.stderr.write(\"The number of defined labels doesn't match the number of defined variables\")\n sys.stderr.write(\" continuing processing\")\n sys.stderr.write(\"\\n\")\n if units_len != len(variables): <3>\n sys.stderr.write(\"The number of defined units doesn't match the number of defined variables\")\n sys.stderr.write(\" continuing processing\")\n sys.stderr.write(\"\\n\")\n\n headers = ''\n for idx, variable_name in enumerate(variables):\n variable_header = variable_name\n if idx < labels_len:\n variable_header += ' - %s' % labels[idx]\n if idx < units_len:\n variable_header += ' (%s)' % units[idx]\n headers += variable_header + ','\n\n return headers <4>\n----\n\n<1> Splitting a string on a comma to get a list. Why not store the values as a list in the first place?\n<2> Here and in 3, it's not a fatal error if the names/labels/units don't match. So how can we be sure that the values are correct? What if there are 10 names but 5 labels and 15 units? Shouldn't that be an error?\n<3> Also not a error, just a warning.\n<4> Why return a string (note \"stringly typing\" above)? Wouldn't a `List[str]` be better? 
Or a `List[Measurement]` where the name/label/unit have been explicitly defined?\n\nNote that these values are coded with commas and spaces separating the values:\n\n----\n>>> VARIABLE_NAMES\n'excess greenness index, green leaf index, cive, normalized difference index, excess red, exgr, combined indices 1, combined indices 2, vegetative index, normalized green-red difference, percent green'\n----\n\nSo splitting them on a comma leaves a space that I imagine is not intentional, e.g., \"' green leaf index'\" instead of \"'green leaf index'\":\n\n----\n>>> VARIABLE_NAMES.split(',')\n['excess greenness index', ' green leaf index', ' cive', ' normalized difference index', ' excess red', ' exgr', ' combined indices 1', ' combined indices 2', ' vegetative index', ' normalized green-red difference', ' percent green']\n----\n\nStoring the name/label/units in three different strings/lists seems like an invitation to errors.\nNote the above representation of an `Author` where \"name\" and \"email\" are required values.\nAt the least, consider a data structure that unifies the idea of a `variable` into one structure, even if it's just a `dict`.\n\n== Return from algorithm_rbg.calculate()\n\nThe `algorithm_rbg.calculate()` function currently returns a list of floating-point values, but the type is annotated to just a `list`:\n\n----\ndef calculate(pxarray: np.ndarray) -> list:\n\treturn [\n\t excess_greenness_index(pxarray),\n\t green_leaf_index(pxarray),\n\t cive(pxarray),\n\t normalized_difference_index(pxarray),\n\t excess_red(pxarray),\n\t exgr(pxarray),\n\t combined_indices_1(pxarray),\n\t combined_indices_2(pxarray),\n\t vegetative_index(pxarray),\n\t ngrdi(pxarray),\n\t percent_green(pxarray)\n\t]\n----\n\nRecommend at least annotating return value as `List[float]`.\n\nWhat does the calling code expect?\nIs every algorithm expected to return the same thing?\n\nShould this perhaps return a `dict` (or `TypedDict`) so that the values are explicitly available by name rather than assumed to be in position?\n\n----\nreturn {\n 'excess_greenness_index': excess_greenness_index(pxarray),\n 'green_leaf_index': green_leaf_index(pxarray),\n 'cive': cive(pxarray),\n 'normalized_difference_index': normalized_difference_index(pxarray),\n 'excess_red': excess_red(pxarray),\n 'exgr': exgr(pxarray),\n 'combined_indices_1': combined_indices_1(pxarray),\n 'combined_indices_2': combined_indices_2(pxarray),\n 'vegetative_index': vegetative_index(pxarray),\n 'ngrdi': ngrdi(pxarray),\n 'percent_green': percent_green(pxarray)\n}\n----\n\nThis could, of course, just as easily be a list of tuple with (\"name\", \"value\").\n\nCould this be better handled as a new `type` (perhaps based on `NamedTuple`)?\nFor instance, I see that `testing._get_variables_header_fields()` inspects the meta data from `algorithm_rbg` for `VARIABLE_NAMES`, `VARIABLE_LABELS`, and `VARIABLE_UNITS`, verifies that these are all the same length, and then returns a `str`.\nDoes some other code use these values to match up with the measurements?\nShould that data be included with the return values for each?\n\nThat is, `excess_greenness_index()` currently returns a `float`.\nShould it instead return a record that includes:\n\n* value: `float`\n* name: `str`\n* unit: `str`\n* label: `str`\n\nI notice the \"unit\" for this measurement is a `str` like \"[-510:510]\" which follows a pattern for all the other units that look like possible `[low:high]` values for this value.\nCould this be better represented as a `tuple` like `(-510, 510)`?\nThis in turn 
could become a `NewType` possible:\n\n----\n>>> from typing import NewType, Tuple\n>>> Unit = NewType('Unit', Tuple[float, float])\n>>> unit1 = Unit((-510, 501))\n>>> type(unit1)\n<class 'tuple'>\n>>> unit1\n(-510, 501)\n----\n\nThen you could use type checking to verify the return with a type:\n\n----\nUnit = NewType('Unit', Tuple[float, float])\nclass Measurement(NamedTuple):\n value: float\n name: str\n label: str\n unit: Unit\n\n\ndef excess_greenness_index(pxarray: np.ndarray) -> Measurement:\n red, green, blue = get_red_green_blue_averages(pxarray)\n return Measurement(\n value = round(2 * green - (red + blue), 2),\n name = 'excess greenness index',\n label = 'excess_greenness_index',\n unit = Unit((-510, 501)))\n----\n\nThen you get an immutable, typed value back from the function:\n\n----\n>>> import algorithm_rgb_type as a2\n>>> a2.excess_greenness_index(pix1)\nMeasurement(value=14.0, name='excess greenness index', label='excess_greenness_index', unit=(-510, 501))\n----\n\n== Testing\n\nThe current https://github.com/AgPipeline/transformer-rgb-indices/blob/master/testing.py program demonstrates a way to use the `algorithm_rgb.py` module to see if works in some way, but it falls short of fully testing the module/functions.\nThis program also manages a number of tasks manually that would be better done using standard modules.\n\n=== Parsing command-line arguments\n\nThe \"testing.py\" module has two functions associated with handling arguments and printing the usage:\n\n----\ndef check_arguments():\n \"\"\"Checks that we have script argument parameters that appear valid\n \"\"\"\n argc = len(sys.argv) <1>\n if argc < 2: <2>\n sys.stderr.write(\"One or more paths to images need to be specified on the command line\\n\")\n print_usage()\n return False <3>\n\n # Check that the paths exist.\n have_errors = False\n for idx in range(1, argc): <4>\n if not os.path.exists(sys.argv[idx]): <5>\n print(\"The following path doesn't exist: \" + sys.argv[idx])\n have_errors = True\n\n if have_errors:\n sys.stderr.write(\"Please correct any problems and try again\\n\")\n\n return not have_errors <6>\n\t\ndef print_usage():\n \"\"\"Displays information on how to use this script\n \"\"\"\n argc = len(sys.argv) <7>\n if argc:\n our_name = os.path.basename(sys.argv[0]) <8>\n else:\n our_name = os.path.basename(__file__)\n print(our_name + \" <folder>|<filename> ...\") <9>\n print(\" folder: path to folder containing images to process\") <10>\n print(\" filename: path to an image file to process\")\n print(\"\") <11>\n print(\" One or more folders and/or filenames can be used\")\n print(\" Only files at the top level of a folder are processed\")\n----\n\n<1> `sys.argv` is a `list` containing the path to the currently running program (i.e., the \"testing.py\" program itself) followed by any other values. This program appears to rely upon positional parameters only, so no named options. If necessary to manually handle `sys.argv`, recommend at least to use `sys.argv[1:]` so as to skip the program name and only handle the actual arguments as this will help avoid off-by-one errors.\n<2> We really only need 1 argument, but the off-by-one problem shows here.\n<3> Three lines of code to handle printing an error, usage, and returning a `False` value from the function, but nothing here will make the program itself return an error code to the command line. See below.\n<4> Another instance of needing to skip the first value as this is not actually an argument. 
\n<5> Manually checking that a given argument exists which could mean either a directory or a file.\n<6> Recommend always using positive variable names like \"is_ok\" with default of `True` and setting to `False` when there is a problem so that you can `return is_ok`. The brain has to work extra to work out the negative of `not have_errors`.\n<7> The `argc` variable is used just once. If you change the code to `if len(sys.argv):` then `pylint` would complain \"Do not use `len(SEQUENCE)` without comparison to determine if a sequence is empty (len-as-condition)\". The more common idiom would be `if sys.argv:`.\n<8> Given the binary choice, an `if` expression would be better (see below). Also, this code relies on the fact that Python's variable scoping is really terrible. In a stricter language, `our_name` would not be visible after the `if`/`else` block, but in Python it is. Recommend to initialize the variable before the block or better to use an `if` expression.\n<9> Manually printing the usage.\n<10> These 5 separate `print()` calls could be handled with one `print()` where the text is provided using a single triple-quoted (`\"\"\"`) string which is more idiomatic.\n<11> Note that `print()` (with no arguments) will accomplish the same thing as this.\n\n\nRemark #1 above relies on a strange behavior of Python in that requesting list slices for non-existent ranges will result in an empty list rather than an exception:\n\n----\n>>> x = ['foo', 'bar']\n>>> x[10:]\n[]\n----\n\nRemark #3 above is due to how this function is called:\n\n----\nif __name__ == \"__main__\":\n if check_arguments() and check_configuration():\n process_files()\n----\n\nIf `check_arguments()` returns `False`, then `process_files()` never executes, but nothing ever tells the program to exit with a non-zero value.\nOne way to fix this would be to add an explicit `sys.exit()` call:\n\n----\nif __name__ == \"__main__\":\n if check_arguments() and check_configuration():\n process_files()\n else:\n sys.exit(1)\n----\n\nNote that calling `sys.exit()` with a `str` value will cause the `str` to be printed to `sys.stderr` and the program to exit with the value `1`:\n\n----\nif __name__ == \"__main__\":\n if check_arguments() and check_configuration():\n process_files()\n else:\n sys.exit('Something went wrong')\n----\n\nRemark #8, recommend rewriting as such:\n\n----\nour_name = os.path.basename(sys.argv[0] if sys.argv else __file__)\n----\n\nLastly, while this program will produce a usage, it does not respond to the standard `-h` or `--help` flags for usage:\n\n----\n$ ./testing.py -h\nThe following path doesn't exist: -h\nPlease correct any problems and try again\n----\n\n=== Using argparse\n\nI have written an alternate version of this program using the standard `argparse` module to handle at https://github.com/kyclark/configcode/blob/master/testing/testing.py.\nMost of the above code can be handled using the standard `argparse` module.\nMy version will accept one or more input files.\nThe program will produce a usage when run with no arguments or the \"help\" flags:\n\n----\n$ ./testing.py -h\nusage: testing.py [-h] FILE [FILE ...]\n\nTest algorithm\n\npositional arguments:\n FILE Input file(s)\n\noptional arguments:\n -h, --help show this help message and exit\n----\n\nAny non-file argument is validated and rejected by `argparse`:\n\n----\n$ ./testing.py blarg\nusage: testing.py [-h] FILE [FILE ...]\ntesting.py: error: argument FILE: can't open 'blarg': [Errno 2] No such file or directory: 'blarg'\n----\n\nNote that 
`argparse` will reject any undefined arguments such as `-x`:\n\n----\n$ ./testing.py -x test_input/*\nusage: testing.py [-h] FILE [FILE ...]\ntesting.py: error: unrecognized arguments: -x\n----\n\nHere is the relevant section that handles the command-line arguments and usage:\n\n----\n# --------------------------------------------------\ndef get_args():\n \"\"\"Get command-line arguments\"\"\"\n\n parser = argparse.ArgumentParser(\n description='Test algorithm',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('file',\n help='Input file(s)',\n metavar='FILE',\n type=argparse.FileType('r'), <1>\n nargs='+') <2>\n\n return parser.parse_args()\n\n\n# --------------------------------------------------\ndef main():\n \"\"\"Make a jazz noise here\"\"\"\n\n args = get_args() <3>\n\n for fh in args.file: <4>\n fh.close() <5>\n run_test(fh.name) <6>\n----\n\n<1> This will cause `argparse` to validate that the positional arguments are readable (`'r'`) files.\n<2> The `nargs` is for the \"number of arguments,\" and `+` means \"one or more.\"\n<3> All the parsing and validation of the arguments happens here. If the arguments are invalid in any way, then this line will fail, the usage will be printed, and the program will exit with a non-zero value.\n<4> The `args.file` value will be a `list` of one or more _open file handles_.\n<5> Need to close the file handle for `gdal.Open()` to work.\n<6> Pass the file's name.\n\n=== Processing the files\n\nIn the original \"testing.py,\" the `process_files()` function needs to decide if the arguments to the program are files or directories the latter of which it will walk to find files:\n\n----\ndef process_files():\n \"\"\"Processes the command line file/folder arguments\n \"\"\"\n argc = len(sys.argv) <1>\n if argc: <2>\n print(\"Filename,\" + _get_variables_header_fields())\n for idx in range(1, argc): <3>\n cur_path = sys.argv[idx]\n if not os.path.isdir(cur_path): <4>\n run_test(cur_path)\n else:\n allfiles = [os.path.join(cur_path, fn) for fn in os.listdir(cur_path) if os.path.isfile(os.path.join(cur_path, fn))] <5>\n for one_file in allfiles:\n run_test(one_file)\n----\n\n<1> This function separately parses the command-line arguments making this an impure function. Recommend that the arguments be parsed in one place (e.g., something like `get_args()`) and those values passed in. The `typing` module has a `TextIO` type that could be used in the function signature like `process_files(List[TextIO])` which would make it easier to validate and check with `mypy`.\n<2> Another instance of using a sequence's length without comparison (see above). Better to say `if sys.argv:`. Of greater concern is that there is no `else`. Should something happen if no files are passed? \n<3> This is a very C-like `for` loop using the index positions of the list. If the arguments were taken like `args = sys.argv[1:]` then you could use a more Pythonic `for arg in args:` here.\n<4> Recommend to avoid `not` and rather accentuate the positive. Change to `if os.path.isfile(cur_path)` and swap the blocks.\n<5> Extremely long line that should be broken up. Use a code formatter like `yapf` to fix. 
Also consider using Python `glob` library to recursively find files (https://docs.python.org/3/library/glob.html).\n\nNote that all this code is obviated by the version I show that requires the files to be passed as arguments to the program.\n\nAlso note that this function marches to 5 levels of indentation which seems a bit much to me (cf https://stackoverflow.com/questions/10959683/preferred-maximum-indentation-in-python). \n\n=== Running a test\n\nThe \"testing.py\" has a function called `run_test()` that accepts a filename, processes it with the `algorithm_rgb.calculate()` function, and raises an exception if it encounters a problem:\n\n----\ndef run_test(filename):\n \"\"\"Runs the extractor code using pixels from the file\n Args:\n filename(str): Path to image file\n Return:\n The result of calling the extractor's calculate() method\n Notes:\n Assumes the path passed in is valid. An error is reported if\n the file is not an image file.\n \"\"\"\n try:\n open_file = gdal.Open(filename)\n if open_file:\n # Get the pixels and call the calculation\n pix = np.array(open_file.ReadAsArray())\n calc_val = algorithm_rgb.calculate(np.rollaxis(pix, 0, 3)) <1>\n\n # Check for unsupported types\n if isinstance(calc_val, set): <2>\n raise RuntimeError(\"A 'set' type of data was returned and isn't supported. Please use a list or a tuple instead\") <3>\n\n # Perform any type conversions to a printable string\n if isinstance(calc_val, str): <4>\n print_val = calc_val\n else:\n # Check if the return is iterable and comma separate the values if it is\n try:\n _ = iter(calc_val) <5>\n print_val = \",\".join(map(str, calc_val))\n except Exception:\n print_val = str(calc_val)\n\n print(filename + \",\" + print_val)\n except Exception as ex:\n sys.stderr.write(\"Exception caught: \" + str(ex) + \"\\n\")\n sys.stderr.write(\" File: \" + filename + \"\\n\")\n----\n\n<1> Quite a bit happens before the `calculate()` function is called. I have yet to look into the code that would call this function, so I have to assume this will be handled elsewhere and this function is assumed to handle only a matrix of pixels.\n<2> Rather than checking for what is *not* allowed (here a `set`), maybe check that it's something that *is* allowed, i.e., `list` or `tuple`. Another instance where type hints could help? Lists and tuples are sort of interchangeable, but I would suggest allowing only one type from a function. I would probably choose a `tuple` given their immutability.\n<3> Why raise a `RuntimeError` when the `except` handles nothing but a generic `Exception`?\n<4> It would appear that the `calculate()` function is allowed to return lists, tuples, and strings, which seems incredibly dangerous to me. A function should return only one type. The `typing.Optional` or `typing.Union` might be useful here if it's necessary to return `None` or you really need to mix strings and lists, but it still seems like a very bad idea.\n<5> Here the `iter()` function is being called to see if it will produce an exception which seems like a really bad idea. 
Also note that dictionaries and file handles are iterable and so would pass this line.\n\nNOTE: This another function with 5 levels of indentation and includes try/catch inside try/catch, both of which strike me as too complicated.\n\nThe most notable problem with this function is that it _never verifies that the `algorithm_rgb.calculate()` function returns the correct answer_.\nFor instance, I can change the function to this:\n\n----\ndef calculate(pxarray: np.ndarray) -> list:\n pass\n----\n\nAnd then run the program:\n\n----\n$ ./testing.py ../sample_plots/*\nFilename,excess greenness index - excess_greenness_index ([-510:510]), green leaf index - green_leaf_index ( [-1:1]), cive - cive ( [-255:255]), normalized difference index - normalized_difference_index(pxarray) ( [-127:129]), excess red - excess_red ( [-255:255]), exgr - exgr ( [-255:332]), combined indices 1 - combined_indices_1 ( [-1000:1000]), combined indices 2 - combined_indices_2 ( [-1000:1000]), vegetative index - vegetative_index ( [-255:255]), normalized green-red difference - ngrdi ( [-255:255]), percent green - percent_green ( [0:100]),\n../sample_plots/rgb_17_7_W.tif,None\n../sample_plots/rgb_1_2_E.tif,None\n../sample_plots/rgb_33_8_W.tif,None\n../sample_plots/rgb_40_11_W.tif,None\n../sample_plots/rgb_5_11_W.tif,None\n../sample_plots/rgb_6_1_E.tif,None\n----\n\n**A test should use a known input and verify that a function/program will produce an expected output.**\n\nThe https://github.com/kyclark/configcode/blob/master/testing/testing.py version uses this same `run_test()` and so is not a recommended solution. \nThat program is merely provided to demonstrate how the original program can be shortened from 151 lines of code (LOC) to 85 all while using standard modules.\n\n=== A different testing scheme\n\nI have provided a separate https://github.com/kyclark/configcode/blob/master/testing/test.py that demonstrates how `pytest` can be used to create unit tests for the https://github.com/kyclark/configcode/blob/master/testing/algorithm_rgb.py file.\n\nNOTE: Is it possible for any of the functions to return anything other than a `float`? That is, the tests only use good input files, and I think it's crucial to run tests using known bad values to ensure the code gracefully handles errors. 
What would happen if a corrupted file were used or one that could conceivably create values of 0 for R+G+B which I see is used as a denominator in division.\n\nHere is a simple testing file which uses two known files (`input1` and `input2`) and verifies that each function in the `algorithm_rgb` will return the correct value:\n\n----\nimport algorithm_rgb as al\nimport os\nimport osgeo.gdal as gdal\nimport numpy as np\nimport json\n\ninput1 = './test_input/rgb_1_2_E.tif'\ninput2 = './test_input/rgb_40_11_W.tif'\nmeta = './meta.json'\n\n\n# --------------------------------------------------\ndef test_input_files():\n \"\"\"Test input files exist\"\"\"\n\n assert os.path.isfile(input1)\n assert os.path.isfile(input2)\n\n\n# --------------------------------------------------\ndef test_get_red_green_blue_averages():\n \"\"\"Test get_red_green_blue_averages\"\"\"\n\n assert al.get_red_green_blue_averages(\n read_input(input1)) == (166.8537142857143, 160.37885714285713,\n 139.89971428571428)\n\n assert al.get_red_green_blue_averages(\n read_input(input2)) == (109.85485714285714, 144.25085714285714, 90.381)\n\n\n# --------------------------------------------------\ndef test_excess_greenness_index():\n \"\"\"Test excess_greenness_index\"\"\"\n\n assert al.excess_greenness_index(read_input(input1)) == 14.0\n assert al.excess_greenness_index(read_input(input2)) == 88.27\n\n\n# --------------------------------------------------\ndef test_green_leaf_index():\n \"\"\"Test green_leaf_index\"\"\"\n\n assert al.green_leaf_index(read_input(input1)) == 0.02\n assert al.green_leaf_index(read_input(input2)) == 0.18\n\n\n# --------------------------------------------------\ndef test_cive():\n \"\"\"Test cive\"\"\"\n\n assert al.cive(read_input(input1)) == 16.16\n assert al.cive(read_input(input2)) == -14.96\n\n\n# --------------------------------------------------\ndef test_normalized_difference_index():\n \"\"\"Test normalized_difference_index\"\"\"\n\n assert al.normalized_difference_index(read_input(input1)) == -1.53\n assert al.normalized_difference_index(read_input(input2)) == 18.33\n\n\n# --------------------------------------------------\ndef test_excess_red():\n \"\"\"Test excess_red\"\"\"\n\n assert al.excess_red(read_input(input1)) == 56.53\n assert al.excess_red(read_input(input2)) == -1.44\n\n\n# --------------------------------------------------\ndef test_exgr():\n \"\"\"Test exgr\"\"\"\n\n assert al.exgr(read_input(input1)) == -42.53\n assert al.exgr(read_input(input2)) == 89.71\n\n\n# --------------------------------------------------\ndef test_combined_indices_1():\n \"\"\"Test combined_indices_1\"\"\"\n\n assert al.combined_indices_1(read_input(input1)) == 30.16\n assert al.combined_indices_1(read_input(input2)) == 73.31\n\n\n# --------------------------------------------------\ndef test_combined_indices_2():\n \"\"\"Test combined_indices_2\"\"\"\n\n assert al.combined_indices_2(read_input(input1)) == 12.81\n assert al.combined_indices_2(read_input(input2)) == 24.98\n\n\n# --------------------------------------------------\ndef test_vegetative_index():\n \"\"\"Test vegetative_index\"\"\"\n\n assert al.vegetative_index(read_input(input1)) == 1.02\n assert al.vegetative_index(read_input(input2)) == 1.4\n\n\n# --------------------------------------------------\ndef test_ngrdi():\n \"\"\"Test ngrdi\"\"\"\n\n assert al.ngrdi(read_input(input1)) == -0.02\n assert al.ngrdi(read_input(input2)) == 0.14\n\n\n# --------------------------------------------------\ndef test_percent_green():\n \"\"\"Test 
percent_green\"\"\"\n\n assert al.percent_green(read_input(input1)) == 0.34\n assert al.percent_green(read_input(input2)) == 0.42\n\n\n# --------------------------------------------------\ndef test_calculate():\n \"\"\"Test calculate\"\"\"\n\n assert al.calculate(read_input(input1)) == [\n 14.0, 0.02, 16.16, -1.53, 56.53, -42.53, 30.16, 12.81, 1.02, -0.02,\n 0.34\n ]\n\n assert al.calculate(read_input(input2)) == [\n 88.27, 0.18, -14.96, 18.33, -1.44, 89.71, 73.31, 24.98, 1.4, 0.14, 0.42\n ]\n\n\n# --------------------------------------------------\ndef read_input(file) -> np.ndarray:\n \"\"\"Run calculate on a file\"\"\"\n\t\n if fh := gdal.Open(file):\n pix = np.array(fh.ReadAsArray())\n return np.rollaxis(pix, 0, 3)\n\n\n# --------------------------------------------------\ndef test_meta():\n \"\"\"Test meta\"\"\"\n\n assert os.path.isfile(meta)\n data = json.load(open(meta))\n assert data['authors']\n----\n\nUsing `pytest` to run this test suite will produce a familiar output:\n\n----\n$ pytest -xv test.py\n============================= test session starts ==============================\n...\n\ntest.py::test_input_files PASSED [ 6%]\ntest.py::test_get_red_green_blue_averages PASSED [ 13%]\ntest.py::test_excess_greenness_index PASSED [ 20%]\ntest.py::test_green_leaf_index PASSED [ 26%]\ntest.py::test_cive PASSED [ 33%]\ntest.py::test_normalized_difference_index PASSED [ 40%]\ntest.py::test_excess_red PASSED [ 46%]\ntest.py::test_exgr PASSED [ 53%]\ntest.py::test_combined_indices_1 PASSED [ 60%]\ntest.py::test_combined_indices_2 PASSED [ 66%]\ntest.py::test_vegetative_index PASSED [ 73%]\ntest.py::test_ngrdi PASSED [ 80%]\ntest.py::test_percent_green PASSED [ 86%]\ntest.py::test_calculate PASSED [ 93%]\ntest.py::test_meta PASSED [100%]\n\n============================== 15 passed in 0.20s ==============================\n----\n\nTo demonstrate the output when code is failing, I can introduce an error like so:\n\n----\ndef test_cive():\n \"\"\"Test cive\"\"\"\n\n assert al.cive(read_input(input1)) == None # 16.16 <<<< Changing to None\n assert al.cive(read_input(input2)) == -14.96\n----\n\nAnd now the test output reads:\n\n----\n$ pytest -xv test.py\n============================= test session starts ==============================\n...\n\ntest.py::test_input_files PASSED [ 6%]\ntest.py::test_get_red_green_blue_averages PASSED [ 13%]\ntest.py::test_excess_greenness_index PASSED [ 20%]\ntest.py::test_green_leaf_index PASSED [ 26%]\ntest.py::test_cive FAILED [ 33%]\n\n=================================== FAILURES ===================================\n__________________________________ test_cive ___________________________________\n\n def test_cive():\n \"\"\"Test cive\"\"\"\n\n> assert al.cive(read_input(input1)) == None # 16.16\nE assert 16.16 == None\nE +16.16\nE -None\n\ntest.py:52: AssertionError\n=========================== short test summary info ============================\nFAILED test.py::test_cive - assert 16.16 == None\n!!!!!!!!!!!!!!!!!!!!!!!!!! stopping after 1 failures !!!!!!!!!!!!!!!!!!!!!!!!!!!\n========================= 1 failed, 4 passed in 0.51s ==========================\n----\n\nThe `pytest` module will integrate with the `coverage` (https://coverage.readthedocs.io/en/coverage-5.2/) module to help determine how much of the code is covered by tests:\n\n----\n$ coverage run -m pytest test.py\n============================= test session starts ==============================\n...\n\ntest.py ............... 
[100%]\n\n============================== 15 passed in 0.24s ==============================\n$ coverage report\nName Stmts Miss Cover\n--------------------------------------\nalgorithm_rgb.py 41 0 100%\ntest.py 58 0 100%\n--------------------------------------\nTOTAL 99 0 100%\n----\n\nThese unit tests cover all the functions in the `algorithm_rgb.py` module.\n\n== Summary\n\nI have only investigated how to use `pytest` to create unit tests for the given module.\nI do not know to what extent contributors of algorithms will be expected to create such tests.\nThis may or may not be beyond the capabilities of the typical programmer, but I believe a thorough explanation and demonstration will encourage people to contribute a full test suite.\nI especially feel that the functional nature of `pytest` makes it rather easy to create and run tests (as opposed to an object-oriented test suite, cf https://docs.python.org/3/library/unittest.html).\n\n== Going further\n\nNext I need to see how an algorithm is integrated into a greater system which is probably where more intense and focused testing should occur.\n\n== Author \n\nKen Youens-Clark <[email protected]>\n" }, { "alpha_fraction": 0.4343704283237457, "alphanum_fraction": 0.5041322112083435, "avg_line_length": 26.79729652404785, "blob_id": "b8c68b9ff0bc7037c33538368dfdd909999a3c12", "content_id": "ef1d58f9d80944f576f527aa5ef04b18763b93eb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4114, "license_type": "permissive", "max_line_length": 79, "num_lines": 148, "path": "/testing/test.py", "repo_name": "kyclark/configcode", "src_encoding": "UTF-8", "text": "import algorithm_rgb as al\nimport os\nimport osgeo.gdal as gdal\nimport numpy as np\nimport json\n\ninput1 = './test_input/rgb_1_2_E.tif'\ninput2 = './test_input/rgb_40_11_W.tif'\nmeta = './meta.json'\n\n\n# --------------------------------------------------\ndef test_input_files():\n \"\"\"Test input files exist\"\"\"\n\n assert os.path.isfile(input1)\n assert os.path.isfile(input2)\n\n\n# --------------------------------------------------\ndef test_get_red_green_blue_averages():\n \"\"\"Test get_red_green_blue_averages\"\"\"\n\n assert al.get_red_green_blue_averages(\n read_input(input1)) == (166.8537142857143, 160.37885714285713,\n 139.89971428571428)\n\n assert al.get_red_green_blue_averages(\n read_input(input2)) == (109.85485714285714, 144.25085714285714, 90.381)\n\n\n# --------------------------------------------------\ndef test_excess_greenness_index():\n \"\"\"Test excess_greenness_index\"\"\"\n\n assert al.excess_greenness_index(read_input(input1)) == 14.0\n assert al.excess_greenness_index(read_input(input2)) == 88.27\n\n\n# --------------------------------------------------\ndef test_green_leaf_index():\n \"\"\"Test green_leaf_index\"\"\"\n\n assert al.green_leaf_index(read_input(input1)) == 0.02\n assert al.green_leaf_index(read_input(input2)) == 0.18\n\n\n# --------------------------------------------------\ndef test_cive():\n \"\"\"Test cive\"\"\"\n\n assert al.cive(read_input(input1)) == 16.16\n assert al.cive(read_input(input2)) == -14.96\n\n\n# --------------------------------------------------\ndef test_normalized_difference_index():\n \"\"\"Test normalized_difference_index\"\"\"\n\n assert al.normalized_difference_index(read_input(input1)) == -1.53\n assert al.normalized_difference_index(read_input(input2)) == 18.33\n\n\n# --------------------------------------------------\ndef test_excess_red():\n 
\"\"\"Test excess_red\"\"\"\n\n assert al.excess_red(read_input(input1)) == 56.53\n assert al.excess_red(read_input(input2)) == -1.44\n\n\n# --------------------------------------------------\ndef test_exgr():\n \"\"\"Test exgr\"\"\"\n\n assert al.exgr(read_input(input1)) == -42.53\n assert al.exgr(read_input(input2)) == 89.71\n\n\n# --------------------------------------------------\ndef test_combined_indices_1():\n \"\"\"Test combined_indices_1\"\"\"\n\n assert al.combined_indices_1(read_input(input1)) == 30.16\n assert al.combined_indices_1(read_input(input2)) == 73.31\n\n\n# --------------------------------------------------\ndef test_combined_indices_2():\n \"\"\"Test combined_indices_2\"\"\"\n\n assert al.combined_indices_2(read_input(input1)) == 12.81\n assert al.combined_indices_2(read_input(input2)) == 24.98\n\n\n# --------------------------------------------------\ndef test_vegetative_index():\n \"\"\"Test vegetative_index\"\"\"\n\n assert al.vegetative_index(read_input(input1)) == 1.02\n assert al.vegetative_index(read_input(input2)) == 1.4\n\n\n# --------------------------------------------------\ndef test_ngrdi():\n \"\"\"Test ngrdi\"\"\"\n\n assert al.ngrdi(read_input(input1)) == -0.02\n assert al.ngrdi(read_input(input2)) == 0.14\n\n\n# --------------------------------------------------\ndef test_percent_green():\n \"\"\"Test percent_green\"\"\"\n\n assert al.percent_green(read_input(input1)) == 0.34\n assert al.percent_green(read_input(input2)) == 0.42\n\n\n# --------------------------------------------------\ndef test_calculate():\n \"\"\"Test calculate\"\"\"\n\n assert al.calculate(read_input(input1)) == [\n 14.0, 0.02, 16.16, -1.53, 56.53, -42.53, 30.16, 12.81, 1.02, -0.02,\n 0.34\n ]\n\n assert al.calculate(read_input(input2)) == [\n 88.27, 0.18, -14.96, 18.33, -1.44, 89.71, 73.31, 24.98, 1.4, 0.14, 0.42\n ]\n\n\n# --------------------------------------------------\ndef read_input(file) -> np.ndarray:\n \"\"\"Run calculate on a file\"\"\"\n if fh := gdal.Open(file):\n pix = np.array(fh.ReadAsArray())\n return np.rollaxis(pix, 0, 3)\n\n\n# --------------------------------------------------\ndef test_meta():\n \"\"\"Test meta\"\"\"\n\n assert os.path.isfile(meta)\n data = json.load(open(meta))\n assert data['authors']\n" }, { "alpha_fraction": 0.6100543737411499, "alphanum_fraction": 0.6195651888847351, "avg_line_length": 27.30769157409668, "blob_id": "cf8462e95ab7b638b745548fb85af388dc2eb1e0", "content_id": "0afb19721f99a869e6ced5e915acc4d917014a1b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 736, "license_type": "permissive", "max_line_length": 74, "num_lines": 26, "path": "/attr/use_attrs.py", "repo_name": "kyclark/configcode", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport algorithm\n\nflds = [\n 'VERSION', 'ALGORITHM_AUTHOR', 'ALGORITHM_AUTHOR_EMAIL',\n 'WRITE_BETYDB_CSV', 'BOGUS'\n]\n\nprint('Using has/getattr()')\nfor fld in filter(lambda f: hasattr(algorithm, f), flds):\n val = getattr(algorithm, fld)\n print('{:25} => {} ({})'.format(fld, val, type(val)))\n\nprint()\nprint('Using getattr() with default')\nfor fld in flds:\n val = getattr(algorithm, fld, 'NA')\n print('{:25} => {} ({})'.format(fld, val, type(val)))\n\nprint()\nprint('Using dir()')\navailable = list(filter(lambda f: not f.startswith('__'), dir(algorithm)))\nfor fld in filter(lambda f: f in available, flds):\n val = getattr(algorithm, fld)\n print('{:25} => {} ({})'.format(fld, val, type(val)))\n" }, { 
"alpha_fraction": 0.6850393414497375, "alphanum_fraction": 0.6968504190444946, "avg_line_length": 27.22222137451172, "blob_id": "dec3298ba42cc7f4cb8e40fb27609dcdaacc7900", "content_id": "1d6418b84ea84fa256ccc73ab0e91894abd85d69", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 254, "license_type": "permissive", "max_line_length": 64, "num_lines": 9, "path": "/type/bad.py", "repo_name": "kyclark/configcode", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport config_type\n\nconfig = config_type.config()\ntmpl = '{:25} => {}'\nprint(tmpl.format('AUTHOR', config.author))\nprint(tmpl.format('EMAIL', config.author_emails))\nprint(tmpl.format('VERSION', ','.join(config.write_betydb_csv)))\n" } ]
17
DandinPower/CryptoAI
https://github.com/DandinPower/CryptoAI
079f1a556b44a3fdf2c2a3061a6fe9e4b1cbf509
bce0e04f35b5b5e5c7343859ef273d2691955f80
d1c3b6cb52ea05a246ae8733288a00bca97583a8
refs/heads/main
2023-07-18T23:25:40.244518
2021-09-23T01:39:07
2021-09-23T01:39:07
409,241,570
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6465753316879272, "alphanum_fraction": 0.6611872315406799, "avg_line_length": 34.35483932495117, "blob_id": "ed29312ba4b33168fd2714c92133655d3104e25a", "content_id": "be40ebf81b51bdeee027d3215d9b55033f9b22b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1111, "license_type": "no_license", "max_line_length": 96, "num_lines": 31, "path": "/ann.py", "repo_name": "DandinPower/CryptoAI", "src_encoding": "UTF-8", "text": "import keras\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom price import GetData\ndef main():\n X,Y = GetData('BTCUSDT','15m', '5 day ago UTC', 89)\n labelencoder_X_4 = LabelEncoder()\n X[:, 4] = labelencoder_X_4.fit_transform(X[:, 4])\n transformer = ColumnTransformer(\n transformers=[\n (\"OneHot\", # Just a name\n OneHotEncoder(), # The transformer class\n [5] # The column(s) to be applied on.\n )\n ],\n remainder='passthrough' # donot apply anything to the remaining columns\n )\n X = transformer.fit_transform(X.tolist())\n X = X.astype('float64')\n # 預防虛擬變量陷阱\n #X = X[:,1:]\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 0)\n sc = StandardScaler()\n X_train = sc.fit_transform(X_train)\n X_test = sc.transform(X_test)\nmain()" }, { "alpha_fraction": 0.516837477684021, "alphanum_fraction": 0.5296486020088196, "avg_line_length": 27.763158798217773, "blob_id": "413100444f21dd4cb696e3beae50f3e2814f7bba", "content_id": "00bcb29cd77bfbe39ae47009234336bbbf8a7ca8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5606, "license_type": "no_license", "max_line_length": 121, "num_lines": 190, "path": "/price.py", "repo_name": "DandinPower/CryptoAI", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nfrom binance.client import Client\nimport plotly.graph_objects as go\napi_key = \"hbOfUeQs7wRpllIrZfbXgxRMNudnWybfoyE4MOhtO2nU2iuHta5A21TxHpapWRSY\"\napi_secret = \"Zhb9OuU8g3zUmUmabxxZwdL9AGT21eOBTPR7VsUmNUrNxzzO3GvMSuZvNP6BIhxf\"\nclient = Client(api_key, api_secret) # 用自己創立的Key,Secret登入binance帳戶\ndef GetEma(CloseList,_len):\n EmaList = []\n ema_len = _len\n k = 2/(ema_len + 1)\n for i in range(len(CloseList)):\n if(i == 0):\n EmaList.append(CloseList[i])\n else:\n ema_last = EmaList[i-1]\n TempEma = (CloseList[i]*k) + (ema_last*(1-k))\n EmaList.append(TempEma)\n return EmaList\n\ndef GetEVolume(VolumeList,_len):\n EVolume = []\n k = 2/(_len + 1)\n for i in range(len(VolumeList)):\n if(i == 0):\n EVolume.append(VolumeList[i])\n else:\n volume_last = VolumeList[i-1]\n Temp = (VolumeList[i]*k) + (volume_last*(1-k))\n EVolume.append(Temp)\n return EVolume\n\ndef GetPrice(Coin_Money,Interval,Time_Interval):\n OpenList = []\n CloseList = []\n HighList = []\n LowList = []\n VolumeList = []\n NumOfTradeList = []\n AvList = []\n Time = []\n i = 0\n # 讀取K線\n for kline in client.get_historical_klines(Coin_Money, Interval, Time_Interval):\n Open = float(kline[1])\n OpenList.append(Open)\n High = float(kline[2])\n HighList.append(High)\n Low = float(kline[3])\n LowList.append(Low)\n Close = float(kline[4])\n CloseList.append(Close)\n Volume = float(kline[5])\n VolumeList.append(Volume)\n Trade = float(kline[8])\n NumOfTradeList.append(Trade)\n TempAv = 
(Open + Close)/2\n AvList.append(TempAv)\n Time.append(i)\n i += 1\n klines = [OpenList,CloseList,HighList,LowList,VolumeList,NumOfTradeList,AvList,Time]\n return klines\n\ndef Show(klines,Ema):\n plttime = 0\n if(plttime == 0):\n Data = [go.Candlestick(x=klines[7], open=klines[0], high=klines[2], low=klines[3],\n close=klines[1], increasing_line_color='red', decreasing_line_color='green'),\n go.Scatter(\n x=klines[7],\n y=Ema,\n name='EMA',\n mode='lines',\n line=go.Line(\n color='#77AAFF'\n )\n )]\n fig = go.Figure(Data)\n\n fig.show()\n plttime = 1\n\ndef GetKlineState(Open,Close,High,Low):\n #print(Open,Close,High,Low)\n state = ''\n if Close >= Open:\n state = 'Green'\n if High >Close:\n upcandle = True\n else:\n upcandle = False\n if Low < Open:\n downcandle = True\n else:\n downcandle = False\n else:\n state = 'Red'\n if High > Open:\n upcandle = True\n else:\n upcandle = False\n if Low < Close:\n downcandle = True\n else:\n downcandle = False\n candle = ''\n candle = candle + state\n \n if upcandle:\n candle = candle + 'Up'\n if downcandle:\n candle = candle + 'Down'''\n return candle\n\ndef WinOrLose(Close,High,Low,nowtime):\n state = True\n time = nowtime + 1\n startprice = Close[nowtime]\n winprice = startprice * 1.01\n loseprice = startprice * 0.993\n answer = 0\n while state:\n if time >= len(Close) -2:\n break\n high = High[time]\n low = Low[time]\n if loseprice >= low:\n state = False\n elif winprice <= high:\n answer = 1\n state = False\n else:\n time += 1\n #print(f'現在測試時間 :{nowtime},起始價格 :{startprice},測試價格為 :{price},斜率為 :{(price - startprice)/startprice}')\n return answer\n \n\ndef GetData(Coin_Money,Interval,Time_Interval,klen):\n #Coin = \"BTC\" # 設定貨幣為\"...\"\n #Money = \"USDT\" # 設定法幣為為\"...\"\"\n #Coin_Money = Coin + Money\n #Interval = Client.KLINE_INTERVAL_5MINUTE\n #Time_Interval = \"5 day ago UTC\"\n klines = GetPrice(Coin_Money, Interval, Time_Interval)\n EmaLen = klen\n Ema = GetEma(klines[1], EmaLen)\n EVolume = GetEVolume(klines[4], klen)\n #Show(klines,Ema)\n Open = klines[0]\n Close = klines[1]\n High = klines[2]\n Low = klines[3]\n Volume = klines[4]\n Trade = klines[5]\n Av = klines[6]\n Time = klines[7]\n X = [] #Ma斜率,價格斜率,價格是否大於ma,移動平均volume斜率,9跟k線狀態,k線型態\n Y = [] #是否成功\n '''\n for i in range(len(Open)):\n if i < EmaLen:\n continue\n else:\n Ema_Angle = ((Ema[i] - Ema[i-1]) / Ema[i-1]) \n Price_Angle = ((Close[i] - Close[i-1]) / Close[i-1]) \n if Close[i] >= Ema[i]:\n BullOrBear = 1\n else:\n BullOrBear = 0\n \n EVolume_angle = (EVolume[i] - EVolume[i-1]) / EVolume[i-1]\n Candle = GetKlineState(Open[i], Close[i], High[i], Low[i])\n #print(f'EMA:{Ema_Angle},PRICE:{Price_Angle},BullOrBear:{BullOrBear},VOLUME:{EVolume_angle},CANDLE:{Candle}')\n X.append([Ema_Angle,Price_Angle,BullOrBear,EVolume_angle])\n win = WinOrLose(Close, High, Low, i)\n #print(win)\n Y.append(win)'''\n for i in range(len(Open)):\n X.append([Open[i],Close[i],High[i],Low[i],Volume[i],Trade[i]])\n win = WinOrLose(Close, High, Low, i)\n Y.append(win)\n X = np.array(X)\n Y = np.array(Y)\n return X,Y\n\n\nif __name__ == '__main__':\n X,Y = GetData('BTCUSDT','15m', '5 day ago UTC', 89)\n print(X.shape,Y.shape)\n print(X[0],Y[0])" } ]
2
EcePanos/test
https://github.com/EcePanos/test
63133ce9365e64adba78463075ec3106b4d9a3f3
94ccb18f9d7ff0efd3d443b49cc09ed142142115
70305e794efccb76d45c08108d99dd0222e237dd
refs/heads/master
2020-03-30T04:56:58.772776
2019-02-26T16:09:56
2019-02-26T16:09:56
150,770,735
0
0
null
2018-09-28T17:01:12
2018-09-28T17:01:15
2018-09-28T17:06:20
null
[ { "alpha_fraction": 0.738095223903656, "alphanum_fraction": 0.738095223903656, "avg_line_length": 41, "blob_id": "a4e57c163d2f9aa071d62add35ce6950d8d686fe", "content_id": "47463b8e457f6f1060ede5b1334930b84c585b02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 126, "license_type": "no_license", "max_line_length": 109, "num_lines": 3, "path": "/README.md", "repo_name": "EcePanos/test", "src_encoding": "UTF-8", "text": "# test\ntest rep\n[![Build Status](https://travis-ci.org/EcePanos/test.svg?branch=master)](https://travis-ci.org/EcePanos/test)\n" }, { "alpha_fraction": 0.6969696879386902, "alphanum_fraction": 0.6969696879386902, "avg_line_length": 15.5, "blob_id": "86b337996194b5c1a1bd614aa50101434f020986", "content_id": "0c59e12bb5247a723b98bd9512697d7a4c7f09a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 66, "license_type": "no_license", "max_line_length": 22, "num_lines": 4, "path": "/test.py", "repo_name": "EcePanos/test", "src_encoding": "UTF-8", "text": "#This is a new comment\n\nprint(\"hello world\")\nprint(\"this is new\")\n" } ]
2
yangshiyu89/lane_detection_simple
https://github.com/yangshiyu89/lane_detection_simple
ed0ffecaf4baf6fd012025bbdadde774b0b834af
0bda7f17612b06600df25796da9bf7c6779811ff
5ebe7897cf3c04858ee858d399ed1e0314b4a105
refs/heads/master
2021-01-21T11:27:22.421338
2017-03-01T16:24:10
2017-03-01T16:24:10
83,575,313
0
2
null
null
null
null
null
[ { "alpha_fraction": 0.5088978409767151, "alphanum_fraction": 0.5597015023231506, "avg_line_length": 34.551021575927734, "blob_id": "f865997484735c4814983d198dec434eb5e114e3", "content_id": "16ac8c3bf18550e2ffa0ce2ff85776f514b31f1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3484, "license_type": "no_license", "max_line_length": 123, "num_lines": 98, "path": "/lane_detection_simple.py", "repo_name": "yangshiyu89/lane_detection_simple", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 27 22:03:01 2017\n\n@author: yangshiyu89\n\"\"\"\n\nimport cv2\nimport numpy as np\n\ndef get_edge(frame):\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray,(7,7),0)\n canny = cv2.Canny(blur, 50, 150)\n return canny\n\ndef get_roi(edge_frame):\n point1 = (round(edge_frame.shape[1]*0.18), round(edge_frame.shape[0]*0.90))\n point2 = (round(edge_frame.shape[1]*0.43), round(edge_frame.shape[0]*0.67))\n point3 = (round(edge_frame.shape[1]*0.60), round(edge_frame.shape[0]*0.67))\n point4 = (round(edge_frame.shape[1]*0.95), round(edge_frame.shape[0]*0.90))\n roi = np.array([[point1, point2, point3, point4]])\n mask = np.zeros_like(edge_frame)\n mask_value = 100\n cv2.fillPoly(mask, roi, mask_value)\n masked_img = cv2.bitwise_and(edge_frame, mask)\n return masked_img\n\ndef get_hough_lines(roi_frame):\n lines = cv2.HoughLinesP(roi_frame, 1, np.pi/180, 15, np.array([]), \n minLineLength=200, maxLineGap=200)\n hough_frame = np.zeros((roi_frame.shape[0], roi_frame.shape[1], 3), dtype=np.uint8)\n for line in lines:\n for x1, y1, x2, y2 in line:\n cv2.line(hough_frame, (x1, y1), (x2, y2), color=[0, 255, 0], thickness=3)\n \n return hough_frame, lines\n\ndef get_lines(frame, hough_frame, lines, threshold=0.1):\n def fit_line(lines):\n lines = np.array(lines)\n x = np.concatenate((lines[:, 0], lines[:, 2]))\n y = np.concatenate((lines[:, 1], lines[:, 3]))\n ymin, ymax = y.min(), y.max()\n model = np.polyfit(y, x, 1)\n model_poly1d = np.poly1d(model)\n xmin = int(model_poly1d(ymin))\n xmax = int(model_poly1d(ymax))\n line = [xmin, ymin, xmax, ymax]\n return line\n\n if len(lines) > 0:\n left_lines = []\n right_lines= []\n \n slopes = [(y2 - y1) / (x2 - x1) for line in lines for x1, y1, x2, y2 in line]\n lines = [(x1, y1, x2, y2) for line in lines for x1, y1, x2, y2 in line]\n for index in range(len(slopes)):\n if abs(slopes[index]) < threshold:\n lines.pop(lines[index])\n slopes.pop(slopes[index])\n \n else:\n if slopes[index] < 0:\n left_lines.append(lines[index])\n else:\n right_lines.append(lines[index])\n left_line = fit_line(left_lines)\n right_lines = fit_line(right_lines)\n cv2.line(frame, (left_line[0], left_line[1]), (left_line[2], left_line[3]), color=[255, 0, 0], thickness=3)\n cv2.line(frame, (right_lines[0], right_lines[1]), (right_lines[2], right_lines[3]), color=[0, 0, 255], thickness=3)\n \n return frame\n\nif __name__ == \"__main__\":\n file_name = \"lane_detection.mp4\"\n cap = cv2.VideoCapture(file_name)\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter('output.mp4',fourcc, 20.0, (640,480))\n \n while(True): \n _, frame = cap.read()\n \n try:\n edge_frame = get_edge(frame)\n roi_frame = get_roi(edge_frame)\n hough_frame, lines = get_hough_lines(roi_frame)\n frame_result = get_lines(frame, hough_frame, lines)\n out.write(frame_result)\n\n cv2.imshow(\"frame\", frame_result)\n except:\n pass\n if cv2.waitKey(10) & 0xFF == ord('q'):\n break\n cap.release()\n 
out.release()\n cv2.destroyAllWindows()\n" } ]
1
Rajat-Dabade/MiniCloud
https://github.com/Rajat-Dabade/MiniCloud
3875f444714b3d73a7aa4ba5f8d8275d7fcf7b7e
71f3f3b511a64d29c3a268f604db882dc7b4da1e
6990ea95aedb570bd28d271c768e095b9ffbde5f
refs/heads/master
2021-08-19T23:26:17.949311
2017-11-27T16:57:01
2017-11-27T16:57:01
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7167630195617676, "alphanum_fraction": 0.7225433588027954, "avg_line_length": 13.333333015441895, "blob_id": "8647f0973fe6ec88cee343c160612935fbeca699", "content_id": "a006a12d934301caa1b4910f72dbb37ee02559d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 173, "license_type": "no_license", "max_line_length": 47, "num_lines": 12, "path": "/Webserver/html/scripts/iaas.py", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport cgi,cgitb\n\nprint \"Content-Type: text/html; charset=UTF-8;\"\nprint \"\"\n\ninstDetails=cgi.FieldStorage()\n\ncpu = instDetails.getvalue('cpu')\n\nprint cpu\n\n" }, { "alpha_fraction": 0.659360408782959, "alphanum_fraction": 0.6748113632202148, "avg_line_length": 41.83076858520508, "blob_id": "d42a71a67e134edd798e5a000e8852efd1d5d8c1", "content_id": "f7756f7fe33263e9c7bd6bfe24da0d513321cce3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2783, "license_type": "no_license", "max_line_length": 182, "num_lines": 65, "path": "/Webserver/cgi-bin/operations.py", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nfrom package.connect import Connection\nimport commands as cmd\nimport cgi,cgitb\nimport os\nimport commands as cmd\nimport sys\ncgitb.enable()\n\nprint \"Content-Type: text/html; charset=UTF-8;\"\nprint \"\"\n\ninstDetails=cgi.FieldStorage()\nop=instDetails.getvalue('operation')\n\ncusr=Connection.connect()\n\nif(type(cusr)==int):\n print \"Could not establish the connection\"\n\ninstName = op[:-1]\n\nif(op[len(op)-1:]=='@'):\n instState=cmd.getstatusoutput('sshpass -p root ssh -o StrictHostKeyChecking=No root@server \"virsh shutdown {0}\"'.format(instName))\n if(instState[0] == 0):\n db=Connection.getDB()\n cusr=db.cursor()\n cusr.execute(\"update InstDetails set state=0 where name='{0}'\".format(instName))\n db.commit()\nelif(op[len(op)-1:]=='^'):\n instState=cmd.getstatusoutput('sshpass -p root ssh -o StrictHostKeyChecking=No root@server \"virsh start {0}\"'.format(instName))\n if(instState[0] == 0):\n db=Connection.getDB()\n cusr=db.cursor()\n cusr.execute(\"update InstDetails set state=1 where name='{0}'\".format(instName))\n db.commit()\nelif(op[len(op)-1:]=='$'):\n instState=cmd.getstatusoutput('sshpass -p root ssh -o StrictHostKeyChecking=No root@server \"virsh reboot {0}\"'.format(instName))\n if(instState[0] == 0):\n db=Connection.getDB()\n cusr=db.cursor()\n cusr.execute(\"update InstDetails set state=1 where name='{0}'\".format(instName))\n db.commit()\nelif(op[len(op)-1:]=='&'):\n instState=cmd.getstatusoutput('sshpass -p root ssh -o StrictHostKeyChecking=No root@server \"virsh destroy {0}\"'.format(instName))\n instState=cmd.getstatusoutput('sshpass -p root ssh -o StrictHostKeyChecking=No root@server \"virsh undefine {0}\"'.format(instName))\n instState1=cmd.getstatusoutput('sshpass -p root ssh -o StrictHostKeyChecking=No root@server \"rm -f /var/lib/libvirt/images/{0}.qcow2\"'.format(instName))\n if(instState[0] == 0 and instState1[0]==0):\n db=Connection.getDB()\n cusr=db.cursor()\n cusr.execute(\"select InstId from InstDetails where name='{0}'\".format(instName))\n instid=cusr.fetchall()[0][0]\n cusr.execute(\"delete from IaasUsers where InstID = '{0}'\".format(instid))\n cusr.execute(\"delete from InstDetails where InstId = '{0}'\".format(instid))\n db.commit()\nelif (op[-1:]==\"*\"):\n 
instState=cmd.getstatusoutput(\"\"\"sshpass -p root ssh -q -o StrictHostKeyChecking=No root@server \"virsh domifaddr %s --source agent eth0\" | awk '/eth0/ {print $4}' \"\"\" % instName)\n print \"\"\"\n <script type='text/javascript'>alert('IP address: {0}'); window.location.href='/final/pages/iaas.php'</script>\"\"\".format(instState[1][:-3])\n\n \n\n\n\nprint \"<script type='text/javascript'>window.location.href='/final/pages/iaas.php'</script>\"" }, { "alpha_fraction": 0.5358490347862244, "alphanum_fraction": 0.5358490347862244, "avg_line_length": 20.16666603088379, "blob_id": "5cf56436cc68c44cbab0aa21ca45024710989466", "content_id": "8d6ff57cee03b079d9cf8f07e55d8097d8cebf8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 530, "license_type": "no_license", "max_line_length": 76, "num_lines": 24, "path": "/Webserver/html/final/pages/signupv.php", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "<?php \r\n \r\n require_once(\"../include/connect.php\");\r\n\r\n\r\n $username = $_POST[\"username\"];\r\n $email = $_POST[\"email\"];\r\n $password = $_POST[\"password\"];\r\n\r\n $sql = \"INSERT INTO Identity VALUES ('$username', '$password', '$email')\";\r\n \r\n if (mysqli_query($conn, $sql)) {\r\n echo \"<script type='text/javascript'>alert('Signup successfully!');\r\n window.location.href='../index.php'</script>\";\r\n \r\n } else {\r\n echo \"Error: \" . $sql . \"<br>\" . mysqli_error($conn);\r\n }\r\n\r\n mysqli_close($conn);\r\n\r\n\r\n\r\n?>" }, { "alpha_fraction": 0.4509027898311615, "alphanum_fraction": 0.46524858474731445, "avg_line_length": 34.71818161010742, "blob_id": "2cf16273493c452ef430a9fe2796e0ca3a5fcddb", "content_id": "da81ee5e0164a7e5a4cd5e780d8d360c129c6a89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 4043, "license_type": "no_license", "max_line_length": 180, "num_lines": 110, "path": "/Webserver/html/final/pages/saas.php", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "\r\n\r\n<?php\r\n\r\n require_once(\"../include/connect.php\");\r\n session_start();\r\n if(!isset($_SESSION['username'])){\r\n session_destroy();\r\n }\r\n \r\n?>\r\n\r\n\r\n<!DOCTYPE html>\r\n<html lang=\"en-us\">\r\n <head>\r\n <title>{{ Websitename }}</title>\r\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=Edge\">\r\n <meta charset=\"UTF-8\">\r\n\r\n <!-- load bootstrap and fontawesome via CDN -->\r\n <link rel=\"stylesheet\" href=\"https://netdna.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css\" />\r\n <style>\r\n html, body, input, select, textarea\r\n {\r\n font-size: 1.05em !important;\r\n }\r\n b:hover {\r\n color: #F59D1E; \r\n }\r\n </style>\r\n\r\n <!-- load angular via CDN -->\r\n\r\n \r\n </head>\r\n <body>\r\n\r\n <header>\r\n\t\t\t<nav class=\"navbar navbar-inverse\" data-spy=\"affix\" data-offset-top=\"197\">\r\n <div class=\"container-fluid\">\r\n <div class=\"navbar-header\">\r\n <a class=\"navbar-brand\" href=\"../index.php\" style=\"color: #F59D1E\">WebSiteName</a>\r\n </div>\r\n <ul class=\"nav navbar-nav\">\r\n <li><a href=\"../index.php\">Home</a></li>\r\n </ul>\r\n <ul class=\"nav navbar-nav navbar-right\">\r\n <?php if(!isset($_SESSION['username'])) { \r\n echo \"<script type='text/javascript'>\r\n window.location.href='../index.php'</script>\";\r\n }else{\r\n echo '<li><a href=\"services.php\">Services</a></li>';\r\n echo '<li class=\"active\"><a href=\"#\"><span class=\"glyphicon 
glyphicon-user\"></span>'.$_SESSION['username'].'</a></li>';\r\n echo '<li><a href=\"logout.php\"><span class=\"glyphicon glyphicon-log-in\"></span> logout</a></li>';\r\n } ?>\r\n \r\n </ul>\r\n </div>\r\n </nav>\r\n\t\t</header>\r\n\r\n <div class=\"container\">\r\n <h1 style=\"text-align: center;letter-spacing: 2px;font-size: 60px;color: #F59D1E\"><b>WELCOME TO SOFTWARE AS A SERVICE (SaaS)</b></h1>\r\n\r\n <div class=\"row\" style=\"margin-top: 50px\">\r\n\r\n <?php\r\n\r\n $username = $_SESSION['username'];\r\n\r\n $sql = \"SELECT Username from SAAS where Username = '\".$username.\"'\";\r\n\r\n $result = mysqli_query($conn, $sql);\r\n\r\n if (mysqli_num_rows($result) > 0) {\r\n\r\n echo '<div class=\"col-md-5\" style=\"margin-left: 50px\">\r\n <form action=\"#\">\r\n <button type=\"button\" class=\"btn btn-outline-primary btn-lg btn-block disabled\"><span style=\"font-size: 30px\"><br>Service is<br> already Activated</span></button>\r\n </form>\r\n </div>';\r\n\r\n }\r\n\r\n else{\r\n echo ' <div class=\"col-md-5\" style=\"margin-left: 50px\">\r\n <form action=\"/cgi-bin/createSUser.py\" method=\"post\">\r\n <input type=\"hidden\" value=\"'.$username.'\" name=\"username\">\r\n <button type=\"submit\" class=\"btn btn-outline-primary btn-lg btn-block\"><span style=\"font-size: 110px\">Activate</span></button>\r\n </form>\r\n </div>';\r\n }\r\n\r\n ?> \r\n \r\n\r\n <div class=\"col-md-1\"></div>\r\n\r\n \r\n <a href=\"https://drive.google.com/open?id=0B7UkhJMGCNILbkxVVmsxdkxyeHM\"><div class=\"col-md-5\" style=\"height: 170px;display: block;background-color: #F0F0F0\">\r\n <div style=\"text-align: center;letter-spacing: 5px;padding-top: 50px;font-size: 30px;color: black\"><b>SaaS</b></div>\r\n <small style=\"padding-left: 120px;color: black;font-size: 15px\">(Click here to download software)</small>\r\n </div>\r\n </a>\r\n\r\n </div>\r\n\r\n\t\t </div>\r\n\r\n </body>\r\n</html>\r\n" }, { "alpha_fraction": 0.37589696049690247, "alphanum_fraction": 0.3849126100540161, "avg_line_length": 37.369564056396484, "blob_id": "ad5a1c383f8be32987fb8e1ee579005c3a1ec41b", "content_id": "9be250e66ecf9eab69b192809874d621acea027e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 5435, "license_type": "no_license", "max_line_length": 220, "num_lines": 138, "path": "/Webserver/html/final/pages/instDetails.php", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "\r\n<?php\r\n require_once(\"../include/connect.php\");\r\n session_start();\r\n if(!isset($_SESSION['username'])){\r\n session_destroy();\r\n }\r\n \r\n?>\r\n\r\n<!DOCTYPE html>\r\n<html lang=\"en-us\">\r\n <head>\r\n <title>{{ Websitename }}</title>\r\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=Edge\">\r\n <meta charset=\"UTF-8\">\r\n\r\n <!-- load bootstrap and fontawesome via CDN -->\r\n <link rel=\"stylesheet\" href=\"https://netdna.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css\" />\r\n <style>\r\n html, body, input, select, textarea\r\n {\r\n font-size: 1.05em !important;\r\n }\r\n </style>\r\n\r\n <!-- load angular via CDN -->\r\n\r\n \r\n </head>\r\n <body>\r\n\r\n <header>\r\n\t\t\t<nav class=\"navbar navbar-inverse\">\r\n <div class=\"container-fluid\">\r\n <div class=\"navbar-header\">\r\n <a class=\"navbar-brand\" href=\"../index.php\" style=\"color: #F59D1E\">WebSiteName</a>\r\n </div>\r\n <ul class=\"nav navbar-nav\">\r\n <li><a href=\"../index.php\">Home</a></li>\r\n </ul>\r\n <ul class=\"nav navbar-nav navbar-right\">\r\n <?php 
if(!isset($_SESSION['username'])) { \r\n echo \"<script type='text/javascript'>\r\n window.location.href='../index.php'</script>\";\r\n }else{\r\n \techo '<li><a href=\"services.php\">Services</a></li>';\r\n echo '<li class=\"active\"><a href=\"#\"><span class=\"glyphicon glyphicon-user\"></span>'.$_SESSION['username'].'</a></li>';\r\n echo '<li><a href=\"logout.php\"><span class=\"glyphicon glyphicon-log-in\"></span> logout</a></li>';\r\n } ?>\r\n \r\n </ul>\r\n </div>\r\n </nav>\r\n\t\t</header>\r\n\r\n <div class=\"container\">\r\n\r\n <div class=\"row\" style=\"margin-top: 100px;\">\r\n <div class=\"col-md-6\">\r\n <h3 style=\"text-align: center; margin-bottom: 50px\"><b>Instance Details</b></h3>\r\n <form action=\"/cgi-bin/macuuid.py\" method=\"post\">\r\n <input type=\"hidden\" name=\"username\" value=\"<?php echo $_SESSION['username'] ?>\">\r\n <div class=\"form-group\">\r\n <table class=\"table table-inverse\">\r\n <tbody>\r\n <tr>\r\n <td> <label for=\"instanceName\">Instance Name :</label></td>\r\n <td><input name=\"instanceName\" type=\"instanceName\" class=\"form-control\" id=\"instanceName\" aria-describedby=\"instanceName\" value=\"<?php echo $_SESSION['instance_name']; ?>\" readonly></td>\r\n </tr>\r\n <tr>\r\n <td> <label for=\"cpu_core\">CPU Core : </label></td>\r\n <td><input name=\"cpu_core\" type=\"number\" class=\"form-control\" id=\"cpu_core\" aria-describedby=\"cpu_core\" max=\"4\" required></td>\r\n \t</tr>\r\n \t<tr>\r\n <td> <label for=\"memory\">Memory(KiB) : </label></td>\r\n <td><input name=\"memory\" type=\"number\" class=\"form-control\" id=\"memory\" aria-describedby=\"memory\" min=\"1677721\" max='4194304' required></td>\r\n \t</tr>\r\n \r\n <tr>\r\n <td> <label for=\"cpu_core\">Image: </label></td>\r\n <td>\r\n\r\n\r\n \t<?php\r\n\r\n\r\n\r\n \t$sql = \"SELECT * from Images \";\r\n\r\n\t\t\t\t\t\t\t $result = mysqli_query($conn, $sql);\r\n\t\t\t\t\t\t\t \r\n\t\t\t\t\t\t\t if (mysqli_num_rows($result) > 0) {\r\n \r\n\t\t\t\t\t\t\t \techo '<select class=\"custom-select mb-2 mr-sm-2 mb-sm-0\" id=\"inlineFormCustomSelect\" name=\"ImageID\">';\r\n\t\t\t\t\t\t\t while($row = mysqli_fetch_assoc($result)) {\r\n\t\t\t\t\t\t\t \t\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t echo \"<option value='\".$row['ImageId'].\"'>\".$row['Image'].\"</option>\"; \r\n\r\n\t\t\t\t\t\t\t }\r\n\t\t\t\t\t\t\t echo '</select>';\r\n\t\t\t\t\t\t\t }\r\n\r\n\t\t\t\t\t\t\t ?>\r\n\t\t\t\t\t\t\t\t \r\n\r\n\t\t\t\t\t\t\t\t </td>\r\n \t</tr>\r\n\r\n </tbody>\r\n </table>\r\n </div>\r\n <table class=\"table table-inverse\">\r\n <tr>\r\n <td><input type=\"submit\" class=\"btn btn-warning\" value=\"Continue\"></td>\r\n </tr>\r\n </table>\r\n </form>\r\n </div>\r\n <div class=\"col-md-1\">\r\n <style>\r\n .vl {\r\n border-left: 4px solid grey;\r\n height: 350px;\r\n }\r\n </style>\r\n <div class=\"vl\"></div>\r\n </div>\r\n <div class=\"col-md-5\">\r\n <img src=\"../images/instance.png\" class=\"img-rounded\" alt=\"Cinque Terre\" width=\"504\" height=\"336\">\r\n </div>\r\n </div>\r\n\r\n\r\n\t\t</div>\r\n\r\n </body>\r\n</html>\r\n" }, { "alpha_fraction": 0.47424963116645813, "alphanum_fraction": 0.497065007686615, "avg_line_length": 47.598899841308594, "blob_id": "6e4d82a4eb7056c91493b816a117821d804fe12a", "content_id": "87e1241091cd5959ae82c84d3cc3726052104047", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 9029, "license_type": "no_license", "max_line_length": 225, "num_lines": 182, "path": "/Webserver/html/final/index.php", "repo_name": 
"Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "\r\n<?php\r\n session_start();\r\n if(!isset($_SESSION['username'])){\r\n session_destroy();\r\n }\r\n \r\n?>\r\n\r\n<!DOCTYPE html>\r\n<html lang=\"en-us\">\r\n <head>\r\n <title>{{ Websitename }}</title>\r\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=Edge\">\r\n <meta charset=\"UTF-8\">\r\n\r\n <link rel=\"stylesheet\" href=\"https://netdna.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css\" />\r\n\r\n <!-- load bootstrap and fontawesome via CDN -->\r\n <link rel=\"stylesheet\" href=\"https://netdna.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css\" />\r\n\r\n <link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css\">\r\n <script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js\"></script>\r\n <script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js\"></script>\r\n\r\n\r\n\r\n <style>\r\n html, body, input, select, textarea\r\n {\r\n font-size: 1.05em !important;\r\n }\r\n .abc:hover{\r\n border: solid 1px #E1E1E1;\r\n \r\n }\r\n </style>\r\n\r\n <!-- load angular via CDN -->\r\n\r\n \r\n </head>\r\n <body>\r\n\r\n <header>\r\n\t\t\t<nav class=\"navbar navbar-inverse navbar-fixed-top\">\r\n <div class=\"container-fluid\">\r\n <div class=\"navbar-header\">\r\n <a class=\"navbar-brand\" href=\"#\" style=\"color: #F59D1E\">WebSiteName</a>\r\n </div>\r\n <ul class=\"nav navbar-nav\">\r\n <li class=\"active\"><a href=\"#\">Home</a></li>\r\n </ul>\r\n <ul class=\"nav navbar-nav navbar-right\">\r\n <?php if(!isset($_SESSION['username'])) { \r\n echo '<li><a href=\"pages/signup.php\"><span class=\"glyphicon glyphicon-user\"></span> Sign Up</a></li>';\r\n echo '<li><a href=\"pages/login.php\"><span class=\"glyphicon glyphicon-log-in\"></span> Login</a></li>';\r\n }else{\r\n echo '<li><a href=\"pages/services.php\">Services</a></li>';\r\n echo '<li><a href=\"#\"><span class=\"glyphicon glyphicon-user\"></span>'.$_SESSION['username'].'</a></li>';\r\n echo '<li><a href=\"pages/logout.php\"><span class=\"glyphicon glyphicon-log-in\"></span> logout</a></li>';\r\n } ?>\r\n \r\n </ul>\r\n </div>\r\n </nav>\r\n\t\t</header>\r\n\r\n <!-- <div style=\"height: 100px\">\r\n <h1 style=\"text-align: center;text-transform: uppercase;color: #F59D1E;margin-top: 100px;font-size: 50px\">Welcome To Cloud Services</h1>\r\n <div>\r\n <hr size=\"10px\" width=\"70%\" style=\"height:1px;border:none;color:#333;background-color:#333;\">\r\n </div>\r\n </div> -->\r\n \r\n <section>\r\n <div id=\"myCarousel\" class=\"carousel slide\" data-ride=\"carousel\">\r\n <!-- Indicators -->\r\n <ol class=\"carousel-indicators\">\r\n <li data-target=\"#myCarousel\" data-slide-to=\"0\" class=\"active\"></li>\r\n <li data-target=\"#myCarousel\" data-slide-to=\"1\"></li>\r\n <li data-target=\"#myCarousel\" data-slide-to=\"2\"></li>\r\n </ol>\r\n\r\n <!-- Wrapper for slides -->\r\n <div class=\"carousel-inner\">\r\n <div class=\"item active\">\r\n <img src=\"images/2.jpg\" alt=\"Los Angeles\" style=\"width:100%;height: 500px;\">\r\n </div>\r\n\r\n <div class=\"item\">\r\n <img src=\"images/5.jpg\" alt=\"Chicago\" style=\"width:100%;height: 500px;\">\r\n </div>\r\n \r\n <div class=\"item\">\r\n <img src=\"images/6.jpg\" alt=\"New york\" style=\"width:100%;height: 500px;\">\r\n </div>\r\n </div>\r\n\r\n <!-- Left and right controls -->\r\n <a class=\"left carousel-control\" href=\"#myCarousel\" data-slide=\"prev\">\r\n <span class=\"glyphicon 
glyphicon-chevron-left\"></span>\r\n <span class=\"sr-only\">Previous</span>\r\n </a>\r\n <a class=\"right carousel-control\" href=\"#myCarousel\" data-slide=\"next\">\r\n <span class=\"glyphicon glyphicon-chevron-right\"></span>\r\n <span class=\"sr-only\">Next</span>\r\n </a>\r\n </div>\r\n </section>\r\n <div style=\"height: 40px;margin-top: 10px\">\r\n \r\n \r\n\r\n \r\n </div>\r\n\r\n\r\n <section>\r\n <h1 style=\"text-align: center;text-transform: uppercase;color: #F59D1E;font-size: 40px\">services provided are</h1>\r\n\r\n <div>\r\n <hr size=\"10px\" width=\"50%\" style=\"height:1px;border:none;color:#333;background-color:#333;\">\r\n </div>\r\n \r\n <div class=\"container\">\r\n\r\n <div class=\"row\">\r\n\r\n <div class=\"col-md-4 abc\" style=\"height: 400px;display: block;\">\r\n <div style=\"text-align: center;letter-spacing: 5px;padding-top: 70px;font-size: 40px;color: black\"><b>IaaS</b></div>\r\n <p style=\"padding-left: 30px;color: black;font-size: 25px\">(Infrastucture as a Service)</p>\r\n <p style=\"text-align: center;padding-top: 10px\">IaaS provides you basic virtual compute infrastructure resources like CPU, Memory, Disk Storage attached to blank VMs with allowing you to install OS</p>\r\n <div style=\"padding-top: 20px;padding-left: 110px\">\r\n <a href=\"https://en.wikipedia.org/wiki/IAAS\"><button type=\"button\" class=\"btn btn-info\">More About IaaS</button></a>\r\n </div>\r\n </div>\r\n\r\n <div class=\"col-md-4 abc\" style=\"height: 400px;display: block;\">\r\n <div style=\"text-align: center;letter-spacing: 5px;padding-top: 70px;font-size: 40px;color: black\"><b>PaaS</b></div>\r\n <p style=\"padding-left: 55px;color: black;font-size: 25px\">(Platform as a Service)</p>\r\n <p style=\"text-align: center;padding-top: 10px\">PaaS provides pre-installed web and database servers so that you can publish and run web application without worrying about server setup.</p>\r\n <div style=\"padding-top: 20px;padding-left: 110px\">\r\n <a href=\"https://en.wikipedia.org/wiki/Platform_as_a_service\"> <button type=\"button\" class=\"btn btn-info\">More About PaaS</button></a>\r\n </div>\r\n </div>\r\n \r\n <div class=\"col-md-4 abc\" style=\"height: 400px;display: block;\">\r\n <div style=\"text-align: center;letter-spacing: 5px;padding-top: 70px;font-size: 40px;color: black\"><b>SaaS</b></div>\r\n <p style=\"padding-left: 55px;color: black;font-size: 25px\">(Software as a Service)</p>\r\n <p style=\"text-align: center;padding-top: 10px\">SaaS is a software delivery model here users are not responsible for supporting the application or any of the components.</p>\r\n <div style=\"padding-top: 45px;padding-left: 110px\">\r\n <a href=\"https://en.wikipedia.org/wiki/Software_as_a_service\"><button type=\"button\" class=\"btn btn-info\">More About SaaS</button></a>\r\n </div>\r\n </div>\r\n </div>\r\n </div>\r\n </section>\r\n\r\n <footer style=\"height: 270px;background-color: #0F111C\">\r\n <div class=\"container\">\r\n <div class=\"row\">\r\n <div class=\"col-md-6\" style=\"text-align: center;color: white;\">\r\n <h2 style=\"text-align: center;color: white\"><b>WebSite Name</b></h2>\r\n\r\n <hr size=\"10px\" width=\"50%\" style=\"height:1px;border:none;color:#333;background-color:white;\">\r\n <h3>Created By</h3>\r\n\r\n <a href=\"\" style=\"text-align: center;color: white;\">Aniket Baad<b>(2015BCS062)</b></a><br>\r\n <a href=\"\" style=\"text-align: center;color: white;\">Ankush Khobragade<b>(2015BCS070)</b></a><br>\r\n <a href=\"\" style=\"text-align: center;color: 
white;\">Rajat Dabade<b>(2015BCS056)</b></a>\r\n </div>\r\n <div class=\"col-md-6\" style=\"text-align: center;color: white;padding-top: 90px\">\r\n <h4 style=\"font-size: 40px\">Guided By</h4>\r\n <a href=\"\" style=\"text-align: center;color: white;\"><b>Prof.Sandeep Rathod</b></a><br>\r\n </div>\r\n </div>\r\n </div>\r\n <p style=\"text-align: center;color: white;padding-top: 20px\">&copy;Walchand College Of Engineering,Sangli</p>\r\n </footer>\r\n\r\n </body>\r\n</html>\r\n" }, { "alpha_fraction": 0.6789988875389099, "alphanum_fraction": 0.7023938894271851, "avg_line_length": 29.649999618530273, "blob_id": "558b0c4550ef5699dcd94d1ce9431d82ae005d59", "content_id": "c42862a5b86459670fca3e881798290340d49c00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1838, "license_type": "no_license", "max_line_length": 188, "num_lines": 60, "path": "/Webserver/cgi-bin/macuuid.py", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nfrom package.connect import Connection\nimport uuid\nfrom random import randint\nimport os\nimport commands as cmd\nimport cgi,cgitb\ncgitb.enable()\n\nprint \"Content-Type: text/html; charset=UTF-8;\"\nprint \"\"\n\ninstDetails=cgi.FieldStorage()\nusername=instDetails.getvalue('username')\ninstname=instDetails.getvalue('instanceName')\ncpu=instDetails.getvalue('cpu_core')\nmem=instDetails.getvalue('memory')\nimgID=instDetails.getvalue('ImageID')\n\n\nname=username+\"_\"+instname\n\n# name = istDetails.getValue('name')\n# cpu = instDetails.getvalue('cpu')\n# memory = instDetails.getValue('memory')\n\n# name = instDetails.getlist('name')\n# cpu = instDetails.getlist('cpu')\n# memory = instDetails.getlist('memory')\n\ndef genuuid():\n return str(uuid.uuid4())\n\ndef randomMAC():\n return [ 0x00, 0x16, 0x3e,\n randint(0x00, 0x7f),\n randint(0x00, 0xff),\n randint(0x00, 0xff) ]\n\ndef macid(mac):\n return ':'.join(map(lambda x: \"%02x\" % x, mac))\n\nuuid=genuuid()\nmac=macid(randomMAC())\n\ninstState=cmd.getstatusoutput('sshpass -p root ssh -o StrictHostKeyChecking=No root@server \"python /root/Documents/IAAS/createVM.py {0} {1} {2} {3} {4}\"'.format(name, uuid, mac, cpu, mem))\n#print cmd.getstatusoutput(\"sshpass -p root ssh root@server firefox\")\n\nif(instState[0]==0):\n db=Connection.getDB()\n cusr=db.cursor()\n cusr.execute(\"insert into InstDetails(name,CPU,Memory,UUID,image,macNo,state) values('{0}','{1}','{2}','{3}','{4}','{5}','1')\".format(name,cpu,mem,uuid,imgID,mac))\n cusr.execute(\"select max(InstId) from InstDetails\")\n instid=cusr.fetchall()[0][0]\n print instid\n cusr.execute(\"insert into IaasUsers(Username,InstID) values('{0}','{1}')\".format(username,instid))\n db.commit()\n\nprint \"Worked correctly\"\nprint \"<script type='text/javascript'>window.location.href='/final/pages/iaas.php'</script>\"" }, { "alpha_fraction": 0.6315789222717285, "alphanum_fraction": 0.6315789222717285, "avg_line_length": 21.44444465637207, "blob_id": "521033291f6e4698fd2a68e736329fdf0c3610af", "content_id": "efe6f220da8c6421353b4e846f89c76001399190", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 209, "license_type": "no_license", "max_line_length": 68, "num_lines": 9, "path": "/Webserver/html/final/pages/logout.php", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "<?php\r\n\t\r\n\tsession_start();\r\n\tunset($_SESSION['username']);\r\n\tsession_destroy();\r\n\techo \"<script 
type='text/javascript'>alert('Successfully logout!');\r\n window.location.href='../index.php'</script>\";\r\n\r\n?>" }, { "alpha_fraction": 0.7072120308876038, "alphanum_fraction": 0.7190527319908142, "avg_line_length": 25.571428298950195, "blob_id": "d24659ec00f4af3baa412c8791c0bbc432ac6619", "content_id": "2231b9c322161405b4b466fddb9643b4f95dfc05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 929, "license_type": "no_license", "max_line_length": 99, "num_lines": 35, "path": "/Webserver/cgi-bin/createSUser.py", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nfrom package.connect import Connection\nimport os\nimport commands as cmd\nimport cgi,cgitb\ncgitb.enable()\n\nprint \"Content-Type: text/html; charset=UTF-8;\"\nprint \"\"\n\nform=cgi.FieldStorage()\nuser=form.getvalue(\"username\")\n\ndb=Connection.getDB()\ncusr=db.cursor()\ncusr.execute(\"select Password from Identity where Username='{0}'\".format(user))\npasswd = cusr.fetchall()[0][0]\n\nif(passwd==None):\n\tprint \"User Does not exists\"\n\nuserState= cmd.getstatusoutput(\"python /var/www/cgi-bin/useradd.py {}\".format(user))\n\nif(userState[0]==0):\n\tpassState= cmd.getstatusoutput(\"echo 'apache\\n{1}\\n{1}' | sudo -S passwd {0}\".format(user,passwd))\n\tmsg=\"Service activated\"\n\tcusr.execute(\"insert into SAAS(Username) values('{0}')\".format(user))\n\tdb.commit()\nelse:\n msg=\"Sorry, Could not activate service\"\n\nprint \"\"\"\n<script type='text/javascript'>alert('{0}');\nwindow.location.href='/final/pages/saas.php'</script>\n\"\"\".format(msg)" }, { "alpha_fraction": 0.7126050591468811, "alphanum_fraction": 0.7243697643280029, "avg_line_length": 24.782608032226562, "blob_id": "6014732d0331ec44fb521fec361e07c5aff29321", "content_id": "9b8bb73b9542c2c4ccb9e6a5d631efd64c6d9177", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1190, "license_type": "no_license", "max_line_length": 128, "num_lines": 46, "path": "/Webserver/cgi-bin/deleteCont.py", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nfrom package.connect import Connection\nimport commands as cmd\nimport os\nimport cgi,cgitb\ncgitb.enable()\n\nprint \"Content-Type: text/html; charset=UTF-8;\"\nprint \"\"\n\nform=cgi.FieldStorage()\n# name=form.getvalue('name')\n# pltid=form.getvalue('pltID')\ncontName=form.getvalue('operation')\n\n\n\n\n#removing from database\ndb=Connection.getDB()\ncusr=db.cursor()\ncusr.execute(\"select ContID from Containers where ContName='{0}'\".format(contName))\ncontID=cusr.fetchall()[0][0]\ncusr.execute(\"delete from PAASUsers where ContID='{0}'\".format(contID))\ncusr.execute(\"delete from Containers where ContID='{0}'\".format(contID))\n\n#Deleting Directory\nos.system(\"rm -R -f /PAAS/{0}\".format(contName))\n\n\n# Deleting Container\ndelState=cmd.getstatusoutput(\"sshpass -p root ssh -o StrictHostKeyChecking=No root@server 'docker rm -f {0}' \".format(contName))\n\n\n\nif(delState[0]==0 or delState[0]==256):\n db.commit()\n print \"\"\"\n<script type='text/javascript'>alert('Project deleted');\nwindow.location.href='/final/pages/paas.php'</script>\n\"\"\"\nelse:\n print \"\"\"\n<script type='text/javascript'>alert('Sorry coud not delete project.');\nwindow.location.href='/final/pages/paas.php'</script>\n\"\"\"\n " }, { "alpha_fraction": 0.3879022002220154, "alphanum_fraction": 0.39562419056892395, "avg_line_length": 
37.22222137451172, "blob_id": "7b13a6a3dd5e36de936f79a971bd0cdc35b14c9e", "content_id": "829a2bbe97e9e52bebbdff2b86cb28b571ced6f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 3885, "license_type": "no_license", "max_line_length": 172, "num_lines": 99, "path": "/Webserver/html/final/pages/login.php", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "\r\n<?php\r\n session_start();\r\n if(!isset($_SESSION['username'])){\r\n session_destroy();\r\n }\r\n \r\n?>\r\n\r\n<!DOCTYPE html>\r\n<html lang=\"en-us\">\r\n <head>\r\n <title>{{ Websitename }}</title>\r\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=Edge\">\r\n <meta charset=\"UTF-8\">\r\n\r\n <!-- load bootstrap and fontawesome via CDN -->\r\n <link rel=\"stylesheet\" href=\"https://netdna.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css\" />\r\n <style>\r\n html, body, input, select, textarea\r\n {\r\n font-size: 1.05em !important;\r\n }\r\n </style>\r\n\r\n <!-- load angular via CDN -->\r\n\r\n \r\n </head>\r\n <body>\r\n\r\n <header>\r\n\t\t\t<nav class=\"navbar navbar-inverse\">\r\n <div class=\"container-fluid\">\r\n <div class=\"navbar-header\">\r\n <a class=\"navbar-brand\" href=\"../index.php\" style=\"color: #F59D1E\">WebSiteName</a>\r\n </div>\r\n <ul class=\"nav navbar-nav\">\r\n <li class=\"active\"><a href=\"../index.php\">Home</a></li>\r\n </ul>\r\n <ul class=\"nav navbar-nav navbar-right\">\r\n <?php if(!isset($_SESSION['username'])) { \r\n echo '<li><a href=\"signup.php\"><span class=\"glyphicon glyphicon-user\"></span> Sign Up</a></li>';\r\n echo '<li><a href=\"#\"><span class=\"glyphicon glyphicon-log-in\"></span> Login</a></li>';\r\n }else{\r\n echo \"<script type='text/javascript'>\r\n window.location.href='../index.php'</script>\";\r\n } ?>\r\n \r\n </ul>\r\n </div>\r\n </nav>\r\n\t\t</header>\r\n\r\n <div class=\"container\">\r\n\r\n <div class=\"row\" style=\"margin-top: 100px;\">\r\n <div class=\"col-md-6\">\r\n <h3 style=\"text-align: center; margin-bottom: 50px\"><b>Log-In Into Your Account</b></h3>\r\n <form action=\"loginv.php\" method=\"post\">\r\n <div class=\"form-group\">\r\n <table class=\"table table-inverse\">\r\n <tbody>\r\n <tr>\r\n <td> <label for=\"username\">UserName :</label></td>\r\n <td><input name=\"username\" type=\"username\" class=\"form-control\" id=\"username\" aria-describedby=\"username\" placeholder=\"FirstName\"></td>\r\n </tr>\r\n <tr>\r\n <td> <label for=\"password\">Password : </label></td>\r\n <td><input name=\"password\" type=\"password\" class=\"form-control\" id=\"password\" aria-describedby=\"password\" placeholder=\"Password\"></td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </div>\r\n <table class=\"table table-inverse\">\r\n <tr>\r\n <td><input type=\"submit\" class=\"btn btn-warning\" value=\"Continue\"></td>\r\n </tr>\r\n </table>\r\n </form>\r\n </div>\r\n <div class=\"col-md-1\">\r\n <style>\r\n .vl {\r\n border-left: 4px solid grey;\r\n height: 250px;\r\n }\r\n </style>\r\n <div class=\"vl\"></div>\r\n </div>\r\n <div class=\"col-md-5\">\r\n <img src=\"../images/login.svg\" class=\"img-rounded\" alt=\"Cinque Terre\" width=\"404\" height=\"236\">\r\n </div>\r\n </div>\r\n\r\n\r\n\t\t</div>\r\n\r\n </body>\r\n</html>\r\n" }, { "alpha_fraction": 0.4957698881626129, "alphanum_fraction": 0.4991539716720581, "avg_line_length": 26.16666603088379, "blob_id": "2600eeb6bb94dbacbc1fbe5afb829bc6be7c3cc7", "content_id": "5f907098d5e9dbaf3306bd121194b1b9f1ba7ce9", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1182, "license_type": "no_license", "max_line_length": 164, "num_lines": 42, "path": "/Webserver/html/final/pages/createInstance.php", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": " <?php\r\n\r\n require_once(\"../include/connect.php\");\r\n session_start();\r\n $username = $_SESSION['username'];\r\n $instanceName = $_POST['instance_name'];\r\n $_SESSION['instance_name'] = $instanceName;\r\n $instanceName = $username.\"_\".$instanceName;\r\n\r\n $flg = 0;\r\n \r\n \r\n\r\n $sql = \"SELECT InstDetails.name from InstDetails INNER JOIN IaasUsers ON IaasUsers.Username = '\".$username.\"' WHERE IaasUsers.InstID = InstDetails.InstId \";\r\n\r\n $result = mysqli_query($conn, $sql);\r\n\r\n if (mysqli_num_rows($result) > 0) {\r\n\r\n while($row = mysqli_fetch_assoc($result)) {\r\n \r\n if(!strcmp($instanceName, $row[\"name\"]))\r\n {\r\n $flg = 1;\r\n break;\r\n }\r\n\r\n }\r\n }\r\n\r\n if($flg == 1){\r\n echo \"<script type='text/javascript'>alert('Instance Name Is Already Exist Please Enter Another Instance Name!');\r\n window.location.href='./iaas.php'</script>\";\r\n }\r\n\r\n else\r\n {\r\n echo \"<script type='text/javascript'>window.location.href='./instDetails.php'</script>\";\r\n }\r\n\r\n\r\n ?>" }, { "alpha_fraction": 0.6643059253692627, "alphanum_fraction": 0.6919263601303101, "avg_line_length": 25.660377502441406, "blob_id": "13151de1fb4f9a4d52634a1ddd6effb00996ac4e", "content_id": "b7aa5f0866b08b43188131f866b772336b56e339", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1412, "license_type": "no_license", "max_line_length": 188, "num_lines": 53, "path": "/Webserver/html/scripts/macuuid.py", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nfrom package.connect import Connection\nimport uuid\nfrom random import randint\nimport os\nimport commands as cmd\nimport cgi,cgitb\ncgitb.enable()\n\nprint \"Content-Type: text/html; charset=UTF-8;\"\nprint \"\"\n\ninstDetails=cgi.FieldStorage()\nname=instDetails.getvalue('nm')\ncpu=instDetails.getvalue('cpu')\nmem=instDetails.getvalue('memory')\n\n\n\n# name = istDetails.getValue('name')\n# cpu = instDetails.getvalue('cpu')\n# memory = instDetails.getValue('memory')\n\n# name = instDetails.getlist('name')\n# cpu = instDetails.getlist('cpu')\n# memory = instDetails.getlist('memory')\n\ndef genuuid():\n return str(uuid.uuid4())\n\ndef randomMAC():\n return [ 0x00, 0x16, 0x3e,\n randint(0x00, 0x7f),\n randint(0x00, 0xff),\n randint(0x00, 0xff) ]\n\ndef macid(mac):\n return ':'.join(map(lambda x: \"%02x\" % x, mac))\n\nuuid=genuuid()\nmac=macid(randomMAC())\n\ninstState=cmd.getstatusoutput('sshpass -p root ssh -o StrictHostKeyChecking=No root@server \"python /root/Documents/IAAS/createVM.py {0} {1} {2} {3} {4}\"'.format(name, uuid, mac, cpu, mem))\n#print cmd.getstatusoutput(\"sshpass -p root ssh root@server firefox\")\nprint instState\n\nif(instState[0]==0):\n db=Connection.getDB()\n cusr=db.cursor()\n cusr.execute(\"insert into InstDetails(name,CPU,Memory,UUID,image,macNo) values('{0}','{1}','{2}','{3}','{4}','{5}')\".format(name,cpu,mem,uuid,1,mac))\n db.commit()\n\nprint \"Worked correctly\"" }, { "alpha_fraction": 0.5283018946647644, "alphanum_fraction": 0.5283018946647644, "avg_line_length": 13.428571701049805, "blob_id": "4e49928773e963ff50ba9be163bc21d9ef4f5166", "content_id": "dacb635d27a81d06967cd15beadf2d36925b70fd", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 106, "license_type": "no_license", "max_line_length": 35, "num_lines": 7, "path": "/Webserver/html/final/pages/test.php", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "<?php\r\n\r\n\r\n\t$filename=$_FILES['file']['name'];\r\n\t$name = $_POST['contName'];\r\n\r\n\techo $filename.\" \".$name;" }, { "alpha_fraction": 0.5879396796226501, "alphanum_fraction": 0.6180904507637024, "avg_line_length": 32.16666793823242, "blob_id": "31195a288d5425f2b280114386fd7a244096f178", "content_id": "eacd287471d17a3f802ac7faa1e26ed5ea932829", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 199, "license_type": "no_license", "max_line_length": 84, "num_lines": 6, "path": "/Webserver/cgi-bin/useradd.py", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport os\nimport sys\n\nos.system(\"\"\" echo \"apache\" | sudo -S useradd {0}\"\"\".format(sys.argv[1]))\n#os.system(\"\"\" echo \"apache\\n{0}\\n{0}\" | sudo -S useradd {0}\"\"\".format(sys.argv[2]))\n" }, { "alpha_fraction": 0.5918674468994141, "alphanum_fraction": 0.5933734774589539, "avg_line_length": 26.95652198791504, "blob_id": "c647793078485871607acbb9072ca9211c48587c", "content_id": "0adeb0fbedbc44b646e19c613cfbcac427e10a3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 664, "license_type": "no_license", "max_line_length": 115, "num_lines": 23, "path": "/Webserver/html/final/pages/loginv.php", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "<?php\r\n\trequire_once(\"../include/connect.php\");\r\n\t\r\n\t$username = $_POST['username'];\r\n\t$password = $_POST['password'];\r\n\r\n\t$sql = \"SELECT Username, Password FROM Identity WHERE username = '\".$username.\"' AND password = '\".$password.\"'\";\r\n\r\n\t$result = mysqli_query($conn, $sql);\r\n\r\n\tif (mysqli_num_rows($result) > 0) {\r\n\t \r\n\t session_start();\r\n\t $_SESSION['username'] = $username;\r\n\t echo \"<script type='text/javascript'>alert('login successfully!');\r\n window.location.href='./services.php'</script>\";\r\n\t \r\n\t} else {\r\n\t echo \"<script type='text/javascript'>alert('login failed!');\r\n \t\t window.location.href='./login.php'</script>\";\r\n\t}\r\n\r\n?>" }, { "alpha_fraction": 0.5336787700653076, "alphanum_fraction": 0.5336787700653076, "avg_line_length": 17.5, "blob_id": "49db5be155ec3ad5efef3c0434f1e346787fa1ac", "content_id": "eaeaeb1a11ed7f4bbbe338f8fb8a085eaed172d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 193, "license_type": "no_license", "max_line_length": 46, "num_lines": 10, "path": "/Webserver/html/final/pages/projectCreated.php", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "<?php\r\n\t\r\n\t session_start();\r\n\t $username = $_POST['username'];\r\n\t $projectName = $_POST['projectName'];\r\n\t $PltID = $_POST['PltID'];\r\n\r\n\t echo $projectName.\" \".$username.\" \".$PltID;\r\n\r\n?>" }, { "alpha_fraction": 0.4900605082511902, "alphanum_fraction": 0.4935177266597748, "avg_line_length": 26.2439022064209, "blob_id": "500f98922086a7dc2f545a79c598fe4bcde51628", "content_id": "9823cdf84b6500b6219b56706e412954b47b8723", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1157, "license_type": "no_license", 
"max_line_length": 165, "num_lines": 41, "path": "/Webserver/html/final/pages/createPlatform.php", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": " <?php\r\n\r\n require_once(\"../include/connect.php\");\r\n session_start();\r\n $username = $_SESSION['username'];\r\n $projectName = $_POST['project_name'];\r\n $_SESSION['project_name'] = $projectName;\r\n $projectName = $username.\"_\".$projectName;\r\n $flg = 0;\r\n \r\n \r\n\r\n $sql = \"SELECT Containers.ContName from Containers INNER JOIN PAASUsers ON PAASUsers.Username = '\".$username.\"' WHERE PAASUsers.ContID = Containers.ContId \";\r\n\r\n $result = mysqli_query($conn, $sql);\r\n\r\n if (mysqli_num_rows($result) > 0) {\r\n\r\n while($row = mysqli_fetch_assoc($result)) {\r\n \r\n if(!strcmp($projectName, $row[\"ContName\"]))\r\n {\r\n $flg = 1;\r\n break;\r\n }\r\n\r\n }\r\n }\r\n\r\n if($flg == 1){\r\n echo \"<script type='text/javascript'>alert('Application already exicts with this name!');\r\n window.location.href='./paas.php'</script>\";\r\n }\r\n\r\n else\r\n {\r\n echo \"<script type='text/javascript'>window.location.href='./projectDetails.php'</script>\";\r\n }\r\n\r\n\r\n ?>" }, { "alpha_fraction": 0.7370242476463318, "alphanum_fraction": 0.7404844164848328, "avg_line_length": 19.64285659790039, "blob_id": "d42111d1bfe65989cfc469997b84cd67b3a61e12", "content_id": "3cf2b2dfa585db6c9a583aa96f961c955203b3d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 289, "license_type": "no_license", "max_line_length": 141, "num_lines": 14, "path": "/Webserver/cgi-bin/startServers.py", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport os\nimport commands as cmd \nimport cgi,cgitb\ncgitb.enable()\n\nprint \"Content-Type: text/html; charset=UTF-8;\"\nprint \"\"\n\n\n\n\nprint cmd.getstatusoutput(\"\"\"sshpass -p root ssh -o StrictHostKeyChecking=No root@server 'bash /root/Documents/PAAS/startContainers.sh' \"\"\")\n" }, { "alpha_fraction": 0.46973901987075806, "alphanum_fraction": 0.4875069260597229, "avg_line_length": 26.303030014038086, "blob_id": "bdb8f8b06c5fb5fdd182e3d62dc0e01fde8a00ba", "content_id": "4a58e3a229795efa0eea9e124676c026d933241f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1801, "license_type": "no_license", "max_line_length": 97, "num_lines": 66, "path": "/Webserver/html/scripts/createVM.py", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "from lxml.builder import E\nfrom lxml.etree import tostring\nimport libvirt as v\nimport sys\n\nname = str(sys.argv[1])\nuuid = str(sys.argv[2])\nmacid = str(sys.argv[3])\ncpu = str(sys.argv[4])\nmem = str(sys.argv[5])\n\nvar=tostring(E.domain(\n E.name(name),\n E.uuid(uuid),\n E.memory(mem),\n E.currentMemory(mem),\n E.vcpu(cpu),\n E.os(\n E.type('hvm',arch='x86_64',machine='pc'),\n E.boot(dev='cdrom')\n ),\n E.features(\n E.acpi(),\n E.apic(),\n E.pae()\n ),\n E.clock(offset='localtime'),\n E.on_poweroff('preserve'),\n E.on_reboot('restart'),\n E.on_crash('restart'),\n E.devices(\n E.emulator('/usr/libexec/qemu-kvm'),\n E.disk(\n E.driver(name='qemu',type='qcow2',queues='4'),\n E.source(file='/var/lib/libvirt/images/CentOS-7-x86_64-GenericCloud-1503.qcow2'),\n E.target(dev='hda',bus='ide'),\n type='file',device='disk'\n ),\n E.interface(\n E.mac(address=macid),\n E.source(dev='em1',mode='bridge'),\n E.model(type='virtio'),\n E.driver(name='vhost'),\n type='direct'\n 
),\n E.channel(\n E.source(mode='bind',path='/var/lib/libvirt/qemu/rhel6.agent'),\n E.target(type='virtio',name='org.qemu.guest_agent.0'),\n type='unix'\n ),\n E.input(type='mouse',bus='ps2'),\n E.graphics(type='vnc',port='-1',autoport='yes',keymap='en-us')\n ),\n type='kvm'\n )\n)\n\nxmlconfig = var\n\nn = v.open(\"qemu://10.7.3.69/system\")\n\ndom = n.defineXML(xmlconfig)\n\ndom.create()\n\nprint(\"Guest \"+dom.name()+' has booted')" }, { "alpha_fraction": 0.543156087398529, "alphanum_fraction": 0.5518744587898254, "avg_line_length": 26.674999237060547, "blob_id": "062f70df0ca8a1f1b8a6057f5e06030ace082d17", "content_id": "1219196b5b3d19b32b7b46eaa8ff39b7aa5a665a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1147, "license_type": "no_license", "max_line_length": 108, "num_lines": 40, "path": "/Webserver/html/scripts/package/connect.py", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\nimport MySQLdb as sql\r\nimport sys\r\n\r\nclass Connection:\r\n 'Class for establising connection with VD WebServices'\r\n def __init__(self):\r\n pass\r\n @staticmethod\r\n def connect():\r\n try:\r\n db=Connection.getDB()\r\n crsr=db.cursor()\r\n sys.stdout.write(\"Connection Established\\n\")\r\n except:\r\n sys.stdout.write(\"Sorry, Could not connect to Database\\n\")\r\n return crsr\r\n\r\n @staticmethod\r\n def getDB():\r\n return sql.connect(\"10.7.3.56\",\"web\",\"web\",\"WebServices\")\r\n\r\n\r\n\r\nclass SAAS:\r\n def __init__(self):\r\n pass\r\n\r\n @staticmethod\r\n def getIP(uname,passwd):\r\n c=Connection.connect()\r\n c.execute(\"SELECT Username from Identity where Username='\"+uname+\"' and Password='\"+passwd+\"';\")\r\n if(len(c.fetchall())==1):\r\n sys.stdout.write(\"User has been varified\\n\")\r\n c.execute(\"SELECT IP from SAAS where Username='\"+uname+\"';\")\r\n tmp=c.fetchall()\r\n return tmp[0][0]\r\n else:\r\n sys.stderr.write(\"Could not verify the user\\n\")\r\n return -1\r\n" }, { "alpha_fraction": 0.7526595592498779, "alphanum_fraction": 0.7553191781044006, "avg_line_length": 17.850000381469727, "blob_id": "92cee0cedce88b483bbc62aae5feb22d530c649c", "content_id": "94aeccdbe94de5f23a0893ddc070545a2e88a154", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 376, "license_type": "no_license", "max_line_length": 47, "num_lines": 20, "path": "/Webserver/html/scripts/test.py", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport uuid\nfrom random import randint\nimport os\nimport cgi,cgitb\ncgitb.enable()\n\nprint \"Content-Type: text/html; charset=UTF-8;\"\nprint \"\"\n\ninstDetails=cgi.FieldStorage()\n\nprint instDetails.getlist('memory')\n\n#name = instDetails.getValue('name')\n#cpu = instDetails.getvalue('cpu')\n#memory = instDetails.getValue('memory')\n#print cpu\n#print name\n#print memory" }, { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.6041666865348816, "avg_line_length": 47, "blob_id": "f05211e06720cb4eedf323c0e1c6a3f71cc4eec6", "content_id": "d3cd74f2093f72b5ad18f5bd9c1f5b38cc6e2bb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 48, "license_type": "no_license", "max_line_length": 47, "num_lines": 1, "path": "/Mainserver/PAAS/startContainers.sh", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "docker start $(docker ps -a | awk {'print $1'})\n" }, { 
"alpha_fraction": 0.6823992133140564, "alphanum_fraction": 0.6843658089637756, "avg_line_length": 26.486486434936523, "blob_id": "6bb7bd2af3363c9b17d923cd6226115b13376673", "content_id": "5dc07f1c79ba6200edb1741124228fe65ed24a57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1017, "license_type": "no_license", "max_line_length": 68, "num_lines": 37, "path": "/Webserver/cgi-bin/uploadCode.py", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#from package.connect import Connection\nimport commands as cmd\nimport zipfile as zip\nimport os,zipfile\nimport cgi,cgitb\ncgitb.enable()\n\nprint \"Content-Type: text/html; charset=UTF-8;\"\nprint \"\"\n\nform = cgi.FieldStorage()\n\ncontName=form.getvalue('contName')\n# Get filename here.\nfileitem = form['file']\n\nprint fileitem.filename\n# Test if the file was uploaded\nif fileitem.filename:\n # strip leading path from file name to avoid \n # directory traversal attacks\n fn = os.path.basename(fileitem.filename.replace(\"\\\\\", \"/\" ))\n open('/PAAS/'+contName+'/'+ fn, 'wb').write(fileitem.file.read())\n\n message = 'The file \"' + fn + '\" was uploaded successfully'\n zip_ref = zipfile.ZipFile('/PAAS/'+contName+'/'+ fn, 'r')\n zip_ref.extractall('/PAAS/'+contName+'/')\n zip_ref.close()\n os.remove('/PAAS/'+contName+'/'+ fn)\nelse:\n message = 'No file was uploaded'\n \nprint \"\"\"\n<script type='text/javascript'>alert('{0}');\nwindow.location.href='/final/pages/paas.php'</script>\n\"\"\".format(message)\n" }, { "alpha_fraction": 0.7070437669754028, "alphanum_fraction": 0.7251867651939392, "avg_line_length": 35.764705657958984, "blob_id": "433c207ca9bc91c08376519e8d4069e366afd976", "content_id": "76095f3d06d63f4941bb89aa5213a0c7a637eb63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1874, "license_type": "no_license", "max_line_length": 189, "num_lines": 51, "path": "/Webserver/cgi-bin/launchPaas.py", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nfrom package.connect import Connection\nimport commands as cmd\nimport cgi,cgitb\ncgitb.enable()\n\nprint \"Content-Type: text/html; charset=UTF-8;\"\nprint \"\"\n\nform=cgi.FieldStorage()\n# name=form.getvalue('name')\n# pltid=form.getvalue('pltID')\nusername=form.getvalue('username')\nprojectName=form.getvalue('projectName')\npltid=form.getvalue('PltID')\n\nContName = username+'_'+projectName\ncusr=Connection.connect()\n\nif(type(cusr)==int):\n print \"Could not establish the connection\"\n\nprint pltid\ncusr.execute(\"select Name from Paltforms where PltID='{0}'\".format(pltid))\n\nimg=cusr.fetchall()[0][0]\n\ndb=Connection.getDB()\ncrsr=db.cursor()\ncrsr.execute(\"select max(port) from Containers\")\nport=crsr.fetchall()[0][0]+1\n\n#Creating Directory\ndirState=cmd.getstatusoutput(\"sshpass -p root ssh -o StrictHostKeyChecking=No root@server 'mkdir /PAAS/{0}' \".format(ContName))\ndirState=cmd.getstatusoutput(\"sshpass -p root ssh -o StrictHostKeyChecking=No root@server 'chmod 777 /PAAS/{0}' \".format(ContName))\n#Launching docker Container\ncontState=cmd.getstatusoutput(\"sshpass -p root ssh -o StrictHostKeyChecking=No root@server 'docker run -d -v {3}:/app -p {2}:80 --name={0} {1}'\".format(ContName,img,port,\"/PAAS/\"+ContName))\n\n\n\nif(contState[0]==0):\n getID=cmd.getstatusoutput(\"sshpass -p root ssh -o StrictHostKeyChecking=No root@server 'docker inspect {0} | jq 
.[].Id'\".format(ContName))\n ContID=getID[1][-65:-1]\n crsr.execute(\"INSERT INTO Containers(ContID,ContName,PltID,port,status) VALUES ('{0}', '{1}', '{2}', '{3}', '1')\".format(ContID,ContName,pltid,port))\n crsr.execute(\"INSERT INTO PAASUsers values('{0}','{1}')\".format(username,ContID))\n db.commit()\n print \"Platform launched Successfully on port number {0}\".format(port)\nelse:\n print contState[1]\n\nprint \"<script type='text/javascript'>window.location.href='/final/pages/paas.php'</script>\"" }, { "alpha_fraction": 0.7692307829856873, "alphanum_fraction": 0.773755669593811, "avg_line_length": 17.41666603088379, "blob_id": "1a9bd58e1ac1a1d974165c72dde59ef45b49c1e0", "content_id": "9c9f05af691c2a9a03c886071adc176dbf096f6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 221, "license_type": "no_license", "max_line_length": 47, "num_lines": 12, "path": "/Webserver/cgi-bin/test.py", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport uuid\nfrom random import randint\nimport os\nfrom package.connect import Connection\nimport cgi,cgitb\ncgitb.enable()\n\nprint \"Content-Type: text/html; charset=UTF-8;\"\nprint \"\"\n\ncr=Connection.connect()\n" }, { "alpha_fraction": 0.4637298882007599, "alphanum_fraction": 0.4853600561618805, "avg_line_length": 37.06185531616211, "blob_id": "17eb748f55a0036760ad362e282a485c38965f54", "content_id": "c3f633334e56634e3e5878e70a3eafc909d73bc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 3791, "license_type": "no_license", "max_line_length": 157, "num_lines": 97, "path": "/Webserver/html/final/pages/services.php", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "\r\n<?php\r\n session_start();\r\n if(!isset($_SESSION['username'])){\r\n session_destroy();\r\n }\r\n \r\n?>\r\n\r\n<!DOCTYPE html>\r\n<html lang=\"en-us\">\r\n <head>\r\n <title>{{ Websitename }}</title>\r\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=Edge\">\r\n <meta charset=\"UTF-8\">\r\n\r\n <!-- load bootstrap and fontawesome via CDN -->\r\n <link rel=\"stylesheet\" href=\"https://netdna.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css\" />\r\n <style>\r\n html, body, input, select, textarea\r\n {\r\n font-size: 1.05em !important;\r\n }\r\n b:hover {\r\n color: #F59D1E; \r\n }\r\n .abc:hover{\r\n border :#C5C5C5 solid 1px;\r\n }\r\n </style>\r\n\r\n <!-- load angular via CDN -->\r\n\r\n \r\n </head>\r\n <body>\r\n\r\n <header>\r\n <nav class=\"navbar navbar-inverse\">\r\n <div class=\"container-fluid\">\r\n <div class=\"navbar-header\">\r\n <a class=\"navbar-brand\" href=\"../index.php\" style=\"color: #F59D1E\">WebSiteName</a>\r\n </div>\r\n <ul class=\"nav navbar-nav\">\r\n <li><a href=\"../index.php\">Home</a></li>\r\n </ul>\r\n <ul class=\"nav navbar-nav navbar-right\">\r\n <?php if(!isset($_SESSION['username'])) { \r\n echo \"<script type='text/javascript'>\r\n window.location.href='../index.php'</script>\";\r\n }else{\r\n echo '<li class=\"active\"><a href=\"#\"><span class=\"glyphicon glyphicon-user\"></span>'.$_SESSION['username'].'</a></li>';\r\n echo '<li><a href=\"logout.php\"><span class=\"glyphicon glyphicon-log-in\"></span> logout</a></li>';\r\n } ?>\r\n \r\n </ul>\r\n </div>\r\n </nav>\r\n </header>\r\n\r\n <div class=\"container\" style=\"margin-left: 150px\">\r\n <h1 style=\"text-align: center;letter-spacing: 2px;;text-transform: uppercase;font-size: 60px;color: 
#F59D1E\"><b>Services Provided By Us!</b></h1>\r\n\r\n <h3 style=\"text-align: center;text-transform: uppercase\">Click on any service to move on....</h3> \r\n <div class=\"row\" style=\"margin-top: 70px\">\r\n\r\n\r\n <a href=\"iaas.php\"><div class=\"col-md-3 abc\" style=\"height: 200px;display: block;background-color: #F0F0F0\">\r\n <div style=\"text-align: center;letter-spacing: 5px;padding-top: 70px;font-size: 30px;color: black\"><b>IaaS</b></div>\r\n \r\n <p style=\"padding-left: 40px;color: black;font-size: 15px\">(Infrastucture as a Service)</p>\r\n\r\n </div></a>\r\n\r\n\r\n <div class=\"col-md-1\"></div>\r\n\r\n\r\n <a href=\"paas.php\"><div class=\"col-md-3 abc\" style=\"height: 200px;display: block;background-color: #F0F0F0\">\r\n <div style=\"text-align: center;letter-spacing: 5px;padding-top: 70px;font-size: 30px;color: black\"><b>PaaS</b></div>\r\n <small style=\"padding-left: 50px;color: black;font-size: 15px\">(Platfrom as a Service)</small>\r\n </div></a>\r\n\r\n\r\n <div class=\"col-md-1\"></div>\r\n\r\n\r\n <a href=\"saas.php\"><div class=\"col-md-3 abc\" style=\"height: 200px;display: block;background-color: #F0F0F0\">\r\n <div style=\"text-align: center;letter-spacing: 5px;padding-top: 70px;font-size: 30px;color: black\"><b>SaaS</b></div>\r\n <small style=\"padding-left: 50px;color: black;font-size: 15px\">(Software as a Service)</small>\r\n </div></a>\r\n\r\n\r\n </div>\r\n </div>\r\n\r\n </body>\r\n</html>\r\n" }, { "alpha_fraction": 0.7385621070861816, "alphanum_fraction": 0.7450980544090271, "avg_line_length": 29.799999237060547, "blob_id": "3e302be3bdb4ce5fd1ba3eed7fb675c1d1e565c9", "content_id": "c8de24d462fefcc429abab9ebf6cb5da3548f572", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 153, "license_type": "no_license", "max_line_length": 131, "num_lines": 5, "path": "/Webserver/cgi-bin/getIP.py", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "import os\n\ntmp = os.popen(\"sshpass -p root ssh -o StrictHostKeyChecking=No root@server \\\"virsh domifaddr Aniket --source agent eth0\\\"\").read()\n\nprint tmp" }, { "alpha_fraction": 0.3989320993423462, "alphanum_fraction": 0.41320693492889404, "avg_line_length": 32.100372314453125, "blob_id": "b86b19e1c1b2a617db6a01ae06245984d99e5db3", "content_id": "18e04ae39efe2b670c24c432c7a89f9ced003515", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 9177, "license_type": "no_license", "max_line_length": 274, "num_lines": 269, "path": "/Webserver/html/final/pages/paas.php", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "\r\n\r\n<?php\r\n\r\n require_once(\"../include/connect.php\");\r\n session_start();\r\n if(!isset($_SESSION['username'])){\r\n session_destroy();\r\n }\r\n\r\n ini_set('display_error', 'Off');\r\n \r\n?>\r\n\r\n<!DOCTYPE html>\r\n<html lang=\"en-us\">\r\n <head>\r\n <title>{{ Websitename }}</title>\r\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=Edge\">\r\n <meta charset=\"UTF-8\">\r\n\r\n <!-- load bootstrap and fontawesome via CDN -->\r\n <link rel=\"stylesheet\" href=\"https://netdna.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css\" />\r\n <style>\r\n html, body, input, select, textarea\r\n {\r\n font-size: 1.05em !important;\r\n }\r\n b:hover {\r\n color: #F59D1E; \r\n }\r\n .modal {\r\n display: none; /* Hidden by default */\r\n position: fixed; /* Stay in place */\r\n z-index: 1; /* Sit on top */\r\n padding-top: 100px; /* 
Location of the box */\r\n left: 0;\r\n top: 0;\r\n width: 100%; /* Full width */\r\n height: 100%; /* Full height */\r\n overflow: auto; /* Enable scroll if needed */\r\n background-color: rgb(0,0,0); /* Fallback color */\r\n background-color: rgba(0,0,0,0.4); /* Black w/ opacity */\r\n }\r\n\r\n /* Modal Content */\r\n .modal-content {\r\n position: relative;\r\n background-color: #fefefe;\r\n margin: auto;\r\n padding: 0;\r\n border: 1px solid #888;\r\n width: 80%;\r\n box-shadow: 0 4px 8px 0 rgba(0,0,0,0.2),0 6px 20px 0 rgba(0,0,0,0.19);\r\n -webkit-animation-name: animatetop;\r\n -webkit-animation-duration: 0.4s;\r\n animation-name: animatetop;\r\n animation-duration: 0.4s\r\n }\r\n\r\n /* Add Animation */\r\n @-webkit-keyframes animatetop {\r\n from {top:-300px; opacity:0} \r\n to {top:0; opacity:1}\r\n }\r\n\r\n @keyframes animatetop {\r\n from {top:-300px; opacity:0}\r\n to {top:0; opacity:1}\r\n }\r\n\r\n /* The Close Button */\r\n .close {\r\n color: white;\r\n float: right;\r\n font-size: 28px;\r\n font-weight: bold;\r\n }\r\n\r\n .close:hover,\r\n .close:focus {\r\n color: #000;\r\n text-decoration: none;\r\n cursor: pointer;\r\n }\r\n\r\n .modal-header {\r\n padding: 2px 16px;\r\n background-color: #000000;\r\n color: white;\r\n }\r\n\r\n .modal-body {padding: 2px 16px;}\r\n\r\n .modal-footer {\r\n padding: 2px 16px;\r\n background-color: #000000;\r\n color: white;\r\n }\r\n form{\r\n display: inline;\r\n }\r\n </style>\r\n\r\n \r\n\r\n \r\n </head>\r\n <body>\r\n\r\n <header>\r\n\t\t\t<nav class=\"navbar navbar-inverse\" data-spy=\"affix\" data-offset-top=\"197\">\r\n <div class=\"container-fluid\">\r\n <div class=\"navbar-header\">\r\n <a class=\"navbar-brand\" href=\"../index.php\" style=\"color: #F59D1E\">WebSiteName</a>\r\n </div>\r\n <ul class=\"nav navbar-nav\">\r\n <li><a href=\"../index.php\">Home</a></li>\r\n </ul>\r\n <ul class=\"nav navbar-nav navbar-right\">\r\n <?php if(!isset($_SESSION['username'])) { \r\n echo \"<script type='text/javascript'>\r\n window.location.href='../index.php'</script>\";\r\n }else{\r\n echo '<li><a href=\"services.php\">Services</a></li>';\r\n echo '<li class=\"active\"><a href=\"#\"><span class=\"glyphicon glyphicon-user\"></span>'.$_SESSION['username'].'</a></li>';\r\n echo '<li><a href=\"logout.php\"><span class=\"glyphicon glyphicon-log-in\"></span> logout</a></li>';\r\n } ?>\r\n \r\n </ul>\r\n </div>\r\n </nav>\r\n\t\t</header>\r\n\r\n <div class=\"container\">\r\n <h1 style=\"text-align: center;letter-spacing: 2px;font-size: 50px;color: #F59D1E\"><b>WELCOME TO PLATFORM AS A SERVICE (PaaS)</b></h1>\r\n\r\n \r\n\r\n <div style=\"margin-top: 50px\"><button id=\"myBtn\" class=\"btn btn-primary btn-lg\"><span class=\"glyphicon glyphicon-refresh\"></span>Create Platform</button></div>\r\n\r\n <!-- Creating Modal -->\r\n\r\n <div id=\"myModal\" class=\"modal\">\r\n <div class=\"modal-content\">\r\n <div class=\"modal-header\">\r\n <span class=\"close\">&times;</span>\r\n <h2>Your Platform Details</h2>\r\n </div>\r\n <div class=\"modal-body\">\r\n <form action=\"createPlatform.php\" method=\"post\">\r\n <label>Name Of Project</label>\r\n <input type=\"text\" name=\"project_name\" required>\r\n <input type=\"submit\" name=\"submit\" value=\"Create Instance\">\r\n </form>\r\n </div>\r\n <div class=\"modal-footer\">\r\n <h3></h3>\r\n </div>\r\n </div>\r\n\r\n </div>\r\n\r\n <!-- Ending Modal -->\r\n\r\n\r\n <!-- script For modal -->\r\n\r\n <script>\r\n \r\n var modal = document.getElementById('myModal');\r\n\r\n \r\n var btn = 
document.getElementById(\"myBtn\");\r\n\r\n \r\n var span = document.getElementsByClassName(\"close\")[0];\r\n\r\n btn.onclick = function() {\r\n modal.style.display = \"block\";\r\n }\r\n\r\n span.onclick = function() {\r\n modal.style.display = \"none\";\r\n }\r\n\r\n window.onclick = function(event) {\r\n if (event.target == modal) {\r\n modal.style.display = \"none\";\r\n }\r\n }\r\n </script>\r\n\r\n <!-- script end For model -->\r\n\r\n\r\n <div style=\"margin-top: 30px\">\r\n <h2 style=\"text-align: center;font-size: 40px\">YOUR PROJECTS</h2>\r\n\r\n \r\n <?php\r\n\r\n $username = $_SESSION['username'];\r\n\r\n \r\n \r\n\r\n $sql = \"SELECT Containers.ContName, Paltforms.Info, Containers.IP, Containers.port FROM ((Containers INNER JOIN Paltforms ON Containers.PltID = Paltforms.PltID) INNER JOIN PAASUsers ON Containers.ContID = PAASUsers.ContID) where PAASUsers.Username='\".$username.\"' \";\r\n\r\n $result = mysqli_query($conn, $sql);\r\n\r\n if (mysqli_num_rows($result) > 0) {\r\n\r\n echo '\r\n <table class=\"table\" style=\"margin-top: 30px\">\r\n <thead class=\"thead-dark\">\r\n <tr>\r\n <th scope=\"col\">Project Name</th>\r\n <th scope=\"col\">Platform</th>\r\n <th scope=\"col\">IP</th>\r\n <th scope=\"col\">Port Number</th>\r\n <th scope=\"col\">Choose File</th>\r\n <th scope=\"col\">operation</th>\r\n </tr>\r\n </thead>\r\n <tbody>';\r\n\r\n while($row = mysqli_fetch_assoc($result)) {\r\n \r\n \r\n\r\n echo \"<tr>\r\n <td scope=\".'row'.\">\".str_replace($_SESSION['username'].\"_\", \"\", $row[\"ContName\"]).\"</td>\r\n <td>\".$row[\"Info\"].\"</td>\r\n <td>\".$row[\"IP\"].\"</td>\r\n <td>\".$row[\"port\"].\"</td>\r\n\r\n <td><form action='/cgi-bin/uploadCode.py' method='post' enctype='multipart/form-data'>\r\n <input type='hidden' value=\".$row[\"ContName\"].\" name='contName'>\r\n <input type='file' name='file' required>\r\n <button type='submit' class='glyphicon glyphicon-upload'>\r\n </form></td>\r\n\r\n\r\n <td>\r\n <form action='/cgi-bin/deleteCont.py' action='get'>\r\n <input type='hidden' value=\".$row[\"ContName\"].\" name ='operation'>\r\n <button type='submit' class='btn btn-danger' role='button' aria-disabled='true'><span class='glyphicon glyphicon-trash'></span>DELETE</button>\r\n </form>\r\n </td>\r\n </tr>\";\r\n\r\n }\r\n\r\n echo ' </tbody>\r\n </table>';\r\n\r\n } else {\r\n echo '<h3 style=\"text-align: center;color: black\">No Project found</h3>\r\n \r\n </div>';\r\n }\r\n\r\n ?>\r\n\r\n\r\n \r\n\t\t</div>\r\n\r\n </body>\r\n</html>\r\n" }, { "alpha_fraction": 0.6788321137428284, "alphanum_fraction": 0.7017726898193359, "avg_line_length": 28.090909957885742, "blob_id": "d7315fc260c9754c141588d615eb4450ba16540c", "content_id": "eccaed981ba74d96e9be81445f83dda249fbe688", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 959, "license_type": "no_license", "max_line_length": 186, "num_lines": 33, "path": "/Webserver/html/scripts/launchPaas.py", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nfrom package.connect import Connection\nimport commands as cmd\nimport cgi,cgitb\ncgitb.enable()\n\nprint \"Content-Type: text/html; charset=UTF-8;\"\nprint \"\"\n\nform=cgi.FieldStorage()\nname=form.getvalue('name')\npltid=form.getvalue('pltID')\ncusr=Connection.connect()\n\nprint pltid\ncusr.execute(\"select Name from Paltforms where PltID='{0}'\".format(pltid))\n\nimg=cusr.fetchall()[0][0]\n\ndb=Connection.getDB()\ncrsr=db.cursor()\ncrsr.execute(\"select max(port) from 
Containers\")\nport=crsr.fetchall()[0]\n\nprint port\n\n#contState=cmd.getstatusoutput(\"sshpass -p root ssh -o StrictHostKeyChecking=No root@server 'docker run -d -p 1234:80 --name={0} {1}'\".format(name,img))\n\n# if(contState[0]==0):\n# cusr.execute(\"INSERT INTO 'WebServices'.'Containers' ('ContID', 'ContName', 'PltID', 'port', 'status') VALUES ('{0}', '{1}', '{2}', '{3}', '{4}')\".format(ContID,name,pltid,port,1))\n# print \"Platform launched Successfully\"\n# else:\n# print contState[1]" }, { "alpha_fraction": 0.707317054271698, "alphanum_fraction": 0.707317054271698, "avg_line_length": 13, "blob_id": "974662a4cafa4af44f9c1712f0682c12607c3a56", "content_id": "76e7a8fd16b67570a396954b57a4f50dc7a29d41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 41, "license_type": "no_license", "max_line_length": 24, "num_lines": 3, "path": "/Webserver/cgi-bin/mountNfs.sh", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "#!/usr/bin/bash\n\nmount server:/PAAS /PAAS" }, { "alpha_fraction": 0.5609756112098694, "alphanum_fraction": 0.5609756112098694, "avg_line_length": 22.08333396911621, "blob_id": "59f8421506310c83832308dae701dd7b8bd466ed", "content_id": "b19e0637b287946bf6b724049d5d4f74940e4177", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 287, "license_type": "no_license", "max_line_length": 48, "num_lines": 12, "path": "/Webserver/html/final/pages/instanceCreated.php", "repo_name": "Rajat-Dabade/MiniCloud", "src_encoding": "UTF-8", "text": "<?php\r\n\t\r\n\t session_start();\r\n\t $username = $_SESSION['username'];\r\n\t $instanceName = $_POST['instanceName'];\r\n\t $instanceName = $username.\"_\".$instanceName;\r\n\t $cpu_core = $_POST['cpu_core'];\r\n\t $memory = $_POST['memory'];\r\n\r\n\t echo $instanceName.\" \".$cpu_core.\" \".$memory;\r\n\r\n?>" } ]
32
anhmel/FB-search-html-page-creator
https://github.com/anhmel/FB-search-html-page-creator
455f5349a1ea7d4615ecea812e5aa6b83d76000e
d54332ce7484a185c41474fd34c48da09bcaeef2
28a63d69ba00668c4cd6f5c0ba2e79be05d840ed
refs/heads/master
2020-07-21T22:00:37.516053
2019-09-11T02:28:54
2019-09-11T02:28:54
206,983,389
2
2
null
2019-09-07T14:57:44
2019-09-11T02:28:56
2019-09-11T02:28:55
Python
[ { "alpha_fraction": 0.6078161001205444, "alphanum_fraction": 0.6110345125198364, "avg_line_length": 31.492307662963867, "blob_id": "20aa9159e8c2e855d5b3eba8950fb35ee4fa2b9e", "content_id": "6759aa22104d9c9771995ab2f6550ec8541a3754", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2175, "license_type": "no_license", "max_line_length": 134, "num_lines": 65, "path": "/json to HTML FB serach results viewer_v4.py", "repo_name": "anhmel/FB-search-html-page-creator", "src_encoding": "UTF-8", "text": "\"\"\"\r\n\r\nThis python script reads json search result file, sort results by time/date,\r\ncreates links to posts and puts the links together with corresponding messages\r\nand images in an html file. Then the program opens the html file in a default browser. \r\n\r\n1. After getting results of url FB search by the method of T-Rekt, save the \r\nresults page as a json file on your comptuer disk (graphSearch.json). \r\n2. Run this script. The file open dialog will appear.\r\n3. Locate and open the graphSearch.json file. Result page with clickable links \r\nwill appear in your default browser.\r\n\r\n\r\n\"\"\"\r\nfrom tkinter.filedialog import askopenfilename\r\nimport json\r\nimport webbrowser\r\nimport os\r\n\r\n\r\nfilename = askopenfilename()\r\n#filename = \"c:\\\\Users\\\\.....graphSearch.json\"\r\n#File = open(filename, encoding = 'utf-8') #encoding = 'utf-8' parameter works most of the time\r\nFile = open(filename, errors = 'ignore') #Use this parameter if you get \"UnicodeEncodeError: 'charmap' codec can't encode character..\"\r\ncontent = json.load(File)\r\nFile.close()\r\nl = content['data'].get('result')\r\nst = ''\r\nlsorted = sorted(l, key=lambda i: i['creation_time'],reverse = True)\r\nfor j in lsorted:\r\n id = j.get('id')\r\n if '_' in id:\r\n link = 'https://www.facebook.com/'+ id \r\n else:\r\n link = 'https://www.facebook.com/'+ (j.get('actor')).get('id')+'/posts/' \\\r\n + j.get('id')\r\n hlink = '<a href='+'\"'+link+'\"' + '>'+link+'</a>' \r\n \r\n imageLink = j.get('image')\r\n \r\n if type(imageLink) == str: \r\n imhLink = '<img src='+'\"'+imageLink+'\"' + '>' \r\n st = st + hlink + '<br/>'+'<br/>' + imhLink + '<br/>'+'<br/>'\r\n else:\r\n st = st + hlink + '<br/>'+'<br/>'\r\n \r\n message = j.get('message')\r\n \r\n if type(message) == str:\r\n st = st + message+ '<br/>'+'<br/>'+'<br/>'\r\n #print (st)\r\n\r\nmsg = '<html><head></head><body><p>'+st+'</p></body></html>'\r\n\r\nfilename = 'graphSearch.html'\r\n\r\nFile = open(filename, 'w')\r\n\r\nFile.write(msg)\r\n\r\n#lines below change path according to file location\r\nfilename1 = 'file:///'+os.getcwd()+'/' + filename\r\nwebbrowser.open_new_tab(filename1)\r\nFile=open(filename)\r\nFile.close" } ]
1
amietn/chip8-emu
https://github.com/amietn/chip8-emu
49dc5fde458ddd4826401885f50b4877ca56750c
525a90be3f84083a163aea4caa70d7e124c0392f
51c8d63f2fccb7c2d4322d7dc80896dc4395caff
refs/heads/master
2021-01-19T02:14:09.929820
2014-08-18T16:53:35
2014-08-18T16:53:35
23,079,109
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.4987678527832031, "alphanum_fraction": 0.5520508289337158, "avg_line_length": 26.33682632446289, "blob_id": "2292b00dd846f3c5b7488225003240f2affab718", "content_id": "7a7166d9681149ab5b73f58dc0518a7365b4d286", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18261, "license_type": "no_license", "max_line_length": 134, "num_lines": 668, "path": "/chip8.py", "repo_name": "amietn/chip8-emu", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n\n\"\"\"\nSimple Chip-8 emulator written just for fun\n\n@author: Nils Amiet\n\nTODO: implement sound\nTODO: implement controls\n\"\"\"\n\n\nfrom time import sleep\nimport os\nimport math\nimport curses\nimport random\nimport sys\n\n\nclass Instruction:\n \"\"\"Abstract class representing an instruction\"\"\"\n def __init__(self, cpu):\n self.cpu = cpu\n\n def execute(self, opcode):\n pass\n\n\nclass OP0NNN(Instruction):\n \"\"\"Calls RCA 1802 program at address NNN.\"\"\"\n def execute(self, opcode):\n pass\n\n\nclass OP00E0(Instruction):\n \"\"\"Clears the screen.\"\"\"\n def execute(self, opcode):\n self.cpu.reset_screen()\n\n\nclass OP00EE(Instruction):\n \"\"\"Returns from a subroutine.\"\"\"\n def execute(self, opcode):\n self.cpu.program_counter = self.cpu.stack.pop()\n\n\nclass OP1NNN(Instruction):\n \"\"\"Jumps to address NNN.\"\"\"\n def execute(self, opcode):\n nnn = opcode & 0x0fff\n self.cpu.program_counter = nnn\n self.cpu.program_counter -= 2\n\n\nclass OP2NNN(Instruction):\n \"\"\"Calls subroutine at NNN.\"\"\"\n def execute(self, opcode):\n nnn = opcode & 0x0fff\n self.cpu.stack.append(self.cpu.program_counter)\n self.cpu.program_counter = nnn\n self.cpu.program_counter -= 2\n\n\nclass OP3XNN(Instruction):\n \"\"\"Skips the next instruction if VX equals NN.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n nn = opcode & 0x00ff\n vx = self.cpu.v_registers[x]\n\n if vx == nn:\n self.cpu.program_counter += 2\n\n\nclass OP4XNN(Instruction):\n \"\"\"Skips the next instruction if VX doesn't equal NN.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n nn = opcode & 0x00ff\n vx = self.cpu.v_registers[x]\n\n if vx != nn:\n self.cpu.program_counter += 2\n\n\nclass OP5XY0(Instruction):\n \"\"\"Skips the next instruction if VX equals VY.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n y = (opcode & 0x00f0) >> 4\n\n vx = self.cpu.v_registers[x]\n vy = self.cpu.v_registers[y]\n\n if vx == vy:\n self.cpu.program_counter += 2\n\n\nclass OP6XNN(Instruction):\n \"\"\"Sets VX to NN.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n nn = (opcode & 0x00ff)\n\n self.cpu.v_registers[x] = nn\n\n\nclass OP7XNN(Instruction):\n \"\"\"Adds NN to VX.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n nn = (opcode & 0x00ff)\n\n self.cpu.v_registers[x] += nn\n\n\nclass OP8XY0(Instruction):\n \"\"\"Sets VX to the value of VY.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n y = (opcode & 0x00f0) >> 4\n\n self.cpu.v_registers[x] = self.cpu.v_registers[y]\n\n\nclass OP8XY1(Instruction):\n \"\"\"Sets VX to VX or VY.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n y = (opcode & 0x00f0) >> 4\n\n vx = self.cpu.v_registers[x]\n vy = self.cpu.v_registers[y]\n\n self.cpu.v_registers[x] = vx | vy\n\n\nclass OP8XY2(Instruction):\n \"\"\"Sets VX to VX and VY.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n y = (opcode & 0x00f0) >> 4\n\n vx = 
self.cpu.v_registers[x]\n vy = self.cpu.v_registers[y]\n\n self.cpu.v_registers[x] = vx & vy\n\n\nclass OP8XY3(Instruction):\n \"\"\"Sets VX to VX xor VY.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n y = (opcode & 0x00f0) >> 4\n\n vx = self.cpu.v_registers[x]\n vy = self.cpu.v_registers[y]\n\n self.cpu.v_registers[x] = vx ^ vy\n\n\nclass OP8XY4(Instruction):\n \"\"\"Adds VY to VX. VF is set to 1 when there's a carry,\n and to 0 when there isn't.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n y = (opcode & 0x00f0) >> 4\n\n vx = self.cpu.v_registers[x]\n vy = self.cpu.v_registers[y]\n\n result = vx + vy\n carry = 1 if result > 0xff else 0\n\n self.cpu.v_registers[x] = result & 0xff\n self.cpu.v_registers[0xf] = carry\n\n\nclass OP8XY5(Instruction):\n \"\"\"VY is subtracted from VX.\n VF is set to 0 when there's a borrow,\n and 1 when there isn't.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n y = (opcode & 0x00f0) >> 4\n\n vx = self.cpu.v_registers[x]\n vy = self.cpu.v_registers[y]\n\n result = vx - vy\n\n if result < 0:\n borrow = 1\n result += 0x100\n else:\n borrow = 0\n\n self.cpu.v_registers[x] = result\n self.cpu.v_registers[0xf] = 1 - borrow\n\n\nclass OP8XY6(Instruction):\n \"\"\"Shifts VX right by one.\n VF is set to the value of the\n least significant bit of VX before the shift.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n y = (opcode & 0x00f0) >> 4\n\n vx = self.cpu.v_registers[x]\n lsb = vx & 1\n\n self.cpu.v_registers[0xf] = lsb\n self.cpu.v_registers[x] = vx >> 1\n\n\nclass OP8XY7(Instruction):\n \"\"\"Sets VX to VY minus VX.\n VF is set to 0 when there's a borrow,\n and 1 when there isn't.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n y = (opcode & 0x00f0) >> 4\n\n vx = self.cpu.v_registers[x]\n vy = self.cpu.v_registers[y]\n\n result = vy - vx\n\n if result < 0:\n borrow = 1\n result += 0x100\n else:\n borrow = 0\n\n self.cpu.v_registers[x] = result\n self.cpu.v_registers[0xf] = 1 - borrow\n\n\nclass OP8XYE(Instruction):\n \"\"\"Shifts VX left by one.\n VF is set to the value of the\n most significant bit of VX before the shift.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n y = (opcode & 0x00f0) >> 4\n\n vx = self.cpu.v_registers[x]\n msb = vx >> 7\n\n self.cpu.v_registers[0xf] = msb\n self.cpu.v_registers[x] = (vx << 1) & 0xff\n\n\nclass OP9XY0(Instruction):\n \"\"\"Skips the next instruction if VX doesn't equal VY.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n y = (opcode & 0x00f0) >> 4\n\n vx = self.cpu.v_registers[x]\n vy = self.cpu.v_registers[y]\n\n if vx != vy:\n self.cpu.program_counter += 2\n\n\nclass OPANNN(Instruction):\n \"\"\"Sets I to the address NNN.\"\"\"\n def execute(self, opcode):\n nnn = opcode & 0x0fff\n self.cpu.i_register = nnn\n\n\nclass OPBNNN(Instruction):\n \"\"\"Jumps to the address NNN plus V0.\"\"\"\n def execute(self, opcode):\n nnn = opcode & 0x0fff\n v0 = self.cpu.v_registers[0]\n self.cpu.program_counter = nnn + v0\n self.cpu.program_counter -= 2\n\n\nclass OPCXNN(Instruction):\n \"\"\"Sets VX to a random number and NN.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n nn = opcode & 0x00ff\n\n rnd = random.randint(0, 255)\n self.cpu.v_registers[x] = rnd & nn\n\n\nclass OPDXYN(Instruction):\n \"\"\"Sprites stored in memory at location in index register (I),\n maximum 8bits wide.\n Wraps around the screen.\n If when drawn, clears a pixel,\n register VF is set to 1 otherwise it is zero.\n All drawing 
is XOR drawing (e.g. it toggles the screen pixels)\n\n More info:\n Draw a sprite at position VX, VY with N bytes of sprite data starting at the address stored in I\n Set VF to 01 if any set pixels are changed to unset, and 00 otherwise\n \"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n y = (opcode & 0x00f0) >> 4\n n = (opcode & 0x000f)\n\n vx = self.cpu.v_registers[x]\n vy = self.cpu.v_registers[y]\n i = self.cpu.i_register\n\n self.cpu.v_registers[0xf] = 0\n\n y_offset = 0\n for yc in range(vy, vy+n):\n sprite_row = self.cpu.memory[i + y_offset]\n y_offset += 1\n\n x_offset = 0\n for xc in range(vx, vx + 8):\n xc %= Chip8CPU.SCREEN_WIDTH\n yc %= Chip8CPU.SCREEN_HEIGHT\n # if xc >= Chip8CPU.SCREEN_WIDTH or yc >= Chip8CPU.SCREEN_HEIGHT:\n # continue\n\n current_pixel = self.cpu.get_pixel(xc, yc)\n sprite_pixel = 0 if (sprite_row & (1 << (7 - x_offset))) == 0 else 1\n new_pixel = current_pixel ^ sprite_pixel\n x_offset += 1\n\n if current_pixel == 1 and sprite_pixel == 1:\n self.cpu.v_registers[0xf] = 1\n\n self.cpu.set_pixel(xc, yc, new_pixel)\n\n\nclass OPEX9E(Instruction):\n \"\"\"Skips the next instruction if the key stored in VX is pressed.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n vx = self.cpu.v_registers[x]\n\n if self.cpu.inputs[vx] == 1:\n self.cpu.program_counter += 2\n\n\nclass OPEXA1(Instruction):\n \"\"\"Skips the next instruction if the key stored in VX isn't pressed.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n vx = self.cpu.v_registers[x]\n\n if self.cpu.inputs[vx] == 0:\n self.cpu.program_counter += 2\n\n\nclass OPFX07(Instruction):\n \"\"\"Sets VX to the value of the delay timer.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n self.cpu.v_registers[x] = self.cpu.delay_timer\n\n\nclass OPFX0A(Instruction):\n \"\"\"A key press is awaited, and then stored in VX.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n # TODO: unimplemented\n self.cpu.v_registers[x] = random.randint(0, 15)\n\n\nclass OPFX15(Instruction):\n \"\"\"Sets the delay timer to VX.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n vx = self.cpu.v_registers[x]\n self.cpu.delay_timer = vx\n\n\nclass OPFX18(Instruction):\n \"\"\"Sets the sound timer to VX.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n vx = self.cpu.v_registers[x]\n self.cpu.sound_timer = vx\n\n\nclass OPFX1E(Instruction):\n \"\"\"Adds VX to I.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n vx = self.cpu.v_registers[x]\n i = self.cpu.i_register\n\n result = i + vx\n self.cpu.v_registers[0xf] = 1 if result > 0xfff else 0\n self.cpu.i_register = result % 0xfff\n\n\nclass OPFX29(Instruction):\n \"\"\"Sets I to the location of the sprite for the character in VX.\n Characters 0-F (in hexadecimal) are represented by a 4x5 font.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n vx = self.cpu.v_registers[x]\n\n self.cpu.i_register = vx * 5\n\n\nclass OPFX33(Instruction):\n \"\"\"Stores the Binary-coded decimal representation of VX,\n with the most significant of three digits at the address in I,\n the middle digit at I plus 1, and the least significant digit at I plus 2.\n (In other words, take the decimal representation of VX,\n place the hundreds digit in memory at location in I,\n the tens digit at location I+1, and the ones digit at location I+2.)\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n vx = self.cpu.v_registers[x]\n\n hundreds = math.floor(vx / 100)\n vx -= hundreds 
* 100\n\n tens = math.floor(vx / 10)\n vx -= tens * 10\n\n units = vx\n\n i = self.cpu.i_register\n self.cpu.memory[i] = hundreds\n self.cpu.memory[i + 1] = tens\n self.cpu.memory[i + 2] = units\n\n\nclass OPFX55(Instruction):\n \"\"\"Stores V0 to VX in memory starting at address I.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n i = self.cpu.i_register\n self.cpu.memory[i:i + x + 1] = self.cpu.v_registers[0:x + 1]\n\n\nclass OPFX65(Instruction):\n \"\"\"Fills V0 to VX with values from memory starting at address I.\"\"\"\n def execute(self, opcode):\n x = (opcode & 0x0f00) >> 8\n i = self.cpu.i_register\n self.cpu.v_registers[0:x + 1] = self.cpu.memory[i:i + x + 1]\n\n\nclass Chip8CPU:\n \"Chip-8 CPU\"\n\n START_ADDRESS = 0x200\n SCREEN_WIDTH = 64\n SCREEN_HEIGHT = 32\n BLACK = 0\n WHITE = 1\n CHARS = {\n BLACK: ' ',\n WHITE: '\\u2588'\n }\n\n def __init__(self, stdscr):\n self.stdscr = stdscr\n self.reset_cpu()\n\n self.opcode_table = [\n # mask, expected result, instruction\n (0xf000, 0x0fff, OP0NNN(self)),\n (0xffff, 0x00e0, OP00E0(self)),\n (0xffff, 0x00ee, OP00EE(self)),\n (0xf000, 0x1000, OP1NNN(self)),\n (0xf000, 0x2000, OP2NNN(self)),\n (0xf000, 0x3000, OP3XNN(self)),\n (0xf000, 0x4000, OP4XNN(self)),\n (0xf00f, 0x5000, OP5XY0(self)),\n (0xf000, 0x6000, OP6XNN(self)),\n (0xf000, 0x7000, OP7XNN(self)),\n (0xf00f, 0x8000, OP8XY0(self)),\n (0xf00f, 0x8001, OP8XY1(self)),\n (0xf00f, 0x8002, OP8XY2(self)),\n (0xf00f, 0x8003, OP8XY3(self)),\n (0xf00f, 0x8004, OP8XY4(self)),\n (0xf00f, 0x8005, OP8XY5(self)),\n (0xf00f, 0x8006, OP8XY6(self)),\n (0xf00f, 0x8007, OP8XY7(self)),\n (0xf00f, 0x800e, OP8XYE(self)),\n (0xf00f, 0x9000, OP9XY0(self)),\n (0xf000, 0xa000, OPANNN(self)),\n (0xf000, 0xb000, OPBNNN(self)),\n (0xf000, 0xc000, OPCXNN(self)),\n (0xf000, 0xd000, OPDXYN(self)),\n (0xf0ff, 0xe09e, OPEX9E(self)),\n (0xf0ff, 0xe0a1, OPEXA1(self)),\n (0xf0ff, 0xf007, OPFX07(self)),\n (0xf0ff, 0xf00a, OPFX0A(self)),\n (0xf0ff, 0xf015, OPFX15(self)),\n (0xf0ff, 0xf018, OPFX18(self)),\n (0xf0ff, 0xf01e, OPFX1E(self)),\n (0xf0ff, 0xf029, OPFX29(self)),\n (0xf0ff, 0xf033, OPFX33(self)),\n (0xf0ff, 0xf055, OPFX55(self)),\n (0xf0ff, 0xf065, OPFX65(self))\n ]\n\n def reset_cpu(self):\n # memory\n self.memory = [0 for x in range(4096)]\n self.program_counter = Chip8CPU.START_ADDRESS\n self.stack = []\n\n # registers\n self.v_registers = [0 for x in range(16)]\n self.i_register = 0\n\n # timers\n self.delay_timer = 0\n self.sound_timer = 0\n\n # inputs\n self.inputs = [0 for x in range(16)]\n\n # graphics\n self.reset_screen()\n self.init_character_sprites()\n\n def init_character_sprites(self):\n # 0-F character sprites\n self.memory[0:80] = [\n 0xf0, 0x90, 0x90, 0x90, 0xf0,\n 0x20, 0x60, 0x20, 0x20, 0x70,\n 0xf0, 0x10, 0xf0, 0x80, 0xf0,\n 0xf0, 0x10, 0xf0, 0x10, 0xf0,\n 0x90, 0x90, 0xf0, 0x10, 0x10,\n 0xf0, 0x80, 0xf0, 0x10, 0xf0,\n 0xf0, 0x80, 0xf0, 0x90, 0xf0,\n 0xf0, 0x10, 0x20, 0x40, 0x40,\n 0xf0, 0x90, 0xf0, 0x90, 0xf0,\n 0xf0, 0x90, 0xf0, 0x10, 0xf0,\n 0xf0, 0x90, 0xf0, 0x90, 0x90,\n 0xe0, 0x90, 0xe0, 0x90, 0xe0,\n 0xf0, 0x80, 0x80, 0x80, 0xf0,\n 0xe0, 0x90, 0x90, 0x90, 0xe0,\n 0xf0, 0x80, 0xf0, 0x80, 0xf0,\n 0xf0, 0x80, 0xf0, 0x80, 0x80\n ]\n\n def reset_screen(self):\n self.screen = [\n [0 for x in range(Chip8CPU.SCREEN_WIDTH)]\n for y in range(Chip8CPU.SCREEN_HEIGHT)\n ]\n\n def update_timers(self):\n if self.delay_timer > 0:\n self.delay_timer -= 1\n\n if self.sound_timer > 0:\n self.sound_timer -= 1\n\n def print_debug(self):\n registers = [\"V%s: %s\" % (x, vx) for x, vx in 
enumerate(self.v_registers)]\n inputs = [\"I%s: %s\" % (i, ix) for i, ix in enumerate(self.inputs)]\n\n debug_line1 = \"PC: %s | I: %s | DT: %s | ST: %s\" % (self.program_counter, self.i_register, self.delay_timer, self.sound_timer)\n debug_line2 = \" | \".join(registers)\n debug_line3 = \" | \".join(inputs)\n\n self.stdscr.addstr(Chip8CPU.SCREEN_HEIGHT, 0, debug_line1)\n self.stdscr.addstr(Chip8CPU.SCREEN_HEIGHT + 1, 0, debug_line2)\n self.stdscr.addstr(Chip8CPU.SCREEN_HEIGHT + 2, 0, debug_line3)\n\n def print_screen(self):\n for i, row in enumerate(self.screen):\n text_row = [self.CHARS[pixel] for pixel in row]\n self.stdscr.addstr(i, 0, \"\".join(text_row))\n\n def set_pixel(self, x, y, value):\n self.screen[y][x] = value\n\n def get_pixel(self, x, y):\n return self.screen[y][x]\n\n def load_rom(self, rom_path):\n with open(rom_path, 'rb') as rom_file:\n i = Chip8CPU.START_ADDRESS\n\n byte = rom_file.read(1)\n while byte:\n self.memory[i] = byte[0]\n i += 1\n\n byte = rom_file.read(1)\n\n def fetch(self):\n \"\"\"Fetches the next opcode from memory and returns it\"\"\"\n opcode = (self.memory[self.program_counter] << 8) + self.memory[self.program_counter + 1]\n return opcode\n\n def decode(self, opcode):\n \"\"\"Decodes the opcode and returns the instruction to be executed\"\"\"\n for mask, result, instruction in self.opcode_table:\n if (opcode & mask) == result:\n return instruction\n\n def execute(self, instruction, opcode):\n \"\"\"Executes instruction\"\"\"\n instruction.execute(opcode)\n\n def update_pc(self):\n self.program_counter += 2\n\n def cycle(self):\n opcode = self.fetch()\n instruction = self.decode(opcode)\n self.execute(instruction, opcode)\n self.update_pc()\n\n def start(self):\n \"\"\"Runs the program\"\"\"\n while True:\n for i in range(4):\n self.cycle()\n\n self.stdscr.clear()\n self.print_debug()\n self.print_screen()\n self.stdscr.refresh()\n\n sleep(1/60)\n self.update_timers()\n\n\ndef main(stdscr, rom_path):\n cpu = Chip8CPU(stdscr)\n\n cpu.load_rom(rom_path)\n cpu.start()\n\n\nif __name__ == \"__main__\":\n try:\n rom_path = sys.argv[1]\n except IndexError:\n print(\"usage: %s <rom_path>\" % (sys.argv[0],))\n exit(1)\n\n try:\n stdscr = curses.initscr()\n curses.noecho()\n curses.cbreak()\n stdscr.keypad(True)\n curses.curs_set(0)\n\n main(stdscr, rom_path)\n\n finally:\n curses.nocbreak()\n stdscr.keypad(False)\n curses.echo()\n curses.endwin()\n curses.curs_set(1)\n" } ]
1
davidrasm/EMSI-SARS-CoV-2
https://github.com/davidrasm/EMSI-SARS-CoV-2
d4d8ccb7cc791905dd701e97a000d8d7ab1ead91
3b9f0c20f67366e919abcbd77998842b68828344
7bded8b2adb687072991872e29ba95fbf0593b6d
refs/heads/main
2023-08-25T06:27:02.243004
2021-10-04T16:43:34
2021-10-04T16:43:34
370,350,568
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6267969012260437, "alphanum_fraction": 0.645451545715332, "avg_line_length": 34.7215690612793, "blob_id": "be2409f6b6d4c923a9480f9b9be3b274edce9abd", "content_id": "e68176559467363eff365469b4cd5fc501424904", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9113, "license_type": "no_license", "max_line_length": 141, "num_lines": 255, "path": "/code/subsample_GISAID_seqs.py", "repo_name": "davidrasm/EMSI-SARS-CoV-2", "src_encoding": "UTF-8", "text": "\"\"\"\nCreated on Fri Feb 12 06:37:59 2021\n\nRuns bioinformatic pipeline for global trees/seqs from GISAID\n\n@author: david\n\"\"\"\nfrom pathlib import Path\nimport pandas as pd\nimport numpy as np\nimport dendropy\nfrom Bio.Seq import Seq\nfrom Bio import SeqIO\nfrom Bio import AlignIO\nfrom Bio.SeqRecord import SeqRecord\nimport time\nimport subprocess\nimport sys\nimport re\n\n\ndef get_geographic_locs(df,column_name,mapToRegion=False):\n \n path = './covid-analysis/maps/'\n code_map_file = path + 'code2StateMap.csv'\n state_map_file = path + 'state2HHSRegionMap.csv'\n \n code_df = pd.read_csv(code_map_file, index_col=0)\n codeMap = {index:row['State'] for index, row in code_df.iterrows()}\n \n state_df = pd.read_csv(state_map_file, index_col=0)\n stateMap = {index:row['Region'] for index, row in state_df.iterrows()}\n \n drop_indexes = []\n locations = []\n for index, row in df.iterrows():\n \n gloc = row['virus_name'].split('/')[2] # after 2nd backslash\n gloc = gloc[:2] # first two chars should be state\n \n if gloc in codeMap:\n state_loc = codeMap[gloc]\n if mapToRegion:\n mapped_loc = stateMap[state_loc]\n else:\n mapped_loc = state_loc\n else:\n mapped_loc = 0 # 0 represents unknown\n print(\"Unrecognized geographic location: \" + gloc)\n \n locations.append(mapped_loc)\n \n df[column_name] = locations\n df.drop(drop_indexes,inplace=True) # drop unknowns\n \n return df\n\ndef fasta2df(fasta_file):\n \n \"Get sequences from fasta\"\n seq_dic = {}\n for record in SeqIO.parse(fasta_file, \"fasta\"):\n seq_dic[record.id] = [i for i in record.seq]\n df = pd.DataFrame.from_dict(seq_dic, orient='index')\n \n return df\n\ndef date_tree(tree_file,tip_date_file,rate_file,verbose = False):\n \n cluster = False\n if cluster:\n cmd = '~/lsd -i ' + tree_file + ' -d ' + tip_date_file + ' -c -w ' + rate_file\n else:\n cmd = 'lsd -i ' + tree_file + ' -d ' + tip_date_file + ' -c -w ' + rate_file\n try:\n output = subprocess.check_output(cmd, shell=True,stderr=subprocess.STDOUT)\n if verbose:\n sys.stdout.write(output)\n except subprocess.CalledProcessError as exc:\n print(exc.output)\n print('Execution of \"%s\" failed!\\n' % cmd)\n sys.exit(1)\n \n \"Parse newick tree and remove date annotation\"\n f = open(tree_file + '.result.date.nexus')\n line = f.readline()\n while line:\n if \"tree 1 =\" in line:\n tree = line.split()[3]\n line = f.readline()\n f.close()\n tree = re.sub(\"[\\[].*?[\\]]\", \"\", tree) # remove bracketed dates\n out_tree = tree_file.split('.')[0] + '_dated.tre'\n nwk=open(out_tree,\"w\")\n nwk.write(tree + '\\n')\n nwk.close() \n\ndef write_tip_dates(tip_date_dict,date_file):\n \n txt=open(date_file,\"w\")\n txt.write(str(len(tip_date_dict)) + '\\n')\n for k,v in tip_date_dict.items():\n txt.write(k + '\\t' + str(v) + '\\n')\n txt.close()\n \ndef rename_seqs(df,records,del_label):\n \n \"Iterate through list of records, renaming each as we go\"\n new_records = []\n for rec in records:\n date = df.loc[rec.name]['collection_date']\n del_state = 
df.loc[rec.name][del_label]\n if del_state:\n del_state = 'Present'\n else:\n del_state = 'Absent'\n rec.id = rec.name + '_' + del_state + '_' + date\n rec.description = '' # set description blank\n new_records.append(rec)\n\n return new_records\n\ndef get_nsp6_del_state(df,align_file,column_name):\n \n \"This is specifically hardcoded for the nsp6 del9 deletion\"\n seq_dict = SeqIO.to_dict(SeqIO.parse(align_file, \"fasta\"))\n deletions = []\n for index, row in df.iterrows():\n rec = seq_dict.get(index)\n seq_str = str(rec.seq[11287:11296]) # genomic start pos is shifted -1 for zero-based indexing\n del_str = '-'*9\n if seq_str == del_str:\n deletions.append(1)\n else:\n deletions.append(0)\n \n df[column_name] = deletions\n \n return df\n\ndef get_ORF9_del_state(df,align_file,column_name):\n \n \"This is specifically hardcoded for the ORF9 TRS -3 deletion\"\n seq_dict = SeqIO.to_dict(SeqIO.parse(align_file, \"fasta\"))\n deletions = []\n for index, row in df.iterrows():\n rec = seq_dict.get(index)\n seq_str = str(rec.seq[28270]) # genomic start pos is shifted -1 for zero-based indexing\n del_str = '-'\n if seq_str == del_str:\n deletions.append(1)\n else:\n deletions.append(0)\n \n df[column_name] = deletions\n \n return df\n\ndef subsample_align(sampled_taxa,aln_file,new_aln_file):\n \n \"Get sequences in tree from GISAID fasta file\"\n seq_dict = SeqIO.to_dict(SeqIO.parse(aln_file, \"fasta\")) # one line alternative\n seq_records = []\n missing_taxa = []\n for tx in sampled_taxa:\n rec = seq_dict.get(tx)\n if rec is not None:\n seq_records.append(rec)\n else:\n missing_taxa.append(tx)\n print('WARNING: ' + tx + \" is not in GISAID sequence file\")\n SeqIO.write(seq_records, new_aln_file, \"fasta\")\n\n\"\"\"\n Set up directories and files\n\"\"\"\n\n\"GISAID input files\"\n# base_dir = Path(__file__).home() / 'Documents' / 'GitHub' / 'phyloTF2' / \"covid-analysis\"\n# tree_dir = base_dir / \"phylogenies\" / 'GISAID-hCoV-19-phylogeny-2021-03-08'\n# align_dir = Path.home() / 'Desktop' / 'msa_0314'\n# tree_file = str(tree_dir / \"hcov_march2021_USA_post2020-09-01.tree\")\n\n\"Subsample full sequence data to get smaller EMSI data set\"\n# meta_file = str(tree_dir / 'hcov_USA_post2020-09-01_EMSI_metadata.csv')\n# aln_fasta_file = str(align_dir / \"hcov_march2021_USA_post2020-09-01_aligned.fasta\")\n# sub_meta_file = str(tree_dir / \"hcov_USA_post2020-09-01_EMSI_subsampled_metadata.csv\")\n# sub_aln_file = str(align_dir / \"hcov_USA_post2020-09-01_EMSI_subsampled_aligned.fasta\")\n# meta_df = pd.read_csv(meta_file,sep=\",\",index_col='accession_id')\n# sample_count = 2000\n# sub_df = meta_df.sample(n=sample_count, axis=0)\n# sub_df.to_csv(sub_meta_file,index=True)\n# sampled_taxa = sub_df.index.tolist()\n# subsample_align(sampled_taxa,aln_fasta_file,sub_aln_file)\n\n\"Subsampled data set file names\"\nbase_dir = Path(__file__).parent.parent / \"data\"\nmeta_file = str(base_dir / \"hcov_USA_post2020-09-01_EMSI_subsampled_metadata.csv\")\naln_fasta_file = str(base_dir / \"hcov_USA_post2020-09-01_EMSI_subsampled_aligned.fasta\")\n\n\"\"\"\n Create new sub_sampled data set\n\"\"\"\n\n\"Get metadata for GISAID global tree\"\nmeta_df = pd.read_csv(meta_file,sep=\",\",index_col='accession_id')\n\n\"Encode nsp6 deletion as binary variable in meta data\"\n#del_label = 'nsp6_Delta9'\n#meta_df = get_nsp6_del_state(meta_df,aln_fasta_file,del_label)\n\n\"Encode ORF9 TRS deletion as binary variable in meta data\"\ndel_label = 'ORF9_TRS-3'\nmeta_df = get_ORF9_del_state(meta_df,aln_fasta_file,del_label)\n\n\"Split 
df in presence/absence of deletion\"\nsub_df = meta_df[meta_df[del_label] == 0]\ndel_sub_df = meta_df[meta_df[del_label] == 1]\n\n\"Subsample seqs without deletion\"\nsample_count = len(del_sub_df.index)\nsub_df = sub_df.sample(n=sample_count, axis=0)\n\n\"Merge back into one dataframe\"\nsub_df = sub_df.append(del_sub_df)\n\n\"Get subsampled alignment for samples in sub_df\"\nsampled_taxa = sub_df.index.tolist()\n#new_aln_file = str(base_dir / \"hcov_USA_post2020-09-01_EMSI_nsp6_Delta9_subsampled_aligned.fasta\") \nnew_aln_file = str(base_dir / \"hcov_USA_post2020-09-01_EMSI_ORF9_TRS-3_subsampled_aligned.fasta\") \nsubsample_align(sampled_taxa,aln_fasta_file,new_aln_file)\n\n\"Rename records in fasta file by accession id to match taxa labels in tree\"\nrecords = SeqIO.parse(new_aln_file, \"fasta\")\nrecords = rename_seqs(sub_df,records,del_label) # rename by accession id\nSeqIO.write(records,new_aln_file, \"fasta\")\n\n\"Extract tree of desired samples using dendropy\"\n# filtered_taxa = set(filtered_taxa) # index is 'accession_id'\n# filtered_taxa = filtered_taxa.difference(set(missing_taxa)) # remove missing taxa\n# filtered_taxa_spaces = [t.replace('_',' ') for t in filtered_taxa] # need to replace underscores with spaces to match dendropy taxon labels\n# taxa = dendropy.TaxonNamespace()\n# global_tree = dendropy.Tree.get(file=open(tree_file, 'r'), schema=\"newick\", rooting=\"default-rooted\", taxon_namespace=taxa) \n# if not global_tree.is_rooted:\n# print('WARNING: Global tree is not rooted') # Should be rooted, this is just to check\n# taxa_to_retain = set([taxon for taxon in global_tree.taxon_namespace if taxon.label in filtered_taxa_spaces])\n# filtered_tree = global_tree.extract_tree_with_taxa(taxa=taxa_to_retain)\n# filtered_tree.write(path=filtered_tree_file,schema='newick',suppress_annotations=True,suppress_rooting=True)\n\n\"Get tip dates file for dating with lsd and date tree\"\n# tip_date_dict = {}\n# for sample, row in meta_df.iterrows():\n# \ttip_date_dict[sample] = date2FloatYear(row['collection_date'])\n# write_tip_dates(tip_date_dict,tip_date_file)\n# date_tree(filtered_tree_file,tip_date_file,rate_file)\n\n\n\n " }, { "alpha_fraction": 0.5720313191413879, "alphanum_fraction": 0.6856539845466614, "avg_line_length": 32.81632614135742, "blob_id": "31c142d47b83f863d78872ac94cfb2a665831(truncated)", "content_id": "890e4fc14edf2cf994cdb15eb42126ebb208b165", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3318, "license_type": "no_license", "max_line_length": 107, "num_lines": 98, "path": "/code/plot_site_del_freqs.py", "repo_name": "davidrasm/EMSI-SARS-CoV-2", "src_encoding": "UTF-8", "text": "\"\"\"\nCreated on Fri Feb 12 06:37:59 2021\n\nPlots per-site deletion frequencies across SARS-CoV-2 genomes from GISAID\n\n@author: david\n\"\"\"\nfrom pathlib import Path\nimport pandas as pd\nimport numpy as np\nfrom Bio import AlignIO\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport seaborn as sns\n\ndef make_features_map():\n \n features = ['nsp01','nsp02','nsp03','nsp04'\n\t\t,'nsp05','nsp06','nsp07','nsp08'\n\t\t,'nsp09','nsp10','nsp11','nsp12'\n\t\t,'nsp13','nsp14','nsp15','nsp16'\n\t\t,'Spike','ORF03a','ORF04','ORF05','ORF06'\n\t\t,'ORF07a','ORF07b','ORF08','ORF09','ORF10']\n start_locs = [266,806,2720,8555,10055,10973,11843,12092,12686,13025,13442,13442,16237,18040,19621\n \t\t\t,20659,21563,25393,26245,26523,27202,27394,27756,27894,28274,29558]\n end_locs = 
[805,2719,8554,10054,10972,11842,12092,12685,13024,13441,13483,16236,18039,19620,20658\n \t\t\t,21555,25384,26220,26472,27192,27387,27759,27887,28259,29533,29674]\n feature_data = {'Feature':features,'StartPosition': start_locs, 'EndPosition': end_locs} \n feature_df = pd.DataFrame(feature_data)\n feature_df.to_csv('hcov_genomic_features.csv',index=False)\n\ndef add_feature(ax, label, start, end, offset_cntr):\n \n \"Add gene or feature annotation to plot ax from start to end\"\n left = start\n bottom = (-0.2 - (offset_cntr*(0.1))) * ax.get_ylim()[1]\n width = end - start\n height = 0.08 * ax.get_ylim()[1]\n p = patches.Rectangle(\n (left, bottom), width, height,\n fill=True, color='gray', transform=ax.transData, clip_on=False, alpha=0.75\n )\n ax.add_patch(p)\n ax.text(left+(width/2), bottom+(height/2), label,\n horizontalalignment='center',\n verticalalignment='center',\n transform=ax.transData)\n\n\"\"\"\n Set up directories and files\n\"\"\"\n\n\"Set directories\"\nbase_dir = Path(__file__).parent.parent / \"data\"\n\n\"Subsampled data set file names\"\naln_fasta_file = str(base_dir / \"hcov_USA_post2020-09-01_EMSI_subsampled_aligned.fasta\")\n\nfeature_file = base_dir / 'hcov_genomic_features.csv'\nfeature_df = pd.read_csv(feature_file,sep=\",\",index_col='Feature')\n\n\"Count deletions at each site\"\nalign = AlignIO.read(aln_fasta_file, \"fasta\")\nn_sites = align.get_alignment_length()\nn_samples = len(align[:,0])\ndeletions = []\nfor i in range(n_sites):\n print(i)\n deletions.append(align[:,i].count('-'))\nsite_del_freq = np.array(deletions) / n_samples \nsite_del_freq[:200] = np.nan # ignore first 200 sites\nsite_del_freq[-200:] = np.nan # ignore last 200 sites\nsite_del_df = pd.DataFrame({'DeletionFreq':site_del_freq})\nsite_del_df.to_csv('hcov_site_del_freqs.csv',index=False)\n\n\"Or load in site del freqs if we already have them\"\n##site_del_df = pd.read_csv('hcov_site_del_freqs.csv')\n#site_del_freq = site_del_df['DeletionFreq'].values\n\n\"Plot deletion freqs across genome\"\nsns.set(style=\"darkgrid\")\nfig, ax = plt.subplots(1, 1)\nax.plot(site_del_freq)\n#ax.set_xlabel('Position')\nax.set_ylabel('Deletion Frequency')\nax.grid(True)\ncntr = 0\nfor feature, row in feature_df.iterrows():\n add_feature(ax,feature, row['StartPosition'], row['EndPosition'],cntr) # add genomic feature annotation\n cntr += 1\n if cntr > 3: cntr = 0\n#ax.set_xlim([25000, 29000])\n\nfig.set_size_inches(10, 6)\nfig.tight_layout()\nplt.show()\n\nfig.savefig('hcov_USA_post2020-09-01_site_del_freqs.png', dpi=200)\n\n\n\n " }, { "alpha_fraction": 0.7540983557701111, "alphanum_fraction": 0.7868852615356445, "avg_line_length": 29.5, "blob_id": "ce0c241861bcef46a892afee448881e761211bf9", "content_id": "d5f17dc118a12388efe6c1fe87d68ddbb1159a0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 61, "license_type": "no_license", "max_line_length": 42, "num_lines": 2, "path": "/README.md", "repo_name": "davidrasm/EMSI-SARS-CoV-2", "src_encoding": "UTF-8", "text": "# EMSI-SARS-CoV-2\nEMSI SARS-CoV-2 Phylodynamics Team Project\n" }, { "alpha_fraction": 0.645714282989502, "alphanum_fraction": 0.677142858505249, "avg_line_length": 35.926605224609375, "blob_id": "22a482af658b3bb77a050e60a15ad7e3079ad9aa", "content_id": "004218580de7ff124d43c045e735edbf98484893", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8050, "license_type": "no_license", "max_line_length": 166, "num_lines": 218, "path": 
"/code/PlotAncLineageFeatureTree.py", "repo_name": "davidrasm/EMSI-SARS-CoV-2", "src_encoding": "UTF-8", "text": "\"\"\"\nCreated on Thu Mar 28 12:14:01 2019\n\nPlot tree using baltic with lineages colored according to their fitness\n\n@author: david\n\"\"\"\nimport balticmod as bt\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nfrom matplotlib.gridspec import GridSpec\nimport numpy as np\nimport pandas as pd\nimport TreeUtils\nfrom ete3 import Tree\nimport seaborn as sns\nimport random\nimport json\nfrom pathlib import Path\n\ndef add_line_feature(tree,df,feature):\n \n for node in tree.traverse(\"postorder\"):\n print(node.name)\n if node.name == '':\n node.name = 'root'\n node.add_features(state = df.loc[node.name][feature])\n \n return tree\n\ndef sample_tips(tree,number=None,fraction=None):\n\n \"Thin tree for plotting by randomly pruning samples\"\n #sample_tree = copy.deepcopy(tree) # Create sample tree\n names = [node.name for node in tree.traverse() if node.is_leaf()]\n leafs = len(names)\n \n sample_count = 0\n if not number and not fraction:\n print(\"Need to specify a number or fraction to subsample\")\n elif number:\n if number > leafs:\n print(\"Sample size greater than number of leafs in tree\")\n else:\n sample_count = number \n elif fraction:\n sample_count = round(fraction * leafs)\n \n \"Return subsample\" \n return random.sample(names, sample_count) \n\n\ndef thin_tree(tree,sample):\n \n problem_sample = 'hCoV-19/USA/WA-UW-1731/2020|EPI_ISL_424231|2020-03-21'\n if problem_sample in sample:\n print(\"Removing sample\")\n sample.remove(problem_sample)\n\n print(\"Pruning tree\")\n tree.prune(sample, preserve_branch_length=True)\n\n return tree\n\ndef transform_fvals(fit_vals, tr):\n \n \"Rescale fitness values\"\n max_fit = np.max(fit_vals)\n min_fit = np.min(fit_vals)\n new_max_fit = max_fit - min_fit # rescale\n fit_vals = np.array([])\n for k in tr.Objects: ## iterate over a flat list of branches\n k.traits['type'] = (float(k.traits['type']) - min_fit) / new_max_fit\n fit_vals = np.append(fit_vals, float(k.traits['type']))\n \n return fit_vals, tr, min_fit, max_fit\n\n\"Load in ancestral features as df before encoding\"\nbase_dir = Path(__file__).parent.parent / \"data\"\n\nfeatures_file = str(base_dir / 'hcov_USA_post2020-09-01_unencodedAncStates_dels+pangoline.csv')\ntree_file = str(base_dir / \"hcov_USA_post2020-09-01_EMSI_mini_labeled.tre\") # tree with internal labels\n\nanc_df = pd.read_csv(features_file,sep=\",\",index_col='node')\n\nfeatures = ['nsp6_Delta9', 'ORF9_TRS-3']\n\n\"Import tree and index branches\"\ntree = Tree(tree_file, format=1)\nabsolute_time = 2021.1616 # absolute time of last sample ('2021-02-01')\n\n\"Reduce PANGOLINE lineages\"\npango_map = {'LowFreq':'LowFreq',\n 'B.1':'B.1',\n 'B.1.2':'B.1.2',\n 'B.1.234':'B.1',\n 'B.1.243':'B.1',\n 'B.1.427':'B.1.42(7/9)',\n 'B.1.429':'B.1.42(7/9)',\n 'B.1.526':'B.1.526',\n 'B.1.1.7':'B.1.1.7',\n 'B.1.1.222':'B.1.1.222'}\nanc_df['PANGOLINE'] = anc_df['PANGOLINE'].apply(lambda x: pango_map[x])\n\n\"Map anc states to integers\"\nfeature = 'PANGOLINE'\nstates = anc_df[feature].unique()\nnum_states = len(states)\nstate2int = {state:index for index, state in enumerate(states)}\nif feature == 'REGION':\n labels = ['REGION_' + str(i) for i in states]\nelse:\n labels = states\nint2Label = {index:label for index, label in enumerate(labels)}\nanc_df[feature] = anc_df[feature].apply(lambda x: state2int[x])\n\n\"If thinning by subsampling leaf taxa\"\nresample = True\nif resample:\n sample = 
sample_tips(tree,number=800)\n    with open(\"sampled_tips_del_tree.json\", \"w\") as fp:\n        json.dump(sample, fp)\nelse:\n    with open(\"sampled_tips_del_tree.json\", \"r\") as fp:\n        sample = json.load(fp)\n\n\"Add ancestral features\"\ntree = add_line_feature(tree,anc_df,feature)\ntree = thin_tree(tree,sample)\ntree, tree_times = TreeUtils.add_tree_times(tree)\nfinal_time = max(tree_times)\nroot_time = absolute_time - final_time\n\n\"Write tree with fit vals to multi-type Newick file\"\nmtt_file = 'hcov_PANGO_post2020-09-01_del_features.tre'\nfig_file = 'hcov_PANGO_post2020-09-01_del_features.png'\nTreeUtils.write_MTT_newick(tree,mtt_file)\n\n\"\"\"\n    Plot tree with lineages colored by fitness\n\"\"\"\nsns.set(style=\"dark\")\nmyTree=bt.loadNewick(mtt_file,absoluteTime=False)\nmyTree.traverse_tree() ## required to set heights\nmyTree.setAbsoluteTime(absolute_time) ## set absolute time of all branches by specifying date of most recent tip\nmyTree.treeStats() ## report stats about tree\n\ncmap = sns.color_palette(\"husl\", 10) #mpl.cm.get_cmap('tab10', num_states)\nlocation_cmap = sns.color_palette(\"muted\", 10) #mpl.cm.get_cmap('tab10', 10)\n\nfig,ax = plt.subplots(figsize=(20,20),facecolor='w')\n\ngs = GridSpec(1,2,width_ratios=[6,1],wspace=0.0)\nax_tree = plt.subplot(gs[0])\nax_genome = plt.subplot(gs[1],sharey=ax_tree)\n\nx_attr=lambda k: k.absoluteTime ## x coordinate of branches will be absoluteTime attribute\n#c_func=lambda k: cmap(int(k.traits['type'])-1) if int(k.traits['type'])>1 else 'black' # for regions, set region 0 to black\nc_func=lambda k: cmap[int(k.traits['type'])] # for regions, set region 0 to black\ns_func=lambda k: 50-30*k.height/myTree.treeHeight ## size of tips\nz_func=lambda k: 100\n\ncu_func=lambda k: 'k' ## for plotting a black outline of tip circles\nsu_func=lambda k: 2*(50-30*k.height/myTree.treeHeight) ## black outline is twice as big as tip circle \nzu_func=lambda k: 99\nmyTree.plotTree(ax_tree,x_attr=x_attr,colour_function=c_func) ## plot branches\nmyTree.plotPoints(ax_tree,x_attr=x_attr,size_function=s_func,colour_function=c_func,zorder_function=z_func) ## plot circles at tips\nmyTree.plotPoints(ax_tree,x_attr=x_attr,size_function=su_func,colour_function=cu_func,zorder_function=zu_func) ## plot circles under tips (to give an outline)\n\n\"Add genome annotations\"\nfor k in myTree.Objects: ## iterate over branches\n    if k.branchType=='leaf':\n        for f in range(len(features)): ## iterate over trait keys\n            ftype = anc_df.loc[k.numName][features[f]]\n            c='midnightblue' if ftype==1 else 'darkgrey' # block mapper\n            #c='darkorange' if k.traits[features[f]]=='1' else 'steelblue' # block mapper\n            lineage=plt.Rectangle((f,k.y-0.5),1,1,facecolor=c,edgecolor='none') ## rectangle with height and width 1, at y position of tip and at the index of the key\n            ax_genome.add_patch(lineage) ## add coloured rectangle to plot\nax_genome.set_xticks(np.arange(0.5,len(features)+0.5))\n#clean_feature_names = [n.replace('_',':') for n in features]\nclean_feature_names = [r'nsp6:$\\Delta$9','ORF9:TRS-3']\nax_genome.set_xticklabels(clean_feature_names,rotation=90)\n[ax_genome.axvline(x,color='w') for x in range(len(features))]\n\n\"Add legend for lineages\"\nimport matplotlib.patches as mpatches\nhandles = [mpatches.Patch(color=cmap[i], label=int2Label[i]) for i in range(num_states)]\nlegend1 = ax_tree.legend(handles=handles,prop={'size': 24},loc='upper left',bbox_to_anchor=(-0.36, 1.)) #was 1.32\n\n\"Add month labels as xticks\"\nstep_freq = 1/12\nxticks = 
np.arange(2020.25,absolute_time,step_freq)\nax_tree.set_xticks(xticks)\nlabels = ['Apr','May','June','July','Aug','Sep','Oct','Nov','Dec','Jan','Feb','March']\nax_tree.set_xticklabels(labels, fontsize=24) #rotation='vertical'\nax_tree.set_xlabel('Month', fontsize=24)\n\n\"Or set times\"\nats=[k.absoluteTime for k in myTree.Objects]\nfr=0.05\nax_tree.set_xlim(min(ats)-fr,max(ats)+fr)\n\nax_genome.set_xlim(0,len(features))\nax_tree.set_ylim(-5,myTree.ySpan+5)\n\n\"Turn axis spines invisible\"\n[ax_tree.spines[loc].set_visible(False) for loc in ['top','right','left']] ## no axes\n[ax_genome.spines[loc].set_visible(False) for loc in ['top','right','left','bottom']] ## no axes\n\nax_tree.tick_params(axis='x',size=24) ## no labels\nax_genome.tick_params(size=0,labelsize=24)\nax_tree.set_yticklabels([])\nax_genome.xaxis.set_ticks_position('top')\nax_tree.grid(axis='x')\n\n#plt.show()\nplt.subplots_adjust(left=0.22)\nplt.savefig(fig_file, dpi=300)\n" }, { "alpha_fraction": 0.6082959771156311, "alphanum_fraction": 0.6232333779335022, "avg_line_length": 37.62666702270508, "blob_id": "e3226da07e7a4ced8a940c5d7969296af24a5ea3", "content_id": "e9a80894495e67cd4d48cd19e41de58737bbb9a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8703, "license_type": "no_license", "max_line_length": 157, "num_lines": 225, "path": "/code/AncestralReconstruction.py", "repo_name": "davidrasm/EMSI-SARS-CoV-2", "src_encoding": "UTF-8", "text": "\"\"\"\nCreated on Sat Jun 13 15:31:32 2020\n\n@author: david\n\"\"\"\n\n#from pastml.acr import pastml_pipeline\nfrom Bio import SeqIO\nfrom ete3 import Tree\nimport pandas as pd\nimport numpy as np\n\ndef fasta2csv(fasta_file,csv_file):\n \n \"Get sequences from fasta\"\n seq_dic = {}\n for record in SeqIO.parse(fasta_file, \"fasta\"):\n seq_dic[record.id] = [i for i in record.seq] #str(record.seq)\n sites = len(seq_dic[next(iter(seq_dic))])\n traits = [\"site\" + str(i) for i in range(sites)]\n seqs_df = pd.DataFrame.from_dict(seq_dic, orient='index', columns=traits)\n seqs_df.to_csv(csv_file)\n \n return traits\n\n\"\"\"\n Max likelihood reconstruction using PastML\n\"\"\"\ndef reconstruct(tree_file,align_file,csv_file=\"temp-align.csv\"):\n \n if align_file.endswith('.fasta'):\n \"Convert fasta to csv\"\n traits = fasta2csv(align_file,csv_file) # convert fasta to csv\n data = csv_file # Path to the table containing tip/node annotations, in csv or tab format\n else:\n df = pd.read_csv(align_file, index_col=0)\n traits = [column for column in df.columns]\n data = align_file\n \n tree = tree_file # Path to the tree in newick format\n \n # Columns present in the annotation table,\n # for which we want to reconstruct ancestral states\n columns = traits #['Country']\n \n # Path to the output compressed map visualisation\n html_compressed = \"tree-000_map.html\"\n \n # (Optional) path to the output tree visualisation\n html = \"tree-000_tree.html\"\n \n #pastml_pipeline(data=data, data_sep=',', columns=columns, tree=tree, verbose=True)\n \n #pastml_pipeline(data=data, data_sep=',', columns=columns, name_column=traits[0], tree=tree,\n # html_compressed=html_compressed, html=html, verbose=True)\n\n\ndef label_internal_nodes(tree):\n \n internal_cntr = 0 # counter for internal nodes encountered\n for node in tree.traverse(\"preorder\"):\n if node.is_root():\n node.name = 'root'\n else:\n if not node.is_leaf():\n node.name = 'n' + str(internal_cntr)\n internal_cntr += 1 \n return tree\n\n\"\"\"\n Reconstruct ancestral states 
using Sankoff's max parsimony algorithm (see Felsenstein p15-18)\n    Parsimony scores are computed assuming all transitions have a cost of one.\n    Tip/ancestral features are input and output as dictionaries\n    Internal nodes are labeled as n<X> where X is an int determined by the position of the node in a pre-order traversal\n\"\"\"\ndef reconstruct_MP(tree,feature_dic):\n    \n    state_set = set([feature_dic[node.name] for node in tree.traverse() if node.is_leaf()])\n    states = len(state_set)\n    \n    \"Add a state2int dict map so this works with non-integer data types\"\n    state2int = {state:index for index, state in enumerate(state_set)}\n    int2state = {index:state for index, state in enumerate(state_set)}\n    \n    \"Post-order traversal to compute costs in terms of required state transitions\"\n    for node in tree.traverse(\"postorder\"):\n        if node.is_leaf():\n            costs = [np.inf]*states\n            tip_state = state2int[feature_dic[node.name]] # feature_dic[node.name]\n            costs[tip_state] = 0\n            node.add_features(costs = costs)\n        else:\n            costs = [0]*states\n            for i in range(states):\n                child_costs = []\n                for child in node.children:\n                    temp_costs = [0]*states\n                    for j in range(states):\n                        temp_costs[j] = child.costs[j]\n                        if i != j:\n                            temp_costs[j] = temp_costs[j] + 1 # add cost for transitioning between i and j\n                    child_costs.append(temp_costs)\n                costs[i] = sum([min(c) for c in child_costs])\n            node.add_features(costs = costs)\n    \n    \"Pre-order traversal to select anc states based on least cost parsimony score\"\n    anc_dic = {}\n    internal_cntr = 0 # counter for internal nodes encountered\n    for node in tree.traverse(\"preorder\"):\n        costs = node.costs\n        if node.is_root():\n            root_state = costs.index(min(costs)) # or np.argmin(node.costs)\n            node.add_features(state = root_state)\n            anc_dic['root'] = root_state\n        else:\n            parent_state = node.up.state\n            least_cost = min(costs)\n            least_cost_state = costs.index(least_cost)\n            if parent_state == least_cost_state:\n                anc_state = parent_state\n            else:\n                parent_st_cost = costs[parent_state]\n                if parent_st_cost < (least_cost+1):\n                    anc_state = parent_state # if parent state costs less than transitioning to least cost state\n                else:\n                    anc_state = least_cost_state\n            node.add_features(state = anc_state)\n            if node.is_leaf():\n                anc_dic[node.name] = anc_state\n            else:\n                name = 'n' + str(internal_cntr)\n                anc_dic[name] = anc_state\n                internal_cntr += 1\n    \n    \"Convert from integers back to original states\"\n    for k,v in anc_dic.items():\n        anc_dic[k] = int2state[v]\n\n    \"Should return tree and anc_dic\"\n    return tree, anc_dic\n\nif __name__ == '__main__':\n    \n    import time\n    \n    \"Old ML test data\"\n    #path = './test-sets/testTF_timeVaryingSiteEffects_june2020/'\n    #tree_file = path + 'tree-000.tre'\n    #fasta_file = path + 'tree-000.fasta'\n    #csv_file = path + 'tree-000.csv'\n    #reconstruct(tree_file,fasta_file,csv_file)\n    \n    \"New covid test data for testing MP reconstructions\"\n    tree_file = 'covid_ancestral_D614G_testMP.tre'\n    csv_file = 'covid_features_D614G_testMP.csv'\n    \n    tree = Tree(tree_file, format=1)\n    tree = label_internal_nodes(tree)\n    #for node in tree.traverse(\"preorder\"): print(node.name)\n    \n    df = pd.read_csv(csv_file,index_col='node')\n    feature_dic = {}\n    for index, row in df.iterrows():\n        feature_dic[index] = row['nsp12_P323L+S_D614G']\n    tic = time.perf_counter()\n    tree, anc_dic = reconstruct_MP(tree,feature_dic)\n    toc = time.perf_counter()\n    elapsed = toc - tic\n    print(f\"Elapsed time: {elapsed:0.4f} seconds\")\n\n\n    \"Plot\"\n    import balticmod as bt\n    import matplotlib as mpl\n    from matplotlib 
import pyplot as plt\n    import TreeUtils\n    import seaborn as sns\n    \n    \"Write tree with fit vals to multi-type Newick file\"\n    absolute_time = 2020.67\n    mtt_file = 'covid_ancestral_D614G_testMP_mtt.tre'\n    fig_file = 'covid_ancestral_D614G_testMP_MP.png'\n    TreeUtils.write_MTT_newick(tree,mtt_file)\n    \n    sns.set(style=\"darkgrid\")\n    myTree=bt.loadNewick(mtt_file,absoluteTime=False)\n    myTree.traverse_tree() ## required to set heights\n    myTree.setAbsoluteTime(absolute_time) ## set absolute time of all branches by specifying date of most recent tip\n    myTree.treeStats() ## report stats about tree\n    \n    cmap = mpl.cm.get_cmap('tab10', 10)\n    \n    fig,ax = plt.subplots(figsize=(20,20),facecolor='w')\n\n    x_attr=lambda k: k.absoluteTime ## x coordinate of branches will be absoluteTime attribute\n    c_func=lambda k: 'darkorange' if k.traits['type']=='1' else 'steelblue' ## colour of branches\n    s_func=lambda k: 50-30*k.height/myTree.treeHeight ## size of tips\n    z_func=lambda k: 100\n    \n    cu_func=lambda k: 'k' ## for plotting a black outline of tip circles\n    su_func=lambda k: 2*(50-30*k.height/myTree.treeHeight) ## black outline is twice as big as tip circle \n    zu_func=lambda k: 99\n    myTree.plotTree(ax,x_attr=x_attr,colour_function=c_func) ## plot branches\n    myTree.plotPoints(ax,x_attr=x_attr,size_function=s_func,colour_function=c_func,zorder_function=z_func) ## plot circles at tips\n    myTree.plotPoints(ax,x_attr=x_attr,size_function=su_func,colour_function=cu_func,zorder_function=zu_func) ## plot circles under tips (to give an outline)\n    \n    \"Add legend the hard way\"\n    import matplotlib.patches as mpatches\n    blue_patch = mpatches.Patch(color='steelblue', label = 'Spike 614D')\n    red_patch = mpatches.Patch(color='darkorange', label = 'Spike 614G')\n    handles = [blue_patch,red_patch]\n    \n    \"For Regions\"\n    ax.legend(handles=handles,prop={'size': 24}) #loc='upper left'\n    \n    \"Add month labels as xticks\"\n    step_freq = 1/12\n    xticks = np.arange(2020,absolute_time,step_freq)\n    ax.set_xticks(xticks)\n    labels = ['Jan','Feb','Mar','Apr','May','June','July','Aug','Sep']\n    ax.set_xticklabels(labels, fontsize=24) #rotation='vertical'\n    ax.set_xlabel('Time', fontsize=24)\n    \n    ax.set_ylim(-5,myTree.ySpan+5)\n    plt.savefig(fig_file, dpi=300)\n    \n    \n\n\n" }, { "alpha_fraction": 0.6047220230102539, "alphanum_fraction": 0.6207920908927917, "avg_line_length": 33.70634841918945, "blob_id": "b6bdc73e08234158ee662658abaca4c75534c1e0", "content_id": "b9f7cb1bb7617f72bf0e20ef323338625f29e321", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13130, "license_type": "no_license", "max_line_length": 140, "num_lines": 378, "path": "/code/process_feature_tree_data.py", "repo_name": "davidrasm/EMSI-SARS-CoV-2", "src_encoding": "UTF-8", "text": "\"\"\"\nCreated on Wed Aug 19 18:55:30 2020\n\nProcess SARS-CoV-2 sequences, reconstruct ancestral states and return fitness features for model training\n\n@author: david\n\"\"\"\n\nimport pandas as pd\nimport re\nimport dendropy\nfrom Bio import SeqIO\nimport AncestralReconstruction\nimport numpy as np\nfrom ete3 import Tree\nfrom pathlib import Path\n\ndef fasta2df(fasta_file):\n    \n    \"Get sequences from fasta\"\n    seq_dic = {}\n    for record in SeqIO.parse(fasta_file, \"fasta\"):\n        seq_dic[record.id] = [i for i in record.seq]\n    df = pd.DataFrame.from_dict(seq_dic, orient='index')\n    \n    return df\n\ndef filter_sites(df,freq_cutoff = 0.0):\n    \n    \"Code mostly taken from filter_invariants.py function:\"\n    
\"https://github.com/btmartin721/raxml_ascbias/blob/master/ascbias.py\"\n    \n    #bases = [\"A\",\"G\",\"C\",\"T\"]\n    alphabet = [\"A\",\"R\",\"N\",\"D\",\"C\",\"Q\",\"E\",\"G\",\"H\",\"I\",\"L\",\"K\",\"M\",\"F\",\"P\",\"S\",\"T\",\"W\",\"Y\",\"V\"]\n\n    invariant_lst = list()\n\n    # Loop through each dataframe column\n    for i in df.columns:\n\n        # Gets unique values at each column and saves to list\n        column_unique = df[i].unique().tolist()\n\n        # Intersects column_unique with bases list\n        intersect = [value for value in alphabet if value in column_unique]\n\n        # If column contains only ambiguous or IUPAC characters\n        # Save the column index for dropping later\n        if not any(value for value in alphabet if value in column_unique):\n            invariant_lst.append(i)\n        \n        # Get frequency of each variant\n        freqs = df[i].value_counts(normalize=True)\n        if len(freqs.index) > 1:\n            major_var = freqs.index[1]\n            major_var_freq = freqs[1]\n            if major_var == 'X' and len(freqs.index) > 2: # was major_var == '-'\n                major_var = freqs.index[2]\n                major_var_freq = freqs[2]\n            if major_var == '-': # added to catch case where major variant is '-\" in translated seqs\n                major_var_freq = 0\n        else:\n            major_var_freq = 0\n        \n        # If site is invariant (only A, C, G, or T); ignores N's and \"-\"\n        # OR frequency of major variant is less than frequency threshold cutoff\n        if len(intersect) == 1 or major_var_freq < freq_cutoff:\n\n            # Saves column indexes to list\n            invariant_lst.append(i)\n\n    # Drops invariant sites from dataframe\n    df.drop(invariant_lst, axis=1, inplace=True)\n\n    return df\n\ndef filter_variants(df,freq_cutoff):\n    \n    drop_lst = list()\n\n    # Loop through each dataframe column\n    for i in df.columns:\n\n        # Get frequency of each variant\n        freqs = df[i].value_counts(normalize=True)\n        if len(freqs.index) > 1:\n            var_freq = freqs[1]\n        else:\n            var_freq = 0 # Presumably this doesn't happen but just in case\n        \n        # If frequency of variant is less than frequency threshold cutoff\n        if var_freq < freq_cutoff:\n            drop_lst.append(i)\n\n    # Drop sites from dataframe\n    df.drop(drop_lst, axis=1, inplace=True)\n\n    return df\n\ndef replace_missing(df):\n    \n    \"Replace missing characters with consensus type\"\n    for i in df.columns:\n        consensus = df[i].value_counts().index[0]\n        if consensus == '-':\n            print(\"WARNING: missing value '-' is consensus type at: \",str(i))\n        df[i].replace(['-'], consensus, inplace=True)\n    return df\n\ndef drop_consensus_dummies(df):\n    \n    \"Drop dummy variable columns for consensus type\"\n    drop_lst = list()\n    for i in df.columns:\n        if '_' in i:\n            var_str = i.split('_')[1]\n        else:\n            var_str = i            \n        splits = re.split(r'(\\d+)', var_str)\n        consensus = splits[0]\n        var = splits[-1]\n        if consensus == var: # If variant is consensus\n            drop_lst.append(i)\n    df.drop(drop_lst, axis=1, inplace=True)\n    \n    return df\n\ndef drop_root_state_dummies(df):\n    \n    \"Drop dummy variable columns for root state\"\n    drop_lst = list()\n    for i in df.columns:\n        if df.loc['root'][i]: # If dummy represents root state\n            drop_lst.append(i)\n    df.drop(drop_lst, axis=1, inplace=True)\n    \n    return df\n\ndef drop_X_variants(df):\n    \n    \"Drop dummy variable columns for undetermined (X) variants\"\n    drop_lst = list()\n    for i in df.columns:\n        if i[-1] == 'X': # if undetermined\n            drop_lst.append(i)\n    df.drop(drop_lst, axis=1, inplace=True)\n    \n    return df\n\ndef rename_sites_defunct(df,prefix=''):\n    \n    for i in df.columns:\n        consensus = df[i].value_counts().index[0]\n        new_label = prefix + consensus+str(i+1) # index from 1\n        df.rename(columns={i: new_label},inplace=True)\n    \n    return 
df\n\ndef rename_sites(df,prefix='',method='consensus'):\n    \n    for i in df.columns:\n        if method == 'consensus':\n            consensus = df[i].value_counts().index[0]\n            new_label = prefix + consensus+str(i+1) # index from 1\n        elif method == 'root':\n            root_state = df.loc['root'][i]\n            splits = i.split('_')\n            if len(splits) > 1:\n                prefix = splits[0] + '_'\n                variant = splits[1]\n            else:\n                prefix = ''\n                variant = i\n            new_label = prefix + root_state + variant[1:]\n        else:\n            print(\"Renaming method not recognized\")\n            new_label = i   \n        df.rename(columns={i: new_label},inplace=True)\n    \n    return df\n\ndef get_geographic_locs(df,column_name,mapToRegion=False):\n    \n    path = './covid-analysis/maps/'\n    code_map_file = path + 'code2StateMap.csv'\n    state_map_file = path + 'state2HHSRegionMap.csv'\n    \n    code_df = pd.read_csv(code_map_file, index_col=0)\n    codeMap = {index:row['State'] for index, row in code_df.iterrows()}\n    \n    state_df = pd.read_csv(state_map_file, index_col=0)\n    stateMap = {index:row['Region'] for index, row in state_df.iterrows()}\n    \n    drop_indexes = []\n    locations = []\n    #for idx in df.index:\n    for index, row in df.iterrows():\n        \n        gloc = row['virus_name'].split('/')[2] # if taking from GISAID metadata file\n        #gloc = idx.split('/')[2] # after 2nd slash\n        gloc = gloc[:2] # first two chars should be state\n        \n        if gloc in codeMap:\n            state_loc = codeMap[gloc]\n            if mapToRegion:\n                mapped_loc = stateMap[state_loc]\n            else:\n                mapped_loc = state_loc\n        else:\n            mapped_loc = 0 # 0 represents unknown\n            print(\"Unrecognized geographic location: \" + gloc)\n        \n        locations.append(mapped_loc)\n    \n    df[column_name] = locations\n    df.drop(drop_indexes,inplace=True) # drop unknowns\n    \n    return df\n\n\ndef assign_pango_lineage_from_gisaid(df,column_name,pango_df):\n    \n    \"Assign lineages\"\n    lines = []\n    for index, row in df.iterrows():\n        if index in pango_df.index:\n            lines.append(pango_df.loc[index]['covv_lineage'])\n        else:\n            lines.append('None')\n    df[column_name] = lines\n    df[column_name].replace([np.nan], 'None', inplace=True)\n    \n    \"Reduce # of lineages by iteratively mapping rare lineages to parent lineage until parent meets inclusion criteria\"\n    line_counts = df[column_name].value_counts()\n    lineages_to_retain = line_counts[line_counts>=35].index # using 300 for paper results, was 100\n    lineages_to_retain = lineages_to_retain.insert(0,'B.1.526')\n    lineages_to_retain = lineages_to_retain.insert(0,'B.1.427')\n    lineMap = {}\n    for index, value in line_counts.items():\n        if index in lineages_to_retain:\n            lineMap[index] = index\n        else:\n            parent = index\n            while parent not in lineages_to_retain:\n                splits = parent.split('.')\n                if len(splits) == 1:\n                    parent = 'LowFreq' # splits[0]\n                    break\n                parent = '.'.join(splits[0:-1]) # returns parent e.g. 
B.1.1.7 --> B.1.1\n            lineMap[index] = parent\n    \n    \"Re-assign lineages\"\n    lines = []\n    for index, row in df.iterrows():\n        lines.append(lineMap[row[column_name]])\n    df[column_name] = lines\n    \n    return df\n\n\n\ndef get_nsp6_del_state(df,align_file,column_name):\n    \n    \"This is specifically hardcoded for the nsp6 del9 deletion\"\n    seq_dict = SeqIO.to_dict(SeqIO.parse(align_file, \"fasta\"))\n    deletions = []\n    for index, row in df.iterrows():\n        rec = seq_dict.get(index)\n        seq_str = str(rec.seq[11287:11296]) # genomic start pos is shifted -1 for zero-based indexing\n        del_str = '-'*9\n        if seq_str == del_str:\n            deletions.append(1)\n        else:\n            deletions.append(0)\n    \n    df[column_name] = deletions\n    \n    return df\n\ndef get_ORF9_del_state(df,align_file,column_name):\n    \n    \"This is specifically hardcoded for the ORF9 TRS -3 deletion\"\n    seq_dict = SeqIO.to_dict(SeqIO.parse(align_file, \"fasta\"))\n    deletions = []\n    for index, row in df.iterrows():\n        rec = seq_dict.get(index)\n        seq_str = str(rec.seq[28270]) # genomic start pos is shifted -1 for zero-based indexing\n        del_str = '-'\n        if seq_str == del_str:\n            deletions.append(1)\n        else:\n            deletions.append(0)\n    \n    df[column_name] = deletions\n    \n    return df\n\n\ndef get_features(align_file,as_string=True):\n    \n    df = pd.read_csv(align_file, index_col=0)\n    features = [column for column in df.columns]\n    if as_string:\n        features = ' '.join(features)\n    return features\n\n\nanalysis_dir = Path(\"/Users/david/Documents/GitHub/phyloTF2/covid-analysis\")\ntree_dir = analysis_dir / \"phylogenies\" / 'GISAID-hCoV-19-phylogeny-2021-03-08'\nbase_dir = Path(__file__).parent.parent / \"data\"\n\nmeta_file = base_dir / \"hcov_USA_post2020-09-01_EMSI_subsampled_metadata.csv\"\ntree_file = str(tree_dir / \"hcov_march2021_USA_post2020-09-01_dated.tre\")\nsubtree_file = str(base_dir / \"hcov_USA_post2020-09-01_EMSI_mini.tre\")\nlabeled_tree_file = str(base_dir / \"hcov_USA_post2020-09-01_EMSI_mini_labeled.tre\") # tree with internal labels\n\nalign_file = base_dir / 'hcov_USA_post2020-09-01_EMSI_subsampled_aligned.fasta'\nunencoded_csv = base_dir / \"hcov_USA_post2020-09-01_unencodedAncStates_dels+pangoline.csv\"\n\n\"Get meta data file\"\nmerged_df = pd.read_csv(meta_file,sep=\",\",index_col='accession_id')\nfiltered_taxa = merged_df.index.tolist()\n\n\"Extract tree of desired samples using dendropy\"\n#subtree_file = str(tree_dir / \"hcov_USA_post2020-09-01_EMSI_mini.tre\")\n#filtered_taxa_spaces = [t.replace('_',' ') for t in filtered_taxa] # need to replace underscores with spaces to match dendropy taxon labels\n#taxa = dendropy.TaxonNamespace()\n#tree = dendropy.Tree.get(file=open(tree_file, 'r'), schema=\"newick\", rooting=\"default-rooted\", taxon_namespace=taxa) \n#taxa_to_retain = set([taxon for taxon in tree.taxon_namespace if taxon.label in filtered_taxa_spaces])\n#filtered_tree = tree.extract_tree_with_taxa(taxa=taxa_to_retain)\n#filtered_tree.write(path=subtree_file,schema='newick',suppress_annotations=True,suppress_rooting=True)\n\n\"Get geographic locations\"\n#merged_df = get_geographic_locs(merged_df,'STATE',mapToRegion=False) # get states\n#merged_df = get_geographic_locs(merged_df,'REGION',mapToRegion=True) # get regions\n\n\"Work around since pangolin lineage assignments are not working\"\ndesktop_dir = Path('/Users/david/Desktop/')\ngisaid_metadata_file = desktop_dir / \"GISAID_metadata_2021-03-10_USA.csv\"\npangolin_df = pd.read_csv(gisaid_metadata_file,sep=\",\",index_col='covv_accession_id')\nmerged_df = 
assign_pango_lineage_from_gisaid(merged_df,'PANGOLINE',pangolin_df)\npangolin_lineage_counts = merged_df['PANGOLINE'].value_counts()\nprint(pangolin_lineage_counts)\n\n\"Add deletion features\"\n\"Encode nsp6 deletion as binary variable in meta data\"\ndel_label = 'nsp6_Delta9'\nmerged_df = get_nsp6_del_state(merged_df,align_file,del_label)\n\n\"Encode ORF9 TRS deletion as binary variable in meta data\"\ndel_label = 'ORF9_TRS-3'\nmerged_df = get_ORF9_del_state(merged_df,align_file,del_label)\n\n# \"Get features to reconstruct\"\nfeatures = ['PANGOLINE', 'nsp6_Delta9', 'ORF9_TRS-3']\n# #features = merged_df.columns.to_list()\n\n# \"First assign internal labels to tree and then create new df for ancestral features\"\ntree = Tree(subtree_file, format=1)\ntree = AncestralReconstruction.label_internal_nodes(tree)\nnode_labels = [node.name for node in tree.traverse(\"preorder\")]\nanc_df = pd.DataFrame(index=node_labels)\n\n\"Run MP ancestral state reconstruction for all features\"\nreconstruct = True # set false if already reconstructed\nif reconstruct:\n    for f in features:\n        print('Reconstructing: ' + f)\n        feature_dic = {}\n        for index, row in merged_df.iterrows():\n            feature_dic[index] = row[f]\n        tree, anc_dic = AncestralReconstruction.reconstruct_MP(tree,feature_dic)\n        anc_df[f] = pd.Series(anc_dic)\n    tree.write(format=1, outfile=labeled_tree_file)\n\n# \"Save unencoded ancestral states df\"\nanc_df.to_csv(unencoded_csv,index_label='node')\n# #anc_df = pd.read_csv(unencoded_csv,sep=\",\",index_col='node')\n\n\n    \n    " }, { "alpha_fraction": 0.6451863646507263, "alphanum_fraction": 0.6677018404006958, "avg_line_length": 35.94827651977539, "blob_id": "a3e0354a75029af9a03dab16c259f2cb55f727be", "content_id": "34de15fa6b11547ad48d265528fb539e93bc61f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6440, "license_type": "no_license", "max_line_length": 157, "num_lines": 174, "path": "/code/reconstruct_deletions.py", "repo_name": "davidrasm/EMSI-SARS-CoV-2", "src_encoding": "UTF-8", "text": "\"\"\"\n\nProcess SARS-CoV-2 sequences, reconstruct ancestral states and return anc state csv\n\n@author: david\n\"\"\"\nimport pandas as pd\nfrom Bio import SeqIO\nimport AncestralReconstruction\nfrom ete3 import Tree\nfrom pathlib import Path\n\ndef fasta2df(fasta_file):\n    \n    \"Get sequences from fasta\"\n    seq_dic = {}\n    for record in SeqIO.parse(fasta_file, \"fasta\"):\n        seq_dic[record.id] = [i for i in record.seq]\n    df = pd.DataFrame.from_dict(seq_dic, orient='index')\n    \n    return df\n\ndef get_nsp6_del_state(df,align_file,column_name):\n    \n    \"This is specifically hardcoded for the nsp6 del9 deletion\"\n    seq_dict = SeqIO.to_dict(SeqIO.parse(align_file, \"fasta\"))\n    deletions = []\n    for index, row in df.iterrows():\n        rec = seq_dict.get(index)\n        seq_str = str(rec.seq[11287:11296]) # genomic start pos is shifted -1 for zero-based indexing\n        del_str = '-'*9\n        if seq_str == del_str:\n            deletions.append(1)\n        else:\n            deletions.append(0)\n    \n    df[column_name] = deletions\n    \n    return df\n\ndef get_ORF9_del_state(df,align_file,column_name):\n    \n    \"This is specifically hardcoded for the ORF9 TRS -3 deletion\"\n    seq_dict = SeqIO.to_dict(SeqIO.parse(align_file, \"fasta\"))\n    deletions = []\n    for index, row in df.iterrows():\n        rec = seq_dict.get(index)\n        seq_str = str(rec.seq[28270]) # genomic start pos is shifted -1 for zero-based indexing\n        del_str = '-'\n        if seq_str == del_str:\n            deletions.append(1)\n        else:\n            deletions.append(0)\n    \n    df[column_name] = deletions\n    \n    
return df\n\n\ndef get_features(align_file,as_string=True):\n    \n    df = pd.read_csv(align_file, index_col=0)\n    features = [column for column in df.columns]\n    if as_string:\n        features = ' '.join(features)\n    return features\n\ndef plot_tree(tree,mtt_file,fig_file):\n    \n    import balticmod as bt\n    import matplotlib as mpl\n    from matplotlib import pyplot as plt\n    import TreeUtils\n    import seaborn as sns\n    \n    \"Write tree with fit vals to multi-type Newick file\"\n    absolute_time = 2020.67\n    TreeUtils.write_MTT_newick(tree,mtt_file)\n    \n    sns.set(style=\"darkgrid\")\n    myTree=bt.loadNewick(mtt_file,absoluteTime=False)\n    myTree.traverse_tree() ## required to set heights\n    myTree.setAbsoluteTime(absolute_time) ## set absolute time of all branches by specifying date of most recent tip\n    myTree.treeStats() ## report stats about tree\n    \n    fig,ax = plt.subplots(figsize=(20,20),facecolor='w')\n\n    x_attr=lambda k: k.absoluteTime ## x coordinate of branches will be absoluteTime attribute\n    c_func=lambda k: 'darkorange' if k.traits['type']=='1' else 'steelblue' ## colour of branches\n    s_func=lambda k: 50-30*k.height/myTree.treeHeight ## size of tips\n    z_func=lambda k: 100\n    \n    cu_func=lambda k: 'k' ## for plotting a black outline of tip circles\n    su_func=lambda k: 2*(50-30*k.height/myTree.treeHeight) ## black outline is twice as big as tip circle \n    zu_func=lambda k: 99\n    myTree.plotTree(ax,x_attr=x_attr,colour_function=c_func) ## plot branches\n    myTree.plotPoints(ax,x_attr=x_attr,size_function=s_func,colour_function=c_func,zorder_function=z_func) ## plot circles at tips\n    myTree.plotPoints(ax,x_attr=x_attr,size_function=su_func,colour_function=cu_func,zorder_function=zu_func) ## plot circles under tips (to give an outline)\n    \n    \"Add legend the hard way\"\n    import matplotlib.patches as mpatches\n    blue_patch = mpatches.Patch(color='steelblue', label = 'WT')\n    red_patch = mpatches.Patch(color='darkorange', label = 'DEL')\n    handles = [blue_patch,red_patch]\n    \n    \"For Regions\"\n    ax.legend(handles=handles,prop={'size': 24}) #loc='upper left'\n    \n    \"Add month labels as xticks\"\n    # step_freq = 1/12\n    # xticks = np.arange(2020,absolute_time,step_freq)\n    # ax.set_xticks(xticks)\n    # labels = ['Jan','Feb','Mar','Apr','May','June','July','Aug','Sep']\n    # ax.set_xticklabels(labels, fontsize=24) #rotation='vertical'\n    # ax.set_xlabel('Time', fontsize=24)\n    \n    ax.set_ylim(-5,myTree.ySpan+5)\n    plt.savefig(fig_file, dpi=300)\n\nplot = True # plot tree colored by anc states?\nbase_dir = Path(__file__).parent.parent / \"data\"\n\n\"Input files\"\nmeta_file = base_dir / \"hcov_USA_post2020-09-01_EMSI_subsampled_metadata.csv\"\ntree_file = str(base_dir / \"hcov_USA_post2020-09-01_EMSI_mini.tre\")\nalign_file = base_dir / 'hcov_USA_post2020-09-01_EMSI_subsampled_aligned.fasta'\n\n\"Output files\"\nlabeled_tree_file = str(base_dir / \"hcov_USA_post2020-09-01_labeled_forCatherine.tre\") # tree with internal labels\nanc_state_csv = base_dir / \"hcov_USA_post2020-09-01_anc_states_forCatherine.csv\"\n\n\"Get meta data file\"\nmerged_df = pd.read_csv(meta_file,sep=\",\",index_col='accession_id')\n\n\"Add deletion features\"\n\"Encode nsp6 deletion as binary variable in meta data\"\ndel_label = 'nsp6_Delta9'\nmerged_df = get_nsp6_del_state(merged_df,align_file,del_label)\n\n\"Encode ORF9 TRS deletion as binary variable in meta data\"\ndel_label = 'ORF9_TRS-3'\nmerged_df = get_ORF9_del_state(merged_df,align_file,del_label)\n\n\"Get features to reconstruct\"\nfeatures = ['nsp6_Delta9', 'ORF9_TRS-3']\n#features = 
merged_df.columns.to_list()\n\n\"First assign internal labels to tree and then create new df for ancestral features\"\ntree = Tree(tree_file, format=1)\ntree = AncestralReconstruction.label_internal_nodes(tree)\nnode_labels = [node.name for node in tree.traverse(\"preorder\")]\nanc_df = pd.DataFrame(index=node_labels)\n\n\"Run MP ancestral state reconstruction for all features\"\nreconstruct = True # set false if already reconstructed\nif reconstruct:\n for f in features:\n print('Reconstructing: ' + f)\n feature_dic = {}\n for index, row in merged_df.iterrows():\n feature_dic[index] = row[f]\n tree, anc_dic = AncestralReconstruction.reconstruct_MP(tree,feature_dic)\n anc_df[f] = pd.Series(anc_dic)\n if plot:\n mtt_file = \"hcov_USA_post2020-09-01_MPreconstruct\" + f + \".tre\"\n mtt_file = str(base_dir / mtt_file)\n fig_file = \"hcov_USA_post2020-09-01_MPreconstruct\" + f + \".png\"\n fig_file = str(base_dir / fig_file)\n plot_tree(tree,mtt_file,fig_file)\n tree.write(format=1, outfile=labeled_tree_file)\n\n\"Save unencoded ancestral states df\"\nanc_df.to_csv(anc_state_csv,index_label='node')\n#anc_df = pd.read_csv(anc_state_csv,sep=\",\",index_col='node')\n\n\n \n " } ]
7
gbrandon2/domesticWork
https://github.com/gbrandon2/domesticWork
85bf12a2390078dd8b0fb674a6c9f9454739a704
85651907d1b84cd93941164426ecb03f37bb9d9a
9fb8c6d98d72b843c60d246f0a6abec5e19b05ff
refs/heads/master
2020-04-07T15:40:39.255219
2018-11-21T05:38:20
2018-11-21T05:38:20
158,496,003
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5577358603477478, "alphanum_fraction": 0.5826414823532104, "avg_line_length": 32.125, "blob_id": "5144f63e742732312b10f207662fa23d7247ed5f", "content_id": "43352039a6ea46597b98205f4a12ce7505de9844", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1325, "license_type": "no_license", "max_line_length": 127, "num_lines": 40, "path": "/findWork/migrations/0011_auto_20181120_2051.py", "repo_name": "gbrandon2/domesticWork", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.3 on 2018-11-21 01:51\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('findWork', '0010_auto_20181118_1741'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='applicant',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ],\n ),\n migrations.AlterField(\n model_name='user',\n name='Phone',\n field=models.IntegerField(),\n ),\n migrations.AddField(\n model_name='applicant',\n name='UserBoss',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Boss1', to='findWork.User'),\n ),\n migrations.AddField(\n model_name='applicant',\n name='UserEmployee',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Employee1', to='findWork.User'),\n ),\n migrations.AddField(\n model_name='applicant',\n name='WorkID',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='WorkID', to='findWork.User'),\n ),\n ]\n" }, { "alpha_fraction": 0.4732798933982849, "alphanum_fraction": 0.4789579212665558, "avg_line_length": 33.41379165649414, "blob_id": "84778167d3d95c371d5151ce94a6ebc2f95101bd", "content_id": "c607398538fe772f27f9c3ccbe7bb1eb19f00803", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 2994, "license_type": "no_license", "max_line_length": 92, "num_lines": 87, "path": "/templates/findWork/User.html", "repo_name": "gbrandon2/domesticWork", "src_encoding": "UTF-8", "text": "<!DOCTYPE HTML>\n<!--\n\tIon by TEMPLATED\n\ttemplated.co @templatedco\n\tReleased for free under the Creative Commons Attribution 3.0 license (templated.co/license)\n-->\n<html>\n\t<head>\n\t\t<title></title>\n\t\t<meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\" />\n\t\t<meta name=\"description\" content=\"\" />\n\t\t<meta name=\"keywords\" content=\"\" />\n\t\t{%load static%}\n\t\t<!--[if lte IE 8]><script src=\"js/html5shiv.js\"></script><![endif]-->\n\t\t<script src=\"/static/js/jquery.min.js\"></script>\n\t\t<script src=\"/static/js/skel.min.js\"></script>\n\t\t<script src=\"/static/js/skel-layers.min.js\"></script>\n\t\t<script src=\"/static/js/init.js\"></script>\n\t\t<link rel=\"stylesheet\" href=\"{% static 'css/skel.css'%}\" />\n\t\t<link rel=\"stylesheet\" href=\"{% static 'css/style.css'%}\" />\n\t <link rel=\"stylesheet\" href=\"{% static 'css/style-xLarge.css'%}\" />\n\t <link rel=\"stylesheet\" href=\"{% static 'css/estilos.css'%}\" />\n\t</head>\n\t<body id=\"top\">\n\n\t\t<!-- Header -->\n\t\t\t<header id=\"header\" class=\"skel-layers-fixed\">\n\t\t\t <h1>DomesticWork</h1>\n\t\t\t <p>&nbsp;</p>\n\t\t\t <nav id=\"nav\">\n\t\t\t\t\t<ul>\n\t\t\t\t\t\t<li><a href=\"ShowWorks\">Works</a></li>\n\t\t\t\t\t\t<li><a href=\"postWork\">Post Work</a></li>\n\t\t\t\t\t\t{%for temp in userName2%}\n\t\t\t\t\t\t<li><a 
href=\"#\">{{temp.Username}} </a></li>\n\t\t\t{%endfor%}\n <li><a href=\"#\" >Contact Us</a></li>\n\t\t\t\t\t\t<li><a href=\"logOut\" id=\"button\" class=\"button special\">Log out</a></li>\n\t\t\t\t\t</ul>\n\t\t\t\t</nav>\n\t\t\t</header>\n\t\t\t<h1 style=\"font-size:50px;padding-left: 40%;\n\">My works</h1>\n\t\t\t<div class=\"container\" id=\"box\">\n\n\n\n\t\t\t\t <div class=\"container\" id=\"box2\">\n\t\t\t\t {%for temp in data%}\n\n\t\t\t\t <div class=\"column\" onclick=\"redirect({{temp.id}})\">\n\t\t\t\t <form action=\"/findWork/home/delete\" method=\"post\">\n\t\t\t\t {% csrf_token %}\n\t\t\t\t <h3>Work: {{temp.Type}}</h3>\n\t\t\t\t <p>Description: {{temp.Description}}<br>\n\t\t\t\t Start date: {{temp.StartDate}}<br>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tFinal date: {{temp.Finaldate}}<br>\n\t\t\t\t Price:{{temp.Price}}<br>\n \tApplicant: {{temp.Finaldate}}\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t</p>\n\n\t\t\t\t <input value=\"{{temp.id}}\" name=\"delete\" hidden>\n\t\t\t\t <input type=\"submit\" id=\"botonB\" value=\"delete\">\n\n\n\t\t\t\t </form>\n\t\t\t\t <form action=\"/findWork/home/update\" method=\"post\">\n\t\t\t\t {% csrf_token %}\n\t\t\t\t <input value=\"{{temp.id}}\" name=\"update\" hidden>\n\t\t\t\t <input type=\"submit\" id=\"botonB\" value=\"update\">\n\t\t\t\t </form>\n\t\t\t\t </div>\n\t\t\t\t\t\t\t\t{%endfor%}\n\n\t\t\t\t </div>\n\n\t\t\t\t\t\t\t\t<script>\n\t\t\t\t\t\t\t\tfunction redirect(param)\n\t\t\t\t\t\t\t\t{\n\n\t\t\t\t\t\t\t\twindow.location.replace(\"User/mywork/\"+param);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t</script>\n\n\t\t\t</div>\n </body>\n </html>\n" }, { "alpha_fraction": 0.7319090366363525, "alphanum_fraction": 0.7512060403823853, "avg_line_length": 44.3125, "blob_id": "a2b299d3e9262a7fef802e765a54776e863d8a62", "content_id": "62d2e54c34a2cc8b19820f75ddef0590b8c704a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1451, "license_type": "no_license", "max_line_length": 95, "num_lines": 32, "path": "/findWork/models.py", "repo_name": "gbrandon2/domesticWork", "src_encoding": "UTF-8", "text": "\nfrom django.db import models,connection\nclass User(models.Model):\n FirstName =models.CharField(max_length=30)\n LastName =models.CharField(max_length=30)\n Username =models.CharField(max_length=30)\n Password =models.CharField(max_length=30)\n Phone =models.IntegerField()\n\nclass Administrator(models.Model):\n FirstName =models.CharField(max_length=30)\n LastName =models.CharField(max_length=30)\n Username =models.CharField(max_length=30)\n Password =models.CharField(max_length=30)\n\nclass Payment(models.Model):\n Type =models.CharField(max_length=30)\n User = models.ForeignKey(User, on_delete=models.CASCADE)\n\nclass Works(models.Model):\n Description =models.CharField(max_length=30)\n Type =models.CharField(max_length=30)\n StartDate =models.DateField(auto_now=False, auto_now_add=False)\n Finaldate =models.DateField(auto_now=False, auto_now_add=False)\n Price=models.IntegerField()\n UserBoss = models.ForeignKey(User, on_delete=models.CASCADE,related_name='Boss')\n UserEmployee = models.ForeignKey(User, on_delete=models.CASCADE, related_name='Employee')\n Address=models.CharField(max_length=30)\n\nclass applicant(models.Model):\n UserBoss1 = models.ForeignKey(User, on_delete=models.CASCADE,related_name='Boss1')\n UserEmployee1 = models.ForeignKey(User, on_delete=models.CASCADE, related_name='Employee1')\n WorkID= models.ForeignKey(Works, on_delete=models.CASCADE,related_name='WorkID')\n" }, 
{ "alpha_fraction": 0.5296609997749329, "alphanum_fraction": 0.5627118349075317, "avg_line_length": 30.052631378173828, "blob_id": "49bf4fb9e3a4acbaab3656c6da242c373d0431c3", "content_id": "4233d6c640b95c44531ae6cb0c8a3d8cc76d4a23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1180, "license_type": "no_license", "max_line_length": 114, "num_lines": 38, "path": "/findWork/migrations/0005_auto_20181112_1639.py", "repo_name": "gbrandon2/domesticWork", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.3 on 2018-11-12 21:39\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('findWork', '0004_auto_20181112_1633'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Payment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('Type', models.CharField(max_length=30)),\n ('User', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='findWork.User')),\n ],\n ),\n migrations.RemoveField(\n model_name='administrator',\n name='User',\n ),\n migrations.AddField(\n model_name='administrator',\n name='Password',\n field=models.CharField(default=1, max_length=30),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='administrator',\n name='Username',\n field=models.CharField(default=1, max_length=30),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.6828358173370361, "alphanum_fraction": 0.6840795874595642, "avg_line_length": 27.714284896850586, "blob_id": "04966da8beaf8aa85c98833a05b59d8c04312070", "content_id": "9b05f3cefe4bec4de15a54ff559d639d99b378d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 804, "license_type": "no_license", "max_line_length": 55, "num_lines": 28, "path": "/findWork/urls.py", "repo_name": "gbrandon2/domesticWork", "src_encoding": "UTF-8", "text": "#from django.shortcuts import render\n\n# Create your views here.\n#from django.http import HttpResponse\n\n\n#def index(request):\n #return HttpResponse(\"<h1> hola mundo \")\n\nfrom django.urls import path\nfrom . 
import views\n\n\nurlpatterns = [\n path('home/', views.index),\n path('home/Signin', views.post),\n path('home/postWork', views.postWork),\n path('home/login', views.postLogIn),\n path('home/ShowWorks', views.ShowWorks),\n path('home/logOut', views.logOut),\n path('home/User', views.User),\n path('home/ShowWorks/filters', views.queryfilters),\n path('home/<int:id>',views.workDetails),\n path('home/delete', views.delete),\n path('home/update', views.sendToUpdateView),\n path('home/updateWork', views.updateWork),\n path('home/User/mywork/<int:id>',views.Applicant),\n]\n" }, { "alpha_fraction": 0.5768463015556335, "alphanum_fraction": 0.6387225389480591, "avg_line_length": 25.36842155456543, "blob_id": "1edafaa4379d0fe7920e26eb1ecc03091240360c", "content_id": "cbb13af59d515f34aeb8e4aa74ee27f475d4a689", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 501, "license_type": "no_license", "max_line_length": 126, "num_lines": 19, "path": "/findWork/migrations/0008_auto_20181113_2318.py", "repo_name": "gbrandon2/domesticWork", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.3 on 2018-11-14 04:18\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('findWork', '0007_auto_20181113_1120'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='works',\n name='UserEmployee',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Employee', to='findWork.User'),\n ),\n ]\n" }, { "alpha_fraction": 0.5413534045219421, "alphanum_fraction": 0.5827067494392395, "avg_line_length": 26.517240524291992, "blob_id": "bfd8aafea121c11b186e44ea75e431f29ed2201f", "content_id": "f7eeef45ab35bb36fab91dda8b7203bafd98494b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 798, "license_type": "no_license", "max_line_length": 125, "num_lines": 29, "path": "/findWork/migrations/0012_auto_20181120_2144.py", "repo_name": "gbrandon2/domesticWork", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.3 on 2018-11-21 02:44\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('findWork', '0011_auto_20181120_2051'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='applicant',\n old_name='UserBoss',\n new_name='UserBoss1',\n ),\n migrations.RenameField(\n model_name='applicant',\n old_name='UserEmployee',\n new_name='UserEmployee1',\n ),\n migrations.AlterField(\n model_name='applicant',\n name='WorkID',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='WorkID', to='findWork.Works'),\n ),\n ]\n" }, { "alpha_fraction": 0.6651734709739685, "alphanum_fraction": 0.6715854406356812, "avg_line_length": 42.95366668701172, "blob_id": "7b77f63432575ec9cd29b4fa8b7a47451963ba84", "content_id": "50f2f1cb2ca1f22efee7c94a8639b99399d3c6ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11385, "license_type": "no_license", "max_line_length": 255, "num_lines": 259, "path": "/findWork/views.py", "repo_name": "gbrandon2/domesticWork", "src_encoding": "UTF-8", "text": "\nfrom django.shortcuts import render, redirect\n#from django.http import HttpRequest\nfrom django.http import HttpResponse\n#from django.template import RequestContext\nfrom django.template import loader\nfrom django.contrib.auth 
import login, authenticate\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom . import models\nUser=''\ndef index(request):\n    template = loader.get_template('findWork/index.html')\n    context={}\n    return HttpResponse(template.render(context,request))\n    #assert isinstance(request, HttpRequest)\n    #return render(request,'index.html')\n\ndef post(request):\n    if request.method == 'POST':\n        nuevo2=models.User.objects.filter(Username=request.POST['userN']).exists()\n        if(not nuevo2):\n            nuevo= models.User()\n            nuevo.FirstName=request.POST['firstN']\n            nuevo.LastName=request.POST['lastN']\n            nuevo.Password=request.POST['pass']\n            nuevo.Username=request.POST['userN']\n            nuevo.Phone=request.POST['conta']\n            nuevo.save()  # save first so nuevo.id is assigned before storing it in the session\n            request.session['member_id']=nuevo.id\n            request.session['password']=nuevo.Password\n            request.session['username']=nuevo.Username\n            template = loader.get_template('findWork/User.html')\n            context={}\n            return HttpResponse(template.render(context,request))\n\n        else:\n            messages.warning(request, 'User already exists')\n            template = loader.get_template('findWork/index.html')\n            context={}\n\n    return redirect('/findWork/home/')\n\n\ndef postWork(request):\n\n\n    if request.method == 'POST':\n        nuevo= models.Works()\n        auxiliar=models.User.objects.filter(id=request.session['member_id']).first()\n        nuevo.Description=request.POST['description']\n        nuevo.Type=request.POST['work']\n        nuevo.StartDate=request.POST['startdate']\n        nuevo.Finaldate=request.POST['finaldate']\n        nuevo.Price=request.POST['price']\n        nuevo.Address=request.POST['address']\n        nuevo.UserBoss=auxiliar\n        nuevo.UserEmployee=auxiliar\n\n        nuevo.save()\n    nuev3=models.User.objects.filter(Username=request.session['username'])\n    template = loader.get_template('findWork/postWork.html')\n    context={'userName2':nuev3}\n    return HttpResponse(template.render(context,request))\n\ndef postLogIn(request):\n    if request.method == 'POST':\n        if(\"username\" in request.session):\n            data=models.Works.objects.filter(UserBoss=request.session['member_id'])\n            template = loader.get_template('findWork/User.html')\n            nuevo=models.User.objects.filter(Username=request.session['username'])\n            context={'userName2':nuevo,'data':data}\n            return HttpResponse(template.render(context,request))\n        else:\n            nuevo=models.User.objects.filter(Username=request.POST['userNa'],Password=request.POST['passw']).first()\n            if(nuevo is not None):\n                if (nuevo.Password ==request.POST['passw']):\n                    request.session['member_id']=nuevo.id\n                    request.session['password']=nuevo.Password\n                    request.session['username']=nuevo.Username\n                    data=models.Works.objects.filter(UserBoss=request.session['member_id'])\n                    template = loader.get_template('findWork/User.html')\n                    nuevo=models.User.objects.filter(Username=request.session['username'])\n                    context={'userName2':nuevo,'data':data}\n                    return HttpResponse(template.render(context,request))\n            else:\n                messages.warning(request, 'Wrong User')\n                return redirect('/findWork/home/')\n\n\n\n    #template = loader.get_template('findWork/index.html')\n    #context={}\n    #return HttpResponse(template.render(context,request))\n\ndef PublicWorks(request):\n    print(request.session['member_id'])\n    if request.method == 'POST':\n        nuevo= models.Works()\n        auxiliar=models.User.objects.filter(id=request.session['member_id']).first()\n        nuevo.Description=request.POST['description']\n        nuevo.Type=request.POST['work']\n        nuevo.StartDate=request.POST['startdate']\n        nuevo.Finaldate=request.POST['finaldate']\n        
nuevo.Price=request.POST['price']\n nuevo.Address=request.POST['address']\n nuevo.UserBoss=auxiliar\n nuevo.UserEmployee=auxiliar\n\n nuevo.save()\n\n template = loader.get_template('findWork/User.html')\n context={}\n return HttpResponse(template.render(context,request))\n\ndef ShowWorks(request):\n nuev3=models.User.objects.filter(Username=request.session['username'])\n if(request.method=='GET'):\n if(request.GET.get('filtro') is None):\n data=models.Works.objects.raw(\"SELECT * FROM findwork_works WHERE UserBoss_id!= %s AND UserBoss_id=UserEmployee_id\",[request.session['member_id']])\n w=models.Works.objects.raw(\"SELECT id,Type FROM findwork_works GROUP BY id,Type\")\n template = loader.get_template('findWork/ShowWorks.html')\n nuev3=models.User.objects.filter(Username=request.session['username'])\n context={'data':data,'ejemplo':w,'userName2':nuev3}\n return HttpResponse(template.render(context,request))\n if(request.GET.get('filtro')!=('filter')):\n nuevo2=models.Works.objects.raw(\"SELECT * FROM findwork_works WHERE Type=%s AND UserBoss_id!=%s\",[request.GET.get('filtro'),request.session['member_id']])\n else:\n\n nuevo2=models.Works.objects.all()\n\n w=models.Works.objects.raw(\"SELECT id,Type FROM findwork_works GROUP BY id,Type\")\n template = loader.get_template('findWork/showWorks.html')\n\n context={'data':nuevo2,'ejemplo':w,'userName2':nuev3}\n return HttpResponse(template.render(context,request))\n elif(request.method=='POST'):\n\n query1='%'+request.POST['query']+'%'\n print(query1)\n w2=models.Works.objects.raw(\"SELECT * FROM findwork_works WHERE UserBoss_id!= %s AND Description LIKE %s\",[request.session['member_id'],query1])\n w=models.Works.objects.raw(\"SELECT id,Type FROM findwork_works GROUP BY id,Type\")\n sw=0;\n for x in w2:\n sw=1;\n if(sw==0):\n context={'data':0,'ejemplo':w,'userName2':nuev3}\n else:\n context={'data':w2,'ejemplo':w,'userName2':nuev3}\n template = loader.get_template('findWork/showWorks.html')\n return HttpResponse(template.render(context,request))\n\n else:\n data=models.Works.objects.raw(\"SELECT * FROM findwork_works WHERE UserBoss_id!= %s AND UserBoss_id=UserEmployee_id\",[request.session['member_id']])\n\n\ndef logOut(request):\n del request.session['member_id']\n del request.session['username']\n del request.session['password']\n return redirect('/findWork/home/')\n\ndef User(request):\n nuevo2=models.Works.objects.filter(UserBoss=request.session['member_id'])\n template = loader.get_template('findWork/User.html')\n nuev3=models.User.objects.filter(Username=request.session['username'])\n context={'data':nuevo2,'userName2':nuev3}\n return HttpResponse(template.render(context,request))\n\ndef queryfilters(request):\n nuev3=models.User.objects.filter(Username=request.session['username'])\n if(request.method=='GET'):\n\n if(request.GET.get('filtro')!=('filter')):\n nuevo2=models.Works.objects.raw(\"SELECT * FROM findwork_works WHERE Type=%s AND UserBoss_id!=%s\",[request.GET.get('filtro'),request.session['member_id']])\n else:\n nuevo2=models.Works.objects.all()\n\n w=models.Works.objects.raw(\"SELECT id,Type FROM findwork_works GROUP BY id,Type\")\n template = loader.get_template('findWork/showWorksFilter.html')\n\n context={'data':nuevo2,'ejemplo':w,'userName2':nuev3}\n return HttpResponse(template.render(context,request))\n if(request.method=='POST'):\n query1='%'+request.POST['query']+'%'\n print(query1)\n w2=models.Works.objects.raw(\"SELECT * FROM findwork_works WHERE UserBoss_id!= %s AND Description LIKE 
%s\",[request.session['member_id'],query1])\n        w=models.Works.objects.raw(\"SELECT id,Type FROM findwork_works GROUP BY id,Type\")\n        sw=0;\n        for x in w2:\n            sw=1;\n        if(sw==0):\n            context={'data':0,'ejemplo':w,'userName2':nuev3}\n        else:\n            context={'data':w2,'ejemplo':w,'userName2':nuev3}\n        template = loader.get_template('findWork/showWorksFilter.html')\n        return HttpResponse(template.render(context,request))\n    context={}\n    template = loader.get_template('findWork/ShowWorks.html')\n    return HttpResponse(template.render(context,request))\n\n\ndef workDetails(request,id):\n    s=str(id)\n    nuev3=models.User.objects.filter(Username=request.session['username'])\n\n    w2=models.Works.objects.raw(\"SELECT * FROM findwork_works,findwork_user=d WHERE findwork_works.id = %s AND d.id=findwork_works.UserBoss_id\",[s])\n    if(request.method=='POST'):\n        nuev=models.Works.objects.filter(id=id).first()\n        work=models.applicant()\n        work.UserBoss1=nuev.UserBoss\n        work.UserEmployee1=nuev.UserEmployee\n        work.WorkID=nuev\n\n        work.save()\n\n        ## do the update here\n        #UserEmployee with the request.session id\n\n        context={'data':w2,'sw':0,'userName2':nuev3}\n\n    else:\n        context={'data':w2,'sw':1,'userName2':nuev3}\n    template = loader.get_template('findWork/workDetails.html')\n    return HttpResponse(template.render(context,request))\n\ndef delete(request):\n    #if request.method == 'POST':\n    data=models.Works.objects.filter(UserBoss=request.session['member_id'])\n    models.Works.objects.filter(id=request.POST['delete']).delete()\n    template = loader.get_template('findWork/User.html')\n    context={'data':data,'username':request.session['username']}\n    return HttpResponse(template.render(context,request))\n\ndef sendToUpdateView(request):\n    work = models.Works.objects.filter(id=request.POST['update'])\n    template = loader.get_template('findWork/updateWork.html')\n    context={'username':request.session['username'],'work':work}\n    return HttpResponse(template.render(context,request))\n\ndef updateWork(request):\n    work = models.Works.objects.filter(id=request.POST['update']).update(Type=request.POST['type'] ,Description = request.POST['description'],StartDate = request.POST['startDate'],Finaldate = request.POST['startDate'],Price = request.POST['price'])\n\n    data=models.Works.objects.filter(UserBoss=request.session['member_id'])\n    print(data)\n    template = loader.get_template('findWork/User.html')\n    context={'username':request.session['username'],'data':data}\n    return HttpResponse(template.render(context,request))\n\n\ndef Applicant(request,id):\n    s=str(id)\n    nuev3=models.User.objects.filter(Username=request.session['username'])\n\n    w2=models.Works.objects.raw(\"SELECT * FROM findwork_works,findwork_user=d WHERE findwork_works.id = %s AND d.id=findwork_works.UserBoss_id\",[s])\n    work=models.Works.objects.raw(\"SELECT * FROM findwork_applicant,findwork_user=s WHERE UserEmployee1_id=s.id AND WorkID_id=%s\",[str(id)])\n    template = loader.get_template('findWork/myWorkDetail.html')\n    context={'data':w2,'userName2':nuev3,'User':work}\n    return HttpResponse(template.render(context,request))\n" }, { "alpha_fraction": 0.5205479264259338, "alphanum_fraction": 0.5726027488708496, "avg_line_length": 20.27777862548828, "blob_id": "a8d47430c16e71dcb5593388666f5150ffb7e473", "content_id": "ad84f8b150b01909a8f7cee957c2ec141ec59f30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 365, "license_type": "no_license", "max_line_length": 47, "num_lines": 18, "path": 
"/findWork/migrations/0004_auto_20181112_1633.py", "repo_name": "gbrandon2/domesticWork", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.3 on 2018-11-12 21:33\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('findWork', '0003_administrator'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='administrator',\n old_name='User_ID',\n new_name='User',\n ),\n ]\n" }, { "alpha_fraction": 0.571286141872406, "alphanum_fraction": 0.5952143669128418, "avg_line_length": 30.34375, "blob_id": "b49719f823d1b2dec5e5a3d2ec8c40cb797ff720", "content_id": "e22205f93c82e2dabfc4130000b258d6e0ddbb2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1003, "license_type": "no_license", "max_line_length": 138, "num_lines": 32, "path": "/findWork/migrations/0007_auto_20181113_1120.py", "repo_name": "gbrandon2/domesticWork", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.3 on 2018-11-13 16:20\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('findWork', '0006_works'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='works',\n name='Address',\n field=models.CharField(default=1, max_length=30),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='works',\n name='UserBoss',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='Boss', to='findWork.User'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='works',\n name='UserEmployee',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='Employees', to='findWork.User'),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.5250659584999084, "alphanum_fraction": 0.5804749131202698, "avg_line_length": 20.05555534362793, "blob_id": "e1e9e89eceb61f7ce805ba70b9dbfd432b33c73e", "content_id": "a9f4de8bc5f9d17c6ca5e82c77677d02ab71d3a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 379, "license_type": "no_license", "max_line_length": 53, "num_lines": 18, "path": "/findWork/migrations/0010_auto_20181118_1741.py", "repo_name": "gbrandon2/domesticWork", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.3 on 2018-11-18 22:41\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('findWork', '0009_user_phone'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='user',\n name='Phone',\n field=models.IntegerField(max_length=30),\n ),\n ]\n" } ]
11
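The findWork views captured in the record above build their joins with hand-written raw SQL. Below is a minimal sketch, not code from the repository, of how the workDetails lookup could be expressed through the Django ORM instead; it assumes only the UserBoss foreign key that migration 0007 in the same record adds to Works, and the function name is hypothetical.

```python
# Hypothetical ORM equivalent of the raw join in workDetails (illustrative).
from findWork import models

def work_with_boss(work_id):
    # select_related('UserBoss') pulls the related User row in the same
    # query, which is what the hand-written implicit join between
    # findwork_works and findwork_user was doing.
    return (models.Works.objects
            .select_related('UserBoss')
            .filter(id=work_id)
            .first())
```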
HongleiXie/CS109
https://github.com/HongleiXie/CS109
4c8217be011d250b2b5dd095f490e6a4c56e206e
fa2a8c94a3ca2bc4f6504f77f851e14d674ebd8f
75f23faf0ad41e655acd8359c369d9ce0127750f
refs/heads/master
2016-08-12T21:26:52.235637
2016-04-30T20:49:01
2016-04-30T20:49:01
55,370,355
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6315789222717285, "alphanum_fraction": 0.7631579041481018, "avg_line_length": 37, "blob_id": "bf4ea89455fbb02d4f0a858eb1ed7c342f9bc1e3", "content_id": "197a72f238b823abc6bcd7602745c1df1b9e72d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 76, "license_type": "no_license", "max_line_length": 46, "num_lines": 2, "path": "/README.md", "repo_name": "HongleiXie/CS109", "src_encoding": "UTF-8", "text": "# Harvard CS109 Data Science\nCourse [github](https://github.com/cs109/2015)\n" }, { "alpha_fraction": 0.6207951307296753, "alphanum_fraction": 0.6539245843887329, "avg_line_length": 26.94285774230957, "blob_id": "73de6df1baec0e3eb1f390d6ea5e75515e05abcc", "content_id": "bb3a266a604fda2b55e20812936e054fb1458c93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1962, "license_type": "no_license", "max_line_length": 188, "num_lines": 70, "path": "/scrapy_IMDB.py", "repo_name": "HongleiXie/CS109", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 18, 2016\n@author: Honglei\nPurpose: To scrapy top 250 movies in IMDB and visualize the frequency varying time\n\"\"\"\n\nimport urllib2\nimport os\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\nimport sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nos.getcwd() # current working directory\nos.chdir('/Users/Honglei/Desktop') # change current working directory\n\n# get the current encoding\ntype = sys.getfilesystemencoding()\n\n# request the webpage\nreq = requests.get(\"http://www.imdb.com/chart/top?pf_rd_m=A2FGELUUNOQJNL&pf_rd_p=2417962742&pf_rd_r=0M85G1V8JHW928EHBETF&pf_rd_s=right-4&pf_rd_t=15506&pf_rd_i=moviemeter&ref_=chtmvm_ql_3\")\npage = req.text\n\nsoup = BeautifulSoup(page, 'html.parser')\n#print soup.prettify()\n\n# get top 250 movie names and years\nmovie_names = []\nmovie_year = [0] * 250\n\nj = 0\nfor i in range(250):\n content = str(soup.findAll('td', {'class':'titleColumn'})[i])\n content = content.decode(\"UTF-8\").encode(type)\n name = re.findall ( '>(.*?)</a>', content)\n movie_names.insert(len(movie_names), name)\n \n year = str(soup.findAll('span', {'class':'secondaryInfo'})[i])\n movie_year[i] = int(re.findall(r\"\\(([0-9_]+)\\)\", year)[0]) \n # same: m = re.search(r\"\\(([0-9_]+)\\)\", year)\n # print m.group(1)\n # [A-Za-z0-9_] can be replaced by [w]\n print('We now have ' + str(j) + ' movies') \n j = j+1\n\n\n\nprint movie_names\nprint movie_year\n\n# export to the text file\nopen(\"top250names.txt\", \"w\").write(\"\\n\".join((\"\\t\".join(item)) for item in movie_names))\n\n######## plot the count by time #########################\n\n# compute the frequency table\ny = np.bincount(movie_year)\nii = np.nonzero(y)[0]\nout = zip(ii, y[ii])\n# crete a dataframe\ndf = pd.DataFrame(out, columns = ['Year', 'Freq'], index = ii)\n# drop the first Year column since I already assign valid index\ndf.drop(df.columns[0], axis = 1)\n# plot\nplt.plot(ii, df['Freq']);\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.611020565032959, "alphanum_fraction": 0.6286273002624512, "avg_line_length": 32.326087951660156, "blob_id": "08d6183463d2a4a3983e4e8bda071e91afc8e404", "content_id": "3d28ea823195130bf0f348e1f6f20e04607b4c1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3067, "license_type": "no_license", "max_line_length": 111, "num_lines": 92, "path": "/mh_problem.py", 
"repo_name": "HongleiXie/CS109", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nIdea is from Harvard CS109 HW0. See my blog http://hongleixie.github.io/blog/MH_problem/ to see results.\nTO DO: add plots\n@author: Honglei\n\"\"\"\n\nimport numpy as np # imports a fast numerical programming library\nimport scipy as sp #imports stats functions, amongst other things\nimport matplotlib as mpl # this actually imports matplotlib\nimport matplotlib.cm as cm #allows us easy access to colormaps\nimport matplotlib.pyplot as plt #sets up plotting under plt\nimport pandas as pd #lets us handle data as dataframes\n#sets up pandas table display\npd.set_option('display.width', 500)\npd.set_option('display.max_columns', 100)\npd.set_option('display.notebook_repr_html', True)\nimport seaborn as sns #sets up styles and gives us more plotting options\n\n\"\"\"\nSet Parameters\n\"\"\"\nnsim = 1000\ndoors = 100 # >2 integers\n\n\ndef simulate_prizedoor(nsim):\n return np.random.randint(0, doors, (nsim))\n\ndef simulate_guess(nsim):\n return np.zeros(nsim, dtype = np.int)\n #return np.random.randint(0, 100, (nsim))\n \n \n##############################################################################\n#Following the same strategy as the CS109 HW did, however, very inefficient \n#when there are relatively big number of doors. \n#out = pd.DataFrame();\n#\n#for j in range(0,nsim):\n# out[j] = np.random.choice(range(0,100), 98, replace = False)\n# \n#def goat_door(prizedoors, guesses):\n# while True:\n# bad = map(lambda x: (out[x] == prizedoors[x]) | (out[x] == guesses[x]), range(0,nsim))\n# if sum(bad).sum() == 0 and (final.T.apply(lambda x: x.nunique(), axis = 1) == 98).all():\n# return out\n# else: \n# for i in range(0,nsim):\n# out[i][bad[i]] = np.random.choice(range(0,100), bad[i].sum(), replace = False)\n# \n#len(out[t]) > len(set(out[t]))\n###############################################################################\n \ndef goat_door(prizedoors, guesses):\n out = pd.DataFrame();\n while out.shape[1] != nsim:\n for t in range(0, nsim):\n if prizedoors[t] == guesses[t]:\n same_thing = prizedoors[t]\n out[t] = np.random.choice([x for x in range(0,doors) if x != same_thing], doors-2, replace = False)\n else:\n out[t] = [x for x in range(0,doors) if x != prizedoors[t] and x != guesses[t]]\n return out \n \n \n\ndef switch_guess(guesses, goatdoors):\n result = pd.DataFrame()\n for t in range(0, nsim):\n result[t] = list(set(range(0,doors)) - set([guesses[t]]) - set(goatdoors[t]))\n return result\n\ndef win_percentage(guesses, prizedoors):\n return 100 * (guesses == prizedoors).mean()\n \n#keep guesses\nprint (\"Win percentage when keeping original door\")\nprint win_percentage(simulate_guess(nsim), simulate_prizedoor(nsim))\n\n#switch\nprize = simulate_prizedoor(nsim)\nguess = simulate_guess(nsim)\ngoats = goat_door(prize, guess)\n\nguess = switch_guess(guess, goats)\nprint (\"Win percentage when switching doors\")\nprint (win_percentage(prize, guess).mean()) \n\n#clear all variables\n#import sys\n#sys.modules[__name__].__dict__.clear() \n" } ]
3
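mh_problem.py in the record above simulates the generalized Monty Hall game by materializing the opened goat doors in pandas DataFrames. The sketch below is a NumPy-only restatement, not code from the repository: with `doors` doors and the host opening every door except the guess and one other, switching loses only when the first guess was already correct, so the empirical switch rate is the complement of the stay rate.

```python
# NumPy-only restatement of the mh_problem.py simulation (illustrative).
import numpy as np

def win_percentages(nsim=1000, doors=100, seed=0):
    rng = np.random.default_rng(seed)
    prize = rng.integers(0, doors, nsim)  # where the prize actually is
    guess = rng.integers(0, doors, nsim)  # the contestant's first pick
    stay = (guess == prize).mean()        # expected value: 1/doors
    switch = 1.0 - stay                   # the single unopened door otherwise
    return 100 * stay, 100 * switch

print(win_percentages())  # roughly (1.0, 99.0) with 100 doors
```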
amstan/floppy
https://github.com/amstan/floppy
f9645195f9d188f76243eaa45f26bcb82ee2c25f
c8ff0142dbad94557a3fc5bc3cf27b589a0a3d9f
d9fec83ac35aa3f21fd02bcfe83e9090e6b0b361
refs/heads/master
2021-01-20T11:59:58.465972
2012-04-20T13:33:47
2012-04-20T13:33:47
2,853,785
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5789721608161926, "alphanum_fraction": 0.594059407711029, "avg_line_length": 25.848100662231445, "blob_id": "32f90d95e673ee2617a0b51b822d33d04f71d940", "content_id": "bf67fa69771e1059d1eb5654e8c9ef8603a20e60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2121, "license_type": "no_license", "max_line_length": 92, "num_lines": 79, "path": "/firmware/ringbuffer.h", "repo_name": "amstan/floppy", "src_encoding": "UTF-8", "text": "/*\n * ringbuffer.h - template for a circular buffer\n *\n * License: Do with this code what you want. However, don't blame\n * me if you connect it to a heart pump and it stops. This source\n * is provided as is with no warranties. It probably has bugs!!\n * You have been warned!\n *\n * Author: Rick Kimball\n * email: [email protected]\n * Version: 1.00 Initial version 05-12-2011\n */\n\n#ifndef RINGBUFFER_H_\n#define RINGBUFFER_H_\n\n/**\n * ringbuffer - a template based interrupt safe circular buffer structure with functions\n */\n\ntemplate<typename T, int MAX_ITEMS>\nstruct ringbuffer {\n volatile int head;\n volatile int tail;\n volatile T buffer[MAX_ITEMS];\n\n /**\n * empty() - checks the buffer for data\n *\n * returns true if empty, false if there is data\n */\n inline bool empty() {\n bool isEmpty;\n\n _disable_interrupts(); // prevent inconsistent reads\n isEmpty = (head == tail);\n _enable_interrupts();\n\n return isEmpty;\n }\n\n /**\n * push_back() - append a byte to the buffer is possible\n * assumed to be called from the recv interrupt\n */\n inline void push_back(T c) {\n int i = (unsigned int) (head + 1) % MAX_ITEMS;\n if (i != tail) {\n buffer[head] = c;\n head = i;\n }\n }\n\n /**\n * pop_front() - remove a value from front of ring buffer\n */\n inline T pop_front() {\n T c = -1;\n\n _disable_interrupts(); // disable interrupts to protect head and tail values\n // This prevents the RX_ISR from modifying them\n // while we are trying to read and modify\n\n // if the head isn't ahead of the tail, we don't have any characters\n if (head != tail) {\n c = (T) buffer[tail];\n tail = (unsigned int) (tail + 1) % MAX_ITEMS;\n }\n\n _enable_interrupts(); // ok .. 
let everyone at them\n\n return c;\n }\n};\n\ntypedef ringbuffer<uint8_t, 16> ringbuffer_ui8_16; // ringbuffer, max of 16 uint8_t values\ntypedef ringbuffer<uint8_t, 32> Ringbuffer_uint8_32; // ringbuffer, max of 32 uint8_t values\n\n#endif /* RINGBUFFER_H_ */\n" }, { "alpha_fraction": 0.6235294342041016, "alphanum_fraction": 0.658417820930481, "avg_line_length": 23.176469802856445, "blob_id": "f785b7c4493af990e284fc4af35bbbed190c3907", "content_id": "682f32b99530349b4774e53163dc543111a927bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2465, "license_type": "no_license", "max_line_length": 114, "num_lines": 102, "path": "/interface/jackfloppy.py", "repo_name": "amstan/floppy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\nimport jacklib\nimport Queue\nimport sys\n\nMIDI_MASK =0b11110000\nMIDI_NOTEOFF=0b10000000\nMIDI_NOTEON =0b10010000\nMIDI_PITCH =0b11100000\nMIDI_MODE =0b10110000\n\nfrom floppy import *\nfrom notes import *\n\n# Globals\njack_client = None\nglobal jack_midi_in_port, jack_midi_in_data\njack_midi_in_port = None\njack_midi_in_data = Queue.Queue(1024)\n\ndef jack_process_callback(nframes, arg):\n\ttry:\n\t\tglobal jack_midi_in_port, jack_midi_in_data\n\t\t\n\t\t# MIDI In\n\t\tmidi_in_buffer = jacklib.port_get_buffer(jack_midi_in_port, nframes)\n\t\tevent_count = jacklib.midi_get_event_count(midi_in_buffer)\n\t\t\n\t\tif (event_count > 0):\n\t\t\tevent = jacklib.jack_midi_event_t()\n\t\t\t\n\t\t\tfor i in range(event_count):\n\t\t\t\tif (jacklib.midi_event_get(jacklib.pointer(event), midi_in_buffer, i) == 0):\n\t\t\t\t\tdata = jacklib.translate_midi_event_buffer(event.buffer)\n\t\t\t\t\t\n\t\t\t\t\tif (len(data) == 1):\n\t\t\t\t\t\tjack_midi_in_data.put_nowait((data[0], 0, 0))\n\t\t\t\t\t\n\t\t\t\t\telif (len(data) == 2):\n\t\t\t\t\t\tjack_midi_in_data.put_nowait((data[0], data[1], 0))\n\t\t\t\t\t\n\t\t\t\t\telif (len(data) == 3):\n\t\t\t\t\t\tjack_midi_in_data.put_nowait((data[0], data[1], data[2]))\n\t\t\t\t\t\n\t\t\t\t\tif (jack_midi_in_data.full()):\n\t\t\t\t\t\tbreak\n\t\t\t\n\t\t\tdel event\n\texcept Exception as e:\n\t\tprint e\n\treturn 0\n\nif __name__ == '__main__':\n\t# Start jack\n\tjack_client = jacklib.client_open(\"Floppy Drive\", jacklib.NullOption, 0)\n\tjack_midi_in_port = jacklib.port_register(jack_client, \"midi\", jacklib.DEFAULT_MIDI_TYPE, jacklib.PortIsInput, 0)\n\tjacklib.set_process_callback(jack_client, jack_process_callback, 0)\n\t\n\tjacklib.activate(jack_client)\n\t\n\tfloppy=Floppy(port=sys.argv[1],reset=True)\n\t\n\tnoteplaying=None\n\twhile 1:\n\t\ttry:\n\t\t\tmode, noteid, velo = jack_midi_in_data.get(True,1)\n\t\t\tnote=Note(noteid)\n\t\t\t\n\t\t\tif (mode&MIDI_MASK)==MIDI_NOTEON:\n\t\t\t\tfloppy.play(note)\n\t\t\t\tnoteplaying=note\n\t\t\t\n\t\t\telif (mode&MIDI_MASK)==MIDI_NOTEOFF:\n\t\t\t\tif note==noteplaying:\n\t\t\t\t\tfloppy.stop()\n\t\t\t\n\t\t\telif (mode&MIDI_MASK)==MIDI_PITCH:\n\t\t\t\tpitch=velo*256+note\n\t\t\t\tfloppy.pitchbend(pitch)\n\t\t\t\n\t\t\telif (mode&MIDI_MASK)==MIDI_MODE:\n\t\t\t\tif note in (64,120,121,123):\n\t\t\t\t\t#print \"Everything off(%s) on channel %s.\" % (note, mode&(~MIDI_MASK))\n\t\t\t\t\tfloppy.stop()\n\t\t\t\n\t\t\telse:\n\t\t\t\tprint \"ignoring\",mode,note,velo\n\t\t\t\tpass\n\t\texcept Queue.Empty:\n\t\t\tpass\n\t\texcept ValueError as e:\n\t\t\tprint e\n\t\texcept KeyboardInterrupt:\n\t\t\tstop()\n\t\t\traise\n\t\n\t# Close Jack\n\tif 
(jack_client):\n\t\tjacklib.deactivate(jack_client)\n\t\tjacklib.client_close(jack_client)" }, { "alpha_fraction": 0.5126621127128601, "alphanum_fraction": 0.558987021446228, "avg_line_length": 25.296875, "blob_id": "0520a58a2027e0dd5898abdd1b373d87d24201a3", "content_id": "56083637028f8c478b4d875b49cd4d8889c5f659", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1619, "license_type": "no_license", "max_line_length": 66, "num_lines": 64, "path": "/firmware/usci_serial.h", "repo_name": "amstan/floppy", "src_encoding": "UTF-8", "text": "#ifndef HW_SERIAL_H\n#define HW_SERIAL_H\n\n/**\n * Serial - simple access to USCI UART hardware\n * code implements interrupt driven input\n * and poll driven output.\n *\n * License: Do with this code what you want. However, don't blame\n * me if you connect it to a heart pump and it stops. This source\n * is provided as is with no warranties. It probably has bugs!!\n * You have been warned!\n *\n * Author: Rick Kimball\n * email: [email protected]\n * Version: 1.00 Initial version 05-12-2011\n */\n\ntemplate<typename T_STORAGE>\nstruct Serial {\n    T_STORAGE &_recv_buffer;\n\n    /**\n     * init - setup the USCI UART hardware for 9600-8-N-1\n     * P1.1 = RX PIN, P1.2 = TX PIN\n     */\n    inline void init() {\n        P1SEL = BIT1 + BIT2;        // P1.1=RXD, P1.2=TXD\n        P1SEL2 = BIT1 + BIT2;       // P1.1=RXD, P1.2=TXD\n\n        UCA0CTL1 |= UCSSEL_2;       // use SMCLK for USCI clock\n        UCA0BR0 = 130;              // 16MHz 9600\n        UCA0BR1 = 6;                // 16MHz 9600\n        UCA0MCTL = UCBRS1 + UCBRS0; // Modulation UCBRSx = 3\n        UCA0CTL1 &= ~UCSWRST;       // **Initialize USCI state machine**\n        IE2 |= UCA0RXIE;            // Enable USCI0RX_ISR interrupt\n    }\n\n    inline bool empty() {\n        return _recv_buffer.empty();\n    }\n\n    inline int recv() {\n        while(empty());\n        return _recv_buffer.pop_front();\n    }\n\n    void xmit(uint8_t c) {\n        while (!(IFG2 & UCA0TXIFG))\n            ; // USCI_A0 TX buffer ready?\n\n        UCA0TXBUF = (uint8_t) c; // TX -> RXed character\n    }\n\n    void xmit(const char *s) {\n        while (*s) {\n            xmit((uint8_t) *s);\n            ++s;\n        }\n    }\n\n};\n\n#endif /* HW_SERIAL_H */\n" }, { "alpha_fraction": 0.6988155841827393, "alphanum_fraction": 0.700507640838623, "avg_line_length": 28.549999237060547, "blob_id": "e7e93ddd733899ee10bc0446205b52d65a7b3322", "content_id": "b619d97973a177b382b12626bb86776b21b02086", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 591, "license_type": "no_license", "max_line_length": 95, "num_lines": 20, "path": "/firmware/debug.h", "repo_name": "amstan/floppy", "src_encoding": "UTF-8", "text": "#ifndef DEBUG_H\n#define DEBUG_H\n\n/*! @file debug.h\n * Provides various debugging functions to make programming easier\n * @param DEBUG flag, can either be defined inside here or in the makefile\n * @note All printing functions will automatically include \\\\n after them and fflush the stream\n * @code #ifdef DEBUG\n          Code that will only get executed while debugging\n        #endif\n */\n\n///Prints debug info using printf(to stdout) if DEBUG is defined\n#ifdef DEBUG\n\t#define debug(...) printf(__VA_ARGS__); printf(\"\\n\"); fflush(stdout);\n#else\n\t#define debug(...) 
(1);\n#endif\n\n#endif\n" }, { "alpha_fraction": 0.5865026116371155, "alphanum_fraction": 0.6319910287857056, "avg_line_length": 16.19230842590332, "blob_id": "cb648acb7d1dacac2ba183160fd303648aeb7f66", "content_id": "5d1525bb102a3b7b692261a69e7f6a7960078773", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2682, "license_type": "no_license", "max_line_length": 67, "num_lines": 156, "path": "/firmware/main.c", "repo_name": "amstan/floppy", "src_encoding": "UTF-8", "text": "#include <msp430.h>\n\n#include \"bitop.h\"\n#include \"debug.h\"\n\n#include <stdint.h>\n#include \"config.h\"\n#include \"ringbuffer.h\"\n#include \"usci_serial.h\"\n\n#define ALIGN 0\n#define STOP 1\n#define PLAY 2\n#define INSTR 3\n#define TOGGLE_DIR 4\n#define RESET 5\n\n#define INSTR_OSCILATE 0\n#define INSTR_VIOLIN 1\nunsigned char instr=INSTR_OSCILATE;\n\n#define POS_START 0\n#define POS_END 160\n#define POS_THRESHOLD 20\nunsigned int pos=0;\nunsigned char dir=0;\n\n#define PERIOD TACCR0\n\nringbuffer_ui8_16 usci_buffer = { 0, 0, { 0 } };\nSerial<ringbuffer_ui8_16> usci0 = { usci_buffer };\nvoid __attribute__((interrupt (USCIAB0RX_VECTOR))) USCI0RX_ISR() {\n\tusci_buffer.push_back(UCA0RXBUF);\n}\n\nvoid __delay_ms(unsigned int ms) {\n\tfor(;ms!=0;ms--) {\n\t\t__delay_cycles(16000);\n\t}\n}\n\nvoid chip_init(void) {\n\tWDTCTL = WDTPW + WDTHOLD; // Stop watchdog timer\n\tDCOCTL = CALDCO_16MHZ; // Load the clock calibration\n\tBCSCTL1 = CALBC1_16MHZ;\n}\n\nvoid io_init(void) {\n\t#define LED_R 0\n\t#define LED_G 6\n\t#define RUN 4\n\t#define DIR 5\n\t#define SW 3\n\t\n\tP1DIR=0b01110001;\n\tP1OUT=0b00000000;\n}\n\nvoid timer_init(unsigned int on) {\n\t//internal osc, 8xprescaler, up mode\n\tTACTL = TASSEL_2 + ID_3 + MC_1*(on!=0);\n\n\t//Enable Interrupts\n\tCCTL0 |= CCIE; //Timer\n\t_BIS_SR(GIE); //Global\n}\n\nvoid toggle_dir(void) {\n\tdir^=1;\n\ttoggle_bit(P1OUT,LED_R);\n}\n\nvoid floppy_init(unsigned int target_pos) {\n\ttimer_init(0);\n\t\n\t//move to the bottom\n\tset_bit(P1OUT,DIR);\n\tfor(unsigned char i=0;i<200;i++) {\n\t\ttoggle_bit(P1OUT,RUN);\n\t\t__delay_ms(10);\n\t}\n\t\n\t//move to target_pos\n\tclear_bit(P1OUT,DIR);\n\tfor(pos=0;pos<target_pos;pos++) {\n\t\ttoggle_bit(P1OUT,RUN);\n\t\t__delay_ms(10);\n\t}\n}\n\nvoid __attribute__((interrupt (TIMER0_A0_VECTOR))) timer_vector() {\n\ttoggle_bit(P1OUT,RUN);\n\t\n\tif(instr==INSTR_OSCILATE) {\n\t\tif(test_bit(P1OUT,RUN))\n\t\t\ttoggle_bit(P1OUT,DIR);\n\t} else {\n\t\t//instr==INSTR_VIOLIN\n\t\tchange_bit(P1OUT,DIR,dir);\n\t\t\n\t\tif(dir==0) {\n\t\t\tpos++;\n\t\t\tif(pos>(POS_END-POS_THRESHOLD)) toggle_dir();\n\t\t} else {\n\t\t\tpos--;\n\t\t\tif(pos<(POS_START+POS_THRESHOLD)) toggle_dir();\n\t\t}\n\t}\n}\n\nint main(void) {\n\tchip_init();\n\tio_init();\n\tPERIOD=0xFFFF;\n\t\n\tusci0.init();\n\t\n\tfloppy_init(POS_END/2);\n\tset_bit(P1OUT,LED_G);\n\t\n\twhile(1) {\n\t\tunsigned char d0, d1;\n\t\tunsigned char type = usci0.recv();\n\t\tswitch(type) {\n\t\t\tcase STOP:\n\t\t\t\ttimer_init(0);\n\t\t\t\tclear_bit(P1OUT,LED_G);\n\t\t\t\tbreak;\n\t\t\t\n\t\t\tcase PLAY:\n\t\t\t\td0=usci0.recv();\n\t\t\t\td1=usci0.recv();\n\t\t\t\tPERIOD=(d0<<8)|d1;\n\t\t\t\ttimer_init(1);\n\t\t\t\tset_bit(P1OUT,LED_G);\n\t\t\t\tbreak;\n\t\t\t\n\t\t\tcase INSTR:\n\t\t\t\tinstr=usci0.recv();\n\t\t\t\tbreak;\n\t\t\t\n\t\t\tcase TOGGLE_DIR:\n\t\t\t\ttoggle_dir();\n\t\t\t\tbreak;\n\t\t\t\n\t\t\tcase RESET:\n\t\t\t\tfloppy_init(POS_END/2);\n\t\t\t\tbreak;\n\t\t\t\n\t\t\tdefault:\n\t\t\tcase 
ALIGN:\n\t\t\t\tbreak;\n\t\t}\n\t\t__delay_ms(10);\n\t}\n}\n" }, { "alpha_fraction": 0.6229116916656494, "alphanum_fraction": 0.6521479487419128, "avg_line_length": 20.62580680847168, "blob_id": "1368034cbc1f785eded9a64a97ab18e52455c187", "content_id": "741cc2a04383bee4d16466a138af0458081cf39d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3352, "license_type": "no_license", "max_line_length": 118, "num_lines": 155, "path": "/interface/floppy.py", "repo_name": "amstan/floppy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\nfrom notes import *\n\nclass Floppy(object):\n\tFCPU=16000000\n\tTIMERCLOCK=FCPU\n\tTIMERCLOCK/=8 #prescaler\n\tTIMERCLOCK/=2 #still too big\n\t\n\t#Protcol message types\n\t_ALIGN=0\n\t_STOP=1\n\t_PLAY=2\n\t_INSTR=3\n\t_TOGGLE_DIR=4\n\t_RESET=5\n\t\n\tdef __init__(self,port=\"/dev/ttyACM0\",reset=True):\n\t\timport serial\n\t\tself.port=port\n\t\tself.serial=serial.Serial(port=self.port,baudrate=9600,timeout=1)\n\t\tself.serial.flush()\n\t\t\n\t\tif(reset):\n\t\t\tself.reset()\n\t\t\n\t\tself.instrument=0\n\t\n\tdef __repr__(self):\n\t\treturn \"Floppy(%r)\" % (self.port)\n\t\n\tdef sync(self,times=3):\n\t\t\"\"\"Syncs the serial protocol. WARNING: don't spam the line, new bytes are discarded by something for some reason.\"\"\"\n\t\tself.serial.write(bytearray((self._ALIGN,)*times))\n\t\tself.serial.flush()\n\t\tself.serial.write(bytearray((self._ALIGN,)))\n\t\n\tdef reset(self):\n\t\timport time\n\t\t\n\t\tself.sync()\n\t\ttime.sleep(0.1)\n\t\t\n\t\tprint \"Resetting %r\" % (self)\n\t\tself.serial.write(bytearray((self._RESET,)))\n\t\ttime.sleep(3)\n\t\t\n\t\tself.sync()\n\t\n\tdef stop(self):\n\t\tself.serial.write(bytearray((self._STOP,)))\n\t\tself._note=None\n\t\n\tdef play_period(self,period):\n\t\tperiod=int(period)\n\t\tif(period>2**16):\n\t\t\traise ValueError(\"Period(%d) for %r too big!\" % (period,self))\n\t\t\n\t\td0=(period&0xFF00)>>8\n\t\td1=period&0xFF\n\t\tdata=bytearray((self._PLAY,d0,d1))\n\t\tself.serial.write(data)\n\t\n\tdef play(self,note):\n\t\tif (note>128):\n\t\t\traise ValueError(\"%r cannot play that high! 
%r\" % (self,note))\n\t\t\n\t\tself._note=note\n\t\tself._period=note.period(self.TIMERCLOCK)\n\t\tprint \"%r - %d\" % (note,self._period)\n\t\t\n\t\tif self.instrument==\"violin\":\n\t\t\tself.toggle_dir()\n\t\t\n\t\tself.play_period(self._period)\n\t\n\tdef pitchbend(self,pitch):\n\t\t\"\"\"Pitchbends according to a midi pitchbend value.\"\"\"\n\t\tif pitch==0:\n\t\t\t#apparently this is a no bend somehow...\n\t\t\tpitch=0x4000\n\t\t\n\t\tpitch-=0x4000\n\t\tpitch*=1.0\n\t\tpitch/=0x18000\n\t\tpitch=2**pitch\n\t\t\n\t\tself.play_period(self._period/pitch)\n\t\tprint \"Pitchbending %s*%2.2f\" % (self._note,pitch)\n\t\n\tinstruments=[\"oscillate\",\"violin\"]\n\t\n\t@property\n\tdef instrument(self):\n\t\treturn self.instruments[self._instrument]\n\t\t\n\[email protected]\n\tdef instrument(self,value):\n\t\ttry:\n\t\t\tvalue=self.instruments.index(value)\n\t\texcept ValueError:\n\t\t\tif value not in range(len(self.instruments)):\n\t\t\t\traise ValueError(\"No such instrument(%r) exists.\" % value)\n\t\t\n\t\ttry:\n\t\t\tif self._instrument!=value:\n\t\t\t\tprint \"Setting instrument to %s.\" % (self.instrument)\n\t\texcept:\n\t\t\tpass\n\t\tfinally:\n\t\t\tself._instrument=value\n\t\t\n\t\tself.serial.write(bytearray((self._INSTR,self._instrument)))\n\t\n\tdef toggle_dir(self):\n\t\tself.serial.write(bytearray((self._TOGGLE_DIR,)))\n\t\n\tdef __enter__(self):\n\t\treturn self\n\t\n\tdef __exit__(self,type,value,traceback):\n\t\tprint \"Stopping %r\" % (self)\n\t\ttime.sleep(0.5)\n\t\tself.sync(6)\n\t\ttime.sleep(0.1)\n\t\tself.stop()\n\t\tself.serial.flush()\n\nif __name__==\"__main__\":\n\tPITCH_DEMO=False\n\t\n\timport time,sys\n\twith Floppy(port=sys.argv[1],reset=False) as floppy:\n\t\twhile 1:\n\t\t\tfor noteid in range(0,128):\n\t\t\t\ttry:\n\t\t\t\t\t#play next note\n\t\t\t\t\tnote=Note(noteid)\n\t\t\t\t\tfloppy.play(note)\n\t\t\t\t\t\n\t\t\t\t\t#wait for key\n\t\t\t\t\traw_input()\n\t\t\t\t\t\n\t\t\t\t\tif PITCH_DEMO:\n\t\t\t\t\t\tfor pitch in range(0x4000,0x6000,0x100):\n\t\t\t\t\t\t\tfloppy.pitchbend(pitch)\n\t\t\t\t\t\t\ttime.sleep(0.01)\n\t\t\t\t\t\ttime.sleep(0.2)\n\t\t\t\t\t\tfloppy.sync()\n\t\t\t\t\telse:\n\t\t\t\t\t\tfloppy.stop()\n\t\t\t\t\t\ttime.sleep(0.05)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint repr(e)\n\t\t\tfloppy.sync()\n" }, { "alpha_fraction": 0.7229008674621582, "alphanum_fraction": 0.7308087944984436, "avg_line_length": 35.255245208740234, "blob_id": "65fb2caa1a3aebd294d57ea3ff2ebf28ce99d820", "content_id": "ff3c4496051885fc77e3e0f4fe9344aa84ddcf49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31108, "license_type": "no_license", "max_line_length": 140, "num_lines": 858, "path": "/interface/jacklib.py", "repo_name": "amstan/floppy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Imports\nfrom ctypes import *\nfrom sys import platform, version_info\n\n# Test for python 3.x\nif (version_info >= (3,0)):\n PYTHON3 = True\nelse:\n PYTHON3 = False\n\n# Load JACK shared library\ntry:\n if (platform == 'win32' or platform == 'win64'):\n jacklib = cdll.LoadLibrary(\"libjack.dll\")\n else:\n jacklib = cdll.LoadLibrary(\"libjack.so.0\")\nexcept:\n jacklib = None\n\n# Defines\nMAX_FRAMES = 4294967295\nLOAD_INIT_LIMIT = 1024\n\nDEFAULT_AUDIO_TYPE = \"32 bit float mono audio\"\nDEFAULT_MIDI_TYPE = \"8 bit raw midi\"\n\n# Jack Options\nNullOption = 0x00\nNoStartServer = 0x01\nUseExactName = 0x02\nServerName = 0x04\nLoadName = 0x08\nLoadInit = 0x10\nSessionID = 
0x20\nOpenOptions = SessionID|ServerName|NoStartServer|UseExactName\nLoadOptions = LoadInit|LoadName|UseExactName\n\n# Jack Status\nFailure = 0x01\nInvalidOption = 0x02\nNameNotUnique = 0x04\nServerStarted = 0x08\nServerFailed = 0x10\nServerError = 0x20\nNoSuchClient = 0x40\nLoadFailure = 0x80\nInitFailure = 0x100\nShmFailure = 0x200\nVersionError = 0x400\nBackendError = 0x800\nClientZombie = 0x1000\n\n# Jack Latency Callback Mode\nCaptureLatency = 0 # FIXME\nPlaybackLatency = 1\n\n# Jack Port Flags\nPortIsInput = 0x1\nPortIsOutput = 0x2\nPortIsPhysical = 0x4\nPortCanMonitor = 0x8\nPortIsTerminal = 0x10\n\n# Transport states\nTransportStopped = 0\nTransportRolling = 1\nTransportLooping = 2\nTransportStarting = 3\nTransportNetStarting = 4\n\n# Optional struct jack_position_t fields\nPositionBBT = 0x10\nPositionTimecode = 0x20\nBBTFrameOffset = 0x40\nAudioVideoRatio = 0x80\nVideoFrameOffset = 0x100\nPOSITION_MASK = PositionBBT|PositionTimecode\n\n# Optional struct jack_position_bits_t/jack_transport_info_t fields\nTransportState = 0x1\nTransportPosition = 0x2\nTransportLoop = 0x4\nTransportSMPTE = 0x8\nTransportBBT = 0x10\n\n# ? Not in the API:\nAUDIO = 0\nMIDI = 1\n\n\n# Types\njack_shmsize_t = c_int32\njack_nframes_t = c_uint32\njack_time_t = c_uint64\njack_intclient_t = c_uint64\njack_port_t = c_void_p #_jack_port\njack_client_t = c_void_p #_jack_client\njack_port_id_t = c_uint32\njack_port_type_id_t = c_uint32\njack_default_audio_sample_t = c_float\njack_unique_t = c_uint64\njack_native_thread_t = c_long #HANDLE/pthread_t\njack_midi_data_t = c_char\n\n\n# TODO - Enums\njack_options_t = c_int\njack_status_t = c_int\njack_latency_callback_mode_t = c_int\njack_transport_state_t = c_int\njack_position_bits_t = c_int\njack_transport_bits_t = c_int\n\n\n# Structs\nclass jack_latency_range_t(Structure):\n _fields_ = [\n (\"min\", jack_nframes_t),\n (\"max\", jack_nframes_t)\n ]\n#jack_latency_range_t = _jack_latency_range_t()\n\nclass jack_position_t(Structure):\n _fields_ = [\n (\"unique_1\", jack_unique_t),\n (\"usecs\", jack_time_t),\n (\"frame_rate\", jack_nframes_t),\n (\"frame\", jack_nframes_t),\n (\"valid\", jack_position_bits_t),\n (\"bar\", c_int32),\n (\"beat\", c_int32),\n (\"tick\", c_int32),\n (\"bar_start_tick\", c_double),\n (\"beats_per_bar\", c_float),\n (\"beat_type\", c_float),\n (\"ticks_per_beat\", c_double),\n (\"beats_per_minute\", c_double),\n (\"frame_time\", c_double),\n (\"next_time\", c_double),\n (\"bbt_offset\", jack_nframes_t),\n (\"audio_frames_per_video_frame\", c_float),\n (\"video_offset\", jack_nframes_t),\n (\"padding\", ARRAY(c_int32, 7)),\n (\"unique_2\", jack_unique_t)\n ]\n#jack_position_t = _jack_position_t()\n\nclass jack_transport_info_t(Structure):\n _fields_ = [\n (\"frame_rate\", jack_nframes_t),\n (\"usecs\", jack_time_t),\n (\"valid\", jack_transport_bits_t),\n (\"transport_state\", jack_transport_state_t),\n (\"frame\", jack_nframes_t),\n (\"loop_start\", jack_nframes_t),\n (\"loop_end\", jack_nframes_t),\n (\"smpte_offset\", c_long),\n (\"smpte_frame_rate\", c_float),\n (\"bar\", c_int),\n (\"beat\", c_int),\n (\"tick\", c_int),\n (\"bar_start_tick\", c_double),\n (\"beats_per_bar\", c_float),\n (\"beat_type\", c_float),\n (\"ticks_per_beat\", c_double),\n (\"beats_per_minute\", c_double),\n ]\n#jack_transport_info_t = _jack_transport_info_t()\n\nclass jack_midi_event_t(Structure):\n _fields_ = [\n (\"time\", jack_nframes_t),\n (\"size\", c_size_t),\n (\"buffer\", c_char_p), #POINTER(jack_midi_data_t)\n ]\n#jack_midi_event_t = 
_jack_midi_event_t()\n\n\n# Special Callback defines\nglobal LatencyCallback, ProcessCallback, ThreadCallback, ThreadInitCallback, GraphOrderCallback, XRunCallback\nglobal BufferSizeCallback, SampleRateCallback, PortRegistrationCallback, ClientRegistrationCallback, PortConnectCallback, PortRenameCallback\nglobal FreewheelCallback, ShutdownCallback, InfoShutdownCallback, SyncCallback, TimebaseCallback, SessionCallback\n\n\n# Internal C char** -> Python list conversion\ndef __pointer_to_list(list_p):\n final_list = []\n i = 0\n\n if (not list_p):\n return final_list\n\n while (True):\n new_char_p = list_p[i]\n if (new_char_p):\n final_list.append(str(new_char_p))\n else:\n break\n i += 1\n\n jack_free(list_p)\n return final_list\n\n# External helper funtions\ndef translate_audio_port_buffer(void_p):\n return cast(void_p, POINTER(jack_default_audio_sample_t))\n\ndef translate_midi_event_buffer(void_p):\n if (not void_p):\n return list()\n\n if (len(void_p) == 1):\n if (PYTHON3):\n return (int(void_p),)\n else:\n return (ord(void_p),)\n\n elif (len(void_p) == 2):\n if (PYTHON3):\n return (int(void_p[0]), int(void_p[1]))\n else:\n return (ord(void_p[0]), ord(void_p[1]))\n\n elif (len(void_p) == 3):\n if (PYTHON3):\n return (int(void_p[0]), int(void_p[1]), int(void_p[2]))\n else:\n return (ord(void_p[0]), ord(void_p[1]), ord(void_p[2]))\n\n elif (len(void_p) == 4):\n if (PYTHON3):\n return (int(void_p[0]), int(void_p[1]), int(void_p[2]), int(void_p[3]))\n else:\n return (ord(void_p[0]), ord(void_p[1]), ord(void_p[2]), ord(void_p[3]))\n\n else:\n return list()\n\ndef encode_midi_data(d1, d2=None, d3=None, d4=None):\n if (d2 == None):\n return \"%s\" % (chr(d1))\n elif (d3 == None):\n return \"%s%s\" % (chr(d1), chr(d2))\n elif (d4 == None):\n return \"%s%s%s\" % (chr(d1), chr(d2), chr(d3))\n else:\n return \"%s%s%s%s\" % (chr(d1), chr(d2), chr(d3), chr(d4))\n\n\n# Functions\n\ndef get_version(): # FIXME - does not work!\n major_ptr = c_int(0)\n minor_ptr = c_int(0)\n micro_ptr = c_int(0)\n proto_ptr = c_int(0)\n jacklib.jack_get_version.argtypes = [POINTER(c_int), POINTER(c_int), POINTER(c_int), POINTER(c_int)]\n jacklib.jack_get_version.restype = None\n jacklib.jack_get_version(pointer(major_ptr), pointer(minor_ptr), pointer(micro_ptr), pointer(proto_ptr))\n return (major_ptr.value, minor_ptr.value, micro_ptr.value, proto_ptr.value)\n\ndef get_version_string():\n jacklib.jack_get_version_string.argtypes = None\n jacklib.jack_get_version_string.restype = c_char_p\n return jacklib.jack_get_version_string()\n\ndef client_open(client_name, options, status):\n if (PYTHON3): client_name = client_name.encode()\n if (options == None): options = 0\n if (status == None): status = 0\n jacklib.jack_client_open.argtypes = [c_char_p, c_int, c_int]\n jacklib.jack_client_open.restype = jack_client_t\n return jacklib.jack_client_open(client_name, options, status)\n\ndef client_new(client_name):\n if (PYTHON3): client_name = client_name.encode()\n jacklib.jack_client_new.argtypes = [c_char_p]\n jacklib.jack_client_new.restype = jack_client_t\n return jacklib.jack_client_new(client_name)\n\ndef client_close(client):\n jacklib.jack_client_close.argtypes = [jack_client_t]\n jacklib.jack_client_close.restype = c_int\n return jacklib.jack_client_close(client)\n\ndef client_name_size():\n jacklib.jack_client_name_size.argtypes = None\n jacklib.jack_client_name_size.restype = c_int\n return jacklib.jack_client_name_size()\n\ndef get_client_name(client):\n jacklib.jack_get_client_name.argtypes = [jack_client_t]\n 
jacklib.jack_get_client_name.restype = c_char_p\n    return jacklib.jack_get_client_name(client)\n\ndef internal_client_new(client_name, load_name, load_init):\n    if (PYTHON3): client_name = client_name.encode()\n    if (PYTHON3): load_name = load_name.encode()\n    if (PYTHON3): load_init = load_init.encode()\n    jacklib.jack_internal_client_new.argtypes = [c_char_p, c_char_p, c_char_p]\n    jacklib.jack_internal_client_new.restype = c_int\n    return jacklib.jack_internal_client_new(client_name, load_name, load_init)\n\ndef internal_client_close(client_name):\n    if (PYTHON3): client_name = client_name.encode()\n    jacklib.jack_internal_client_close.argtypes = [c_char_p]\n    jacklib.jack_internal_client_close.restype = None\n    return jacklib.jack_internal_client_close(client_name)\n\ndef activate(client):\n    jacklib.jack_activate.argtypes = [jack_client_t]\n    jacklib.jack_activate.restype = c_int\n    return jacklib.jack_activate(client)\n\ndef deactivate(client):\n    jacklib.jack_deactivate.argtypes = [jack_client_t]\n    jacklib.jack_deactivate.restype = c_int\n    return jacklib.jack_deactivate(client)\n\ndef get_client_pid(name):\n    if (PYTHON3): name = name.encode()\n    jacklib.jack_get_client_pid.argtypes = [c_char_p]\n    jacklib.jack_get_client_pid.restype = c_int\n    return jacklib.jack_get_client_pid(name)\n\ndef client_thread_id(client):\n    jacklib.jack_client_thread_id.argtypes = [jack_client_t]\n    jacklib.jack_client_thread_id.restype = jack_native_thread_t\n    return jacklib.jack_client_thread_id(client)\n\ndef is_realtime(client):\n    jacklib.jack_is_realtime.argtypes = [jack_client_t]\n    jacklib.jack_is_realtime.restype = c_int\n    return jacklib.jack_is_realtime(client)\n\n\n# Non Callback API\n\ndef thread_wait(client, status):\n    jacklib.jack_thread_wait.argtypes = [jack_client_t, c_int]\n    jacklib.jack_thread_wait.restype = jack_nframes_t\n    return jacklib.jack_thread_wait(client, status)\n\ndef cycle_wait(client):\n    jacklib.jack_cycle_wait.argtypes = [jack_client_t]\n    jacklib.jack_cycle_wait.restype = jack_nframes_t\n    return jacklib.jack_cycle_wait(client)\n\ndef cycle_signal(client, status):\n    jacklib.jack_cycle_signal.argtypes = [jack_client_t, c_int]\n    jacklib.jack_cycle_signal.restype = None\n    return jacklib.jack_cycle_signal(client, status)\n\ndef set_process_thread(client, thread_callback, arg=None):\n    global ThreadCallback\n    ThreadCallback = CFUNCTYPE(c_int, c_void_p)(thread_callback)\n    jacklib.jack_set_process_thread.restype = c_int\n    return jacklib.jack_set_process_thread(client, ThreadCallback, arg)\n\n\n# Client Callbacks\n\ndef set_thread_init_callback(client, thread_init_callback, arg=None):\n    global ThreadInitCallback\n    ThreadInitCallback = CFUNCTYPE(c_int, c_void_p)(thread_init_callback)\n    jacklib.jack_set_thread_init_callback.restype = c_int\n    return jacklib.jack_set_thread_init_callback(client, ThreadInitCallback, arg)\n\ndef on_shutdown(client, shutdown_callback, arg=None):\n    global ShutdownCallback\n    ShutdownCallback = CFUNCTYPE(c_int, c_void_p)(shutdown_callback)\n    jacklib.jack_on_shutdown(client, ShutdownCallback, arg)\n\ndef on_info_shutdown(client, shutdown_callback, arg=None):\n    global InfoShutdownCallback\n    InfoShutdownCallback = CFUNCTYPE(c_int, c_int, c_char_p, c_void_p)(shutdown_callback)\n    jacklib.jack_on_info_shutdown(client, InfoShutdownCallback, arg)\n\ndef set_process_callback(client, process_callback, arg=None):\n    global ProcessCallback\n    ProcessCallback = CFUNCTYPE(c_int, c_int, c_void_p)(process_callback)\n    jacklib.jack_set_process_callback.restype = c_int\n    return jacklib.jack_set_process_callback(client, 
ProcessCallback, arg)\n\ndef set_freewheel_callback(client, freewheel_callback, arg=None):\n global FreewheelCallback\n FreewheelCallback = CFUNCTYPE(c_int, c_int, c_void_p)(freewheel_callback)\n jacklib.jack_set_freewheel_callback.restype = c_int\n return jacklib.jack_set_freewheel_callback(client, FreewheelCallback, arg)\n\ndef set_buffer_size_callback(client, buffer_size_callback, arg=None):\n global BufferSizeCallback\n BufferSizeCallback = CFUNCTYPE(c_int, c_int, c_void_p)(buffer_size_callback)\n jacklib.jack_set_buffer_size_callback.restype = c_int\n return jacklib.jack_set_buffer_size_callback(client, BufferSizeCallback, arg)\n\ndef set_sample_rate_callback(client, srate_callback, arg=None):\n global SampleRateCallback\n SampleRateCallback = CFUNCTYPE(c_int, c_int, c_void_p)(srate_callback)\n jacklib.jack_set_sample_rate_callback.restype = c_int\n return jacklib.jack_set_sample_rate_callback(client, SampleRateCallback, arg)\n\ndef set_client_registration_callback(client, registration_callback, arg=None):\n global ClientRegistrationCallback\n ClientRegistrationCallback = CFUNCTYPE(c_int, c_char_p, c_int, c_void_p)(registration_callback)\n jacklib.jack_set_client_registration_callback.restype = c_int\n return jacklib.jack_set_client_registration_callback(client, ClientRegistrationCallback, arg)\n\ndef set_port_registration_callback(client, registration_callback, arg=None):\n global PortRegistrationCallback\n PortRegistrationCallback = CFUNCTYPE(c_int, c_int, c_int, c_void_p)(registration_callback)\n jacklib.jack_set_port_registration_callback.restype = c_int\n return jacklib.jack_set_port_registration_callback(client, PortRegistrationCallback, arg)\n\ndef set_port_connect_callback(client, connect_callback, arg=None):\n global PortConnectCallback\n PortConnectCallback = CFUNCTYPE(c_int, c_int, c_int, c_int, c_void_p)(connect_callback)\n jacklib.jack_set_port_connect_callback.restype = c_int\n return jacklib.jack_set_port_connect_callback(client, PortConnectCallback, arg)\n\ndef set_port_rename_callback(client, rename_callback, arg=None):\n global PortRenameCallback\n PortRenameCallback = CFUNCTYPE(c_int, c_int, c_char_p, c_char_p, c_void_p)(rename_callback)\n jacklib.jack_set_port_rename_callback.restype = c_int\n return jacklib.jack_set_port_rename_callback(client, PortRenameCallback, arg)\n\ndef set_graph_order_callback(client, graph_callback, arg=None):\n global GraphOrderCallback\n GraphOrderCallback = CFUNCTYPE(c_int, c_void_p)(graph_callback)\n jacklib.jack_set_graph_order_callback.restype = c_int\n return jacklib.jack_set_graph_order_callback(client, GraphOrderCallback, arg)\n\ndef set_xrun_callback(client, xrun_callback, arg=None):\n global XRunCallback\n XRunCallback = CFUNCTYPE(c_int, c_void_p)(xrun_callback)\n jacklib.jack_set_xrun_callback.restype = c_int\n return jacklib.jack_set_xrun_callback(client, XRunCallback, arg)\n\ndef set_latency_callback(client, latency_callback, arg=None):\n global LatencyCallback\n LatencyCallback = CFUNCTYPE(c_int, c_void_p)(latency_callback)\n jacklib.jack_set_latency_callback.restype = c_int\n return jacklib.jack_set_latency_callback(client, LatencyCallback, arg)\n\n\n# Server Client Control\n\ndef set_freewheel(client, onoff):\n jacklib.jack_set_freewheel.argtypes = [jack_client_t, c_int]\n jacklib.jack_set_freewheel.restype = c_int\n return jacklib.jack_set_freewheel(client, onoff)\n\ndef set_buffer_size(client, nframes):\n jacklib.jack_set_buffer_size.argtypes = [jack_client_t, jack_nframes_t]\n jacklib.jack_set_buffer_size.restype = 
c_int\n return jacklib.jack_set_buffer_size(client, nframes)\n\ndef get_sample_rate(client):\n jacklib.jack_get_sample_rate.argtypes = [jack_client_t]\n jacklib.jack_get_sample_rate.restype = jack_nframes_t\n return jacklib.jack_get_sample_rate(client)\n\ndef get_buffer_size(client):\n jacklib.jack_get_buffer_size.argtypes = [jack_client_t]\n jacklib.jack_get_buffer_size.restype = jack_nframes_t\n return jacklib.jack_get_buffer_size(client)\n\ndef engine_takeover_timebase(client):\n jacklib.jack_engine_takeover_timebase.argtypes = [jack_client_t]\n jacklib.jack_engine_takeover_timebase.restype = c_int\n return jacklib.jack_engine_takeover_timebase(client)\n\ndef cpu_load(client):\n jacklib.jack_cpu_load.argtypes = [jack_client_t]\n jacklib.jack_cpu_load.restype = c_float\n return jacklib.jack_cpu_load(client)\n\n\n# Port Functions\n\ndef port_register(client, port_name, port_type, flags, buffer_size):\n if (PYTHON3): port_name = port_name.encode()\n if (PYTHON3): port_type = port_type.encode()\n jacklib.jack_port_register.argtypes = [jack_client_t, c_char_p, c_char_p, c_ulong, c_ulong]\n jacklib.jack_port_register.restype = jack_port_t\n return jacklib.jack_port_register(client, port_name, port_type, flags, buffer_size)\n\ndef port_unregister(client, port):\n jacklib.jack_port_unregister.argtypes = [jack_client_t, jack_port_t]\n jacklib.jack_port_unregister.restype = c_int\n return jacklib.jack_port_unregister(client, port)\n\ndef port_get_buffer(port, nframes):\n jacklib.jack_port_get_buffer.argtypes = [jack_port_t, jack_nframes_t]\n jacklib.jack_port_get_buffer.restype = c_void_p\n return jacklib.jack_port_get_buffer(port, nframes)\n\ndef port_name(port):\n jacklib.jack_port_name.argtypes = [jack_port_t]\n jacklib.jack_port_name.restype = c_char_p\n return jacklib.jack_port_name(port)\n\ndef port_short_name(port):\n jacklib.jack_port_short_name.argtypes = [jack_port_t]\n jacklib.jack_port_short_name.restype = c_char_p\n return jacklib.jack_port_short_name(port)\n\ndef port_flags(port):\n jacklib.jack_port_flags.argtypes = [jack_port_t]\n jacklib.jack_port_flags.restype = c_int\n return jacklib.jack_port_flags(port)\n\ndef port_type(port):\n jacklib.jack_port_type.argtypes = [jack_port_t]\n jacklib.jack_port_type.restype = c_char_p\n return jacklib.jack_port_type(port)\n\ndef port_type_id(port):\n jacklib.jack_port_type_id.argtypes = [jack_port_t]\n jacklib.jack_port_type_id.restype = jack_port_type_id_t\n return jacklib.jack_port_type_id(port)\n\ndef port_is_mine(client, port):\n jacklib.jack_port_is_mine.argtypes = [jack_client_t, jack_port_t]\n jacklib.jack_port_is_mine.restype = c_int\n return jacklib.jack_port_is_mine(client, port)\n\ndef port_connected(port):\n jacklib.jack_port_connected.argtypes = [jack_port_t]\n jacklib.jack_port_connected.restype = c_int\n return jacklib.jack_port_connected(port)\n\ndef port_connected_to(port, port_name):\n if (PYTHON3): port_name = port_name.encode()\n jacklib.jack_port_connected_to.argtypes = [jack_port_t, c_char_p]\n jacklib.jack_port_connected_to.restype = c_int\n return jacklib.jack_port_connected_to(port, port_name)\n\ndef port_get_connections(port):\n jacklib.jack_port_get_connections.argtypes = [jack_port_t]\n jacklib.jack_port_get_connections.restype = POINTER(c_char_p)\n list_p = jacklib.jack_port_get_connections(port)\n return __pointer_to_list(list_p)\n\ndef port_get_all_connections(client, port):\n jacklib.jack_port_get_all_connections.argtypes = [jack_client_t, jack_port_t]\n jacklib.jack_port_get_all_connections.restype = 
POINTER(c_char_p)\n list_p = jacklib.jack_port_get_all_connections(client, port)\n return __pointer_to_list(list_p)\n\ndef port_tie(src, dst):\n jacklib.jack_port_tie.argtypes = [jack_port_t, jack_port_t]\n jacklib.jack_port_tie.restype = c_int\n return jacklib.jack_port_tie(src, dst)\n\ndef port_untie(port):\n jacklib.jack_port_untie.argtypes = [jack_port_t]\n jacklib.jack_port_untie.restype = c_int\n return jacklib.jack_port_untie(port)\n\ndef port_set_name(port, port_name):\n if (PYTHON3): port_name = port_name.encode()\n jacklib.jack_port_set_name.argtypes = [jack_port_t, c_char_p]\n jacklib.jack_port_set_name.restype = c_int\n return jacklib.jack_port_set_name(port, port_name)\n\ndef port_set_alias(port, alias):\n if (PYTHON3): alias = alias.encode()\n jacklib.jack_port_set_alias.argtypes = [jack_port_t, c_char_p]\n jacklib.jack_port_set_alias.restype = c_int\n return jacklib.jack_port_set_alias(port, alias)\n\ndef port_unset_alias(port, alias):\n if (PYTHON3): alias = alias.encode()\n jacklib.jack_port_unset_alias.argtypes = [jack_port_t, c_char_p]\n jacklib.jack_port_unset_alias.restype = c_int\n return jacklib.jack_port_unset_alias(port, alias)\n\ndef port_get_aliases(port):\n # NOTE - this function has no 2nd argument in jacklib\n # Instead, aliases will be passed in return value, in form of (int ret, str alias1, str alias2)\n name_size = port_name_size()\n alias_type = c_char_p*2\n aliases = alias_type(\" \"*name_size, \" \"*name_size)\n\n jacklib.jack_port_get_aliases.argtypes = [jack_port_t, POINTER(ARRAY(c_char_p, 2))]\n jacklib.jack_port_get_aliases.restype = c_int\n\n ret = jacklib.jack_port_get_aliases(port, pointer(aliases))\n return (ret, aliases[0], aliases[1])\n\ndef port_request_monitor(port, onoff):\n jacklib.jack_port_request_monitor.argtypes = [jack_port_t, c_int]\n jacklib.jack_port_request_monitor.restype = c_int\n return jacklib.jack_port_request_monitor(port, onoff)\n\ndef port_request_monitor_by_name(client, port_name, onoff):\n if (PYTHON3): port_name = port_name.encode()\n jacklib.jack_port_request_monitor_by_name.argtypes = [jack_client_t, c_char_p, c_int]\n jacklib.jack_port_request_monitor_by_name.restype = c_int\n return jacklib.jack_port_request_monitor_by_name(client, port_name, onoff)\n\ndef port_ensure_monitor(port, onoff):\n jacklib.jack_port_ensure_monitor.argtypes = [jack_port_t, c_int]\n jacklib.jack_port_ensure_monitor.restype = c_int\n return jacklib.jack_port_ensure_monitor(port, onoff)\n\ndef port_monitoring_input(port):\n jacklib.jack_port_monitoring_input.argtypes = [jack_port_t]\n jacklib.jack_port_monitoring_input.restype = c_int\n return jacklib.jack_port_monitoring_input(port)\n\ndef connect(client, source_port, destination_port):\n if (PYTHON3): source_port = source_port.encode()\n if (PYTHON3): destination_port = destination_port.encode()\n jacklib.jack_connect.argtypes = [jack_client_t, c_char_p, c_char_p]\n jacklib.jack_connect.restype = c_int\n return jacklib.jack_connect(client, source_port, destination_port)\n\ndef disconnect(client, source_port, destination_port):\n if (PYTHON3): source_port = source_port.encode()\n if (PYTHON3): destination_port = destination_port.encode()\n jacklib.jack_disconnect.argtypes = [jack_client_t, c_char_p, c_char_p]\n jacklib.jack_disconnect.restype = c_int\n return jacklib.jack_disconnect(client, source_port, destination_port)\n\ndef port_disconnect(client, port):\n jacklib.jack_port_disconnect.argtypes = [jack_client_t, jack_port_t]\n jacklib.jack_port_disconnect.restype = c_int\n return 
jacklib.jack_port_disconnect(client, port)\n\ndef port_name_size():\n    jacklib.jack_port_name_size.argtypes = None\n    jacklib.jack_port_name_size.restype = c_int\n    return jacklib.jack_port_name_size()\n\ndef port_type_size():\n    jacklib.jack_port_type_size.argtypes = None\n    jacklib.jack_port_type_size.restype = c_int\n    return jacklib.jack_port_type_size()\n\ndef port_type_get_buffer_size(client, port_type):\n    if (PYTHON3): port_type = port_type.encode()\n    jacklib.jack_port_type_get_buffer_size.argtypes = [jack_client_t, c_char_p]\n    jacklib.jack_port_type_get_buffer_size.restype = c_size_t\n    return jacklib.jack_port_type_get_buffer_size(client, port_type)\n\n\n# Latency Functions\n\ndef port_set_latency(port, nframes):\n    jacklib.jack_port_set_latency.argtypes = [jack_port_t, jack_nframes_t]\n    jacklib.jack_port_set_latency.restype = None\n    jacklib.jack_port_set_latency(port, nframes)\n\ndef port_get_latency_range(port, mode, range_):\n    jacklib.jack_port_get_latency_range.argtypes = [jack_port_t, jack_latency_callback_mode_t, POINTER(jack_latency_range_t)]\n    jacklib.jack_port_get_latency_range.restype = None\n    jacklib.jack_port_get_latency_range(port, mode, range_)\n\ndef port_set_latency_range(port, mode, range_):\n    jacklib.jack_port_set_latency_range.argtypes = [jack_port_t, jack_latency_callback_mode_t, POINTER(jack_latency_range_t)]\n    jacklib.jack_port_set_latency_range.restype = None\n    jacklib.jack_port_set_latency_range(port, mode, range_)\n\ndef recompute_total_latencies(client):\n    jacklib.jack_recompute_total_latencies.argtypes = [jack_client_t]\n    jacklib.jack_recompute_total_latencies.restype = c_int\n    return jacklib.jack_recompute_total_latencies(client)\n\ndef port_get_latency(port):\n    jacklib.jack_port_get_latency.argtypes = [jack_port_t]\n    jacklib.jack_port_get_latency.restype = jack_nframes_t\n    return jacklib.jack_port_get_latency(port)\n\ndef port_get_total_latency(client, port):\n    jacklib.jack_port_get_total_latency.argtypes = [jack_client_t, jack_port_t]\n    jacklib.jack_port_get_total_latency.restype = jack_nframes_t\n    return jacklib.jack_port_get_total_latency(client, port)\n\ndef recompute_total_latency(client, port):\n    jacklib.jack_recompute_total_latency.argtypes = [jack_client_t, jack_port_t]\n    jacklib.jack_recompute_total_latency.restype = c_int\n    return jacklib.jack_recompute_total_latency(client, port)\n\n\n# Port Searching\n\ndef get_ports(client, port_name_pattern, type_name_pattern, flags):\n    if (PYTHON3 and port_name_pattern): port_name_pattern = port_name_pattern.encode()\n    if (PYTHON3 and type_name_pattern): type_name_pattern = type_name_pattern.encode()\n    jacklib.jack_get_ports.argtypes = [jack_client_t, c_char_p, c_char_p, c_ulong]\n    jacklib.jack_get_ports.restype = POINTER(c_char_p)\n    list_p = jacklib.jack_get_ports(client, port_name_pattern, type_name_pattern, flags)\n    return __pointer_to_list(list_p)\n\ndef port_by_name(client, port_name):\n    if (PYTHON3): port_name = port_name.encode()\n    jacklib.jack_port_by_name.argtypes = [jack_client_t, c_char_p]\n    jacklib.jack_port_by_name.restype = jack_port_t\n    return jacklib.jack_port_by_name(client, port_name)\n\ndef port_by_id(client, port_id):\n    jacklib.jack_port_by_id.argtypes = [jack_client_t, jack_port_id_t]\n    jacklib.jack_port_by_id.restype = jack_port_t\n    return jacklib.jack_port_by_id(client, port_id)\n\n\n# Time Functions\n\ndef frames_since_cycle_start(client):\n    jacklib.jack_frames_since_cycle_start.argtypes = [jack_client_t]\n    jacklib.jack_frames_since_cycle_start.restype = jack_nframes_t\n    return 
jacklib.jack_frames_since_cycle_start(client)\n\ndef frame_time(client):\n    jacklib.jack_frame_time.argtypes = [jack_client_t]\n    jacklib.jack_frame_time.restype = jack_nframes_t\n    return jacklib.jack_frame_time(client)\n\ndef last_frame_time(client):\n    jacklib.jack_last_frame_time.argtypes = [jack_client_t]\n    jacklib.jack_last_frame_time.restype = jack_nframes_t\n    return jacklib.jack_last_frame_time(client)\n\ndef frames_to_time(client, nframes):\n    jacklib.jack_frames_to_time.argtypes = [jack_client_t, jack_nframes_t]\n    jacklib.jack_frames_to_time.restype = jack_time_t\n    return jacklib.jack_frames_to_time(client, nframes)\n\ndef time_to_frames(client, time):\n    jacklib.jack_time_to_frames.argtypes = [jack_client_t, jack_time_t]\n    jacklib.jack_time_to_frames.restype = jack_nframes_t\n    return jacklib.jack_time_to_frames(client, time)\n\ndef get_time():\n    jacklib.jack_get_time.argtypes = None\n    jacklib.jack_get_time.restype = jack_time_t\n    return jacklib.jack_get_time()\n\n\n# Error Output\n# TODO\n\n\n# Transport\n\ndef release_timebase(client):\n    jacklib.jack_release_timebase.argtypes = [jack_client_t]\n    jacklib.jack_release_timebase.restype = c_int\n    return jacklib.jack_release_timebase(client)\n\ndef set_sync_callback(client, sync_callback, arg=None):\n    global SyncCallback\n    SyncCallback = CFUNCTYPE(c_int, c_int, POINTER(jack_position_t), c_void_p)(sync_callback)\n    jacklib.jack_set_sync_callback.restype = c_int\n    return jacklib.jack_set_sync_callback(client, SyncCallback, arg)\n\ndef set_sync_timeout(client, timeout):\n    jacklib.jack_set_sync_timeout.argtypes = [jack_client_t, jack_time_t]\n    jacklib.jack_set_sync_timeout.restype = c_int\n    return jacklib.jack_set_sync_timeout(client, timeout)\n\ndef set_timebase_callback(client, conditional, timebase_callback, arg=None):\n    global TimebaseCallback\n    TimebaseCallback = CFUNCTYPE(c_int, c_int, c_int, POINTER(jack_position_t), c_int, c_void_p)(timebase_callback)\n    jacklib.jack_set_timebase_callback.restype = c_int\n    return jacklib.jack_set_timebase_callback(client, conditional, TimebaseCallback, arg)\n\ndef transport_locate(client, frame):\n    jacklib.jack_transport_locate.argtypes = [jack_client_t, jack_nframes_t]\n    jacklib.jack_transport_locate.restype = c_int\n    return jacklib.jack_transport_locate(client, frame)\n\ndef transport_query(client, pos=None):\n    jacklib.jack_transport_query.restype = c_int\n    if (pos != None):\n        jacklib.jack_transport_query.argtypes = [jack_client_t, POINTER(jack_position_t)]\n        return jacklib.jack_transport_query(client, pointer(pos))\n    else:\n        return jacklib.jack_transport_query(client, None)\n\ndef get_current_transport_frame(client):\n    jacklib.jack_get_current_transport_frame.argtypes = [jack_client_t]\n    jacklib.jack_get_current_transport_frame.restype = jack_nframes_t\n    return jacklib.jack_get_current_transport_frame(client)\n\ndef transport_reposition(client, pos):\n    jacklib.jack_transport_reposition.argtypes = [jack_client_t, POINTER(jack_position_t)]\n    jacklib.jack_transport_reposition.restype = c_int\n    return jacklib.jack_transport_reposition(client, pointer(pos))\n\ndef transport_start(client):\n    jacklib.jack_transport_start.argtypes = [jack_client_t]\n    jacklib.jack_transport_start.restype = None\n    return jacklib.jack_transport_start(client)\n\ndef transport_stop(client):\n    jacklib.jack_transport_stop.argtypes = [jack_client_t]\n    jacklib.jack_transport_stop.restype = None\n    return jacklib.jack_transport_stop(client)\n\ndef get_transport_info(client, tinfo):\n    jacklib.jack_get_transport_info.argtypes = 
[jack_client_t, POINTER(jack_transport_info_t)]\n jacklib.jack_get_transport_info.restype = None\n return jacklib.jack_get_transport_info(client, pointer(tinfo))\n\ndef set_transport_info(client, tinfo):\n jacklib.jack_set_transport_info.argtypes = [jack_client_t, POINTER(jack_transport_info_t)]\n jacklib.jack_set_transport_info.restype = None\n return jacklib.jack_set_transport_info(client, pointer(tinfo))\n\n\n# MIDI\n\ndef midi_get_event_count(port_buffer):\n jacklib.jack_midi_get_event_count.argtypes = [c_void_p]\n jacklib.jack_midi_get_event_count.restype = jack_nframes_t\n return jacklib.jack_midi_get_event_count(port_buffer)\n\ndef midi_event_get(event, port_buffer, event_index):\n jacklib.jack_midi_event_get.argtypes = [POINTER(jack_midi_event_t), c_void_p, jack_nframes_t]\n jacklib.jack_midi_event_get.restype = c_int\n return jacklib.jack_midi_event_get(event, port_buffer, event_index)\n\ndef midi_clear_buffer(port_buffer):\n jacklib.jack_midi_clear_buffer.argtypes = [c_void_p]\n jacklib.jack_midi_clear_buffer.restype = None\n return jacklib.jack_midi_clear_buffer(port_buffer)\n\ndef midi_max_event_size(port_buffer):\n jacklib.jack_midi_max_event_size.argtypes = [c_void_p]\n jacklib.jack_midi_max_event_size.restype = c_size_t\n return jacklib.jack_midi_max_event_size(port_buffer)\n\ndef midi_event_reserve(port_buffer, time, data_size):\n jacklib.jack_midi_event_reserve.argtypes = [c_void_p, jack_nframes_t, c_size_t]\n jacklib.jack_midi_event_reserve.restype = c_char_p #POINTER(jack_midi_event_t)\n return jacklib.jack_midi_event_reserve(port_buffer, time, data_size)\n\ndef midi_event_write(port_buffer, time, data, data_size):\n jacklib.jack_midi_event_write.argtypes = [c_void_p, jack_nframes_t, c_char_p, c_size_t] #POINTER(jack_midi_event_t)\n jacklib.jack_midi_event_write.restype = c_int\n return jacklib.jack_midi_event_write(port_buffer, time, data, data_size)\n\ndef midi_get_lost_event_count(port_buffer):\n jacklib.jack_midi_get_lost_event_count.argtypes = [c_void_p]\n jacklib.jack_midi_get_lost_event_count.restype = jack_nframes_t\n return jacklib.jack_midi_get_lost_event_count(port_buffer)\n\n\ndef jack_free(ptr):\n jacklib.jack_free.argtypes = [c_void_p]\n jacklib.jack_free.restype = None\n return jacklib.jack_free(ptr)\n\n" }, { "alpha_fraction": 0.6483705043792725, "alphanum_fraction": 0.6723842024803162, "avg_line_length": 22.34000015258789, "blob_id": "348987913f369aa7c6f0c528771ba5fae4f757e0", "content_id": "923f14b35f908f4bdbca0c9f54d6b3fbc9ea0b18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1166, "license_type": "no_license", "max_line_length": 162, "num_lines": 50, "path": "/interface/notes.py", "repo_name": "amstan/floppy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\nimport math\n\noctave=\"C C# D D# E F F# G G# A A# B\".split()\n\nclass Note(object):\n\tdef __init__(self,noteid):\n\t\tself.noteid=noteid\n\t\n\tdef __repr__(self):\n\t\treturn \"Note(%s)<%s>\" % (self.noteid, self)\n\t\n\tdef __str__(self):\n\t\treturn \"%s%s\" % (octave[self.noteid%12],self.noteid/12-1)\n\t\n\tdef __add__(self,other):\n\t\ttry:\n\t\t\tothernoteid=other.noteid\n\t\texcept:\n\t\t\tothernoteid=other\n\t\treturn Note(self.noteid+othernoteid)\n\t\n\tdef __cmp__(self,other):\n\t\ttry:\n\t\t\tothernoteid=other.noteid\n\t\texcept:\n\t\t\tothernoteid=other\n\t\treturn cmp(self.noteid,othernoteid)\n\t\n\tdef 
__sub__(self,other):\n\t\ttry:\n\t\t\tothernoteid=other.noteid\n\t\texcept:\n\t\t\tothernoteid=other\n\t\treturn Note(self.noteid-othernoteid)\n\t\n\t@property\n\tdef frequency(self):\n\t\treturn math.pow(2,(self.noteid-69.0)/12)*440\n\t\n\tdef period(self,ClockFreq=1000000):\n\t\t\"\"\"Returns how many clock cycles for a specific clock frequency. Frequency is automatically divided by 2 because this is for the step, not for the direction.\"\"\"\n\t\tClockFreq/=2.0\n\t\t\n\t\treturn ClockFreq/self.frequency\n\nif __name__==\"__main__\":\n\tfor i in range(128):\n\t\tnote=Note(i)\n\t\tprint \"%d\\t%r\\t%f\\t%d\" % (i,note,note.frequency,note.period())" }, { "alpha_fraction": 0.6590163707733154, "alphanum_fraction": 0.6934426426887512, "avg_line_length": 24.41666603088379, "blob_id": "47c1b7d0e80ef367ab2587776912e9ac04e6f3f6", "content_id": "212802c3f5a29cfaffcb4e049184428239253f01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 610, "license_type": "no_license", "max_line_length": 66, "num_lines": 24, "path": "/firmware/config.h", "repo_name": "amstan/floppy", "src_encoding": "UTF-8", "text": "/**\n * config.h - configure baud rates, MCLK frequency\n *\n * License: Do with this code what you want. However, don't blame\n * me if you connect it to a heart pump and it stops. This source\n * is provided as is with no warranties. It probably has bugs!!\n * You have been warned!\n *\n * Author: Rick Kimball\n * email: [email protected]\n * Version: 1.00 Initial version 05-12-2011\n */\n\n#ifndef CONFIG_H_\n#define CONFIG_H_\n\n#define F_CPU 16000000 // use calibrated 16MHZ clock\n\n#ifdef __MSPGCC__\n#define _enable_interrupts() __bis_status_register(GIE)\n#define _disable_interrupts() __bic_status_register(GIE)\n#endif\n\n#endif\n" }, { "alpha_fraction": 0.5710872411727905, "alphanum_fraction": 0.6033452749252319, "avg_line_length": 15.115385055541992, "blob_id": "2fd6458879972fc3a6ade61d6624812135dda800", "content_id": "20fa8fd1c53bb81d2368a67dc230d24e17dd8da7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 837, "license_type": "no_license", "max_line_length": 45, "num_lines": 52, "path": "/firmware/Makefile", "repo_name": "amstan/floppy", "src_encoding": "UTF-8", "text": "# Makefile created by gnomad\n\nPROG = main\nCC = msp430-gcc\nCXX = msp430-g++\nOBJDUMP = msp430-objdump\nSIZE = msp430-size\nMSPDEBUG = mspdebug\nCFLAGS = -O0 -Wall -mmcu=msp430g2553\nFET = rf2500\nGDB = msp430-gdb\nGDBTUI = $(GDB)tui\n\n# Uncomment the following to enable debugging\n#CFLAGS += -g -DDEBUG\n\nOBJS=$(PROG).o\n\nall: $(PROG).elf $(PROG).lst\n\t$(SIZE) $(PROG).elf\n\n.PHONY: all\n\n$(PROG).elf: $(OBJS)\n\t$(CC) $(CFLAGS) -o $(PROG).elf $(OBJS)\n\n%.o: %.c\n\t$(CXX) $(CFLAGS) -c $<\n\n%.lst: %.elf\n\t$(OBJDUMP) -DS $< >$@\n\nclean:\n\trm -fr $(PROG).elf $(PROG).lst $(OBJS)\n\ninstall: $(PROG).elf\n\t$(MSPDEBUG) $(FET) \"prog $(PROG).elf\"\n\nmspdebug: $(PROG).elf\n\t$(MSPDEBUG) $(FET)\n\ndebug: $(PROG).elf\n\t$(MSPDEBUG) $(FET) gdb\n\ngdb: $(PROG).elf\n\t$(GDB) $(PROG).elf\n \ntui: $(PROG).elf\n\t$(GDBTUI) $(PROG).elf\n \nddd: $(PROG).elf\n\tddd --debugger $(GDB) $(PROG).elf" } ]
10
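A quick check of the equal-temperament maths in the interface/notes.py record above: frequency is 440 * 2^((noteid - 69) / 12), and period() halves the clock first because, per its docstring, the count drives the step line rather than the direction line. A minimal Python 3 sketch (the function names here are illustrative rewrites, not the repo's API):

```python
import math

def note_frequency(noteid):
    # equal temperament with A4 (MIDI note 69) at 440 Hz, as in notes.py
    return math.pow(2, (noteid - 69.0) / 12) * 440

def note_period(noteid, clock_freq=1000000):
    # halve the clock per the period() docstring (step line, not direction)
    return (clock_freq / 2.0) / note_frequency(noteid)

print(note_frequency(60))  # middle C: ~261.63 Hz
print(note_period(60))     # ~1911 clock cycles at 1 MHz
```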
TomRobson19/fourthYearCode
https://github.com/TomRobson19/fourthYearCode
f7f6066c15f3cc13c9cd8f6a2200f18c6f540b66
603ef9be8bf5dce572d555725d7f64a97cb0d781
9ab91bc64782ae0ebc8e31d38f90cd55c49d0d32
refs/heads/master
2021-03-24T11:08:30.756532
2018-04-24T14:46:01
2018-04-24T14:46:01
100,200,430
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7102774381637573, "alphanum_fraction": 0.7490542531013489, "avg_line_length": 24.7967472076416, "blob_id": "15460c533c515c84ed50d831921b4cae6b60e1ae", "content_id": "2a18a5a7fc9c897419ea72b61520d2e4a48d1c5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3172, "license_type": "no_license", "max_line_length": 81, "num_lines": 123, "path": "/Visualisation/hzwr87/main.py", "repo_name": "TomRobson19/fourthYearCode", "src_encoding": "UTF-8", "text": "\"\"\"\npython main.py 256 256 113 head/*\n\npython main.py 256 256 109 brain/*\n\npython main.py 512 512 360 bunny/*\n\"\"\"\nimport sys\nimport os\nimport vtk\nimport argparse\nimport re\n\nparser = argparse.ArgumentParser(description=\"Generate an isosurface...\")\nparser.add_argument('xPixelsPerImage',type=int)\nparser.add_argument('yPixelsPerImage',type=int)\nparser.add_argument('noOfImages',type=int)\nparser.add_argument('imageFiles',type=str,nargs='+')\nargs = parser.parse_args()\nxPixels = args.xPixelsPerImage\nyPixels = args.yPixelsPerImage\nnoImages = args.noOfImages\nimages = args.imageFiles\n\n\n\ndef natural_sort(l): \n convert = lambda text: int(text) if text.isdigit() else text.lower() \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n return sorted(l, key = alphanum_key)\n\n\nimages = natural_sort(images)\n\n\nvtkFiles = vtk.vtkStringArray()\nfor f in images:\n\tvtkFiles.InsertNextValue(f)\n\nreaderVolume = vtk.vtkImageReader()\n# readerVolume.SetDataScalarType( vtk.VTK_UNSIGNED_CHAR ) #8-bit\nreaderVolume.SetDataScalarType( vtk.VTK_UNSIGNED_SHORT ) #16-bit\nreaderVolume.SetFileDimensionality( 2 )\nreaderVolume.SetDataExtent ( 0, xPixels-1, 0, yPixels-1, 0, noImages-1)\n\nreaderVolume.SetDataSpacing( 1,1,1 )\n#readerVolume.SetDataSpacing( 1,1,1 ) # for bunny\n\nreaderVolume.SetNumberOfScalarComponents( 1 )\nreaderVolume.SetDataByteOrderToBigEndian()\nreaderVolume.SetFileNames( vtkFiles )\n\nreaderVolume.Update()\nmaximumValue = readerVolume.GetOutput().GetScalarRange()[1]\nprint(maximumValue)\n\n# Create renderer\nren = vtk.vtkRenderer()\nren.SetBackground( 0, 0, 0 ) \n#ren.SetBackground( 1,0,1 ) \n\n# Create a window for the renderer of size 250x250\nrenWin = vtk.vtkRenderWindow()\nrenWin.AddRenderer(ren)\nrenWin.SetSize(1000, 1000)\n\n# Set an user interface interactor for the render window\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\n\n#10%\nthresholds = [0.1]\ncolours = [(1,1,1)]\n\n#HEAD\n# thresholds = [0.35, 0.25]\n# colours = [(1,1,1), (0,1,0)]\n\n#BRAIN\n# thresholds = [0.3]\n# colours = [(1,1,1)]\n\n#BRAIN 2-PASS\n# thresholds = [0.48, 0.45]\n# colours = [(1,1,1), (0,1,0)]\n\n#BUNNY\n# thresholds = [0.03]\n# colours = [(1,1,1)]\n\n\nfor i in range(len(thresholds)):\n\t# Generate an isosurface\n\tcontours = vtk.vtkMarchingCubes()\n\tcontours.SetInputConnection( readerVolume.GetOutputPort() )\n\tcontours.ComputeNormalsOn()\n\tcontours.ComputeGradientsOn()\n\tcontours.SetValue( 0, int(thresholds[i]*maximumValue )) # isovalue\n\n\tconfilter = vtk.vtkPolyDataConnectivityFilter()\n\tconfilter.SetInputConnection(contours.GetOutputPort())\n\tconfilter.SetExtractionModeToLargestRegion()\n\n\t# Take the isosurface data and create geometry\n\tgeoBoneMapper = vtk.vtkPolyDataMapper()\n\tgeoBoneMapper.SetInputConnection( confilter.GetOutputPort() )\n\tgeoBoneMapper.ScalarVisibilityOff()\n\n\tactorBone = vtk.vtkLODActor()\n\tactorBone.SetNumberOfCloudPoints( 1000000 )\n\tactorBone.SetMapper( 
geoBoneMapper )\n\tactorBone.GetProperty().SetColor(colours[i])\n\tif i == 0:\n\t\tactorBone.GetProperty().SetOpacity( 1.0 )\n\telse:\n\t\tactorBone.GetProperty().SetOpacity( 0.5 )\n\n\tren.AddActor(actorBone)\n\n# Start the initialization and rendering\niren.Initialize()\nrenWin.Render()\niren.Start()" }, { "alpha_fraction": 0.8253968358039856, "alphanum_fraction": 0.8253968358039856, "avg_line_length": 30.5, "blob_id": "abeca1cb735bbef6ec21246ed68631b9fe3d4dda", "content_id": "f6370e3f08000337ebe86b91919c6b101c4be7fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 63, "license_type": "no_license", "max_line_length": 45, "num_lines": 2, "path": "/README.md", "repo_name": "TomRobson19/fourthYearCode", "src_encoding": "UTF-8", "text": "# fourthYearCode\nCode for Durham Computer Science Fourth Year\n" }, { "alpha_fraction": 0.5723595023155212, "alphanum_fraction": 0.597520112991333, "avg_line_length": 31.69230842590332, "blob_id": "1ab8c44486a7f442296beff902bbcd9a5cf266a2", "content_id": "f5954837c1d993104f2439451b324579a4e11123", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11049, "license_type": "no_license", "max_line_length": 205, "num_lines": 338, "path": "/Vision/main.py", "repo_name": "TomRobson19/fourthYearCode", "src_encoding": "UTF-8", "text": "######################################################################\n'''\nThis implementation requires matplotlib, geopy and gmplot, all of which are available through pip. \"pip install geopy\" etc. \nMay also require python3-tk, available from apt/yum. \"apt install python3-tk\"\n\nThe code can then be run using \"python3 main.py\"\n\nThe matplotlib plot will be displayed first; when this window is closed, the google maps plot will be available in the same directory as \"mymap.html\"\nThe correctedT lines near the end of this file control whether correction to ground truth is used, and what the distance threshold is\n\n'''\n######################################################################\nimport cv2\nimport os\nimport numpy as np\nimport csv\nimport math\nfrom matplotlib import pyplot as plt\nimport geopy\nimport geopy.distance\nimport gmplot\n\n#####################################################################\n\nmaster_path_to_dataset = \"TTBB-durham-02-10-17-sub5\"\ndirectory_to_cycle = \"left-images\" # edit this for left or right image set\n\n# get scale from ground truth GPS\ndef getScale(allGPS,index):\n\n    previousImage = allGPS[index-1]\n    currentImage = allGPS[index]\n\n    previousLat = previousImage[0]\n    previousLon = previousImage[1]\n    currentLat = currentImage[0]\n    currentLon = currentImage[1]\n\n    dlon = currentLon - previousLon\n    dlat = currentLat - previousLat\n\n    distance = math.sqrt(dlon**2 + dlat**2)\n\n    return distance\n\n#read in ground truth GPS\ndef originalGPS():\n    GPS = []\n    gpsFile = open(master_path_to_dataset+\"/GPS.csv\") \n    for i, line in enumerate(gpsFile):\n        if i != 0:\n            temp = line.split(\",\")\n            GPS.append([float(temp[1]), float(temp[2])])\n    gpsFile.close()\n    return GPS\n\n#convert GPS coordinates to XYZ translations\ndef GPSToXYZ():\n    GPSXYZ = originalGPS()\n\n    start = geopy.Point(GPSXYZ[0][0],GPSXYZ[0][1])\n\n    newGPSXYZ = []\n\n    for p in GPSXYZ:\n        newP = [0,0]\n        lat = geopy.Point(p[0],start.longitude)\n        lon = geopy.Point(start.latitude,p[1])\n\n        newP[0] = geopy.distance.vincenty(start,lat).meters\n\n        if(p[0] > start[0]):\n            newP[0]*=-1\n\n        newP[1] = geopy.distance.vincenty(start,lon).meters\n        if (p[1] < 
start[1]):\n newP[1]*=-1\n\n newGPSXYZ.append(newP)\n\n return newGPSXYZ\n\n#convert XYZ translations to GPS to be plotted on a map\ndef XYZtoGPS(allGPS):\n temp = originalGPS()\n start = geopy.Point(temp[0][0],temp[0][1])\n \n for p in allGPS:\n d = geopy.distance.VincentyDistance(meters = p[0])\n newP = d.destination(point=start, bearing = 180)\n\n d = geopy.distance.VincentyDistance(meters = p[1])\n newP = d.destination(point=newP, bearing = 90)\n\n p[0] = newP.latitude\n p[1] = newP.longitude\n\n return allGPS\n\n#place features in bins when FAST features is used\ndef featureBinning(kp):\n bin_size = 100\n features_per_bin = 50\n\n kp.sort(key=lambda x: x.response) \n\n bins_y = math.ceil(img.shape[0]/bin_size)\n bins_x = math.ceil(img.shape[1]/bin_size)\n\n number_of_bins = bins_x * bins_y\n\n temp_kp = [[] for _ in range(number_of_bins)]\n\n for i,p in enumerate(kp):\n bin_to_place = int(p.pt[0]//bin_size + bins_x*(p.pt[1]//bin_size))\n if len(temp_kp[bin_to_place]) < bin_size:\n if len(temp_kp[bin_to_place]) != 0:\n temp_kp[bin_to_place].append(kp[i])\n else:\n temp_kp[bin_to_place] = [kp[i]]\n\n kp = [item for sublist in temp_kp for item in sublist]\n return kp\n\n#plot results on MatPlotLib\ndef plotResults(allT,allGPS):\n allGPS = allGPS[:len(allT)]\n\n plt.figure(1)\n GPS, = plt.plot(*zip(*allGPS), color='red', marker='o', label='GPS')\n pyMVO, = plt.plot(*zip(*allT), color='blue', marker='o', label='py-MVO')\n plt.legend(handles=[pyMVO, GPS])\n # Set plot parameters and show it\n plt.axis('equal')\n plt.grid()\n plt.show()\n\n\n#plot results on google maps\ndef plotResultsOnMap(allT):\n GPS = originalGPS()\n T = XYZtoGPS(allT)\n\n originalLat = []\n originalLon = []\n myLat = []\n myLon = []\n\n GPS = GPS[:len(T)]\n \n for i in range(len(T)):\n originalLat.append(GPS[i][0])\n originalLon.append(GPS[i][1])\n myLat.append(T[i][0])\n myLon.append(T[i][1])\n\n gmap = gmplot.GoogleMapPlotter(54.767093,-1.570038, 16)\n\n gmap.plot(originalLat, originalLon, 'red', edge_width=10)\n gmap.plot(myLat, myLon, 'cornflowerblue', edge_width=10)\n\n gmap.draw(\"mymap.html\")\n\n#Returns a rotated list(function) by the provided angle - taken from https://github.com/Transportation-Inspection/visual_odometry/blob/master/src/Trajectory_Tools.py\ndef rotateFunct(pts_l, angle, degrees=False):\n if degrees == True:\n theta = math.radians(angle)\n else:\n theta = angle\n\n R = np.array([ [math.cos(theta), -math.sin(theta)],\n [math.sin(theta), math.cos(theta)] ])\n rot_pts = []\n for v in pts_l:\n v = np.array(v).transpose()\n v = R.dot(v)\n v = v.transpose()\n rot_pts.append(v)\n\n return rot_pts\n\n#correct odometry results to ground truth when it goes wrong by a specific distance threshold\ndef correctToGroundTruth(allT, allGPS,threshold):\n correctionThreshold = threshold\n\n allT = np.array(allT)\n allGPS = np.array(allGPS)\n\n startIndex = 0\n newAllT = []\n angle = 0\n correction = 0\n\n counter = 0\n\n for i,p in enumerate(allT):\n if i > 0 and i < 2893:\n distance = math.hypot(allGPS[i-1][0]-newAllT[i-1][0],allGPS[i-1][1]-newAllT[i-1][1])\n if distance > correctionThreshold:\n counter += 1\n #find correct position \n startIndex = i\n if(len(allT) > i+5):\n initialPoint = allT[i+5]-allT[i]\n else:\n initialPoint = allT[-1]-allT[i]\n angle = np.degrees(np.arctan2(initialPoint[0],initialPoint[1]))\n gpsInitialPoint = allGPS[i+5]-allGPS[i]\n gpsAngle = np.degrees(np.arctan2(gpsInitialPoint[0],gpsInitialPoint[1]))\n angle -= gpsAngle\n correction = allGPS[i] - p\n newT = p+correction\n 
initialPoint = allGPS[startIndex]\n            newT = rotateFunct([newT-initialPoint], np.radians(angle))[0]+initialPoint\n            newAllT.append(newT)\n\n    return newAllT, counter, len(allT)\n\n\n#####################################################################\n\n# full camera parameters - from camera calibration\n# supplied images are stereo rectified\n\ncamera_focal_length_px = 399.9745178222656; # focal length in pixels (fx, fy)\ncamera_focal_length_m = 4.8 / 1000; # focal length in metres (4.8 mm, f)\nstereo_camera_baseline_m = 0.2090607502; # camera baseline in metres (B)\ncamera_height_above_wheelbase_m = (1608.0 + 31.75 + 90) / 1000; # in metres (converted from mm)\n\noptical_image_centre_h = 262.0; # from calibration - cy\noptical_image_centre_w = 474.5; # from calibration - cx\n\nimage_height = 544;\nimage_width = 1024;\n\n#####################################################################\n\n# Parameters for lucas kanade optical flow\nlk_params = dict(winSize = (3, 3), #default is 21\n                 #maxLevel = 3,\n                 criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01))\n\nfull_path_directory = os.path.join(master_path_to_dataset, directory_to_cycle);\n\nprevious_kp = None\nprevious_img = None\n\nfirst_image = True\n\ncurrentR = []\ncurrentT = np.array([0,0,0],dtype=np.float64)\n\nallT = []\n\ndetector = cv2.FastFeatureDetector_create(threshold=50, nonmaxSuppression=True)\n\nallGPS = GPSToXYZ()\n\nminFlowFeatures = 500\n\nfor index, filename in enumerate(sorted(os.listdir(full_path_directory))):#[:100]):\n    full_path_filename = os.path.join(full_path_directory, filename)\n\n    img = cv2.imread(full_path_filename, cv2.IMREAD_COLOR)\n    img = img[0:340, 0:image_width]\n\n    if(first_image): #special case for first image using FAST\n        first_image = False\n        kp = detector.detect(img) \n        kp = featureBinning(kp)\n        img2 = cv2.drawKeypoints(img,kp,img)\n        kp = np.array([x.pt for x in kp], dtype=np.float32)\n        cv2.imshow(\"input image\", img2)\n    else: #use Optical flow\n        kp, st, err = cv2.calcOpticalFlowPyrLK(previous_img, img, previous_kp, None, **lk_params)\n        st = st.reshape(st.shape[0])\n\n        good_matches1 = (kp[st==1])\n        good_matches2 = (previous_kp[st==1])\n        \n        if len(good_matches1) > 5: #ensure there are sufficient matches to compute the essential matrix\n            essential_matrix,_ = cv2.findEssentialMat(good_matches1,good_matches2,focal=camera_focal_length_px,pp=(optical_image_centre_w,optical_image_centre_h),method=cv2.RANSAC,prob=0.999,threshold=1.0)\n            _,R,t,_ = cv2.recoverPose(essential_matrix,good_matches1,good_matches2,focal=camera_focal_length_px,pp=(optical_image_centre_w,optical_image_centre_h))\n\n            scale = getScale(allGPS,index)\n\n            if scale > 0.00001: #ensure there is movement present\n                isForwardDominant = 100*t[2] > t[0] #ensure the forward component is significant, then update currentR and currentT\n                if currentR == []:\n                    currentT = t*scale\n                    currentR = R\n                elif isForwardDominant: \n                    currentR = R.dot(currentR)\n                    currentT += scale*currentR.dot(t) \n                else:\n                    print(\"Dominant motion not forward - ignored\")\n            else:\n                print(\"Insufficient movement - assumed stationary\")\n\n            if len(good_matches1) < minFlowFeatures: #if insufficient features returned by flow, replace with FAST for the next frame\n                kp = detector.detect(img) \n                kp = featureBinning(kp)\n            \n            img2 = cv2.drawKeypoints(img,kp,img)\n            kp = np.array([x.pt for x in kp], dtype=np.float32)\n            cv2.imshow('input image',img2)\n        else:\n            cv2.imshow('input image',img)\n\n    key = cv2.waitKey(1) \n    if (key == ord('x')): \n        print(\"Keyboard exit requested : 
exiting now - bye!\")\n        break # exit\n\n    allT.append([currentT.item(0), currentT.item(1), currentT.item(2)])\n    previous_kp = kp\n    previous_img = img\n\n# remove y (vertical) component\nnewT = []\nfor i,t in enumerate(allT):\n    newT.append([t[0], t[2]])\n\n\n# comment and uncomment these lines to change between correcting to ground truth or not, and specify the threshold\n#correctedT = newT\n\ncorrectedT, counter, numberOfFrames = correctToGroundTruth(newT,allGPS,100)\nprint(\"Number of Corrections required was \"+ str(counter) + \" over the course of \" + str(numberOfFrames) + \" frames\")\n\n# plot results on matplotlib and google maps, then close\nplotResults(correctedT,allGPS)\nplotResultsOnMap(correctedT)\ncv2.destroyAllWindows()\n\n#####################################################################" }, { "alpha_fraction": 0.6477587223052979, "alphanum_fraction": 0.662285566329956, "avg_line_length": 26.591602325439453, "blob_id": "e7122a09e5149dd5aee6d2a4141d1993242a8b91", "content_id": "ebe7b53b0fbb9a7eedfa9a8186860b78e8084401", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7228, "license_type": "no_license", "max_line_length": 148, "num_lines": 262, "path": "/BigData/main.py", "repo_name": "TomRobson19/fourthYearCode", "src_encoding": "UTF-8", "text": "import numpy as np\nimport csv\nimport string\nimport time\n\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import *\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n\nfrom keras.callbacks import TensorBoard\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import to_categorical\nfrom keras.layers import Conv1D, MaxPooling1D, Embedding, CuDNNLSTM, LSTM, SimpleRNN, RNN, Dense, Input, GlobalMaxPooling1D, Dropout, Flatten, Layer\nfrom keras.models import Model\n\nimport keras.backend as K\n\noutputFolder = \"output\"\nts = time.time()\noutputFolder = outputFolder+\"/\"+str(ts).split(\".\")[0]\ntbCallBack = TensorBoard(log_dir=outputFolder+'/log', histogram_freq=0, write_graph=True, write_images=True)\n\n\nclass MinimalRNNCell(Layer):\n\n    def __init__(self, units, **kwargs):\n        self.units = units\n        self.state_size = units\n        super(MinimalRNNCell, self).__init__(**kwargs)\n\n    def build(self, input_shape):\n        self.kernel = self.add_weight(shape=(input_shape[-1], self.units),\n                                      initializer='uniform',\n                                      name='kernel')\n        self.recurrent_kernel = self.add_weight(\n            shape=(self.units, self.units),\n            initializer='uniform',\n            name='recurrent_kernel')\n        self.built = True\n\n    def call(self, inputs, states):\n        prev_output = states[0]\n        h = K.dot(inputs, self.kernel)\n        output = h + K.dot(prev_output, self.recurrent_kernel)\n        return output, [output]\n\n\ndef readData():\n\tfilename = \"news_ds.csv\"\n\n\tids = []\n\ttext = []\n\tlabels = []\n\n\twith open(filename) as file:\n\t\tcsvReader = csv.reader(file, delimiter=',', quotechar='\\\"') \n\t\tfirst = True\n\t\tfor item in csvReader:\n\t\t\tif first:\n\t\t\t\tfirst = False\n\t\t\telif len(item[1]) < 10:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tids.append(int(item[0]))\n\t\t\t\ttext.append(item[1])\n\t\t\t\tlabels.append(int(item[2]))\n\n\treturn ids, text, labels\n\n\n#empty articles are already skipped in readData; normalise the remaining text here\ndef cleanData(text):\n\tfor i in range(len(text)):\n\t\ttext[i] = text[i].lower()\n\t\ttext[i] = 
text[i].replace('\\n', ' ')\n\t\timport re\n\t\ttext[i] = re.sub(r'^https?:\\/\\/.*[\\r\\n]*', '', text[i])\n\t\ttext[i] = re.sub( '\\s+', ' ', text[i]).strip()\n\n\t\ttext[i] = re.sub(r'(\\S+)@(\\S+)', '', text[i])\n\t\ttext[i] = re.sub(r'(\\A|\\s)@(\\w+)', '', text[i])\n\t\ttext[i] = re.sub(r'(\\A|\\s)#(\\w+)', '', text[i])\n\t\ttext[i] = re.sub(r'([^\\s\\w]|_)+', '', text[i])\n\treturn text\n\ndef evaluatePrediction(prediction,true):\n\tprint(classification_report(true,prediction,digits=4, target_names=[\"Fake\",\"Real\"]))\n\tprint(\"Accuracy: \"+str(accuracy_score(true,prediction)))\n\ndef getWordVectors(text):\n\tnlp = spacy.load('en_core_web_lg')\n\n\tword2VecText = []\n\n\tfor article in text:\n\t\ttokens = nlp(article)\n\t\tword2VecArticle = []\n\n\t\tfor token in tokens:\n\t\t\tword2VecArticle.append(token.vector_norm)\n\n\t\tword2VecText.append(word2VecArticle)\n\n\treturn word2VecText\n\ndef main():\n\tprint(\"Reading Data\")\n\tids, text, labels = readData()\n\n\tprint(\"Cleaning Data\")\n\ttext = cleanData(text)\n\n\tx_train, x_test, y_train, y_test = train_test_split(text, labels, test_size=0.2, random_state=26)\n\n\tprint(\"Shallow Learning - tf-idf, 1-4 grams\")\n\n\tstart = time.time()\n\n\tprint(\"Extracting Features\")\n\n\t# print(\"Using tf\")\n\t# vect = CountVectorizer(\n\t# \tngram_range=(1,3),\n\t# \tmin_df=10,\n\t# \tanalyzer=\"word\")\n\n\tprint(\"Using tf-idf\")\n\tvect = TfidfVectorizer(\n\t\tngram_range=(1,4),\n\t\tmin_df=10,\n\t\tanalyzer=\"word\"\n\t\t)\n\n\tprint(\"Training MultinomialNaiveBayes\")\n\n\tvect = vect.fit(x_train)\n\tx_train_vec = vect.transform(x_train)\n\tx_test_vec = vect.transform(x_test)\n\n\tclassifier = MultinomialNB(alpha=0)\n\tclassifier.fit(x_train_vec, y_train)\n\tprediction = classifier.predict(x_test_vec)\n\ttrue = y_test\n\n\tend = time.time()\n\n\tprint(\"Evaluating\")\n\tevaluatePrediction(prediction,true)\n\tprint(\"Runtime: \"+str(end-start))\n\n\t####################################################################################\t\n\n\tprint(\"Deep Learning - LSTM, 128 units\")\n\tstart = time.time()\n\n\t#Ref: https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html\n\n\tprint(\"Tokenising text data\")\n\tMAX_NUM_WORDS = 20000\n\tMAX_SEQUENCE_LENGTH = 1000\n\tEMBEDDING_DIM = 300\n\tVALIDATION_SPLIT = 0.2\n\n\ttokenizer = Tokenizer(num_words=MAX_NUM_WORDS)\n\ttokenizer.fit_on_texts(text)\n\tsequences = tokenizer.texts_to_sequences(x_train)\n\tsequences_test = tokenizer.texts_to_sequences(x_test)\n\n\tword_index = tokenizer.word_index\n\tprint('Found %s unique tokens.' 
% len(word_index))\n\n\tdata = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)\n\n\ttest_data = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)\n\n\tlabels = np.asarray(y_train)\n\tprint('Shape of data tensor:', data.shape)\n\tprint('Shape of label tensor:', labels.shape)\n\n\t# split the data into a training set and a validation set\n\tindices = np.arange(data.shape[0])\n\tnp.random.shuffle(indices)\n\tdata = data[indices]\n\tlabels = labels[indices]\n\tnb_validation_samples = int(VALIDATION_SPLIT * data.shape[0])\n\n\tx_train_deep = data[:-nb_validation_samples]\n\ty_train_deep = labels[:-nb_validation_samples]\n\tx_val = data[-nb_validation_samples:]\n\ty_val = labels[-nb_validation_samples:]\n\n\tprint(\"Getting GloVe embeddings\")\n\tembeddings_index = {}\n\tf = open(\"glove.6B.300d.txt\")\n\tfor line in f:\n\t values = line.split()\n\t word = values[0]\n\t coefs = np.asarray(values[1:], dtype='float32')\n\t embeddings_index[word] = coefs\n\tf.close()\n\n\tprint('Found %s word vectors.' % len(embeddings_index))\n\n\tprint(\"Making embedding matrix\")\n\tembedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))\n\tfor word, i in word_index.items():\n\t embedding_vector = embeddings_index.get(word)\n\t if embedding_vector is not None:\n\t # words not found in embedding index will be all-zeros.\n\t embedding_matrix[i] = embedding_vector\n\n\tend = time.time()\n\textractionTime = end-start\n\n\tlayers = [\"LSTM\",\"RNN\"]\n\n\tfor i in layers:\n\t\tstart = time.time()\n\n\t\tprint(\"Using \"+i)\n\n\t\tembedding_layer = Embedding(len(word_index) + 1, EMBEDDING_DIM, weights=[embedding_matrix], input_length=MAX_SEQUENCE_LENGTH, trainable=False)\n\n\t\tsequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')\n\t\tembedded_sequences = embedding_layer(sequence_input)\n\n\t\tx = Conv1D(128, 5, activation='relu')(embedded_sequences)\n\t\tx = MaxPooling1D(5)(x)\n\t\tx = Conv1D(128, 5, activation='relu')(x)\n\t\tx = MaxPooling1D(5)(x)\n\n\t\tif i==\"LSTM\":\n\t\t\tx = LSTM(128)(x)\n\t\t\t#x = CuDNNLSTM(128)(x)\n\t\telse:\n\t\t\tcell = MinimalRNNCell(64)\n\t\t\tx = RNN(cell)(x)\n\t\t\n\n\t\tx = Dropout(0.25)(x)\n\t\tx = Dense(128)(x)\n\t\tpreds = Dense(1, activation='sigmoid')(x)\n\n\t\tmodel = Model(sequence_input, preds)\n\t\tmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])\n\n\t\tmodel.fit(x_train_deep, y_train_deep, validation_data=(x_val, y_val), epochs=20, batch_size=128, callbacks=[tbCallBack], verbose=2)\n\n\t\tprediction = K.eval(K.cast(K.greater(model.predict(test_data),0.5), \"float32\"))\n\n\t\tend = time.time()\n\n\t\tevaluatePrediction(prediction,y_test)\n\n\t\tprint(\"Runtime: \"+str((end-start)+extractionTime))\n\t\t\n\nif __name__ == '__main__':\n\tmain()" } ]
4
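The core of the Vision/main.py record above is the pose-chaining update applied to each cv2.recoverPose result (currentR = R.currentR, currentT += scale * currentR.t). A minimal numpy sketch of just that accumulation, with invented per-frame motions standing in for the OpenCV output:

```python
import numpy as np

def chain_poses(motions):
    # motions: iterable of (R, t, scale) per frame pair, as recoverPose plus a GPS scale would give
    R_cur, t_cur = np.eye(3), np.zeros((3, 1))
    for R, t, scale in motions:
        R_cur = R.dot(R_cur)                  # compose the relative rotation
        t_cur = t_cur + scale * R_cur.dot(t)  # step along the rotated heading
    return R_cur, t_cur

theta = np.radians(5)
turn = np.array([[np.cos(theta), 0, np.sin(theta)],
                 [0, 1, 0],
                 [-np.sin(theta), 0, np.cos(theta)]])
forward = np.array([[0.0], [0.0], [1.0]])
R, t = chain_poses([(np.eye(3), forward, 1.0), (turn, forward, 1.0)])
print(t.ravel())  # two ~1 m steps, the second bent 5 degrees
```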
bpenchas/video2speed
https://github.com/bpenchas/video2speed
be44ebdeaa7872567bb97984f8bbf3aeeaeedbb3
6aa2f297d98880aae6bbd26fa6688c7ee66d36e4
9adfffcc6487b1dd057c137e902f150cff7da8e7
refs/heads/master
2021-08-28T16:12:09.843512
2017-12-12T17:44:28
2017-12-12T17:44:28
113,232,505
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5295712947845459, "alphanum_fraction": 0.5795347690582275, "avg_line_length": 38.619834899902344, "blob_id": "89a653990d404201eab8e69828cc33dcc25b5ee4", "content_id": "7ad21d90d4a8fe26751d8821a6a247098782aa05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9587, "license_type": "no_license", "max_line_length": 123, "num_lines": 242, "path": "/tensorflow-vgg/vgg19.py", "repo_name": "bpenchas/video2speed", "src_encoding": "UTF-8", "text": "import os\nimport tensorflow as tf\n\nimport numpy as np\nimport time\nimport inspect\n\nVGG_MEAN = [103.939, 116.779, 123.68]\n\nclass BallerVgg:\n def __init__(self, vgg19_npy_path):\n # if vgg19_npy_path is None:\n # path = inspect.getfile(Vgg19)\n # path = os.path.abspath(os.path.join(path, os.pardir))\n # path = os.path.join(path, \"vgg19.npy\")\n # vgg19_npy_path = path\n # print(vgg19_npy_path)\n\n self.var_dict = {}\n self.trainable = True\n\n self.data_dict = np.load(vgg19_npy_path, encoding='latin1').item()\n print(\"npy file loaded\")\n\n def build(self, batch):\n vgg_vol = self.vgg_volume(batch) # batch_size x 28 x 28 x 256\n\n evens = tf.strided_slice(vgg_vol,(0,0,0,0), (batch.shape[0],28,28,256), (2,1,1,1))\n odds = tf.strided_slice(vgg_vol,(1,0,0,0), (batch.shape[0],28,28,256), (2,1,1,1))\n\n self.concat_vol = tf.concat([evens, odds], 3)\n\n # Make some conv layers, make some fc layers, that are variable/trainable.\n self.conv4_1 = self.conv_layer_trainable(self.concat_vol, 512, 512, \"baller_conv4_1\")\n self.conv4_2 = self.conv_layer_trainable(self.conv4_1, 512, 512, \"baller_conv4_2\")\n self.conv4_3 = self.conv_layer_trainable(self.conv4_2, 512, 512, \"baller_conv4_3\")\n self.conv4_4 = self.conv_layer_trainable(self.conv4_3, 512, 512, \"baller_conv4_4\")\n self.pool4 = self.max_pool(self.conv4_4, 'baller_pool4')\n\n self.conv5_1 = self.conv_layer_trainable(self.pool4, 512, 512, \"baller_conv5_1\")\n self.conv5_2 = self.conv_layer_trainable(self.conv5_1, 512, 512, \"baller_conv5_2\")\n self.conv5_3 = self.conv_layer_trainable(self.conv5_2, 512, 512, \"baller_conv5_3\")\n self.conv5_4 = self.conv_layer_trainable(self.conv5_3, 512, 512, \"baller_conv5_4\")\n self.pool5 = self.max_pool(self.conv5_4, 'baller_pool5')\n\n self.fc6 = self.fc_layer_trainable(self.pool5, 25088, 4096, \"baller_fc6\") # 25088 = ((224 // (2 ** 5)) ** 2) * 512\n self.relu6 = tf.nn.relu(self.fc6)\n # if train_mode is not None:\n # self.relu6 = tf.cond(train_mode, lambda: tf.nn.dropout(self.relu6, self.dropout), lambda: self.relu6)\n # elif self.trainable:\n # self.relu6 = tf.nn.dropout(self.relu6, self.dropout)\n\n self.fc7 = self.fc_layer_trainable(self.relu6, 4096, 4096, \"baller_fc7\")\n self.relu7 = tf.nn.relu(self.fc7)\n # if train_mode is not None:\n # self.relu7 = tf.cond(train_mode, lambda: tf.nn.dropout(self.relu7, self.dropout), lambda: self.relu7)\n # elif self.trainable:\n # self.relu7 = tf.nn.dropout(self.relu7, self.dropout)\n\n self.fc8 = self.fc_layer_trainable(self.relu7, 4096, 20, \"baller_fc8\")\n\n self.prob = tf.nn.softmax(self.fc8, name=\"baller_prob\")\n\n def vgg_volume(self, rgb):\n \"\"\"\n load variable from npy to build the VGG\n\n :param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]\n \"\"\"\n\n start_time = time.time()\n print(\"build model started\")\n rgb_scaled = rgb * 255.0\n\n # Convert RGB to BGR\n red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)\n assert red.get_shape().as_list()[1:] == [224, 224, 1]\n 
assert green.get_shape().as_list()[1:] == [224, 224, 1]\n assert blue.get_shape().as_list()[1:] == [224, 224, 1]\n bgr = tf.concat(axis=3, values=[\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n ])\n assert bgr.get_shape().as_list()[1:] == [224, 224, 3]\n\n self.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\n self.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\n self.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\n self.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\n self.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\n self.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\n self.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\n self.conv3_4 = self.conv_layer(self.conv3_3, \"conv3_4\")\n self.pool3 = self.max_pool(self.conv3_4, 'pool3')\n # 28 x 28 x 256\n\n return self.pool3\n\n # self.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\n # self.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\n # self.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\n # self.conv4_4 = self.conv_layer(self.conv4_3, \"conv4_4\")\n # self.pool4 = self.max_pool(self.conv4_4, 'pool4')\n\n # self.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\n # self.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\n # self.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\n # self.conv5_4 = self.conv_layer(self.conv5_3, \"conv5_4\")\n # self.pool5 = self.max_pool(self.conv5_4, 'pool5')\n\n # self.fc6 = self.fc_layer(self.pool5, \"fc6\")\n # assert self.fc6.get_shape().as_list()[1:] == [4096]\n # self.relu6 = tf.nn.relu(self.fc6)\n\n # self.fc7 = self.fc_layer(self.relu6, \"fc7\")\n # self.relu7 = tf.nn.relu(self.fc7)\n\n # self.fc8 = self.fc_layer(self.relu7, \"fc8\")\n\n # self.prob = tf.nn.softmax(self.fc8, name=\"prob\")\n\n # self.data_dict = None\n # print((\"build model finished: %ds\" % (time.time() - start_time)))\n\n def avg_pool(self, bottom, name):\n return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)\n\n def max_pool(self, bottom, name):\n return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)\n\n def conv_layer(self, bottom, name):\n with tf.variable_scope(name):\n filt = self.get_conv_filter(name)\n\n conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')\n\n conv_biases = self.get_bias(name)\n bias = tf.nn.bias_add(conv, conv_biases)\n\n relu = tf.nn.relu(bias)\n return relu\n \n def conv_layer_trainable(self, bottom, in_channels, out_channels, name):\n with tf.variable_scope(name):\n filt, conv_biases = self.get_conv_var(3, in_channels, out_channels, name)\n\n conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')\n bias = tf.nn.bias_add(conv, conv_biases)\n relu = tf.nn.relu(bias)\n\n return relu\n\n def fc_layer(self, bottom, name):\n with tf.variable_scope(name):\n shape = bottom.get_shape().as_list()\n dim = 1\n for d in shape[1:]:\n dim *= d\n x = tf.reshape(bottom, [-1, dim])\n\n weights = self.get_fc_weight(name)\n biases = self.get_bias(name)\n\n # Fully connected layer. 
Note that the '+' operation automatically\n # broadcasts the biases.\n fc = tf.nn.bias_add(tf.matmul(x, weights), biases)\n\n return fc\n \n def fc_layer_trainable(self, bottom, in_size, out_size, name):\n with tf.variable_scope(name):\n weights, biases = self.get_fc_var(in_size, out_size, name)\n\n x = tf.reshape(bottom, [-1, in_size])\n fc = tf.nn.bias_add(tf.matmul(x, weights), biases)\n\n return fc\n\n def get_conv_filter(self, name):\n return tf.constant(self.data_dict[name][0], name=\"filter\")\n\n def get_bias(self, name):\n return tf.constant(self.data_dict[name][1], name=\"biases\")\n\n def get_fc_weight(self, name):\n return tf.constant(self.data_dict[name][0], name=\"weights\")\n\n def get_conv_var(self, filter_size, in_channels, out_channels, name):\n initial_value = tf.truncated_normal([filter_size, filter_size, in_channels, out_channels], 0.0, 0.001)\n filters = self.get_var(initial_value, name, 0, name + \"_filters\")\n\n initial_value = tf.truncated_normal([out_channels], .0, .001)\n biases = self.get_var(initial_value, name, 1, name + \"_biases\")\n\n return filters, biases\n\n def get_fc_var(self, in_size, out_size, name):\n initial_value = tf.truncated_normal([in_size, out_size], 0.0, 0.001)\n weights = self.get_var(initial_value, name, 0, name + \"_weights\")\n\n initial_value = tf.truncated_normal([out_size], .0, .001)\n biases = self.get_var(initial_value, name, 1, name + \"_biases\")\n\n return weights, biases\n\n def save_npy(self, sess, npy_path=\"./vgg19-save.npy\"):\n assert isinstance(sess, tf.Session)\n data_dict = {}\n\n for (name, idx), var in list(self.var_dict.items()):\n var_out = sess.run(var)\n if name not in data_dict:\n data_dict[name] = {}\n data_dict[name][idx] = var_out\n\n np.save(npy_path, data_dict)\n print((\"file saved\", npy_path))\n return npy_path\n\n def get_var(self, initial_value, name, idx, var_name):\n # TODO add new_dict lookup here for learned parameters\n if self.data_dict is not None and name in self.data_dict:\n value = self.data_dict[name][idx]\n else:\n value = initial_value\n\n if self.trainable:\n var = tf.Variable(value, name=var_name)\n else:\n var = tf.constant(value, dtype=tf.float32, name=var_name)\n\n self.var_dict[(name, idx)] = var\n\n # print(var_name, var.get_shape().as_list())\n assert var.get_shape() == initial_value.get_shape()\n\n return var" }, { "alpha_fraction": 0.7604790329933167, "alphanum_fraction": 0.7784430980682373, "avg_line_length": 83, "blob_id": "e1c6c0cf2236c1c3a9ba4ec8655ae3952bb0f99b", "content_id": "219a06353b448b3b0b9e46cab0c4f89c71133e63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 167, "license_type": "no_license", "max_line_length": 155, "num_lines": 2, "path": "/fh", "repo_name": "bpenchas/video2speed", "src_encoding": "UTF-8", "text": "#!/bin/bash\nfloyd run --gpu --data bpenchas/datasets/train/3:train --data bpenchas/datasets/vgg19-weights:vgg-weights --data bpenchas/datasets/test:test --mode jupyter" } ]
2
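In the vgg19.py record above, build() pairs consecutive frames with two strided slices and a channel-axis concatenation before the trainable layers. The same indexing in plain numpy, on toy shapes rather than the real 28x28x256 VGG volume:

```python
import numpy as np

batch = np.random.rand(8, 4, 4, 3).astype(np.float32)  # 8 frames = 4 consecutive pairs
evens = batch[0::2]  # frames 0, 2, 4, 6: first frame of each pair
odds = batch[1::2]   # frames 1, 3, 5, 7: second frame of each pair
pairs = np.concatenate([evens, odds], axis=3)
print(pairs.shape)   # (4, 4, 4, 6): half the batch, doubled channels
```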
datacuriosity/Python_Level_1
https://github.com/datacuriosity/Python_Level_1
a4364e4e8c4adb0123c2cdffb5c2c2e5fea8bd43
25f784a762dcda43e1e7fe83842170ade6f54920
41078e6a38258b0b42290f7f1cd9496fe162d813
refs/heads/main
2023-08-28T03:58:14.588240
2021-10-25T14:52:44
2021-10-25T14:52:44
412,315,445
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.5089605450630188, "alphanum_fraction": 0.5232974886894226, "avg_line_length": 16.5, "blob_id": "cfdf970abdb30a0c0f1af5b375d49e161bfbe187", "content_id": "dc52919fb791ab3df7fdef3d9f93393e2d62b13e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 279, "license_type": "no_license", "max_line_length": 27, "num_lines": 16, "path": "/Live_lectures/helper.py", "repo_name": "datacuriosity/Python_Level_1", "src_encoding": "UTF-8", "text": "def maximum(li):\n largest = li[0]\n for item in li[1:]:\n if largest < item:\n largest = item\n\n return largest\n\n\ndef minimum(li):\n smallest = li[0]\n for item in li[1:]:\n if smallest > item:\n smallest = item\n \n return smallest" } ]
1
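For completeness, the intended use of the Live_lectures/helper.py record above (assuming helper.py sits on the import path):

```python
from helper import maximum, minimum

scores = [3, 9, 1, 7]
print(maximum(scores), minimum(scores))  # 9 1
```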
sahusaurabh65/olympic_project_new
https://github.com/sahusaurabh65/olympic_project_new
80a76573069866c94ee8832a8d920ac6526f5b87
39139a4c931e37a917dc48f0f936d0e672936489
5f3871968f8b9a823e617d75a652a0458b51ed54
refs/heads/master
2020-03-25T06:30:42.244764
2018-08-07T21:13:41
2018-08-07T21:13:41
143,506,623
0
0
null
2018-08-04T07:13:18
2018-08-04T07:08:43
2018-08-04T07:08:41
null
[ { "alpha_fraction": 0.673252284526825, "alphanum_fraction": 0.7036474347114563, "avg_line_length": 31.75, "blob_id": "72489b239c0b3cd5b60559abc4f997e1b1e9abf8", "content_id": "ccaa1e3cb02726357d0b875896745dbb96f614b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 658, "license_type": "no_license", "max_line_length": 116, "num_lines": 20, "path": "/q03_better_event/build.py", "repo_name": "sahusaurabh65/olympic_project_new", "src_encoding": "UTF-8", "text": "# %load q03_better_event/build.py\n#default imports\nfrom greyatomlib.olympics_project_new.q02_country_operations.build import q02_country_operations, q01_rename_columns\nimport numpy as np\nimport pandas as pd\n\n#Previous function\npath = './data/olympics.csv'\nOlympicsDF=q01_rename_columns(path) \nOlympicsDF=q02_country_operations(OlympicsDF)\n\ndef q03_better_event(q02_country_operations):\n df=OlympicsDF\n df['BetterEvent']=np.where((df['Total_Summer']>df['Total_Winter']),'Summer',\n np.where(df['Total_Summer']<df['Total_Winter'],'Winter','Both'))\n return df\n \n \nc = q03_better_event(q02_country_operations)\nc\n\n\n\n" }, { "alpha_fraction": 0.5499181747436523, "alphanum_fraction": 0.5990179777145386, "avg_line_length": 32.77777862548828, "blob_id": "6fbb7e87b3ad5668ca6c2e65f7f28e56cda7240e", "content_id": "506f6599f70077487cd10ddaa9906e9f11decadc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 611, "license_type": "no_license", "max_line_length": 96, "num_lines": 18, "path": "/q01_rename_columns/build.py", "repo_name": "sahusaurabh65/olympic_project_new", "src_encoding": "UTF-8", "text": "# %load q01_rename_columns/build.py\n# default imports\nimport pandas as pd\n\npath='./data/olympics.csv'\n\ndef q01_rename_columns(path):\n df=pd.read_csv(path,header=0)\n #b.Skips first row.\n dx=df[1:]\n dx.rename(columns = {'0':'Country', '1': '# Summer', '2':'Gold_Summer','3': 'Silver_Summer',\n '4':'Bronze_Summer', '5':'Total_Summer', '6':'# Winter', '7':'Gold_Winter',\n '8':'Silver_Winter', '9':'Bronze_Winter', '10':'Total_Winter', '11':'# Games',\n '12':'Gold_Total','13': 'Silver_Total','14' :'Bronze_Total','15' :'Total'},inplace=True)\n\n return dx\n\nq01_rename_columns(path)\n\n\n\n" }, { "alpha_fraction": 0.720098614692688, "alphanum_fraction": 0.7509247660636902, "avg_line_length": 31.280000686645508, "blob_id": "e5a627db3ff7baa1592b5e8c5bc63be70567ab87", "content_id": "8aca2295732a3425326d097f5d7d65b0c31c3880", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 811, "license_type": "no_license", "max_line_length": 116, "num_lines": 25, "path": "/q07_unusual_performances/build.py", "repo_name": "sahusaurabh65/olympic_project_new", "src_encoding": "UTF-8", "text": "# %load q07_unusual_performances/build.py\n# default imports\n\nfrom greyatomlib.olympics_project_new.q02_country_operations.build import q02_country_operations, q01_rename_columns\nimport pandas as pd\n\npath = './data/olympics.csv'\nOlympicsDF=q01_rename_columns(path) \nOlympicsDF=q02_country_operations(OlympicsDF)\nimport pandas as pd\n\nlow=0.05\nhigh=0.95\n\nquantile_df = OlympicsDF['Total'].quantile([low, high])\n\ndef q07_unusual_performances(df, lower_quantile, upper_quantile):\n df.drop(df.tail(1).index,inplace=True)\n\n good_countries = pd.DataFrame(df[df['Total']>upper_quantile])\n bad_countries = pd.DataFrame(df[df['Total']<=lower_quantile])\n\n return bad_countries['Country_Name'], 
good_countries['Country_Name']\n\nq07_unusual_performances(OlympicsDF,quantile_df.iloc[0], quantile_df.iloc[1])\n\n\n\n\n" }, { "alpha_fraction": 0.6413255333900452, "alphanum_fraction": 0.6764132380485535, "avg_line_length": 31.935483932495117, "blob_id": "cd2a40a68a2c6c7ac263252c023eb7a36dd3a759", "content_id": "02bb0ccd558b68feaf1c2bb9a55c486be22a8db4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1026, "license_type": "no_license", "max_line_length": 88, "num_lines": 31, "path": "/q02_country_operations/build.py", "repo_name": "sahusaurabh65/olympic_project_new", "src_encoding": "UTF-8", "text": "# %load q02_country_operations/build.py\n# default imports\nfrom greyatomlib.olympics_project_new.q01_rename_columns.build import q01_rename_columns\n#Previous Functions\nimport pandas as pd\nimport re\nre.compile('<title>(.*)</title>')\npath = './data/olympics.csv'\nOlympicsDF=q01_rename_columns(path) \n\n# def q02_country_operations(df):\n# df['Country Name']=df['Country'][:] \n# df['Country name']=(df['Country Name'].str.split('(',1).str)[0].str.strip\n \n# return df\n# # q02_country_operations(OlympicsDF).iloc[100,16]\n# q02_country_operations(OlympicsDF).iloc[100,16]\n# #print(OlympicsDF.iloc[100,16])\ndef q02_country_operations(OlympicsDF):\n country_name = []\n for country in OlympicsDF['Country']:\n position = re.search('\\(',country)\n if position!=None:\n country_name.append(country[:position.end()-2])\n else:\n country_name.append(country)\n OlympicsDF['Country Name'] = pd.Series(country_name)\n return OlympicsDF\n\n\nq02_country_operations(OlympicsDF)\n\n\n\n\n\n" }, { "alpha_fraction": 0.670127809047699, "alphanum_fraction": 0.7212460041046143, "avg_line_length": 42.10344696044922, "blob_id": "ad467806dfa0adf9359da833101d4d41ae801fc0", "content_id": "312f69f239ba11d2c8ba63bba443a7a9d3779071", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1252, "license_type": "no_license", "max_line_length": 144, "num_lines": 29, "path": "/q05_top_10_plotting/build.py", "repo_name": "sahusaurabh65/olympic_project_new", "src_encoding": "UTF-8", "text": "# %load q05_top_10_plotting/build.py\n# default imports\nimport matplotlib.pyplot as plt\nfrom greyatomlib.olympics_project_new.q04_find_top_10.build import q04_find_top_10, q03_better_event, q02_country_operations, q01_rename_columns\nplt.switch_backend('agg')\npath = './data/olympics.csv'\nOlympicsDF=q01_rename_columns(path) \nOlympicsDF=q02_country_operations(OlympicsDF)\nOlympicsDF=q03_better_event(OlympicsDF) \nTop10Summer,Top10Winter, Top10, Common =q04_find_top_10(OlympicsDF,'Total_Summer', 'Total_Winter','Total')\n\ndef q05_top_10_plotting(df, Top10Summer, Top10Winter, Top10):\n df = df.iloc[:len(df)-1,:]\n Total_Summer_score = df.sort_values('Total_Summer',ascending=False)[['Country_Name', 'Total_Summer']][0:10]\n plt.bar(Total_Summer_score['Country_Name'], Total_Summer_score['Total_Summer'])\n \n Total_Winter_score = df.sort_values('Total_Winter',ascending=False)[['Country_Name', 'Total_Winter']][0:10]\n plt.bar(Total_Winter_score['Country_Name'], Total_Winter_score['Total_Winter'])\n \n Total_score = df.sort_values('Total',ascending=False)[['Country_Name', 'Total']][0:10]\n plt.bar(Total_score['Country_Name'], Total_score['Total'])\n\n \n\n\n\n\n\nq05_top_10_plotting(OlympicsDF, Top10Summer, Top10Winter, Top10)\n\n\n" }, { "alpha_fraction": 0.6808510422706604, "alphanum_fraction": 0.7224371433258057, "avg_line_length": 
45.90909194946289, "blob_id": "61c104dafd0fa3155fb21a2e71076eb52aec0c33", "content_id": "66b4ea47b19bea7cd7482daf435ee6371536a3b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1034, "license_type": "no_license", "max_line_length": 127, "num_lines": 22, "path": "/q04_find_top_10/build.py", "repo_name": "sahusaurabh65/olympic_project_new", "src_encoding": "UTF-8", "text": "# %load q04_find_top_10/build.py\n# default imports\nfrom greyatomlib.olympics_project_new.q03_better_event.build import q03_better_event,q02_country_operations, q01_rename_columns\nimport pandas as pd\nimport numpy as np\npath = './data/olympics.csv'\nOlympicsDF=q01_rename_columns(path) \nOlympicsDF=q02_country_operations(OlympicsDF)\nOlympicsDF=q03_better_event(OlympicsDF) \n\ndef q04_find_top_10(df, Total_Summer, Total_Winter, Total):\n #using only integer value by removing title i.e. len-1 \n df = df.iloc[:len(df)-1,:]\n Winter = list(df.sort_values('Total_Winter',ascending=False)['Country_Name'][0:10])\n Summer = list(df.sort_values('Total_Summer',ascending=False)['Country_Name'][0:10])\n sums = list(df.sort_values('Total',ascending=False)['Country_Name'][0:10])\n similar_data = set(Summer) & set(Winter) & set(sums)\n \n return (Summer, Winter,sums, similar_data)\n\nq04_find_top_10(OlympicsDF, OlympicsDF['Total_Summer'], OlympicsDF['Total_Winter'], OlympicsDF['Total'])\n#q04_find_top_10(q03_better_even)\n\n\n" }, { "alpha_fraction": 0.6403433680534363, "alphanum_fraction": 0.6892703771591187, "avg_line_length": 45.52000045776367, "blob_id": "e4bade0e3a7cdc901edd941aa2994fb6c0ba1b27", "content_id": "a00e02c0b76de84515e372f3943e2593fa8e8210", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1165, "license_type": "no_license", "max_line_length": 144, "num_lines": 25, "path": "/q06_golden_winner/build.py", "repo_name": "sahusaurabh65/olympic_project_new", "src_encoding": "UTF-8", "text": "# %load q06_golden_winner/build.py\n# default imports\nfrom greyatomlib.olympics_project_new.q04_find_top_10.build import q04_find_top_10, q03_better_event, q02_country_operations, q01_rename_columns\npath = './data/olympics.csv'\nOlympicsDF=q01_rename_columns(path) \nOlympicsDF=q02_country_operations(OlympicsDF)\nOlympicsDF=q03_better_event(OlympicsDF) \nTop10Summer,Top10Winter, Top10, Common =q04_find_top_10(OlympicsDF,'Total_Summer', 'Total_Winter','Total')\n\ndef q06_golden_winner(df,Top10Summer,Top10Winter, Top10):\n s=df[df['Country_Name'].isin(Top10Summer)]\n s['Gold_Ratio']=s['Gold_Summer']/s['Total_Summer']\n s_ratio=list(s[s['Gold_Ratio']==s['Gold_Ratio'].max()]['Country_Name'])[0]\n \n w=df[df['Country_Name'].isin(Top10Winter)]\n w['Gold_Ratio']=w['Gold_Winter']/w['Total_Winter']\n w_ratio=list(w[w['Gold_Ratio']==w['Gold_Ratio'].max()]['Country_Name'])[0]\n \n t=df[df['Country_Name'].isin(Top10)]\n t['Gold_Ratio']=s['Gold_Total']/s['Total']\n t_ratio=list(t[t['Gold_Ratio']==t['Gold_Ratio'].max()]['Country_Name'])[0]\n\n return s_ratio,w_ratio,t_ratio\n\nq06_golden_winner(OlympicsDF, Top10Summer, Top10Winter, Top10)\n\n\n" } ]
7
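The quantile split in the q07_unusual_performances record above generalises to any medal column; a self-contained pandas sketch on invented data (only the column names are borrowed from the project):

```python
import pandas as pd

df = pd.DataFrame({'Country_Name': ['A', 'B', 'C', 'D', 'E'],
                   'Total': [1, 2, 10, 50, 300]})
low, high = df['Total'].quantile([0.05, 0.95])
bad = df.loc[df['Total'] <= low, 'Country_Name']   # at or below the 5th percentile
good = df.loc[df['Total'] > high, 'Country_Name']  # above the 95th percentile
print(list(bad), list(good))  # ['A'] ['E']
```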